diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index b3d91855bb0..f534d25ff3e 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -384,6 +384,11 @@ impl LifetimeTracker { for buffer in self.ready_to_map.drain(..) { match buffer.map(snatch_guard) { Some(cb) => pending_callbacks.push(cb), + // `None` means the mapping was cancelled between when this + // buffer was added to `ready_to_map` and now — typically + // because `unmap` was called on it. `unmap_inner` already fired + // the callback with `MapAborted` in that case, so there is + // nothing left to do here. None => continue, } } diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 602a1b107a0..5c31445e21c 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -281,6 +281,12 @@ impl Drop for Queue { self.maintain(last_successful_submission_index, &snatch_guard); drop(snatch_guard); + // `wait_for_idle` above ensures all in-flight submissions have + // completed, so `maintain` should have drained every active + // submission. A false result here indicates that something went + // wrong — most likely the device was lost and `wait_for_idle` + // did not surface that as an error, leaving submissions + // unprocessed and their `mapped` buffer callbacks unfired. assert!(queue_empty); let closures = crate::device::UserClosures { diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index aa672dc2661..84f5242b5ae 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -438,7 +438,16 @@ pub type BufferAccessResult = Result<(), BufferAccessError>; pub(crate) struct BufferPendingMapping { pub(crate) range: Range, pub(crate) op: BufferMapOperation, - // hold the parent alive while the mapping is active + + /// A strong reference to the parent buffer, to hold it alive + /// while the mapping is pending. 
+ /// + /// This creates a temporary reference cycle: [`Buffer::map_state`] owns + /// this `BufferPendingMapping`, which in turn holds an `Arc<Buffer>` back + /// to the same buffer. The cycle is intentional — it keeps the buffer alive + /// while it sits in `LifetimeTracker::ready_to_map` with no other owner — + /// and is broken by [`Buffer::map`] when it moves the `BufferPendingMapping` + /// out of [`Buffer::map_state`]. pub(crate) _parent_buffer: Arc<Buffer>, } @@ -716,6 +725,12 @@ impl Buffer { } if let Some(queue) = device.get_queue().as_ref() { + // Flush pending writes to this buffer before scheduling the map. + // + // Such writes get added to `queue.life_tracker.lock().active`, so + // that `lock_life().map` below will find the buffer in that + // submission and wait for it, rather than placing it in + // `ready_to_map` prematurely. match queue.flush_writes_for_buffer(self, snatch_guard) { Err(err) => { let state = mem::replace(&mut *self.map_state.lock(), BufferMapState::Idle); @@ -725,7 +740,7 @@ impl Buffer { return Err((op, err)); } Ok(()) => { - // Schedule the buffer map in the lifetime tracker. + // Schedule the buffer map in the lifetime tracker. // // This call searches for use of the buffer by pending submissions. // If we just flushed pending writes, that search is redundant; we