866: Strengthen the multi-ref-count on BGLs r=cwfitzgerald a=kvark

**Connections**
Fixes #834

**Description**
Bind group layouts (BGLs) have to be somewhat uniquely tracked, and there was a synchronization issue with their use of `MultiRefCount`. In a multi-threaded environment, `bind_group_layout_drop` first decreases the refcount and only then adds the ID to the list of resources suspected for deletion. Between these two operations, another thread could trigger the triage of suspected resources, and if the BGL was already in that list, it would be destroyed earlier than expected.
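For illustration, here is a minimal self-contained model of the old, racy sequence. The names (`Bgl`, `old_drop`, `old_triage`, and the plain `Mutex<Vec<usize>>` suspected list) are hypothetical stand-ins, not wgpu's actual types:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;

// Hypothetical stand-in for a bind group layout with a refcount.
struct Bgl {
    refcount: AtomicUsize,
}

// Old scheme: `bind_group_layout_drop` performed two separate steps.
fn old_drop(bgl: &Bgl, id: usize, suspected: &Mutex<Vec<usize>>) {
    bgl.refcount.fetch_sub(1, Ordering::AcqRel); // step 1: count may hit 0
    // <-- window: a concurrent triage can run here; if `id` is already in
    //     the suspected list, it sees refcount == 0 and destroys the BGL
    suspected.lock().unwrap().push(id); // step 2: recorded too late
}

// Old triage: check-only, decoupled from the decrement above.
fn old_triage(bgls: &[Bgl], suspected: &Mutex<Vec<usize>>) {
    for id in suspected.lock().unwrap().drain(..) {
        if bgls[id].refcount.load(Ordering::Acquire) == 0 {
            // destroy the layout; any push of `id` still in flight now
            // refers to an already-unregistered resource
        }
    }
}
```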

The solution I came up with is deferring the `dec()` call until the triage itself. That leaves no gap between the decrement and the destruction check, and it brings BGLs in line with how the other resources are tracked. I'm fairly confident the new method works correctly at all times.
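In the same toy model (reusing the imports and `Bgl` from the sketch above), the fixed flow looks like this: dropping only records the suspicion, and the decrement is paid during triage while the guard is held exclusively:

```rust
// New scheme: drop only records the suspicion...
fn new_drop(id: usize, suspected: &Mutex<Vec<usize>>) {
    suspected.lock().unwrap().push(id);
}

// ...and triage pays the deferred decrement. `fetch_sub` returns the
// previous value, so `== 1` means this call took the count to zero.
fn new_triage(bgls: &[Bgl], suspected: &Mutex<Vec<usize>>) {
    for id in suspected.lock().unwrap().drain(..) {
        if bgls[id].refcount.fetch_sub(1, Ordering::AcqRel) == 1 {
            // last reference is gone; safe to destroy now
        }
    }
}
```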

**Testing**
Tested extensively on https://github.com/gfx-rs/wgpu/issues/834#issuecomment-669362572

Co-authored-by: Dzmitry Malyshau <kvarkus@gmail.com>
bors[bot] committed (via GitHub) on 2020-08-05 21:46:20 +00:00
3 changed files with 10 additions and 18 deletions


@@ -519,8 +519,7 @@ impl<B: GfxBackend> LifetimeTracker<B> {
         }
         if !self.suspected_resources.pipeline_layouts.is_empty() {
-            let (mut guard, mut token) = hub.pipeline_layouts.write(token);
-            let (bgl_guard, _) = hub.bind_group_layouts.read(&mut token);
+            let (mut guard, _) = hub.pipeline_layouts.write(token);
             for Stored {
                 value: id,
@@ -533,23 +532,23 @@ impl<B: GfxBackend> LifetimeTracker<B> {
                     trace.map(|t| t.lock().add(trace::Action::DestroyPipelineLayout(id.0)));
                     let layout = hub.pipeline_layouts.unregister_locked(id.0, &mut *guard);
-                    for &bgl_id in layout.bind_group_layout_ids.iter() {
-                        bgl_guard[bgl_id].multi_ref_count.dec();
-                    }
                     self.suspected_resources
                         .bind_group_layouts
                         .extend_from_slice(&layout.bind_group_layout_ids);
                     self.free_resources.pipeline_layouts.push(layout.raw);
                 }
             }
         }
         if !self.suspected_resources.bind_group_layouts.is_empty() {
             self.suspected_resources.bind_group_layouts.sort();
-            self.suspected_resources.bind_group_layouts.dedup();
             let (mut guard, _) = hub.bind_group_layouts.write(token);
             for id in self.suspected_resources.bind_group_layouts.drain(..) {
                 //Note: this has to happen after all the suspected pipelines are destroyed
                 //Note: nothing else can bump the refcount since the guard is locked exclusively
-                if guard[id].multi_ref_count.is_empty() {
+                //Note: same BGL can appear multiple times in the list, but only the last
+                // encounter could drop the refcount to 0.
+                if guard[id].multi_ref_count.dec_and_check_empty() {
                     #[cfg(feature = "trace")]
                     trace.map(|t| t.lock().add(trace::Action::DestroyBindGroupLayout(id.0)));
                     let layout = hub.bind_group_layouts.unregister_locked(id.0, &mut *guard);
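Note that with the decrement deferred, every occurrence of an ID in the suspected list stands for one dropped reference, which is why the list is no longer dedup'd. A tiny runnable illustration of the counting (the initial count of 3 is made up):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    // A BGL with three outstanding references (hypothetical number).
    let refcount = AtomicUsize::new(3);
    // Triage drains [id, id, id]; each occurrence pays one decrement,
    // and only the last one observes the previous value 1.
    assert_eq!(refcount.fetch_sub(1, Ordering::AcqRel), 3);
    assert_eq!(refcount.fetch_sub(1, Ordering::AcqRel), 2);
    assert_eq!(refcount.fetch_sub(1, Ordering::AcqRel), 1); // now 0: destroy
}
```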


@@ -1525,10 +1525,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
         let device_id = {
             let (mut bind_group_layout_guard, _) = hub.bind_group_layouts.write(&mut token);
             match bind_group_layout_guard.get_mut(bind_group_layout_id) {
-                Ok(layout) => {
-                    layout.multi_ref_count.dec();
-                    layout.device_id.value
-                }
+                Ok(layout) => layout.device_id.value,
                 Err(InvalidId) => {
                     hub.bind_group_layouts
                         .unregister_locked(bind_group_layout_id, &mut *bind_group_layout_guard);


@@ -152,12 +152,8 @@ impl MultiRefCount {
         unsafe { self.0.as_ref() }.fetch_add(1, Ordering::AcqRel);
     }
-    fn dec(&self) {
-        unsafe { self.0.as_ref() }.fetch_sub(1, Ordering::AcqRel);
-    }
-    fn is_empty(&self) -> bool {
-        unsafe { self.0.as_ref() }.load(Ordering::Acquire) == 0
+    fn dec_and_check_empty(&self) -> bool {
+        unsafe { self.0.as_ref() }.fetch_sub(1, Ordering::AcqRel) == 1
     }
 }
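Folding the decrement and the emptiness check into a single `fetch_sub` is what closes the gap: the previous value comes back from one atomic read-modify-write, so exactly one caller can observe the transition to zero. A sketch of the contrast, with a bare `AtomicUsize` standing in for the counter behind `MultiRefCount`:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Old shape: two independent atomic operations; another thread can act
// in between, and two threads can both see the count at zero.
fn dec_then_check(c: &AtomicUsize) -> bool {
    c.fetch_sub(1, Ordering::AcqRel);
    c.load(Ordering::Acquire) == 0
}

// New shape: one read-modify-write; `== 1` means *this* call hit zero,
// so only one caller ever gets `true`.
fn dec_and_check_empty(c: &AtomicUsize) -> bool {
    c.fetch_sub(1, Ordering::AcqRel) == 1
}
```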