224: Fix descriptor sets free validation error r=kvark a=rukai

Seeing as kvark believes this change to be correct I've opened this PR.
Still doesn't fix https://github.com/gfx-rs/wgpu/issues/221 though

Co-authored-by: Rukai <rubickent@gmail.com>
This commit is contained in:
bors[bot]
2019-06-17 13:45:22 +00:00
4 changed files with 25 additions and 3 deletions

View File

@@ -5,6 +5,7 @@ use crate::{
BufferId,
DeviceId,
LifeGuard,
RefCount,
SamplerId,
Stored,
TextureViewId,
@@ -14,6 +15,8 @@ use arrayvec::ArrayVec;
use bitflags::bitflags;
use rendy_descriptor::{DescriptorRanges, DescriptorSet};
use std::borrow::Borrow;
pub const MAX_BIND_GROUPS: usize = 4;
bitflags! {
@@ -113,3 +116,9 @@ pub struct BindGroup<B: hal::Backend> {
pub(crate) used: TrackerSet,
pub(crate) dynamic_count: usize,
}
impl<B: hal::Backend> Borrow<RefCount> for BindGroup<B> {
fn borrow(&self) -> &RefCount {
&self.life_guard.ref_count
}
}

View File

@@ -132,6 +132,7 @@ impl CommandBufferHandle {
}
});
base.views.merge_extend(&head.views).unwrap();
base.bind_groups.merge_extend(&head.bind_groups).unwrap();
let stages = all_buffer_stages() | all_image_stages();
unsafe {

View File

@@ -211,7 +211,12 @@ pub extern "C" fn wgpu_render_pass_set_bind_group(
let mut pass_guard = HUB.render_passes.write();
let pass = &mut pass_guard[pass_id];
let bind_group_guard = HUB.bind_groups.read();
let bind_group = &bind_group_guard[bind_group_id];
let bind_group = pass
.trackers
.bind_groups
.use_extend(&*bind_group_guard, bind_group_id, (), ())
.unwrap();
assert_eq!(bind_group.dynamic_count, offsets_length);
let offsets = if offsets_length != 0 {

View File

@@ -236,7 +236,7 @@ impl<B: hal::Backend> PendingResources<B> {
device.destroy_framebuffer(raw);
},
NativeResource::DescriptorSet(raw) => unsafe {
descriptor_allocator.free(Some(raw).into_iter());
descriptor_allocator.free(iter::once(raw));
},
}
}
@@ -1264,6 +1264,7 @@ pub extern "C" fn wgpu_queue_submit(
let buffer_guard = HUB.buffers.read();
let texture_guard = HUB.textures.read();
let texture_view_guard = HUB.texture_views.read();
let bind_group_guard = HUB.bind_groups.read();
// finish all the command buffers first
for &cmb_id in command_buffer_ids {
@@ -1274,7 +1275,7 @@ pub extern "C" fn wgpu_queue_submit(
if frame.need_waiting.swap(false, Ordering::AcqRel) {
assert_eq!(frame.acquired_epoch, Some(link.epoch),
"{}. Image index {} with epoch {} != current epoch {:?}",
"Attempting to rendering to a swapchain output that has already been presented",
"Attempting to render to a swapchain output that has already been presented",
link.image_index, link.epoch, frame.acquired_epoch);
wait_semaphores.push((
&frame.sem_available,
@@ -1307,6 +1308,12 @@ pub extern "C" fn wgpu_queue_submit(
.submission_index
.store(submit_index, Ordering::Release);
}
for id in comb.trackers.bind_groups.used() {
bind_group_guard[id]
.life_guard
.submission_index
.store(submit_index, Ordering::Release);
}
// execute resource transitions
let mut transit = device.com_allocator.extend(comb);