diff --git a/wgpu-bindings/wgpu.h b/wgpu-bindings/wgpu.h
index 889b639dcf..a575851e08 100644
--- a/wgpu-bindings/wgpu.h
+++ b/wgpu-bindings/wgpu.h
@@ -5,6 +5,8 @@
 
 #define WGPUBITS_PER_BYTE 8
 
+#define WGPUMAX_BIND_GROUPS 4
+
 typedef enum {
   WGPUAddressMode_ClampToEdge = 0,
   WGPUAddressMode_Repeat = 1,
diff --git a/wgpu-native/src/command/bind.rs b/wgpu-native/src/command/bind.rs
index 492dcb1980..d66d5ae5f8 100644
--- a/wgpu-native/src/command/bind.rs
+++ b/wgpu-native/src/command/bind.rs
@@ -1,6 +1,12 @@
 use crate::{BindGroupHandle, BindGroupId, BindGroupLayoutId, PipelineLayoutId, Stored};
 
-use copyless::VecHelper as _;
+use log::trace;
+
+use std::convert::identity;
+
+
+pub const MAX_BIND_GROUPS: usize = 4;
+type BindGroupMask = u8;
 
 
 pub struct BindGroupPair {
@@ -14,6 +20,28 @@ pub enum Expectation {
     Mismatch,
 }
 
+pub enum Provision {
+    Unchanged,
+    Changed {
+        was_compatible: bool,
+        now_compatible: bool,
+    },
+}
+
+
+struct TakeSome<I> {
+    iter: I,
+}
+impl<I, T> Iterator for TakeSome<I>
+where
+    I: Iterator<Item = Option<T>>,
+{
+    type Item = T;
+    fn next(&mut self) -> Option<T> {
+        self.iter.next().and_then(identity)
+    }
+}
+
 #[derive(Default)]
 pub struct BindGroupEntry {
     expected_layout_id: Option<BindGroupLayoutId>,
@@ -21,27 +49,30 @@ pub struct BindGroupEntry {
 }
 
 
 impl BindGroupEntry {
-    fn provide(&mut self, bind_group_id: BindGroupId, bind_group: &BindGroupHandle) -> bool {
-        if let Some(BindGroupPair {
-            ref layout_id,
-            ref group_id,
-        }) = self.provided
-        {
-            if group_id.value == bind_group_id {
-                assert_eq!(*layout_id, bind_group.layout_id);
-                return false;
+    fn provide(&mut self, bind_group_id: BindGroupId, bind_group: &BindGroupHandle) -> Provision {
+        let was_compatible = match self.provided {
+            Some(BindGroupPair { layout_id, ref group_id }) => {
+                if group_id.value == bind_group_id {
+                    assert_eq!(layout_id, bind_group.layout_id);
+                    return Provision::Unchanged;
+                }
+                self.expected_layout_id == Some(layout_id)
             }
-        }
+            None => true
+        };
 
         self.provided = Some(BindGroupPair {
-            layout_id: bind_group.layout_id.clone(),
+            layout_id: bind_group.layout_id,
             group_id: Stored {
                 value: bind_group_id,
                 ref_count: bind_group.life_guard.ref_count.clone(),
             },
         });
-        self.expected_layout_id == Some(bind_group.layout_id.clone())
+        Provision::Changed {
+            was_compatible,
+            now_compatible: self.expected_layout_id == Some(bind_group.layout_id),
+        }
     }
 
     pub fn expect_layout(
@@ -63,38 +94,89 @@ impl BindGroupEntry {
         }
     }
 
-    pub fn _info(&self) -> (BindGroupLayoutId, Option<(BindGroupLayoutId, BindGroupId)>) {
-        (
-            self.expected_layout_id.unwrap(),
-            self.provided.as_ref().map(|pair| (pair.layout_id, pair.group_id.value)),
-        )
+    fn is_valid(&self) -> bool {
+        match (self.expected_layout_id, self.provided.as_ref()) {
+            (None, _) => true,
+            (Some(_), None) => false,
+            (Some(layout), Some(pair)) => layout == pair.layout_id,
+        }
+    }
+
+    fn actual_value(&self) -> Option<BindGroupId> {
+        self.expected_layout_id
+            .and_then(|layout_id| self.provided.as_ref().and_then(|pair| {
+                if pair.layout_id == layout_id {
+                    Some(pair.group_id.value)
+                } else {
+                    None
+                }
+            }))
     }
 }
 
 #[derive(Default)]
 pub struct Binder {
     pub(crate) pipeline_layout_id: Option<PipelineLayoutId>, //TODO: strongly `Stored`
-    pub(crate) entries: Vec<BindGroupEntry>,
+    pub(crate) entries: [BindGroupEntry; MAX_BIND_GROUPS],
 }
 
 impl Binder {
-    pub fn ensure_length(&mut self, length: usize) {
-        while self.entries.len() < length {
-            self.entries.alloc().init(BindGroupEntry::default());
+    pub(crate) fn cut_expectations(&mut self, length: usize) {
+        for entry in self.entries[length ..].iter_mut() {
+            entry.expected_layout_id = None;
         }
     }
 
-    pub(crate) fn provide_entry(
-        &mut self,
+    /// Attempt to set the value of the specified bind group index.
+    /// Returns Some() when the new bind group is ready to be actually bound
+    /// (i.e. compatible with current expectations). Also returns an iterator
+    /// of bind group IDs to be bound with it: those are compatible bind groups
+    /// that were previously blocked because the current one was incompatible.
+    pub(crate) fn provide_entry<'a>(
+        &'a mut self,
         index: usize,
        bind_group_id: BindGroupId,
         bind_group: &BindGroupHandle,
-    ) -> Option<PipelineLayoutId> {
-        self.ensure_length(index + 1);
-        if self.entries[index].provide(bind_group_id, bind_group) {
-            self.pipeline_layout_id.as_ref().cloned()
-        } else {
-            None
+    ) -> Option<(PipelineLayoutId, impl 'a + Iterator<Item = BindGroupId>)> {
+        trace!("\tBinding [{}] = group {:?}", index, bind_group_id);
+        match self.entries[index].provide(bind_group_id, bind_group) {
+            Provision::Unchanged => {
+                None
+            }
+            Provision::Changed { now_compatible: false, .. } => {
+                trace!("\t\tnot compatible");
+                None
+            }
+            Provision::Changed { was_compatible, .. } => {
+                if self.entries[.. index].iter().all(|entry| entry.is_valid()) {
+                    self.pipeline_layout_id.map(move |pipeline_layout_id| {
+                        let end = if was_compatible {
+                            trace!("\t\tgenerating follow-up sequence");
+                            MAX_BIND_GROUPS
+                        } else {
+                            index + 1
+                        };
+                        (pipeline_layout_id, TakeSome {
+                            iter: self.entries[index + 1 .. end]
+                                .iter()
+                                .map(|entry| entry.actual_value()),
+                        })
+                    })
+                } else {
+                    trace!("\t\tbehind an incompatible");
+                    None
+                }
+            }
        }
     }
+
+    pub(crate) fn invalid_mask(&self) -> BindGroupMask {
+        self.entries.iter().enumerate().fold(0, |mask, (i, entry)| {
+            if entry.is_valid() {
+                mask
+            } else {
+                mask | 1u8 << i
+            }
+        })
+    }
 }
diff --git a/wgpu-native/src/command/compute.rs b/wgpu-native/src/command/compute.rs
index 23fc96afe9..fb770302dc 100644
--- a/wgpu-native/src/command/compute.rs
+++ b/wgpu-native/src/command/compute.rs
@@ -73,20 +73,22 @@ pub extern "C" fn wgpu_compute_pass_set_bind_group(
         &*HUB.textures.read(),
     );
 
-    if let Some(pipeline_layout_id) =
+    if let Some((pipeline_layout_id, follow_up)) =
         pass.binder
             .provide_entry(index as usize, bind_group_id, bind_group)
     {
         let pipeline_layout_guard = HUB.pipeline_layouts.read();
+        let bind_groups = iter::once(&bind_group.raw)
+            .chain(follow_up.map(|bg_id| &bind_group_guard[bg_id].raw));
         unsafe {
             pass.raw.bind_compute_descriptor_sets(
                 &pipeline_layout_guard[pipeline_layout_id].raw,
                 index as usize,
-                iter::once(&bind_group.raw),
+                bind_groups,
                 &[],
             );
         }
-    }
+    };
 }
 
 #[no_mangle]
@@ -112,8 +114,7 @@ pub extern "C" fn wgpu_compute_pass_set_pipeline(
     let bing_group_guard = HUB.bind_groups.read();
 
     pass.binder.pipeline_layout_id = Some(pipeline.layout_id.clone());
-    pass.binder
-        .ensure_length(pipeline_layout.bind_group_layout_ids.len());
+    pass.binder.cut_expectations(pipeline_layout.bind_group_layout_ids.len());
 
     for (index, (entry, &bgl_id)) in pass
         .binder
diff --git a/wgpu-native/src/command/render.rs b/wgpu-native/src/command/render.rs
index ea2e2cd066..4446722ad6 100644
--- a/wgpu-native/src/command/render.rs
+++ b/wgpu-native/src/command/render.rs
@@ -14,7 +14,6 @@ use hal::command::RawCommandBuffer;
 
 use std::{iter, slice};
 
-type BindGroupMask = u8;
 
 #[derive(Debug, PartialEq)]
 enum BlendColorStatus {
     Unused,
@@ -38,7 +37,6 @@ pub struct RenderPass {
     context: RenderPassContext,
     binder: Binder,
     trackers: TrackerSet,
-    incompatible_bind_group_mask: BindGroupMask,
     blend_color_status: BlendColorStatus,
 }
 
@@ -54,18 +52,17 @@ impl RenderPass {
             context,
             binder: Binder::default(),
             trackers: TrackerSet::new(),
-            incompatible_bind_group_mask: 0,
             blend_color_status: BlendColorStatus::Unused,
         }
     }
 
     fn is_ready(&self) -> Result<(), DrawError> {
         //TODO: vertex buffers
-        if self.incompatible_bind_group_mask != 0 {
-            let index = self.incompatible_bind_group_mask.trailing_zeros() as u32;
+        let bind_mask = self.binder.invalid_mask();
+        if bind_mask != 0 {
             //let (expected, provided) = self.binder.entries[index as usize].info();
             return Err(DrawError::IncompatibleBindGroup {
-                index,
+                index: bind_mask.trailing_zeros() as u32,
             });
         }
         if self.blend_color_status == BlendColorStatus::Required {
@@ -218,22 +215,22 @@ pub extern "C" fn wgpu_render_pass_set_bind_group(
 
     pass.trackers.consume_by_extend(&bind_group.used);
 
-    if let Some(pipeline_layout_id) =
+    if let Some((pipeline_layout_id, follow_up)) =
         pass.binder
             .provide_entry(index as usize, bind_group_id, bind_group)
     {
-        pass.incompatible_bind_group_mask &= !(1u8 << index);
         let pipeline_layout_guard = HUB.pipeline_layouts.read();
-        let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id];
+        let bind_groups = iter::once(&bind_group.raw)
+            .chain(follow_up.map(|bg_id| &bind_group_guard[bg_id].raw));
         unsafe {
             pass.raw.bind_graphics_descriptor_sets(
-                &pipeline_layout.raw,
+                &&pipeline_layout_guard[pipeline_layout_id].raw,
                 index as usize,
-                iter::once(&bind_group.raw),
+                bind_groups,
                 &[],
             );
         }
-    }
+    };
 }
 
 #[no_mangle]
@@ -267,10 +264,8 @@ pub extern "C" fn wgpu_render_pass_set_pipeline(
     let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id];
     let bind_group_guard = HUB.bind_groups.read();
 
-    pass.incompatible_bind_group_mask &= (1u8 << pipeline_layout.bind_group_layout_ids.len()) - 1;
     pass.binder.pipeline_layout_id = Some(pipeline.layout_id.clone());
-    pass.binder
-        .ensure_length(pipeline_layout.bind_group_layout_ids.len());
+    pass.binder.cut_expectations(pipeline_layout.bind_group_layout_ids.len());
 
     for (index, (entry, &bgl_id)) in pass
         .binder
@@ -279,22 +274,15 @@ pub extern "C" fn wgpu_render_pass_set_pipeline(
         .zip(&pipeline_layout.bind_group_layout_ids)
         .enumerate()
     {
-        match entry.expect_layout(bgl_id) {
-            Expectation::Unchanged => {}
-            Expectation::Mismatch => {
-                pass.incompatible_bind_group_mask |= 1u8 << index;
-            }
-            Expectation::Match(bg_id) => {
-                pass.incompatible_bind_group_mask &= !(1u8 << index);
-                let desc_set = &bind_group_guard[bg_id].raw;
-                unsafe {
-                    pass.raw.bind_graphics_descriptor_sets(
-                        &pipeline_layout.raw,
-                        index,
-                        iter::once(desc_set),
-                        &[],
-                    );
-                }
+        if let Expectation::Match(bg_id) = entry.expect_layout(bgl_id) {
+            let desc_set = &bind_group_guard[bg_id].raw;
+            unsafe {
+                pass.raw.bind_graphics_descriptor_sets(
+                    &pipeline_layout.raw,
+                    index,
+                    iter::once(desc_set),
+                    &[],
+                );
             }
         }
     }