diff --git a/wgpu-core/Cargo.toml b/wgpu-core/Cargo.toml index 90e934229f..3768046590 100644 --- a/wgpu-core/Cargo.toml +++ b/wgpu-core/Cargo.toml @@ -50,7 +50,7 @@ gfx-backend-vulkan = { git = "https://github.com/gfx-rs/gfx", rev = "0244e3401e9 [target.'cfg(all(not(target_arch = "wasm32"), windows))'.dependencies] gfx-backend-dx12 = { git = "https://github.com/gfx-rs/gfx", rev = "0244e3401e9f127617cb8636397048584e7bfe8a" } gfx-backend-dx11 = { git = "https://github.com/gfx-rs/gfx", rev = "0244e3401e9f127617cb8636397048584e7bfe8a" } -gfx-backend-vulkan = { git = "https://github.com/gfx-rs/gfx", rev = "0244e3401e9f127617cb8636397048584e7bfe8a" } +gfx-backend-vulkan = { git = "https://github.com/gfx-rs/gfx", rev = "0244e3401e9f127617cb8636397048584e7bfe8a", features = ["naga"] } [target.'cfg(target_arch = "wasm32")'.dependencies] gfx-backend-gl = { git = "https://github.com/gfx-rs/gfx", rev = "0244e3401e9f127617cb8636397048584e7bfe8a", features = ["naga"] } diff --git a/wgpu-core/src/command/compute.rs b/wgpu-core/src/command/compute.rs index d9c3857108..4618ef158b 100644 --- a/wgpu-core/src/command/compute.rs +++ b/wgpu-core/src/command/compute.rs @@ -240,7 +240,7 @@ impl Global { let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); let cmd_buf = - CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id).map_pass_err(scope)?; + CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, encoder_id).map_pass_err(scope)?; let raw = cmd_buf.raw.last_mut().unwrap(); #[cfg(feature = "trace")] diff --git a/wgpu-core/src/command/mod.rs b/wgpu-core/src/command/mod.rs index 867696a9ea..c53c1913a9 100644 --- a/wgpu-core/src/command/mod.rs +++ b/wgpu-core/src/command/mod.rs @@ -54,6 +54,17 @@ pub struct CommandBuffer { impl CommandBuffer { fn get_encoder( + storage: &Storage, + id: id::CommandEncoderId, + ) -> Result<&Self, CommandEncoderError> { + match storage.get(id) { + Ok(cmd_buf) if cmd_buf.is_recording => Ok(cmd_buf), + Ok(_) => 
Err(CommandEncoderError::NotRecording), + Err(_) => Err(CommandEncoderError::Invalid), + } + } + + fn get_encoder_mut( storage: &mut Storage, id: id::CommandEncoderId, ) -> Result<&mut Self, CommandEncoderError> { @@ -201,7 +212,7 @@ impl Global { //TODO: actually close the last recorded command buffer let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token); - let error = match CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id) { + let error = match CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, encoder_id) { Ok(cmd_buf) => { cmd_buf.is_recording = false; // stop tracking the swapchain image, if used @@ -232,7 +243,7 @@ impl Global { let mut token = Token::root(); let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token); - let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id)?; + let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, encoder_id)?; let cmd_buf_raw = cmd_buf.raw.last_mut().unwrap(); unsafe { @@ -252,7 +263,7 @@ impl Global { let mut token = Token::root(); let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token); - let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id)?; + let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, encoder_id)?; let cmd_buf_raw = cmd_buf.raw.last_mut().unwrap(); unsafe { @@ -271,7 +282,7 @@ impl Global { let mut token = Token::root(); let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token); - let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id)?; + let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, encoder_id)?; let cmd_buf_raw = cmd_buf.raw.last_mut().unwrap(); unsafe { diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index d724bca260..2a1b2bbbc2 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -11,13 +11,13 @@ use crate::{ }, conv, device::{ - AttachmentData, AttachmentDataVec, FramebufferKey, 
RenderPassCompatibilityError, + AttachmentData, AttachmentDataVec, Device, FramebufferKey, RenderPassCompatibilityError, RenderPassContext, RenderPassKey, MAX_COLOR_TARGETS, MAX_VERTEX_BUFFERS, }, - hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token}, + hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Storage, Token}, id, pipeline::PipelineFlags, - resource::{BufferUse, TextureUse, TextureView, TextureViewInner}, + resource::{BufferUse, Texture, TextureUse, TextureView, TextureViewInner}, span, track::{TextureSelector, TrackerSet, UsageConflict}, validation::{ @@ -387,6 +387,8 @@ pub enum RenderPassErrorInner { Encoder(#[from] CommandEncoderError), #[error("attachment texture view {0:?} is invalid")] InvalidAttachment(id::TextureViewId), + #[error("there are no attachments")] + NoAttachments, #[error("attachments have different sizes")] MismatchAttachments, #[error("attachment's sample count {0} is invalid")] @@ -488,6 +490,555 @@ fn check_device_features( } } +struct RenderAttachment<'a> { + texture_id: &'a Stored, + selector: &'a TextureSelector, + previous_use: Option, + new_use: TextureUse, +} + +type UsedSwapChainInfo = Option<(Stored, F)>; + +struct RenderPassInfo<'a, B: hal::Backend> { + context: RenderPassContext, + trackers: TrackerSet, + render_attachments: AttachmentDataVec>, + used_swapchain_with_framebuffer: UsedSwapChainInfo, + is_ds_read_only: bool, + extent: wgt::Extent3d, +} + +impl<'a, B: GfxBackend> RenderPassInfo<'a, B> { + fn start( + raw: &mut B::CommandBuffer, + color_attachments: &[ColorAttachmentDescriptor], + depth_stencil_attachment: Option<&DepthStencilAttachmentDescriptor>, + cmd_buf: &CommandBuffer, + device: &Device, + view_guard: &'a Storage, id::TextureViewId>, + ) -> Result { + let sample_count_limit = device.hal_limits.framebuffer_color_sample_counts; + + // We default to false intentionally, even if depth-stencil isn't used at all. 
+ // This allows us to use the primary raw pipeline in `RenderPipeline`, + // instead of the special read-only one, which would be `None`. + let mut is_ds_read_only = false; + + let mut render_attachments = AttachmentDataVec::::new(); + + let mut attachment_width = None; + let mut attachment_height = None; + let mut valid_attachment = true; + + let mut extent = None; + let mut sample_count = 0; + let mut depth_stencil_aspects = hal::format::Aspects::empty(); + let mut used_swap_chain = None::>; + let mut trackers = TrackerSet::new(B::VARIANT); + let mut used_swapchain_with_framebuffer = None; + + let mut add_view = |view: &TextureView| { + if let Some(ex) = extent { + if ex != view.extent { + return Err(RenderPassErrorInner::ExtentStateMismatch { + state_extent: ex, + view_extent: view.extent, + }); + } + } else { + extent = Some(view.extent); + } + if sample_count == 0 { + sample_count = view.samples; + } else if sample_count != view.samples { + return Err(RenderPassErrorInner::SampleCountMismatch { + actual: view.samples, + expected: sample_count, + }); + } + Ok(()) + }; + + let rp_key = { + let depth_stencil = match depth_stencil_attachment { + Some(at) => { + let view = trackers + .views + .use_extend(&*view_guard, at.attachment, (), ()) + .map_err(|_| RenderPassErrorInner::InvalidAttachment(at.attachment))?; + add_view(view)?; + depth_stencil_aspects = view.aspects; + + let source_id = match view.inner { + TextureViewInner::Native { ref source_id, .. } => source_id, + TextureViewInner::SwapChain { .. } => { + return Err(RenderPassErrorInner::SwapChainImageAsDepthStencil); + } + }; + + // Using render pass for transition. + let previous_use = cmd_buf + .trackers + .textures + .query(source_id.value, view.selector.clone()); + let new_use = if at.is_read_only(view.aspects)? 
{ + is_ds_read_only = true; + TextureUse::ATTACHMENT_READ + } else { + TextureUse::ATTACHMENT_WRITE + }; + render_attachments.push(RenderAttachment { + texture_id: source_id, + selector: &view.selector, + previous_use, + new_use, + }); + + let new_layout = conv::map_texture_state(new_use, view.aspects).1; + let old_layout = match previous_use { + Some(usage) => conv::map_texture_state(usage, view.aspects).1, + None => new_layout, + }; + + let ds_at = hal::pass::Attachment { + format: Some(conv::map_texture_format( + view.format, + device.private_features, + )), + samples: view.samples, + ops: conv::map_load_store_ops(&at.depth), + stencil_ops: conv::map_load_store_ops(&at.stencil), + layouts: old_layout..new_layout, + }; + Some((ds_at, new_layout)) + } + None => None, + }; + + let mut colors = ArrayVec::new(); + let mut resolves = ArrayVec::new(); + + for at in color_attachments { + let view = trackers + .views + .use_extend(&*view_guard, at.attachment, (), ()) + .map_err(|_| RenderPassErrorInner::InvalidAttachment(at.attachment))?; + add_view(view)?; + + valid_attachment &= *attachment_width.get_or_insert(view.extent.width) + == view.extent.width + && *attachment_height.get_or_insert(view.extent.height) == view.extent.height; + + let layouts = match view.inner { + TextureViewInner::Native { ref source_id, .. } => { + let previous_use = cmd_buf + .trackers + .textures + .query(source_id.value, view.selector.clone()); + let new_use = TextureUse::ATTACHMENT_WRITE; + render_attachments.push(RenderAttachment { + texture_id: source_id, + selector: &view.selector, + previous_use, + new_use, + }); + + let new_layout = + conv::map_texture_state(new_use, hal::format::Aspects::COLOR).1; + let old_layout = match previous_use { + Some(usage) => { + conv::map_texture_state(usage, hal::format::Aspects::COLOR).1 + } + None => new_layout, + }; + old_layout..new_layout + } + TextureViewInner::SwapChain { ref source_id, .. 
} => { + if let Some((ref sc_id, _)) = cmd_buf.used_swap_chain { + if source_id.value != sc_id.value { + return Err(RenderPassErrorInner::SwapChainMismatch); + } + } else { + assert!(used_swap_chain.is_none()); + used_swap_chain = Some(source_id.clone()); + } + + let end = hal::image::Layout::Present; + let start = match at.channel.load_op { + LoadOp::Clear => hal::image::Layout::Undefined, + LoadOp::Load => end, + }; + start..end + } + }; + + let color_at = hal::pass::Attachment { + format: Some(conv::map_texture_format( + view.format, + device.private_features, + )), + samples: view.samples, + ops: conv::map_load_store_ops(&at.channel), + stencil_ops: hal::pass::AttachmentOps::DONT_CARE, + layouts, + }; + colors.push((color_at, hal::image::Layout::ColorAttachmentOptimal)); + } + + if !valid_attachment { + return Err(RenderPassErrorInner::MismatchAttachments); + } + + for resolve_target in color_attachments.iter().flat_map(|at| at.resolve_target) { + let view = trackers + .views + .use_extend(&*view_guard, resolve_target, (), ()) + .map_err(|_| RenderPassErrorInner::InvalidAttachment(resolve_target))?; + if extent != Some(view.extent) { + return Err(RenderPassErrorInner::ExtentStateMismatch { + state_extent: extent.unwrap_or_default(), + view_extent: view.extent, + }); + } + if view.samples != 1 { + return Err(RenderPassErrorInner::InvalidResolveTargetSampleCount); + } + if sample_count == 1 { + return Err(RenderPassErrorInner::InvalidResolveSourceSampleCount); + } + + let layouts = match view.inner { + TextureViewInner::Native { ref source_id, .. 
} => { + let previous_use = cmd_buf + .trackers + .textures + .query(source_id.value, view.selector.clone()); + let new_use = TextureUse::ATTACHMENT_WRITE; + render_attachments.push(RenderAttachment { + texture_id: source_id, + selector: &view.selector, + previous_use, + new_use, + }); + + let new_layout = + conv::map_texture_state(new_use, hal::format::Aspects::COLOR).1; + let old_layout = match previous_use { + Some(usage) => { + conv::map_texture_state(usage, hal::format::Aspects::COLOR).1 + } + None => new_layout, + }; + old_layout..new_layout + } + TextureViewInner::SwapChain { ref source_id, .. } => { + if let Some((ref sc_id, _)) = cmd_buf.used_swap_chain { + if source_id.value != sc_id.value { + return Err(RenderPassErrorInner::SwapChainMismatch); + } + } else { + assert!(used_swap_chain.is_none()); + used_swap_chain = Some(source_id.clone()); + } + hal::image::Layout::Undefined..hal::image::Layout::Present + } + }; + + let resolve_at = hal::pass::Attachment { + format: Some(conv::map_texture_format( + view.format, + device.private_features, + )), + samples: view.samples, + ops: hal::pass::AttachmentOps::new( + hal::pass::AttachmentLoadOp::DontCare, + hal::pass::AttachmentStoreOp::Store, + ), + stencil_ops: hal::pass::AttachmentOps::DONT_CARE, + layouts, + }; + resolves.push((resolve_at, hal::image::Layout::ColorAttachmentOptimal)); + } + + RenderPassKey { + colors, + resolves, + depth_stencil, + } + }; + + if sample_count & sample_count_limit == 0 { + return Err(RenderPassErrorInner::InvalidSampleCount(sample_count)); + } + + let mut render_pass_cache = device.render_passes.lock(); + let render_pass = match render_pass_cache.entry(rp_key.clone()) { + Entry::Occupied(e) => e.into_mut(), + Entry::Vacant(entry) => { + let color_ids: [hal::pass::AttachmentRef; MAX_COLOR_TARGETS] = [ + (0, hal::image::Layout::ColorAttachmentOptimal), + (1, hal::image::Layout::ColorAttachmentOptimal), + (2, hal::image::Layout::ColorAttachmentOptimal), + (3, 
hal::image::Layout::ColorAttachmentOptimal), + ]; + + let mut resolve_ids = ArrayVec::<[_; MAX_COLOR_TARGETS]>::new(); + let mut attachment_index = color_attachments.len(); + if color_attachments + .iter() + .any(|at| at.resolve_target.is_some()) + { + for ((i, at), &(_, layout)) in color_attachments + .iter() + .enumerate() + .zip(entry.key().resolves.iter()) + { + let real_attachment_index = match at.resolve_target { + Some(_) => attachment_index + i, + None => hal::pass::ATTACHMENT_UNUSED, + }; + resolve_ids.push((real_attachment_index, layout)); + } + attachment_index += color_attachments.len(); + } + + let depth_id = depth_stencil_attachment.map(|_| { + let usage = if is_ds_read_only { + TextureUse::ATTACHMENT_READ + } else { + TextureUse::ATTACHMENT_WRITE + }; + ( + attachment_index, + conv::map_texture_state(usage, depth_stencil_aspects).1, + ) + }); + + let subpass = hal::pass::SubpassDesc { + colors: &color_ids[..color_attachments.len()], + resolves: &resolve_ids, + depth_stencil: depth_id.as_ref(), + inputs: &[], + preserves: &[], + }; + let all = entry + .key() + .all() + .map(|(at, _)| at) + .collect::>(); + + let pass = unsafe { device.raw.create_render_pass(all, iter::once(subpass), &[]) } + .unwrap(); + entry.insert(pass) + } + }; + + let mut framebuffer_cache; + let fb_key = FramebufferKey { + colors: color_attachments + .iter() + .map(|at| id::Valid(at.attachment)) + .collect(), + resolves: color_attachments + .iter() + .filter_map(|at| at.resolve_target) + .map(id::Valid) + .collect(), + depth_stencil: depth_stencil_attachment.map(|at| id::Valid(at.attachment)), + }; + let context = RenderPassContext { + attachments: AttachmentData { + colors: fb_key + .colors + .iter() + .map(|&at| view_guard[at].format) + .collect(), + resolves: fb_key + .resolves + .iter() + .map(|&at| view_guard[at].format) + .collect(), + depth_stencil: fb_key.depth_stencil.map(|at| view_guard[at].format), + }, + sample_count, + }; + + let framebuffer = match 
used_swap_chain.take() { + Some(sc_id) => { + assert!(cmd_buf.used_swap_chain.is_none()); + // Always create a new framebuffer and delete it after presentation. + let attachments = fb_key + .all() + .map(|&id| match view_guard[id].inner { + TextureViewInner::Native { ref raw, .. } => raw, + TextureViewInner::SwapChain { ref image, .. } => Borrow::borrow(image), + }) + .collect::>(); + let framebuffer = unsafe { + device + .raw + .create_framebuffer(&render_pass, attachments, extent.unwrap()) + .or(Err(RenderPassErrorInner::OutOfMemory))? + }; + used_swapchain_with_framebuffer = Some((sc_id, framebuffer)); + &mut used_swapchain_with_framebuffer.as_mut().unwrap().1 + } + None => { + // Cache framebuffers by the device. + framebuffer_cache = device.framebuffers.lock(); + match framebuffer_cache.entry(fb_key) { + Entry::Occupied(e) => e.into_mut(), + Entry::Vacant(e) => { + let fb = { + let attachments = e + .key() + .all() + .map(|&id| match view_guard[id].inner { + TextureViewInner::Native { ref raw, .. } => raw, + TextureViewInner::SwapChain { ref image, .. } => { + Borrow::borrow(image) + } + }) + .collect::>(); + unsafe { + device + .raw + .create_framebuffer(&render_pass, attachments, extent.unwrap()) + .or(Err(RenderPassErrorInner::OutOfMemory))? 
+ } + }; + e.insert(fb) + } + } + } + }; + + let rect = { + let ex = extent.unwrap(); + hal::pso::Rect { + x: 0, + y: 0, + w: ex.width as _, + h: ex.height as _, + } + }; + + let clear_values = color_attachments + .iter() + .zip(&rp_key.colors) + .flat_map(|(at, (rat, _layout))| { + match at.channel.load_op { + LoadOp::Load => None, + LoadOp::Clear => { + use hal::format::ChannelType; + //TODO: validate sign/unsign and normalized ranges of the color values + let value = match rat.format.unwrap().base_format().1 { + ChannelType::Unorm + | ChannelType::Snorm + | ChannelType::Ufloat + | ChannelType::Sfloat + | ChannelType::Uscaled + | ChannelType::Sscaled + | ChannelType::Srgb => hal::command::ClearColor { + float32: conv::map_color_f32(&at.channel.clear_value), + }, + ChannelType::Sint => hal::command::ClearColor { + sint32: conv::map_color_i32(&at.channel.clear_value), + }, + ChannelType::Uint => hal::command::ClearColor { + uint32: conv::map_color_u32(&at.channel.clear_value), + }, + }; + Some(hal::command::ClearValue { color: value }) + } + } + }) + .chain(depth_stencil_attachment.and_then(|at| { + match (at.depth.load_op, at.stencil.load_op) { + (LoadOp::Load, LoadOp::Load) => None, + (LoadOp::Clear, _) | (_, LoadOp::Clear) => { + let value = hal::command::ClearDepthStencil { + depth: at.depth.clear_value, + stencil: at.stencil.clear_value, + }; + Some(hal::command::ClearValue { + depth_stencil: value, + }) + } + } + })) + .collect::>(); + + unsafe { + raw.begin_render_pass( + render_pass, + framebuffer, + rect, + clear_values, + hal::command::SubpassContents::Inline, + ); + raw.set_scissors(0, iter::once(&rect)); + raw.set_viewports( + 0, + iter::once(hal::pso::Viewport { + rect, + depth: 0.0..1.0, + }), + ); + } + + Ok(Self { + context, + trackers, + render_attachments, + used_swapchain_with_framebuffer, + is_ds_read_only, + extent: wgt::Extent3d { + width: attachment_width.ok_or(RenderPassErrorInner::NoAttachments)?, + height: 
attachment_height.ok_or(RenderPassErrorInner::NoAttachments)?, + depth: 1, + }, + }) + } + + fn finish( + mut self, + texture_guard: &Storage, id::TextureId>, + ) -> Result<(TrackerSet, UsedSwapChainInfo), RenderPassErrorInner> { + for ra in self.render_attachments { + let texture = &texture_guard[ra.texture_id.value]; + check_texture_usage(texture.usage, TextureUsage::RENDER_ATTACHMENT)?; + + // the tracker set of the pass is always in "extend" mode + self.trackers + .textures + .change_extend( + ra.texture_id.value, + &ra.texture_id.ref_count, + ra.selector.clone(), + ra.new_use, + ) + .unwrap(); + + if let Some(usage) = ra.previous_use { + // Make the attachment tracks to be aware of the internal + // transition done by the render pass, by registering the + // previous usage as the initial state. + self.trackers + .textures + .prepend( + ra.texture_id.value, + &ra.texture_id.ref_count, + ra.selector.clone(), + usage, + ) + .unwrap(); + } + } + Ok((self.trackers, self.used_swapchain_with_framebuffer)) + } +} + // Common routines between render/compute impl Global { @@ -519,13 +1070,762 @@ impl Global { let mut token = Token::root(); let (device_guard, mut token) = hub.devices.read(&mut token); - let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token); - let mut trackers = TrackerSet::new(B::VARIANT); + let (cmd_buf_raw, trackers, used_swapchain_with_framebuffer) = { + // read-only lock guard + let (cmb_guard, mut token) = hub.command_buffers.read(&mut token); + + let cmd_buf = + CommandBuffer::get_encoder(&*cmb_guard, encoder_id).map_pass_err(scope)?; + let device = &device_guard[cmd_buf.device_id.value]; + let mut raw = device.cmd_allocator.extend(cmd_buf); + unsafe { + if let Some(ref label) = base.label { + // cmd_buf.has_labels = true; this is done later + device.raw.set_command_buffer_name(&mut raw, label); + } + raw.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT); + } + + let (bundle_guard, mut token) = 
hub.render_bundles.read(&mut token); + let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token); + let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token); + let (pipeline_guard, mut token) = hub.render_pipelines.read(&mut token); + let (buffer_guard, mut token) = hub.buffers.read(&mut token); + let (texture_guard, mut token) = hub.textures.read(&mut token); + let (view_guard, _) = hub.texture_views.read(&mut token); + + tracing::trace!( + "Encoding render pass begin in command buffer {:?}", + encoder_id + ); + + let mut info = RenderPassInfo::start( + &mut raw, + color_attachments, + depth_stencil_attachment, + cmd_buf, + device, + &*view_guard, + ) + .map_pass_err(scope)?; + + let mut state = State { + pipeline_flags: PipelineFlags::empty(), + binder: Binder::new(cmd_buf.limits.max_bind_groups), + blend_color: OptionalState::Unused, + stencil_reference: 0, + pipeline: StateChange::new(), + index: IndexState::default(), + vertex: VertexState::default(), + debug_scope_depth: 0, + }; + let mut temp_offsets = Vec::new(); + + for command in base.commands { + match *command { + RenderCommand::SetBindGroup { + index, + num_dynamic_offsets, + bind_group_id, + } => { + let scope = PassErrorScope::SetBindGroup(bind_group_id); + let max_bind_groups = device.limits.max_bind_groups; + if (index as u32) >= max_bind_groups { + return Err(RenderCommandError::BindGroupIndexOutOfRange { + index, + max: max_bind_groups, + }) + .map_pass_err(scope); + } + + temp_offsets.clear(); + temp_offsets.extend_from_slice( + &base.dynamic_offsets[..num_dynamic_offsets as usize], + ); + base.dynamic_offsets = + &base.dynamic_offsets[num_dynamic_offsets as usize..]; + + let bind_group = info + .trackers + .bind_groups + .use_extend(&*bind_group_guard, bind_group_id, (), ()) + .unwrap(); + bind_group + .validate_dynamic_bindings(&temp_offsets) + .map_pass_err(scope)?; + + info.trackers + .merge_extend(&bind_group.used) + .map_pass_err(scope)?; + + if let 
Some((pipeline_layout_id, follow_ups)) = state.binder.provide_entry( + index as usize, + id::Valid(bind_group_id), + bind_group, + &temp_offsets, + ) { + let bind_groups = iter::once(bind_group.raw.raw()) + .chain( + follow_ups + .clone() + .map(|(bg_id, _)| bind_group_guard[bg_id].raw.raw()), + ) + .collect::>(); + temp_offsets.extend(follow_ups.flat_map(|(_, offsets)| offsets)); + unsafe { + raw.bind_graphics_descriptor_sets( + &pipeline_layout_guard[pipeline_layout_id].raw, + index as usize, + bind_groups, + &temp_offsets, + ); + } + }; + } + RenderCommand::SetPipeline(pipeline_id) => { + let scope = PassErrorScope::SetPipelineRender(pipeline_id); + if state.pipeline.set_and_check_redundant(pipeline_id) { + continue; + } + + let pipeline = info + .trackers + .render_pipes + .use_extend(&*pipeline_guard, pipeline_id, (), ()) + .map_err(|_| RenderCommandError::InvalidPipeline(pipeline_id)) + .map_pass_err(scope)?; + + info.context + .check_compatible(&pipeline.pass_context) + .map_err(RenderCommandError::IncompatiblePipeline) + .map_pass_err(scope)?; + + state.pipeline_flags = pipeline.flags; + + if pipeline.flags.contains(PipelineFlags::WRITES_DEPTH_STENCIL) + && info.is_ds_read_only + { + return Err(RenderCommandError::IncompatibleReadOnlyDepthStencil) + .map_pass_err(scope); + } + + state + .blend_color + .require(pipeline.flags.contains(PipelineFlags::BLEND_COLOR)); + + unsafe { + raw.bind_graphics_pipeline(&pipeline.raw); + } + + if pipeline.flags.contains(PipelineFlags::STENCIL_REFERENCE) { + unsafe { + raw.set_stencil_reference( + hal::pso::Face::all(), + state.stencil_reference, + ); + } + } + + // Rebind resource + if state.binder.pipeline_layout_id != Some(pipeline.layout_id.value) { + let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id.value]; + + state.binder.change_pipeline_layout( + &*pipeline_layout_guard, + pipeline.layout_id.value, + ); + + let mut is_compatible = true; + + for (index, (entry, &bgl_id)) in state + .binder + .entries + 
.iter_mut() + .zip(&pipeline_layout.bind_group_layout_ids) + .enumerate() + { + match entry.expect_layout(bgl_id) { + LayoutChange::Match(bg_id, offsets) if is_compatible => { + let desc_set = bind_group_guard[bg_id].raw.raw(); + unsafe { + raw.bind_graphics_descriptor_sets( + &pipeline_layout.raw, + index, + iter::once(desc_set), + offsets.iter().cloned(), + ); + } + } + LayoutChange::Match(..) | LayoutChange::Unchanged => {} + LayoutChange::Mismatch => { + is_compatible = false; + } + } + } + + // Clear push constant ranges + let non_overlapping = super::bind::compute_nonoverlapping_ranges( + &pipeline_layout.push_constant_ranges, + ); + for range in non_overlapping { + let offset = range.range.start; + let size_bytes = range.range.end - offset; + super::push_constant_clear( + offset, + size_bytes, + |clear_offset, clear_data| unsafe { + raw.push_graphics_constants( + &pipeline_layout.raw, + conv::map_shader_stage_flags(range.stages), + clear_offset, + clear_data, + ); + }, + ); + } + } + + state.index.pipeline_format = pipeline.index_format; + + let vertex_strides_len = pipeline.vertex_strides.len(); + state.vertex.buffers_required = vertex_strides_len as u32; + + while state.vertex.inputs.len() < vertex_strides_len { + state.vertex.inputs.push(VertexBufferState::EMPTY); + } + + // Update vertex buffer limits + for (vbs, &(stride, rate)) in + state.vertex.inputs.iter_mut().zip(&pipeline.vertex_strides) + { + vbs.stride = stride; + vbs.rate = rate; + } + for vbs in state.vertex.inputs.iter_mut().skip(vertex_strides_len) { + vbs.stride = 0; + vbs.rate = InputStepMode::Vertex; + } + state.vertex.update_limits(); + } + RenderCommand::SetIndexBuffer { + buffer_id, + index_format, + offset, + size, + } => { + let scope = PassErrorScope::SetIndexBuffer(buffer_id); + let buffer = info + .trackers + .buffers + .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDEX) + .map_err(|e| RenderCommandError::Buffer(buffer_id, e)) + .map_pass_err(scope)?; + 
check_buffer_usage(buffer.usage, BufferUsage::INDEX).map_pass_err(scope)?; + let &(ref buf_raw, _) = buffer + .raw + .as_ref() + .ok_or(RenderCommandError::DestroyedBuffer(buffer_id)) + .map_pass_err(scope)?; + + let end = match size { + Some(s) => offset + s.get(), + None => buffer.size, + }; + state.index.bound_buffer_view = Some((id::Valid(buffer_id), offset..end)); + + state.index.format = Some(index_format); + state.index.update_limit(); + + let range = hal::buffer::SubRange { + offset, + size: Some(end - offset), + }; + let index_type = conv::map_index_format(index_format); + unsafe { + raw.bind_index_buffer(buf_raw, range, index_type); + } + } + RenderCommand::SetVertexBuffer { + slot, + buffer_id, + offset, + size, + } => { + let scope = PassErrorScope::SetVertexBuffer(buffer_id); + let buffer = info + .trackers + .buffers + .use_extend(&*buffer_guard, buffer_id, (), BufferUse::VERTEX) + .map_err(|e| RenderCommandError::Buffer(buffer_id, e)) + .map_pass_err(scope)?; + check_buffer_usage(buffer.usage, BufferUsage::VERTEX) + .map_pass_err(scope)?; + let &(ref buf_raw, _) = buffer + .raw + .as_ref() + .ok_or(RenderCommandError::DestroyedBuffer(buffer_id)) + .map_pass_err(scope)?; + + let empty_slots = + (1 + slot as usize).saturating_sub(state.vertex.inputs.len()); + state + .vertex + .inputs + .extend(iter::repeat(VertexBufferState::EMPTY).take(empty_slots)); + let vertex_state = &mut state.vertex.inputs[slot as usize]; + vertex_state.total_size = match size { + Some(s) => s.get(), + None => buffer.size - offset, + }; + vertex_state.bound = true; + + let range = hal::buffer::SubRange { + offset, + size: size.map(|s| s.get()), + }; + unsafe { + raw.bind_vertex_buffers(slot, iter::once((buf_raw, range))); + } + state.vertex.update_limits(); + } + RenderCommand::SetBlendColor(ref color) => { + state.blend_color = OptionalState::Set; + unsafe { + raw.set_blend_constants(conv::map_color_f32(color)); + } + } + RenderCommand::SetStencilReference(value) => { + 
state.stencil_reference = value; + if state + .pipeline_flags + .contains(PipelineFlags::STENCIL_REFERENCE) + { + unsafe { + raw.set_stencil_reference(hal::pso::Face::all(), value); + } + } + } + RenderCommand::SetViewport { + ref rect, + depth_min, + depth_max, + } => { + let scope = PassErrorScope::SetViewport; + use std::{convert::TryFrom, i16}; + if rect.w <= 0.0 + || rect.h <= 0.0 + || depth_min < 0.0 + || depth_min > 1.0 + || depth_max < 0.0 + || depth_max > 1.0 + { + return Err(RenderCommandError::InvalidViewport).map_pass_err(scope); + } + let r = hal::pso::Rect { + x: i16::try_from(rect.x.round() as i64).unwrap_or(0), + y: i16::try_from(rect.y.round() as i64).unwrap_or(0), + w: i16::try_from(rect.w.round() as i64).unwrap_or(i16::MAX), + h: i16::try_from(rect.h.round() as i64).unwrap_or(i16::MAX), + }; + unsafe { + raw.set_viewports( + 0, + iter::once(hal::pso::Viewport { + rect: r, + depth: depth_min..depth_max, + }), + ); + } + } + RenderCommand::SetPushConstant { + stages, + offset, + size_bytes, + values_offset, + } => { + let scope = PassErrorScope::SetPushConstant; + let values_offset = values_offset + .ok_or(RenderPassErrorInner::InvalidValuesOffset) + .map_pass_err(scope)?; + + let end_offset_bytes = offset + size_bytes; + let values_end_offset = + (values_offset + size_bytes / wgt::PUSH_CONSTANT_ALIGNMENT) as usize; + let data_slice = + &base.push_constant_data[(values_offset as usize)..values_end_offset]; + + let pipeline_layout_id = state + .binder + .pipeline_layout_id + .ok_or(DrawError::MissingPipeline) + .map_pass_err(scope)?; + let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id]; + + pipeline_layout + .validate_push_constant_ranges(stages, offset, end_offset_bytes) + .map_err(RenderCommandError::from) + .map_pass_err(scope)?; + + unsafe { + raw.push_graphics_constants( + &pipeline_layout.raw, + conv::map_shader_stage_flags(stages), + offset, + data_slice, + ) + } + } + RenderCommand::SetScissor(ref rect) => { + let scope = 
PassErrorScope::SetScissorRect; + use std::{convert::TryFrom, i16}; + if rect.w == 0 + || rect.h == 0 + || rect.x + rect.w > info.extent.width + || rect.y + rect.h > info.extent.height + { + return Err(RenderCommandError::InvalidScissorRect).map_pass_err(scope); + } + let r = hal::pso::Rect { + x: i16::try_from(rect.x).unwrap_or(0), + y: i16::try_from(rect.y).unwrap_or(0), + w: i16::try_from(rect.w).unwrap_or(i16::MAX), + h: i16::try_from(rect.h).unwrap_or(i16::MAX), + }; + unsafe { + raw.set_scissors(0, iter::once(r)); + } + } + RenderCommand::Draw { + vertex_count, + instance_count, + first_vertex, + first_instance, + } => { + let scope = PassErrorScope::Draw; + state.is_ready().map_pass_err(scope)?; + let last_vertex = first_vertex + vertex_count; + let vertex_limit = state.vertex.vertex_limit; + if last_vertex > vertex_limit { + return Err(DrawError::VertexBeyondLimit { + last_vertex, + vertex_limit, + slot: state.vertex.vertex_limit_slot, + }) + .map_pass_err(scope); + } + let last_instance = first_instance + instance_count; + let instance_limit = state.vertex.instance_limit; + if last_instance > instance_limit { + return Err(DrawError::InstanceBeyondLimit { + last_instance, + instance_limit, + slot: state.vertex.instance_limit_slot, + }) + .map_pass_err(scope); + } + + unsafe { + raw.draw( + first_vertex..first_vertex + vertex_count, + first_instance..first_instance + instance_count, + ); + } + } + RenderCommand::DrawIndexed { + index_count, + instance_count, + first_index, + base_vertex, + first_instance, + } => { + let scope = PassErrorScope::DrawIndexed; + state.is_ready().map_pass_err(scope)?; + + //TODO: validate that base_vertex + max_index() is within the provided range + let last_index = first_index + index_count; + let index_limit = state.index.limit; + if last_index > index_limit { + return Err(DrawError::IndexBeyondLimit { + last_index, + index_limit, + }) + .map_pass_err(scope); + } + let last_instance = first_instance + instance_count; + let 
instance_limit = state.vertex.instance_limit; + if last_instance > instance_limit { + return Err(DrawError::InstanceBeyondLimit { + last_instance, + instance_limit, + slot: state.vertex.instance_limit_slot, + }) + .map_pass_err(scope); + } + + unsafe { + raw.draw_indexed( + first_index..first_index + index_count, + base_vertex, + first_instance..first_instance + instance_count, + ); + } + } + RenderCommand::MultiDrawIndirect { + buffer_id, + offset, + count, + indexed, + } => { + let scope = if indexed { + PassErrorScope::DrawIndexedIndirect + } else { + PassErrorScope::DrawIndirect + }; + state.is_ready().map_pass_err(scope)?; + + let stride = match indexed { + false => 16, + true => 20, + }; + + if count.is_some() { + check_device_features( + device.features, + wgt::Features::MULTI_DRAW_INDIRECT, + ) + .map_pass_err(scope)?; + } + + let indirect_buffer = info + .trackers + .buffers + .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT) + .map_err(|e| RenderCommandError::Buffer(buffer_id, e)) + .map_pass_err(scope)?; + check_buffer_usage(indirect_buffer.usage, BufferUsage::INDIRECT) + .map_pass_err(scope)?; + let &(ref indirect_raw, _) = indirect_buffer + .raw + .as_ref() + .ok_or(RenderCommandError::DestroyedBuffer(buffer_id)) + .map_pass_err(scope)?; + + let actual_count = count.map_or(1, |c| c.get()); + + let begin_offset = offset; + let end_offset = offset + stride * actual_count as u64; + if end_offset > indirect_buffer.size { + return Err(RenderPassErrorInner::IndirectBufferOverrun { + offset, + count, + begin_offset, + end_offset, + buffer_size: indirect_buffer.size, + }) + .map_pass_err(scope); + } + + match indexed { + false => unsafe { + raw.draw_indirect( + indirect_raw, + offset, + actual_count, + stride as u32, + ); + }, + true => unsafe { + raw.draw_indexed_indirect( + indirect_raw, + offset, + actual_count, + stride as u32, + ); + }, + } + } + RenderCommand::MultiDrawIndirectCount { + buffer_id, + offset, + count_buffer_id, + 
count_buffer_offset, + max_count, + indexed, + } => { + let scope = if indexed { + PassErrorScope::DrawIndexedIndirect + } else { + PassErrorScope::DrawIndirect + }; + state.is_ready().map_pass_err(scope)?; + + let stride = match indexed { + false => 16, + true => 20, + }; + + check_device_features( + device.features, + wgt::Features::MULTI_DRAW_INDIRECT_COUNT, + ) + .map_pass_err(scope)?; + + let indirect_buffer = info + .trackers + .buffers + .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT) + .map_err(|e| RenderCommandError::Buffer(buffer_id, e)) + .map_pass_err(scope)?; + check_buffer_usage(indirect_buffer.usage, BufferUsage::INDIRECT) + .map_pass_err(scope)?; + let &(ref indirect_raw, _) = indirect_buffer + .raw + .as_ref() + .ok_or(RenderCommandError::DestroyedBuffer(buffer_id)) + .map_pass_err(scope)?; + + let count_buffer = info + .trackers + .buffers + .use_extend(&*buffer_guard, count_buffer_id, (), BufferUse::INDIRECT) + .map_err(|e| RenderCommandError::Buffer(count_buffer_id, e)) + .map_pass_err(scope)?; + check_buffer_usage(count_buffer.usage, BufferUsage::INDIRECT) + .map_pass_err(scope)?; + let &(ref count_raw, _) = count_buffer + .raw + .as_ref() + .ok_or(RenderCommandError::DestroyedBuffer(count_buffer_id)) + .map_pass_err(scope)?; + + let begin_offset = offset; + let end_offset = offset + stride * max_count as u64; + if end_offset > indirect_buffer.size { + return Err(RenderPassErrorInner::IndirectBufferOverrun { + offset, + count: None, + begin_offset, + end_offset, + buffer_size: indirect_buffer.size, + }) + .map_pass_err(scope); + } + + let begin_count_offset = count_buffer_offset; + let end_count_offset = count_buffer_offset + 4; + if end_count_offset > count_buffer.size { + return Err(RenderPassErrorInner::IndirectCountBufferOverrun { + begin_count_offset, + end_count_offset, + count_buffer_size: count_buffer.size, + }) + .map_pass_err(scope); + } + + match indexed { + false => unsafe { + raw.draw_indirect_count( + indirect_raw, 
+ offset, + count_raw, + count_buffer_offset, + max_count, + stride as u32, + ); + }, + true => unsafe { + raw.draw_indexed_indirect_count( + indirect_raw, + offset, + count_raw, + count_buffer_offset, + max_count, + stride as u32, + ); + }, + } + } + RenderCommand::PushDebugGroup { color, len } => { + state.debug_scope_depth += 1; + let label = str::from_utf8(&base.string_data[..len]).unwrap(); + unsafe { + raw.begin_debug_marker(label, color); + } + base.string_data = &base.string_data[len..]; + } + RenderCommand::PopDebugGroup => { + let scope = PassErrorScope::PopDebugGroup; + if state.debug_scope_depth == 0 { + return Err(RenderPassErrorInner::InvalidPopDebugGroup) + .map_pass_err(scope); + } + state.debug_scope_depth -= 1; + unsafe { + raw.end_debug_marker(); + } + } + RenderCommand::InsertDebugMarker { color, len } => { + let label = str::from_utf8(&base.string_data[..len]).unwrap(); + unsafe { + raw.insert_debug_marker(label, color); + } + base.string_data = &base.string_data[len..]; + } + RenderCommand::ExecuteBundle(bundle_id) => { + let scope = PassErrorScope::ExecuteBundle; + let bundle = info + .trackers + .bundles + .use_extend(&*bundle_guard, bundle_id, (), ()) + .unwrap(); + + info.context + .check_compatible(&bundle.context) + .map_err(RenderPassErrorInner::IncompatibleRenderBundle) + .map_pass_err(scope)?; + + unsafe { + bundle.execute( + &mut raw, + &*pipeline_layout_guard, + &*bind_group_guard, + &*pipeline_guard, + &*buffer_guard, + ) + } + .map_err(|e| match e { + ExecutionError::DestroyedBuffer(id) => { + RenderCommandError::DestroyedBuffer(id) + } + }) + .map_pass_err(scope)?; + + info.trackers + .merge_extend(&bundle.used) + .map_pass_err(scope)?; + state.reset_bundle(); + } + } + } + + tracing::trace!("Merging {:?} with the render pass", encoder_id); + unsafe { + raw.end_render_pass(); + } + + let (trackers, used_swapchain_with_framebuffer) = + info.finish(&*texture_guard).map_pass_err(scope)?; + (raw, trackers, 
used_swapchain_with_framebuffer) + }; + + let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token); + let (buffer_guard, mut token) = hub.buffers.read(&mut token); + let (texture_guard, _) = hub.textures.read(&mut token); let cmd_buf = - CommandBuffer::get_encoder(&mut *cmb_guard, encoder_id).map_pass_err(scope)?; - let device = &device_guard[cmd_buf.device_id.value]; - let mut raw = device.cmd_allocator.extend(cmd_buf); + CommandBuffer::get_encoder_mut(&mut *cmb_guard, encoder_id).map_pass_err(scope)?; + cmd_buf.has_labels |= base.label.is_some(); + cmd_buf.used_swap_chain = used_swapchain_with_framebuffer; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf.commands { @@ -536,1235 +1836,6 @@ impl Global { }); } - unsafe { - if let Some(ref label) = base.label { - cmd_buf.has_labels = true; - device.raw.set_command_buffer_name(&mut raw, label); - } - raw.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT); - } - - let (bundle_guard, mut token) = hub.render_bundles.read(&mut token); - let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token); - let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token); - let (pipeline_guard, mut token) = hub.render_pipelines.read(&mut token); - let (buffer_guard, mut token) = hub.buffers.read(&mut token); - let (texture_guard, mut token) = hub.textures.read(&mut token); - let (view_guard, _) = hub.texture_views.read(&mut token); - - // We default to false intentionally, even if depth-stencil isn't used at all. - // This allows us to use the primary raw pipeline in `RenderPipeline`, - // instead of the special read-only one, which would be `None`. 
- let mut is_ds_read_only = false; - - struct RenderAttachment<'a> { - texture_id: &'a Stored, - selector: &'a TextureSelector, - previous_use: Option, - new_use: TextureUse, - } - let mut render_attachments = AttachmentDataVec::::new(); - - let mut attachment_width = None; - let mut attachment_height = None; - let mut valid_attachment = true; - - let context = { - use hal::device::Device as _; - - let sample_count_limit = device.hal_limits.framebuffer_color_sample_counts; - let base_trackers = &cmd_buf.trackers; - - let mut extent = None; - let mut sample_count = 0; - let mut depth_stencil_aspects = hal::format::Aspects::empty(); - let mut used_swap_chain = None::>; - - let mut add_view = |view: &TextureView| { - if let Some(ex) = extent { - if ex != view.extent { - return Err(RenderPassErrorInner::ExtentStateMismatch { - state_extent: ex, - view_extent: view.extent, - }); - } - } else { - extent = Some(view.extent); - } - if sample_count == 0 { - sample_count = view.samples; - } else if sample_count != view.samples { - return Err(RenderPassErrorInner::SampleCountMismatch { - actual: view.samples, - expected: sample_count, - }); - } - Ok(()) - }; - - tracing::trace!( - "Encoding render pass begin in command buffer {:?}", - encoder_id - ); - let rp_key = { - let depth_stencil = match depth_stencil_attachment { - Some(at) => { - let view = trackers - .views - .use_extend(&*view_guard, at.attachment, (), ()) - .map_err(|_| RenderPassErrorInner::InvalidAttachment(at.attachment)) - .map_pass_err(scope)?; - add_view(view).map_pass_err(scope)?; - depth_stencil_aspects = view.aspects; - - let source_id = match view.inner { - TextureViewInner::Native { ref source_id, .. } => source_id, - TextureViewInner::SwapChain { .. } => { - return Err(RenderPassErrorInner::SwapChainImageAsDepthStencil) - .map_pass_err(scope) - } - }; - - // Using render pass for transition. 
- let previous_use = base_trackers - .textures - .query(source_id.value, view.selector.clone()); - let new_use = if at.is_read_only(view.aspects).map_pass_err(scope)? { - is_ds_read_only = true; - TextureUse::ATTACHMENT_READ - } else { - TextureUse::ATTACHMENT_WRITE - }; - render_attachments.push(RenderAttachment { - texture_id: source_id, - selector: &view.selector, - previous_use, - new_use, - }); - - let new_layout = conv::map_texture_state(new_use, view.aspects).1; - let old_layout = match previous_use { - Some(usage) => conv::map_texture_state(usage, view.aspects).1, - None => new_layout, - }; - - let ds_at = hal::pass::Attachment { - format: Some(conv::map_texture_format( - view.format, - device.private_features, - )), - samples: view.samples, - ops: conv::map_load_store_ops(&at.depth), - stencil_ops: conv::map_load_store_ops(&at.stencil), - layouts: old_layout..new_layout, - }; - Some((ds_at, new_layout)) - } - None => None, - }; - - let mut colors = ArrayVec::new(); - let mut resolves = ArrayVec::new(); - - for at in color_attachments { - let view = trackers - .views - .use_extend(&*view_guard, at.attachment, (), ()) - .map_err(|_| RenderPassErrorInner::InvalidAttachment(at.attachment)) - .map_pass_err(scope)?; - add_view(view).map_pass_err(scope)?; - - valid_attachment &= *attachment_width.get_or_insert(view.extent.width) - == view.extent.width - && *attachment_height.get_or_insert(view.extent.height) - == view.extent.height; - - let layouts = match view.inner { - TextureViewInner::Native { ref source_id, .. 
} => { - let previous_use = base_trackers - .textures - .query(source_id.value, view.selector.clone()); - let new_use = TextureUse::ATTACHMENT_WRITE; - render_attachments.push(RenderAttachment { - texture_id: source_id, - selector: &view.selector, - previous_use, - new_use, - }); - - let new_layout = - conv::map_texture_state(new_use, hal::format::Aspects::COLOR).1; - let old_layout = match previous_use { - Some(usage) => { - conv::map_texture_state(usage, hal::format::Aspects::COLOR).1 - } - None => new_layout, - }; - old_layout..new_layout - } - TextureViewInner::SwapChain { ref source_id, .. } => { - if let Some((ref sc_id, _)) = cmd_buf.used_swap_chain { - if source_id.value != sc_id.value { - return Err(RenderPassErrorInner::SwapChainMismatch) - .map_pass_err(scope); - } - } else { - assert!(used_swap_chain.is_none()); - used_swap_chain = Some(source_id.clone()); - } - - let end = hal::image::Layout::Present; - let start = match at.channel.load_op { - LoadOp::Clear => hal::image::Layout::Undefined, - LoadOp::Load => end, - }; - start..end - } - }; - - let color_at = hal::pass::Attachment { - format: Some(conv::map_texture_format( - view.format, - device.private_features, - )), - samples: view.samples, - ops: conv::map_load_store_ops(&at.channel), - stencil_ops: hal::pass::AttachmentOps::DONT_CARE, - layouts, - }; - colors.push((color_at, hal::image::Layout::ColorAttachmentOptimal)); - } - - if !valid_attachment { - return Err(RenderPassErrorInner::MismatchAttachments).map_pass_err(scope); - } - - for resolve_target in color_attachments.iter().flat_map(|at| at.resolve_target) { - let view = trackers - .views - .use_extend(&*view_guard, resolve_target, (), ()) - .map_err(|_| RenderPassErrorInner::InvalidAttachment(resolve_target)) - .map_pass_err(scope)?; - if extent != Some(view.extent) { - return Err(RenderPassErrorInner::ExtentStateMismatch { - state_extent: extent.unwrap_or_default(), - view_extent: view.extent, - }) - .map_pass_err(scope); - } - if 
view.samples != 1 { - return Err(RenderPassErrorInner::InvalidResolveTargetSampleCount) - .map_pass_err(scope); - } - if sample_count == 1 { - return Err(RenderPassErrorInner::InvalidResolveSourceSampleCount) - .map_pass_err(scope); - } - - let layouts = match view.inner { - TextureViewInner::Native { ref source_id, .. } => { - let previous_use = base_trackers - .textures - .query(source_id.value, view.selector.clone()); - let new_use = TextureUse::ATTACHMENT_WRITE; - render_attachments.push(RenderAttachment { - texture_id: source_id, - selector: &view.selector, - previous_use, - new_use, - }); - - let new_layout = - conv::map_texture_state(new_use, hal::format::Aspects::COLOR).1; - let old_layout = match previous_use { - Some(usage) => { - conv::map_texture_state(usage, hal::format::Aspects::COLOR).1 - } - None => new_layout, - }; - old_layout..new_layout - } - TextureViewInner::SwapChain { ref source_id, .. } => { - if let Some((ref sc_id, _)) = cmd_buf.used_swap_chain { - if source_id.value != sc_id.value { - return Err(RenderPassErrorInner::SwapChainMismatch) - .map_pass_err(scope); - } - } else { - assert!(used_swap_chain.is_none()); - used_swap_chain = Some(source_id.clone()); - } - hal::image::Layout::Undefined..hal::image::Layout::Present - } - }; - - let resolve_at = hal::pass::Attachment { - format: Some(conv::map_texture_format( - view.format, - device.private_features, - )), - samples: view.samples, - ops: hal::pass::AttachmentOps::new( - hal::pass::AttachmentLoadOp::DontCare, - hal::pass::AttachmentStoreOp::Store, - ), - stencil_ops: hal::pass::AttachmentOps::DONT_CARE, - layouts, - }; - resolves.push((resolve_at, hal::image::Layout::ColorAttachmentOptimal)); - } - - RenderPassKey { - colors, - resolves, - depth_stencil, - } - }; - - if sample_count & sample_count_limit == 0 { - return Err(RenderPassErrorInner::InvalidSampleCount(sample_count)) - .map_pass_err(scope); - } - - let mut render_pass_cache = device.render_passes.lock(); - let render_pass = 
match render_pass_cache.entry(rp_key.clone()) { - Entry::Occupied(e) => e.into_mut(), - Entry::Vacant(entry) => { - let color_ids: [hal::pass::AttachmentRef; MAX_COLOR_TARGETS] = [ - (0, hal::image::Layout::ColorAttachmentOptimal), - (1, hal::image::Layout::ColorAttachmentOptimal), - (2, hal::image::Layout::ColorAttachmentOptimal), - (3, hal::image::Layout::ColorAttachmentOptimal), - ]; - - let mut resolve_ids = ArrayVec::<[_; MAX_COLOR_TARGETS]>::new(); - let mut attachment_index = color_attachments.len(); - if color_attachments - .iter() - .any(|at| at.resolve_target.is_some()) - { - for ((i, at), &(_, layout)) in color_attachments - .iter() - .enumerate() - .zip(entry.key().resolves.iter()) - { - let real_attachment_index = match at.resolve_target { - Some(_) => attachment_index + i, - None => hal::pass::ATTACHMENT_UNUSED, - }; - resolve_ids.push((real_attachment_index, layout)); - } - attachment_index += color_attachments.len(); - } - - let depth_id = depth_stencil_attachment.map(|_| { - let usage = if is_ds_read_only { - TextureUse::ATTACHMENT_READ - } else { - TextureUse::ATTACHMENT_WRITE - }; - ( - attachment_index, - conv::map_texture_state(usage, depth_stencil_aspects).1, - ) - }); - - let subpass = hal::pass::SubpassDesc { - colors: &color_ids[..color_attachments.len()], - resolves: &resolve_ids, - depth_stencil: depth_id.as_ref(), - inputs: &[], - preserves: &[], - }; - let all = entry - .key() - .all() - .map(|(at, _)| at) - .collect::>(); - - let pass = - unsafe { device.raw.create_render_pass(all, iter::once(subpass), &[]) } - .unwrap(); - entry.insert(pass) - } - }; - - let mut framebuffer_cache; - let fb_key = FramebufferKey { - colors: color_attachments - .iter() - .map(|at| id::Valid(at.attachment)) - .collect(), - resolves: color_attachments - .iter() - .filter_map(|at| at.resolve_target) - .map(id::Valid) - .collect(), - depth_stencil: depth_stencil_attachment.map(|at| id::Valid(at.attachment)), - }; - let context = RenderPassContext { - 
attachments: AttachmentData { - colors: fb_key - .colors - .iter() - .map(|&at| view_guard[at].format) - .collect(), - resolves: fb_key - .resolves - .iter() - .map(|&at| view_guard[at].format) - .collect(), - depth_stencil: fb_key.depth_stencil.map(|at| view_guard[at].format), - }, - sample_count, - }; - - let framebuffer = match used_swap_chain.take() { - Some(sc_id) => { - assert!(cmd_buf.used_swap_chain.is_none()); - // Always create a new framebuffer and delete it after presentation. - let attachments = fb_key - .all() - .map(|&id| match view_guard[id].inner { - TextureViewInner::Native { ref raw, .. } => raw, - TextureViewInner::SwapChain { ref image, .. } => Borrow::borrow(image), - }) - .collect::>(); - let framebuffer = unsafe { - device - .raw - .create_framebuffer(&render_pass, attachments, extent.unwrap()) - .or(Err(RenderPassErrorInner::OutOfMemory)) - .map_pass_err(scope)? - }; - cmd_buf.used_swap_chain = Some((sc_id, framebuffer)); - &mut cmd_buf.used_swap_chain.as_mut().unwrap().1 - } - None => { - // Cache framebuffers by the device. - framebuffer_cache = device.framebuffers.lock(); - match framebuffer_cache.entry(fb_key) { - Entry::Occupied(e) => e.into_mut(), - Entry::Vacant(e) => { - let fb = { - let attachments = e - .key() - .all() - .map(|&id| match view_guard[id].inner { - TextureViewInner::Native { ref raw, .. } => raw, - TextureViewInner::SwapChain { ref image, .. } => { - Borrow::borrow(image) - } - }) - .collect::>(); - unsafe { - device - .raw - .create_framebuffer( - &render_pass, - attachments, - extent.unwrap(), - ) - .or(Err(RenderPassErrorInner::OutOfMemory)) - .map_pass_err(scope)? 
- } - }; - e.insert(fb) - } - } - } - }; - - let rect = { - let ex = extent.unwrap(); - hal::pso::Rect { - x: 0, - y: 0, - w: ex.width as _, - h: ex.height as _, - } - }; - - let clear_values = color_attachments - .iter() - .zip(&rp_key.colors) - .flat_map(|(at, (rat, _layout))| { - match at.channel.load_op { - LoadOp::Load => None, - LoadOp::Clear => { - use hal::format::ChannelType; - //TODO: validate sign/unsign and normalized ranges of the color values - let value = match rat.format.unwrap().base_format().1 { - ChannelType::Unorm - | ChannelType::Snorm - | ChannelType::Ufloat - | ChannelType::Sfloat - | ChannelType::Uscaled - | ChannelType::Sscaled - | ChannelType::Srgb => hal::command::ClearColor { - float32: conv::map_color_f32(&at.channel.clear_value), - }, - ChannelType::Sint => hal::command::ClearColor { - sint32: conv::map_color_i32(&at.channel.clear_value), - }, - ChannelType::Uint => hal::command::ClearColor { - uint32: conv::map_color_u32(&at.channel.clear_value), - }, - }; - Some(hal::command::ClearValue { color: value }) - } - } - }) - .chain(depth_stencil_attachment.and_then(|at| { - match (at.depth.load_op, at.stencil.load_op) { - (LoadOp::Load, LoadOp::Load) => None, - (LoadOp::Clear, _) | (_, LoadOp::Clear) => { - let value = hal::command::ClearDepthStencil { - depth: at.depth.clear_value, - stencil: at.stencil.clear_value, - }; - Some(hal::command::ClearValue { - depth_stencil: value, - }) - } - } - })) - .collect::>(); - - unsafe { - raw.begin_render_pass( - render_pass, - framebuffer, - rect, - clear_values, - hal::command::SubpassContents::Inline, - ); - raw.set_scissors(0, iter::once(&rect)); - raw.set_viewports( - 0, - iter::once(hal::pso::Viewport { - rect, - depth: 0.0..1.0, - }), - ); - } - - context - }; - - let mut state = State { - pipeline_flags: PipelineFlags::empty(), - binder: Binder::new(cmd_buf.limits.max_bind_groups), - blend_color: OptionalState::Unused, - stencil_reference: 0, - pipeline: StateChange::new(), - index: 
IndexState::default(), - vertex: VertexState::default(), - debug_scope_depth: 0, - }; - let mut temp_offsets = Vec::new(); - - for command in base.commands { - match *command { - RenderCommand::SetBindGroup { - index, - num_dynamic_offsets, - bind_group_id, - } => { - let scope = PassErrorScope::SetBindGroup(bind_group_id); - let max_bind_groups = device.limits.max_bind_groups; - if (index as u32) >= max_bind_groups { - return Err(RenderCommandError::BindGroupIndexOutOfRange { - index, - max: max_bind_groups, - }) - .map_pass_err(scope); - } - - temp_offsets.clear(); - temp_offsets - .extend_from_slice(&base.dynamic_offsets[..num_dynamic_offsets as usize]); - base.dynamic_offsets = &base.dynamic_offsets[num_dynamic_offsets as usize..]; - - let bind_group = trackers - .bind_groups - .use_extend(&*bind_group_guard, bind_group_id, (), ()) - .unwrap(); - bind_group - .validate_dynamic_bindings(&temp_offsets) - .map_pass_err(scope)?; - - trackers - .merge_extend(&bind_group.used) - .map_pass_err(scope)?; - - if let Some((pipeline_layout_id, follow_ups)) = state.binder.provide_entry( - index as usize, - id::Valid(bind_group_id), - bind_group, - &temp_offsets, - ) { - let bind_groups = iter::once(bind_group.raw.raw()) - .chain( - follow_ups - .clone() - .map(|(bg_id, _)| bind_group_guard[bg_id].raw.raw()), - ) - .collect::>(); - temp_offsets.extend(follow_ups.flat_map(|(_, offsets)| offsets)); - unsafe { - raw.bind_graphics_descriptor_sets( - &pipeline_layout_guard[pipeline_layout_id].raw, - index as usize, - bind_groups, - &temp_offsets, - ); - } - }; - } - RenderCommand::SetPipeline(pipeline_id) => { - let scope = PassErrorScope::SetPipelineRender(pipeline_id); - if state.pipeline.set_and_check_redundant(pipeline_id) { - continue; - } - - let pipeline = trackers - .render_pipes - .use_extend(&*pipeline_guard, pipeline_id, (), ()) - .map_err(|_| RenderCommandError::InvalidPipeline(pipeline_id)) - .map_pass_err(scope)?; - - context - 
.check_compatible(&pipeline.pass_context) - .map_err(RenderCommandError::IncompatiblePipeline) - .map_pass_err(scope)?; - - state.pipeline_flags = pipeline.flags; - - if pipeline.flags.contains(PipelineFlags::WRITES_DEPTH_STENCIL) - && is_ds_read_only - { - return Err(RenderCommandError::IncompatibleReadOnlyDepthStencil) - .map_pass_err(scope); - } - - state - .blend_color - .require(pipeline.flags.contains(PipelineFlags::BLEND_COLOR)); - - unsafe { - raw.bind_graphics_pipeline(&pipeline.raw); - } - - if pipeline.flags.contains(PipelineFlags::STENCIL_REFERENCE) { - unsafe { - raw.set_stencil_reference( - hal::pso::Face::all(), - state.stencil_reference, - ); - } - } - - // Rebind resource - if state.binder.pipeline_layout_id != Some(pipeline.layout_id.value) { - let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id.value]; - - state.binder.change_pipeline_layout( - &*pipeline_layout_guard, - pipeline.layout_id.value, - ); - - let mut is_compatible = true; - - for (index, (entry, &bgl_id)) in state - .binder - .entries - .iter_mut() - .zip(&pipeline_layout.bind_group_layout_ids) - .enumerate() - { - match entry.expect_layout(bgl_id) { - LayoutChange::Match(bg_id, offsets) if is_compatible => { - let desc_set = bind_group_guard[bg_id].raw.raw(); - unsafe { - raw.bind_graphics_descriptor_sets( - &pipeline_layout.raw, - index, - iter::once(desc_set), - offsets.iter().cloned(), - ); - } - } - LayoutChange::Match(..) 
| LayoutChange::Unchanged => {} - LayoutChange::Mismatch => { - is_compatible = false; - } - } - } - - // Clear push constant ranges - let non_overlapping = super::bind::compute_nonoverlapping_ranges( - &pipeline_layout.push_constant_ranges, - ); - for range in non_overlapping { - let offset = range.range.start; - let size_bytes = range.range.end - offset; - super::push_constant_clear( - offset, - size_bytes, - |clear_offset, clear_data| unsafe { - raw.push_graphics_constants( - &pipeline_layout.raw, - conv::map_shader_stage_flags(range.stages), - clear_offset, - clear_data, - ); - }, - ); - } - } - - state.index.pipeline_format = pipeline.index_format; - - let vertex_strides_len = pipeline.vertex_strides.len(); - state.vertex.buffers_required = vertex_strides_len as u32; - - while state.vertex.inputs.len() < vertex_strides_len { - state.vertex.inputs.push(VertexBufferState::EMPTY); - } - - // Update vertex buffer limits - for (vbs, &(stride, rate)) in - state.vertex.inputs.iter_mut().zip(&pipeline.vertex_strides) - { - vbs.stride = stride; - vbs.rate = rate; - } - for vbs in state.vertex.inputs.iter_mut().skip(vertex_strides_len) { - vbs.stride = 0; - vbs.rate = InputStepMode::Vertex; - } - state.vertex.update_limits(); - } - RenderCommand::SetIndexBuffer { - buffer_id, - index_format, - offset, - size, - } => { - let scope = PassErrorScope::SetIndexBuffer(buffer_id); - let buffer = trackers - .buffers - .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDEX) - .map_err(|e| RenderCommandError::Buffer(buffer_id, e)) - .map_pass_err(scope)?; - check_buffer_usage(buffer.usage, BufferUsage::INDEX).map_pass_err(scope)?; - let &(ref buf_raw, _) = buffer - .raw - .as_ref() - .ok_or(RenderCommandError::DestroyedBuffer(buffer_id)) - .map_pass_err(scope)?; - - let end = match size { - Some(s) => offset + s.get(), - None => buffer.size, - }; - state.index.bound_buffer_view = Some((id::Valid(buffer_id), offset..end)); - - state.index.format = Some(index_format); - 
state.index.update_limit(); - - let range = hal::buffer::SubRange { - offset, - size: Some(end - offset), - }; - let index_type = conv::map_index_format(index_format); - unsafe { - raw.bind_index_buffer(buf_raw, range, index_type); - } - } - RenderCommand::SetVertexBuffer { - slot, - buffer_id, - offset, - size, - } => { - let scope = PassErrorScope::SetVertexBuffer(buffer_id); - let buffer = trackers - .buffers - .use_extend(&*buffer_guard, buffer_id, (), BufferUse::VERTEX) - .map_err(|e| RenderCommandError::Buffer(buffer_id, e)) - .map_pass_err(scope)?; - check_buffer_usage(buffer.usage, BufferUsage::VERTEX).map_pass_err(scope)?; - let &(ref buf_raw, _) = buffer - .raw - .as_ref() - .ok_or(RenderCommandError::DestroyedBuffer(buffer_id)) - .map_pass_err(scope)?; - - let empty_slots = (1 + slot as usize).saturating_sub(state.vertex.inputs.len()); - state - .vertex - .inputs - .extend(iter::repeat(VertexBufferState::EMPTY).take(empty_slots)); - let vertex_state = &mut state.vertex.inputs[slot as usize]; - vertex_state.total_size = match size { - Some(s) => s.get(), - None => buffer.size - offset, - }; - vertex_state.bound = true; - - let range = hal::buffer::SubRange { - offset, - size: size.map(|s| s.get()), - }; - unsafe { - raw.bind_vertex_buffers(slot, iter::once((buf_raw, range))); - } - state.vertex.update_limits(); - } - RenderCommand::SetBlendColor(ref color) => { - state.blend_color = OptionalState::Set; - unsafe { - raw.set_blend_constants(conv::map_color_f32(color)); - } - } - RenderCommand::SetStencilReference(value) => { - state.stencil_reference = value; - if state - .pipeline_flags - .contains(PipelineFlags::STENCIL_REFERENCE) - { - unsafe { - raw.set_stencil_reference(hal::pso::Face::all(), value); - } - } - } - RenderCommand::SetViewport { - ref rect, - depth_min, - depth_max, - } => { - let scope = PassErrorScope::SetViewport; - use std::{convert::TryFrom, i16}; - if rect.w <= 0.0 - || rect.h <= 0.0 - || depth_min < 0.0 - || depth_min > 1.0 - || 
depth_max < 0.0 - || depth_max > 1.0 - { - return Err(RenderCommandError::InvalidViewport).map_pass_err(scope); - } - let r = hal::pso::Rect { - x: i16::try_from(rect.x.round() as i64).unwrap_or(0), - y: i16::try_from(rect.y.round() as i64).unwrap_or(0), - w: i16::try_from(rect.w.round() as i64).unwrap_or(i16::MAX), - h: i16::try_from(rect.h.round() as i64).unwrap_or(i16::MAX), - }; - unsafe { - raw.set_viewports( - 0, - iter::once(hal::pso::Viewport { - rect: r, - depth: depth_min..depth_max, - }), - ); - } - } - RenderCommand::SetPushConstant { - stages, - offset, - size_bytes, - values_offset, - } => { - let scope = PassErrorScope::SetPushConstant; - let values_offset = values_offset - .ok_or(RenderPassErrorInner::InvalidValuesOffset) - .map_pass_err(scope)?; - - let end_offset_bytes = offset + size_bytes; - let values_end_offset = - (values_offset + size_bytes / wgt::PUSH_CONSTANT_ALIGNMENT) as usize; - let data_slice = - &base.push_constant_data[(values_offset as usize)..values_end_offset]; - - let pipeline_layout_id = state - .binder - .pipeline_layout_id - .ok_or(DrawError::MissingPipeline) - .map_pass_err(scope)?; - let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id]; - - pipeline_layout - .validate_push_constant_ranges(stages, offset, end_offset_bytes) - .map_err(RenderCommandError::from) - .map_pass_err(scope)?; - - unsafe { - raw.push_graphics_constants( - &pipeline_layout.raw, - conv::map_shader_stage_flags(stages), - offset, - data_slice, - ) - } - } - RenderCommand::SetScissor(ref rect) => { - let scope = PassErrorScope::SetScissorRect; - use std::{convert::TryFrom, i16}; - if rect.w == 0 - || rect.h == 0 - || rect.x + rect.w > attachment_width.unwrap() - || rect.y + rect.h > attachment_height.unwrap() - { - return Err(RenderCommandError::InvalidScissorRect).map_pass_err(scope); - } - let r = hal::pso::Rect { - x: i16::try_from(rect.x).unwrap_or(0), - y: i16::try_from(rect.y).unwrap_or(0), - w: i16::try_from(rect.w).unwrap_or(i16::MAX), - 
h: i16::try_from(rect.h).unwrap_or(i16::MAX), - }; - unsafe { - raw.set_scissors(0, iter::once(r)); - } - } - RenderCommand::Draw { - vertex_count, - instance_count, - first_vertex, - first_instance, - } => { - let scope = PassErrorScope::Draw; - state.is_ready().map_pass_err(scope)?; - let last_vertex = first_vertex + vertex_count; - let vertex_limit = state.vertex.vertex_limit; - if last_vertex > vertex_limit { - return Err(DrawError::VertexBeyondLimit { - last_vertex, - vertex_limit, - slot: state.vertex.vertex_limit_slot, - }) - .map_pass_err(scope); - } - let last_instance = first_instance + instance_count; - let instance_limit = state.vertex.instance_limit; - if last_instance > instance_limit { - return Err(DrawError::InstanceBeyondLimit { - last_instance, - instance_limit, - slot: state.vertex.instance_limit_slot, - }) - .map_pass_err(scope); - } - - unsafe { - raw.draw( - first_vertex..first_vertex + vertex_count, - first_instance..first_instance + instance_count, - ); - } - } - RenderCommand::DrawIndexed { - index_count, - instance_count, - first_index, - base_vertex, - first_instance, - } => { - let scope = PassErrorScope::DrawIndexed; - state.is_ready().map_pass_err(scope)?; - - //TODO: validate that base_vertex + max_index() is within the provided range - let last_index = first_index + index_count; - let index_limit = state.index.limit; - if last_index > index_limit { - return Err(DrawError::IndexBeyondLimit { - last_index, - index_limit, - }) - .map_pass_err(scope); - } - let last_instance = first_instance + instance_count; - let instance_limit = state.vertex.instance_limit; - if last_instance > instance_limit { - return Err(DrawError::InstanceBeyondLimit { - last_instance, - instance_limit, - slot: state.vertex.instance_limit_slot, - }) - .map_pass_err(scope); - } - - unsafe { - raw.draw_indexed( - first_index..first_index + index_count, - base_vertex, - first_instance..first_instance + instance_count, - ); - } - } - RenderCommand::MultiDrawIndirect { 
- buffer_id, - offset, - count, - indexed, - } => { - let scope = if indexed { - PassErrorScope::DrawIndexedIndirect - } else { - PassErrorScope::DrawIndirect - }; - state.is_ready().map_pass_err(scope)?; - - let stride = match indexed { - false => 16, - true => 20, - }; - - if count.is_some() { - check_device_features(device.features, wgt::Features::MULTI_DRAW_INDIRECT) - .map_pass_err(scope)?; - } - - let indirect_buffer = trackers - .buffers - .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT) - .map_err(|e| RenderCommandError::Buffer(buffer_id, e)) - .map_pass_err(scope)?; - check_buffer_usage(indirect_buffer.usage, BufferUsage::INDIRECT) - .map_pass_err(scope)?; - let &(ref indirect_raw, _) = indirect_buffer - .raw - .as_ref() - .ok_or(RenderCommandError::DestroyedBuffer(buffer_id)) - .map_pass_err(scope)?; - - let actual_count = count.map_or(1, |c| c.get()); - - let begin_offset = offset; - let end_offset = offset + stride * actual_count as u64; - if end_offset > indirect_buffer.size { - return Err(RenderPassErrorInner::IndirectBufferOverrun { - offset, - count, - begin_offset, - end_offset, - buffer_size: indirect_buffer.size, - }) - .map_pass_err(scope); - } - - match indexed { - false => unsafe { - raw.draw_indirect(indirect_raw, offset, actual_count, stride as u32); - }, - true => unsafe { - raw.draw_indexed_indirect( - indirect_raw, - offset, - actual_count, - stride as u32, - ); - }, - } - } - RenderCommand::MultiDrawIndirectCount { - buffer_id, - offset, - count_buffer_id, - count_buffer_offset, - max_count, - indexed, - } => { - let scope = if indexed { - PassErrorScope::DrawIndexedIndirect - } else { - PassErrorScope::DrawIndirect - }; - state.is_ready().map_pass_err(scope)?; - - let stride = match indexed { - false => 16, - true => 20, - }; - - check_device_features( - device.features, - wgt::Features::MULTI_DRAW_INDIRECT_COUNT, - ) - .map_pass_err(scope)?; - - let indirect_buffer = trackers - .buffers - .use_extend(&*buffer_guard, 
buffer_id, (), BufferUse::INDIRECT) - .map_err(|e| RenderCommandError::Buffer(buffer_id, e)) - .map_pass_err(scope)?; - check_buffer_usage(indirect_buffer.usage, BufferUsage::INDIRECT) - .map_pass_err(scope)?; - let &(ref indirect_raw, _) = indirect_buffer - .raw - .as_ref() - .ok_or(RenderCommandError::DestroyedBuffer(buffer_id)) - .map_pass_err(scope)?; - - let count_buffer = trackers - .buffers - .use_extend(&*buffer_guard, count_buffer_id, (), BufferUse::INDIRECT) - .map_err(|e| RenderCommandError::Buffer(count_buffer_id, e)) - .map_pass_err(scope)?; - check_buffer_usage(count_buffer.usage, BufferUsage::INDIRECT) - .map_pass_err(scope)?; - let &(ref count_raw, _) = count_buffer - .raw - .as_ref() - .ok_or(RenderCommandError::DestroyedBuffer(count_buffer_id)) - .map_pass_err(scope)?; - - let begin_offset = offset; - let end_offset = offset + stride * max_count as u64; - if end_offset > indirect_buffer.size { - return Err(RenderPassErrorInner::IndirectBufferOverrun { - offset, - count: None, - begin_offset, - end_offset, - buffer_size: indirect_buffer.size, - }) - .map_pass_err(scope); - } - - let begin_count_offset = count_buffer_offset; - let end_count_offset = count_buffer_offset + 4; - if end_count_offset > count_buffer.size { - return Err(RenderPassErrorInner::IndirectCountBufferOverrun { - begin_count_offset, - end_count_offset, - count_buffer_size: count_buffer.size, - }) - .map_pass_err(scope); - } - - match indexed { - false => unsafe { - raw.draw_indirect_count( - indirect_raw, - offset, - count_raw, - count_buffer_offset, - max_count, - stride as u32, - ); - }, - true => unsafe { - raw.draw_indexed_indirect_count( - indirect_raw, - offset, - count_raw, - count_buffer_offset, - max_count, - stride as u32, - ); - }, - } - } - RenderCommand::PushDebugGroup { color, len } => { - state.debug_scope_depth += 1; - let label = str::from_utf8(&base.string_data[..len]).unwrap(); - unsafe { - raw.begin_debug_marker(label, color); - } - base.string_data = 
&base.string_data[len..]; - } - RenderCommand::PopDebugGroup => { - let scope = PassErrorScope::PopDebugGroup; - if state.debug_scope_depth == 0 { - return Err(RenderPassErrorInner::InvalidPopDebugGroup).map_pass_err(scope); - } - state.debug_scope_depth -= 1; - unsafe { - raw.end_debug_marker(); - } - } - RenderCommand::InsertDebugMarker { color, len } => { - let label = str::from_utf8(&base.string_data[..len]).unwrap(); - unsafe { - raw.insert_debug_marker(label, color); - } - base.string_data = &base.string_data[len..]; - } - RenderCommand::ExecuteBundle(bundle_id) => { - let scope = PassErrorScope::ExecuteBundle; - let bundle = trackers - .bundles - .use_extend(&*bundle_guard, bundle_id, (), ()) - .unwrap(); - - context - .check_compatible(&bundle.context) - .map_err(RenderPassErrorInner::IncompatibleRenderBundle) - .map_pass_err(scope)?; - - unsafe { - bundle.execute( - &mut raw, - &*pipeline_layout_guard, - &*bind_group_guard, - &*pipeline_guard, - &*buffer_guard, - ) - } - .map_err(|e| match e { - ExecutionError::DestroyedBuffer(id) => { - RenderCommandError::DestroyedBuffer(id) - } - }) - .map_pass_err(scope)?; - - trackers.merge_extend(&bundle.used).map_pass_err(scope)?; - state.reset_bundle(); - } - } - } - - tracing::trace!("Merging {:?} with the render pass", encoder_id); - unsafe { - raw.end_render_pass(); - } - - for ra in render_attachments { - let texture = &texture_guard[ra.texture_id.value]; - check_texture_usage(texture.usage, TextureUsage::RENDER_ATTACHMENT) - .map_pass_err(scope)?; - - // the tracker set of the pass is always in "extend" mode - trackers - .textures - .change_extend( - ra.texture_id.value, - &ra.texture_id.ref_count, - ra.selector.clone(), - ra.new_use, - ) - .unwrap(); - - if let Some(usage) = ra.previous_use { - // Make the attachment tracks to be aware of the internal - // transition done by the render pass, by registering the - // previous usage as the initial state. 
- trackers - .textures - .prepend( - ra.texture_id.value, - &ra.texture_id.ref_count, - ra.selector.clone(), - usage, - ) - .unwrap(); - } - } - super::CommandBuffer::insert_barriers( cmd_buf.raw.last_mut().unwrap(), &mut cmd_buf.trackers, @@ -1775,7 +1846,7 @@ impl Global { unsafe { cmd_buf.raw.last_mut().unwrap().finish(); } - cmd_buf.raw.push(raw); + cmd_buf.raw.push(cmd_buf_raw); Ok(()) } diff --git a/wgpu-core/src/command/transfer.rs b/wgpu-core/src/command/transfer.rs index 18dd07d3c6..7232c302d2 100644 --- a/wgpu-core/src/command/transfer.rs +++ b/wgpu-core/src/command/transfer.rs @@ -317,7 +317,7 @@ impl Global { let mut token = Token::root(); let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, command_encoder_id)?; + let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id)?; let (buffer_guard, _) = hub.buffers.read(&mut token); // we can't hold both src_pending and dst_pending in scope because they // borrow the buffer tracker mutably... 
@@ -428,7 +428,7 @@ impl Global { let hub = B::hub(self); let mut token = Token::root(); let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, command_encoder_id)?; + let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id)?; let (buffer_guard, mut token) = hub.buffers.read(&mut token); let (texture_guard, _) = hub.textures.read(&mut token); let (dst_layers, dst_selector, dst_offset) = @@ -568,7 +568,7 @@ impl Global { let hub = B::hub(self); let mut token = Token::root(); let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, command_encoder_id)?; + let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id)?; let (buffer_guard, mut token) = hub.buffers.read(&mut token); let (texture_guard, _) = hub.textures.read(&mut token); let (src_layers, src_selector, src_offset) = @@ -710,7 +710,7 @@ impl Global { let mut token = Token::root(); let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, command_encoder_id)?; + let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id)?; let (_, mut token) = hub.buffers.read(&mut token); // skip token let (texture_guard, _) = hub.textures.read(&mut token); // we can't hold both src_pending and dst_pending in scope because they