286: RenderBundle support r=cwfitzgerald a=kvark

Implements the API of https://github.com/gpuweb/gpuweb/pull/301

The general concept here is having reusable command streams, which seems much desired given that our command buffers are not reusable.

Currently, only "software" render bundles are supported. That means they are just smaller chunks of render pass commands, not backed by any driver object.

TODO:
- [x] https://github.com/gfx-rs/wgpu-rs/pull/357
- [x] https://github.com/gfx-rs/wgpu-native/pull/37
- [x] figure out the lifetime solution

Co-authored-by: Dzmitry Malyshau <kvarkus@gmail.com>
This commit is contained in:
bors[bot]
2020-06-11 19:46:31 +00:00
committed by GitHub
15 changed files with 1401 additions and 244 deletions

View File

@@ -143,19 +143,8 @@ impl GlobalExt for wgc::hub::Global<IdentityPassThroughFactory> {
commands,
dynamic_offsets,
} => unsafe {
let mut offsets = &dynamic_offsets[..];
let mut pass = wgc::command::RawPass::new_compute(encoder);
for com in commands {
pass.encode(&com);
if let wgc::command::ComputeCommand::SetBindGroup {
num_dynamic_offsets,
..
} = com
{
pass.encode_slice(&offsets[..num_dynamic_offsets as usize]);
offsets = &offsets[num_dynamic_offsets as usize..];
}
}
pass.fill_compute_commands(&commands, &dynamic_offsets);
let (data, _) = pass.finish_compute();
self.command_encoder_run_compute_pass::<B>(encoder, &data);
},
@@ -165,7 +154,6 @@ impl GlobalExt for wgc::hub::Global<IdentityPassThroughFactory> {
commands,
dynamic_offsets,
} => unsafe {
let mut offsets = &dynamic_offsets[..];
let mut pass = wgc::command::RawPass::new_render(
encoder,
&wgc::command::RenderPassDescriptor {
@@ -174,17 +162,7 @@ impl GlobalExt for wgc::hub::Global<IdentityPassThroughFactory> {
depth_stencil_attachment: target_depth_stencil.as_ref(),
},
);
for com in commands {
pass.encode(&com);
if let wgc::command::RenderCommand::SetBindGroup {
num_dynamic_offsets,
..
} = com
{
pass.encode_slice(&offsets[..num_dynamic_offsets as usize]);
offsets = &offsets[num_dynamic_offsets as usize..];
}
}
pass.fill_render_commands(&commands, &dynamic_offsets);
let (data, _) = pass.finish_render();
self.command_encoder_run_render_pass::<B>(encoder, &data);
},
@@ -408,6 +386,34 @@ impl GlobalExt for wgc::hub::Global<IdentityPassThroughFactory> {
A::DestroyRenderPipeline(id) => {
self.render_pipeline_destroy::<B>(id);
}
A::CreateRenderBundle {
id,
desc,
commands,
dynamic_offsets,
} => {
let label = Label::new(&desc.label);
let mut bundle_encoder = wgc::command::RenderBundleEncoder::new(
&wgt::RenderBundleEncoderDescriptor {
label: None,
color_formats: &desc.color_formats,
depth_stencil_format: desc.depth_stencil_format,
sample_count: desc.sample_count,
},
device,
);
bundle_encoder.fill_commands(&commands, &dynamic_offsets);
self.render_bundle_encoder_finish::<B>(
bundle_encoder,
&wgt::RenderBundleDescriptor {
label: label.as_ptr(),
},
id,
);
}
A::DestroyRenderBundle(id) => {
self.render_bundle_destroy::<B>(id);
}
A::WriteBuffer {
id,
data,

View File

@@ -6,14 +6,13 @@ use crate::{
binding_model::BindGroup,
hub::GfxBackend,
id::{BindGroupId, BindGroupLayoutId, PipelineLayoutId},
Stored,
Stored, MAX_BIND_GROUPS,
};
use smallvec::{smallvec, SmallVec};
use arrayvec::ArrayVec;
use std::slice;
use wgt::DynamicOffset;
pub const DEFAULT_BIND_GROUPS: usize = 4;
type BindGroupMask = u8;
#[derive(Clone, Debug)]
@@ -134,17 +133,24 @@ impl BindGroupEntry {
#[derive(Debug)]
pub struct Binder {
pub(crate) pipeline_layout_id: Option<PipelineLayoutId>, //TODO: strongly `Stored`
pub(crate) entries: SmallVec<[BindGroupEntry; DEFAULT_BIND_GROUPS]>,
pub(crate) entries: ArrayVec<[BindGroupEntry; MAX_BIND_GROUPS]>,
}
impl Binder {
pub(crate) fn new(max_bind_groups: u32) -> Self {
Self {
pipeline_layout_id: None,
entries: smallvec![Default::default(); max_bind_groups as usize],
entries: (0..max_bind_groups)
.map(|_| BindGroupEntry::default())
.collect(),
}
}
pub(crate) fn reset(&mut self) {
self.pipeline_layout_id = None;
self.entries.clear();
}
pub(crate) fn reset_expectations(&mut self, length: usize) {
for entry in self.entries[length..].iter_mut() {
entry.expected_layout_id = None;

View File

@@ -0,0 +1,933 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*! Render Bundles
## Software implementation
The path from nothing to using a render bundle consists of 3 phases.
### Initial command encoding
User creates a `RenderBundleEncoder` and populates it by issuing commands
from `bundle_ffi` module, just like with `RenderPass`, except that the
set of available commands is reduced. Everything is written into a `RawPass`.
### Bundle baking
Once the commands are encoded, user calls `render_bundle_encoder_finish`.
This is perhaps the most complex part of the logic. It consumes the
commands stored in `RawPass`, while validating everything, tracking the state,
and re-recording the commands into a separate `Vec<RenderCommand>`. It
doesn't actually execute any commands.
What's more important is that the produced vector of commands is "normalized",
which means it can be executed verbatim without any state tracking. More
formally, "normalized" command stream guarantees that any state required by
a draw call is set explicitly by one of the commands between the draw call
and the last changing of the pipeline.
### Execution
When the bundle is used in an actual render pass, `RenderBundle::execute` is
called. It goes through the commands and issues them into the native command
buffer. Thanks to the "normalized" property, it doesn't track any bind group
invalidations or index format changes.
!*/
use crate::{
command::{PhantomSlice, RawPass, RenderCommand},
conv,
device::{AttachmentData, Label, RenderPassContext, MAX_VERTEX_BUFFERS},
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Input, Storage, Token},
id,
resource::BufferUse,
track::TrackerSet,
LifeGuard, RefCount, Stored, MAX_BIND_GROUPS,
};
use arrayvec::ArrayVec;
use peek_poke::{Peek, Poke};
use std::{borrow::Borrow, iter, marker::PhantomData, ops::Range};
/// Builder for a render bundle: accumulates encoded render commands into a
/// raw byte stream until they are validated and baked by
/// `render_bundle_encoder_finish`.
#[derive(Debug)]
pub struct RenderBundleEncoder {
    /// Raw encoded command stream; its parent is the owning device.
    raw: RawPass<id::DeviceId>,
    /// Attachment formats and sample count the bundle must stay compatible with.
    pub(crate) context: RenderPassContext,
}
impl RenderBundleEncoder {
    /// Create an encoder tied to `device_id`, capturing the color/depth
    /// formats and sample count from `desc` for later compatibility checks.
    pub fn new(desc: &wgt::RenderBundleEncoderDescriptor, device_id: id::DeviceId) -> Self {
        // Validate the sample count up front: non-zero power of two, at most 32.
        let sc = desc.sample_count;
        assert!(sc != 0 && sc <= 32 && conv::is_power_of_two(sc));
        let attachments = AttachmentData {
            colors: desc.color_formats.iter().cloned().collect(),
            resolves: ArrayVec::new(),
            depth_stencil: desc.depth_stencil_format,
        };
        Self {
            raw: RawPass::new::<RenderCommand>(device_id),
            context: RenderPassContext {
                attachments,
                sample_count: sc as u8,
            },
        }
    }
    /// Device this encoder was created for.
    pub fn parent(&self) -> id::DeviceId {
        self.raw.parent
    }
    /// Bulk-encode a slice of commands together with their dynamic offsets.
    pub fn fill_commands(&mut self, commands: &[RenderCommand], offsets: &[wgt::DynamicOffset]) {
        unsafe { self.raw.fill_render_commands(commands, offsets) }
    }
    /// Consume the encoder, releasing the raw pass storage.
    pub fn destroy(mut self) {
        let _ = unsafe { self.raw.invalidate() };
    }
}
//Note: here, `RenderBundle` is just wrapping a raw stream of render commands.
// The plan is to back it by an actual Vulkan secondary buffer, D3D12 Bundle,
// or Metal indirect command buffer.
#[derive(Debug)]
pub struct RenderBundle {
    // Normalized command stream. It can be executed verbatim,
    // without re-binding anything on the pipeline change.
    commands: Vec<RenderCommand>,
    // Flattened dynamic offsets, consumed front-to-back by the
    // `SetBindGroup` commands in `commands` (see `execute`).
    dynamic_offsets: Vec<wgt::DynamicOffset>,
    // Owning device, kept alive through the stored ref-count.
    pub(crate) device_id: Stored<id::DeviceId>,
    // Every resource referenced by the bundle, merged into the pass
    // trackers when the bundle is executed.
    pub(crate) used: TrackerSet,
    // Attachment formats / sample count this bundle is compatible with.
    pub(crate) context: RenderPassContext,
    pub(crate) life_guard: LifeGuard,
}
// SAFETY(review): `RenderBundle` owns all of its data (Vecs, ids, trackers).
// Presumably the auto-traits are only blocked by raw-pointer fields inside a
// contained type (e.g. `PhantomSlice` in `RenderCommand`) that are never
// dereferenced through the bundle — TODO confirm.
unsafe impl Send for RenderBundle {}
unsafe impl Sync for RenderBundle {}
impl RenderBundle {
    /// Actually encode the contents into a native command buffer.
    ///
    /// This is partially duplicating the logic of `command_encoder_run_render_pass`.
    /// However the point of this function is to be lighter, since we already had
    /// a chance to go through the commands in `render_bundle_encoder_finish`.
    ///
    /// The stream in `self.commands` is "normalized": every draw is preceded by
    /// the `Set*` commands it needs, so no invalidation tracking happens here.
    pub(crate) unsafe fn execute<B: GfxBackend>(
        &self,
        comb: &mut B::CommandBuffer,
        pipeline_layout_guard: &Storage<
            crate::binding_model::PipelineLayout<B>,
            id::PipelineLayoutId,
        >,
        bind_group_guard: &Storage<crate::binding_model::BindGroup<B>, id::BindGroupId>,
        pipeline_guard: &Storage<crate::pipeline::RenderPipeline<B>, id::RenderPipelineId>,
        buffer_guard: &Storage<crate::resource::Buffer<B>, id::BufferId>,
    ) {
        use hal::command::CommandBuffer as _;
        // Dynamic offsets were flattened in command order during baking;
        // each `SetBindGroup` consumes its prefix of this slice.
        let mut offsets = self.dynamic_offsets.as_slice();
        let mut index_type = hal::IndexType::U16;
        let mut pipeline_layout_id = None::<id::PipelineLayoutId>;
        for command in self.commands.iter() {
            match *command {
                RenderCommand::SetBindGroup {
                    index,
                    num_dynamic_offsets,
                    bind_group_id,
                    phantom_offsets: _,
                } => {
                    let bind_group = &bind_group_guard[bind_group_id];
                    comb.bind_graphics_descriptor_sets(
                        // Normalization guarantees a `SetPipeline` precedes any
                        // `SetBindGroup`, so this unwrap cannot fire.
                        &pipeline_layout_guard[pipeline_layout_id.unwrap()].raw,
                        index as usize,
                        iter::once(bind_group.raw.raw()),
                        &offsets[..num_dynamic_offsets as usize],
                    );
                    offsets = &offsets[num_dynamic_offsets as usize..];
                }
                RenderCommand::SetPipeline(pipeline_id) => {
                    let pipeline = &pipeline_guard[pipeline_id];
                    comb.bind_graphics_pipeline(&pipeline.raw);
                    // The pipeline determines the index type and the layout
                    // used by subsequent descriptor-set binds.
                    index_type = conv::map_index_format(pipeline.index_format);
                    pipeline_layout_id = Some(pipeline.layout_id.value);
                }
                RenderCommand::SetIndexBuffer {
                    buffer_id,
                    offset,
                    size,
                } => {
                    let buffer = &buffer_guard[buffer_id];
                    let view = hal::buffer::IndexBufferView {
                        buffer: &buffer.raw,
                        range: hal::buffer::SubRange {
                            offset,
                            // `WHOLE` maps to an unbounded (None) sub-range.
                            size: if size != wgt::BufferSize::WHOLE {
                                Some(size.0)
                            } else {
                                None
                            },
                        },
                        index_type,
                    };
                    comb.bind_index_buffer(view);
                }
                RenderCommand::SetVertexBuffer {
                    slot,
                    buffer_id,
                    offset,
                    size,
                } => {
                    let buffer = &buffer_guard[buffer_id];
                    let range = hal::buffer::SubRange {
                        offset,
                        size: if size != wgt::BufferSize::WHOLE {
                            Some(size.0)
                        } else {
                            None
                        },
                    };
                    comb.bind_vertex_buffers(slot, iter::once((&buffer.raw, range)));
                }
                RenderCommand::Draw {
                    vertex_count,
                    instance_count,
                    first_vertex,
                    first_instance,
                } => {
                    comb.draw(
                        first_vertex..first_vertex + vertex_count,
                        first_instance..first_instance + instance_count,
                    );
                }
                RenderCommand::DrawIndexed {
                    index_count,
                    instance_count,
                    first_index,
                    base_vertex,
                    first_instance,
                } => {
                    comb.draw_indexed(
                        first_index..first_index + index_count,
                        base_vertex,
                        first_instance..first_instance + instance_count,
                    );
                }
                RenderCommand::DrawIndirect { buffer_id, offset } => {
                    let buffer = &buffer_guard[buffer_id];
                    comb.draw_indirect(&buffer.raw, offset, 1, 0);
                }
                RenderCommand::DrawIndexedIndirect { buffer_id, offset } => {
                    let buffer = &buffer_guard[buffer_id];
                    comb.draw_indexed_indirect(&buffer.raw, offset, 1, 0);
                }
                // These variants are rejected or stripped during bundle baking
                // (`render_bundle_encoder_finish`), so a normalized stream can
                // never contain them.
                RenderCommand::ExecuteBundle(_)
                | RenderCommand::SetBlendColor(_)
                | RenderCommand::SetStencilReference(_)
                | RenderCommand::SetViewport { .. }
                | RenderCommand::SetScissor(_)
                | RenderCommand::End => unreachable!(),
            }
        }
    }
}
// Lets the hub machinery treat the bundle's ref-count as its identity handle.
impl Borrow<RefCount> for RenderBundle {
    fn borrow(&self) -> &RefCount {
        // NOTE(review): assumes `ref_count` is populated for any registered
        // bundle (set via `LifeGuard::new` in `render_bundle_encoder_finish`)
        // — TODO confirm it cannot be observed as `None` here.
        self.life_guard.ref_count.as_ref().unwrap()
    }
}
/// Lazily-flushed index buffer binding tracked during bundle baking.
#[derive(Debug)]
struct IndexState {
    // Currently bound index buffer, if any.
    buffer: Option<id::BufferId>,
    // Index format expected by the current pipeline.
    format: wgt::IndexFormat,
    // Bound byte range within `buffer`.
    range: Range<wgt::BufferAddress>,
    // True when the binding changed since the last `flush`.
    is_dirty: bool,
}
impl IndexState {
    /// Fresh state: nothing bound, default format, nothing pending.
    fn new() -> Self {
        Self {
            buffer: None,
            format: wgt::IndexFormat::default(),
            range: 0..0,
            is_dirty: false,
        }
    }
    /// Number of indices the bound range can hold.
    /// Panics if no index buffer has been set.
    fn limit(&self) -> u32 {
        assert!(self.buffer.is_some());
        let bytes_per_index = match self.format {
            wgt::IndexFormat::Uint16 => 2,
            wgt::IndexFormat::Uint32 => 4,
        };
        ((self.range.end - self.range.start) / bytes_per_index) as u32
    }
    /// Emit a `SetIndexBuffer` command if the binding changed since the last
    /// flush, clearing the dirty flag.
    fn flush(&mut self) -> Option<RenderCommand> {
        if !self.is_dirty {
            return None;
        }
        self.is_dirty = false;
        let buffer_id = self.buffer.unwrap();
        Some(RenderCommand::SetIndexBuffer {
            buffer_id,
            offset: self.range.start,
            size: wgt::BufferSize(self.range.end - self.range.start),
        })
    }
    /// Record the index format dictated by a new pipeline; dirty on change.
    fn set_format(&mut self, format: wgt::IndexFormat) {
        if self.format == format {
            return;
        }
        self.format = format;
        self.is_dirty = true;
    }
    /// Bind a buffer range as the index source; always marks dirty.
    fn set_buffer(&mut self, id: id::BufferId, range: Range<wgt::BufferAddress>) {
        self.is_dirty = true;
        self.buffer = Some(id);
        self.range = range;
    }
}
/// Lazily-flushed vertex buffer binding for one slot, tracked during baking.
#[derive(Debug)]
struct VertexState {
    // Currently bound vertex buffer, if any.
    buffer: Option<id::BufferId>,
    // Bound byte range within `buffer`.
    range: Range<wgt::BufferAddress>,
    // Stride and step mode from the current pipeline; stride 0 marks the
    // slot as unused for limit computation.
    stride: wgt::BufferAddress,
    rate: wgt::InputStepMode,
    // True when the binding changed since the last `flush`.
    is_dirty: bool,
}
impl VertexState {
    /// Fresh state: nothing bound, zero stride, nothing pending.
    fn new() -> Self {
        Self {
            buffer: None,
            range: 0..0,
            stride: 0,
            rate: wgt::InputStepMode::Vertex,
            is_dirty: false,
        }
    }
    /// Bind a buffer range for this slot; always marks dirty.
    fn set_buffer(&mut self, buffer_id: id::BufferId, range: Range<wgt::BufferAddress>) {
        self.is_dirty = true;
        self.buffer = Some(buffer_id);
        self.range = range;
    }
    /// Emit a `SetVertexBuffer` for `slot` if the binding changed since the
    /// last flush, clearing the dirty flag.
    fn flush(&mut self, slot: u32) -> Option<RenderCommand> {
        if !self.is_dirty {
            return None;
        }
        self.is_dirty = false;
        let buffer_id = self.buffer.unwrap();
        Some(RenderCommand::SetVertexBuffer {
            slot,
            buffer_id,
            offset: self.range.start,
            size: wgt::BufferSize(self.range.end - self.range.start),
        })
    }
}
/// Lazily-flushed bind group for one slot, tracked during baking.
#[derive(Debug)]
struct BindState {
    // Bound group plus its layout, used for pipeline-compatibility checks.
    bind_group: Option<(id::BindGroupId, id::BindGroupLayoutId)>,
    // Range into `State::raw_dynamic_offsets` holding this group's offsets.
    dynamic_offsets: Range<usize>,
    // True when the group must be re-emitted before the next draw.
    is_dirty: bool,
}
impl BindState {
    /// Fresh state: no group bound, empty offset range, nothing pending.
    fn new() -> Self {
        Self {
            bind_group: None,
            dynamic_offsets: 0..0,
            is_dirty: false,
        }
    }
    /// Record a bind group for this slot. Returns `true` when the slot
    /// actually changed (callers then invalidate the following slots).
    fn set_group(
        &mut self,
        bind_group_id: id::BindGroupId,
        layout_id: id::BindGroupLayoutId,
        dyn_offset: usize,
        dyn_count: usize,
    ) -> bool {
        // Re-binding the same group with no dynamic offsets is a no-op.
        if let Some((bound_id, _)) = self.bind_group {
            if bound_id == bind_group_id && dyn_count == 0 {
                return false;
            }
        }
        self.bind_group = Some((bind_group_id, layout_id));
        self.dynamic_offsets = dyn_offset..dyn_offset + dyn_count;
        self.is_dirty = true;
        true
    }
}
/// All lazy state tracked while baking a bundle's command stream.
#[derive(Debug)]
struct State {
    // Resources referenced so far.
    trackers: TrackerSet,
    // Lazily-flushed index buffer binding.
    index: IndexState,
    // Lazily-flushed vertex buffer bindings, one per slot.
    vertex: ArrayVec<[VertexState; MAX_VERTEX_BUFFERS]>,
    // Lazily-flushed bind groups, one per slot.
    bind: ArrayVec<[BindState; MAX_BIND_GROUPS]>,
    // Dynamic offsets in the order they arrived in the raw stream.
    raw_dynamic_offsets: Vec<wgt::DynamicOffset>,
    // Dynamic offsets re-ordered to match the normalized command stream;
    // becomes `RenderBundle::dynamic_offsets`.
    flat_dynamic_offsets: Vec<wgt::DynamicOffset>,
    // Number of bind group slots used by the current pipeline's layout.
    used_bind_groups: usize,
}
impl State {
    /// Derive `(vertex_limit, instance_limit)` from the tracked vertex
    /// buffers. Slots with zero stride are unused and ignored.
    fn vertex_limits(&self) -> (u32, u32) {
        let mut vertex_limit = !0;
        let mut instance_limit = !0;
        for vbs in &self.vertex {
            if vbs.stride == 0 {
                continue;
            }
            let limit = ((vbs.range.end - vbs.range.start) / vbs.stride) as u32;
            match vbs.rate {
                wgt::InputStepMode::Vertex => vertex_limit = vertex_limit.min(limit),
                wgt::InputStepMode::Instance => instance_limit = instance_limit.min(limit),
            }
        }
        (vertex_limit, instance_limit)
    }
    /// Mark every *occupied* bind slot from `slot` onward dirty, so it gets
    /// re-emitted before the next draw.
    fn invalidate_group_from(&mut self, slot: usize) {
        for bind in self.bind[slot..].iter_mut() {
            if bind.bind_group.is_some() {
                bind.is_dirty = true;
            }
        }
    }
    /// Track a `SetBindGroup`: record the group plus where its offsets live
    /// in `raw_dynamic_offsets`. A changed slot invalidates all later slots.
    fn set_bind_group(
        &mut self,
        slot: u8,
        bind_group_id: id::BindGroupId,
        layout_id: id::BindGroupLayoutId,
        offsets: &[wgt::DynamicOffset],
    ) {
        if self.bind[slot as usize].set_group(
            bind_group_id,
            layout_id,
            self.raw_dynamic_offsets.len(),
            offsets.len(),
        ) {
            self.invalidate_group_from(slot as usize + 1);
        }
        // Appended unconditionally so recorded ranges stay aligned with the
        // raw stream (the no-op case has zero offsets anyway).
        self.raw_dynamic_offsets.extend(offsets);
    }
    /// Track a `SetPipeline`: update the expected index format and vertex
    /// strides, and invalidate bind groups whose layouts no longer match.
    fn set_pipeline(
        &mut self,
        index_format: wgt::IndexFormat,
        vertex_strides: &[(wgt::BufferAddress, wgt::InputStepMode)],
        layout_ids: &[Stored<id::BindGroupLayoutId>],
    ) {
        self.index.set_format(index_format);
        for (vs, &(stride, step_mode)) in self.vertex.iter_mut().zip(vertex_strides) {
            if vs.stride != stride || vs.rate != step_mode {
                vs.stride = stride;
                vs.rate = step_mode;
                vs.is_dirty = true;
            }
        }
        self.used_bind_groups = layout_ids.len();
        // First slot whose bound group layout disagrees with the new
        // pipeline's layout; everything from there on must be re-bound.
        let invalid_from = self
            .bind
            .iter()
            .zip(layout_ids)
            .position(|(bs, layout_id)| match bs.bind_group {
                Some((_, bgl_id)) => bgl_id != layout_id.value,
                None => false,
            });
        if let Some(slot) = invalid_from {
            self.invalidate_group_from(slot);
        }
    }
    /// Emit `SetVertexBuffer` commands for every dirty slot.
    fn flush_vertices(&mut self) -> impl Iterator<Item = RenderCommand> + '_ {
        self.vertex
            .iter_mut()
            .enumerate()
            .flat_map(|(i, vs)| vs.flush(i as u32))
    }
    /// Emit `SetBindGroup` commands for every dirty slot in use.
    ///
    /// Two passes over the same slots: the first (eager) pass copies the
    /// offsets of still-dirty groups into `flat_dynamic_offsets`, in the
    /// order the executor will consume them; the second (lazy) pass produces
    /// the commands and clears the dirty flags. The copy must happen before
    /// the flags are cleared, hence the split.
    fn flush_binds(&mut self) -> impl Iterator<Item = RenderCommand> + '_ {
        for bs in self.bind[..self.used_bind_groups].iter() {
            if bs.is_dirty {
                self.flat_dynamic_offsets
                    .extend_from_slice(&self.raw_dynamic_offsets[bs.dynamic_offsets.clone()]);
            }
        }
        self.bind
            .iter_mut()
            .take(self.used_bind_groups)
            .enumerate()
            .flat_map(|(i, bs)| {
                if bs.is_dirty {
                    bs.is_dirty = false;
                    Some(RenderCommand::SetBindGroup {
                        index: i as u8,
                        bind_group_id: bs.bind_group.unwrap().0,
                        num_dynamic_offsets: (bs.dynamic_offsets.end - bs.dynamic_offsets.start)
                            as u8,
                        phantom_offsets: PhantomSlice::default(),
                    })
                } else {
                    None
                }
            })
    }
}
impl<G: GlobalIdentityHandlerFactory> Global<G> {
    /// Consume a `RenderBundleEncoder` and bake its raw stream into a
    /// registered `RenderBundle`.
    ///
    /// Re-plays the encoded commands while validating resource usage and
    /// tracking lazy state, producing a "normalized" command vector:
    /// redundant `Set*` commands are dropped and the ones a draw needs are
    /// re-emitted right before it.
    pub fn render_bundle_encoder_finish<B: GfxBackend>(
        &self,
        bundle_encoder: RenderBundleEncoder,
        desc: &wgt::RenderBundleDescriptor<Label>,
        id_in: Input<G, id::RenderBundleId>,
    ) -> id::RenderBundleId {
        let hub = B::hub(self);
        let mut token = Token::root();
        let (device_guard, mut token) = hub.devices.read(&mut token);
        // Take ownership of the raw byte stream and the parent device id.
        let (data, device_id) = unsafe { bundle_encoder.raw.finish_render() };
        let device = &device_guard[device_id];
        let render_bundle = {
            let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
            let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
            let (pipeline_guard, mut token) = hub.render_pipelines.read(&mut token);
            let (buffer_guard, _) = hub.buffers.read(&mut token);
            let mut state = State {
                trackers: TrackerSet::new(device_id.backend()),
                index: IndexState::new(),
                vertex: (0..MAX_VERTEX_BUFFERS)
                    .map(|_| VertexState::new())
                    .collect(),
                bind: (0..MAX_BIND_GROUPS).map(|_| BindState::new()).collect(),
                raw_dynamic_offsets: Vec::new(),
                flat_dynamic_offsets: Vec::new(),
                used_bind_groups: 0,
            };
            let mut commands = Vec::new();
            // Cursor into the raw byte stream; decoded via peek-poke.
            let mut peeker = data.as_ptr();
            let raw_data_end = unsafe { data.as_ptr().offset(data.len() as isize) };
            // Scratch command overwritten by each `peek_from`; the initial
            // value is irrelevant.
            let mut command = RenderCommand::Draw {
                vertex_count: 0,
                instance_count: 0,
                first_vertex: 0,
                first_instance: 0,
            };
            loop {
                // Guard against reading past the end of the encoded buffer.
                assert!(
                    unsafe { peeker.add(RenderCommand::max_size()) <= raw_data_end },
                    "RenderCommand (size {}) is too big to fit within raw_data",
                    RenderCommand::max_size(),
                );
                peeker = unsafe { RenderCommand::peek_from(peeker, &mut command) };
                match command {
                    RenderCommand::SetBindGroup {
                        index,
                        num_dynamic_offsets,
                        bind_group_id,
                        phantom_offsets,
                    } => {
                        // The offsets follow the command inline in the stream.
                        let (new_peeker, offsets) = unsafe {
                            phantom_offsets.decode_unaligned(
                                peeker,
                                num_dynamic_offsets as usize,
                                raw_data_end,
                            )
                        };
                        peeker = new_peeker;
                        for off in offsets {
                            assert_eq!(
                                *off as wgt::BufferAddress % wgt::BIND_BUFFER_ALIGNMENT,
                                0,
                                "Misaligned dynamic buffer offset: {} does not align with {}",
                                off,
                                wgt::BIND_BUFFER_ALIGNMENT
                            );
                        }
                        let bind_group = state
                            .trackers
                            .bind_groups
                            .use_extend(&*bind_group_guard, bind_group_id, (), ())
                            .unwrap();
                        assert_eq!(bind_group.dynamic_count, offsets.len());
                        // Recorded lazily; flushed right before the next draw.
                        state.set_bind_group(index, bind_group_id, bind_group.layout_id, offsets);
                        state.trackers.merge_extend(&bind_group.used);
                    }
                    RenderCommand::SetPipeline(pipeline_id) => {
                        let pipeline = state
                            .trackers
                            .render_pipes
                            .use_extend(&*pipeline_guard, pipeline_id, (), ())
                            .unwrap();
                        assert!(
                            bundle_encoder.context.compatible(&pipeline.pass_context),
                            "The render pipeline output formats and sample counts do not match render pass attachment formats!"
                        );
                        //TODO: check read-only depth
                        let layout = &pipeline_layout_guard[pipeline.layout_id.value];
                        state.set_pipeline(
                            pipeline.index_format,
                            &pipeline.vertex_strides,
                            &layout.bind_group_layout_ids,
                        );
                        // Pipeline changes are emitted eagerly (draws depend
                        // on the current pipeline for normalization).
                        commands.push(command);
                    }
                    RenderCommand::SetIndexBuffer {
                        buffer_id,
                        offset,
                        size,
                    } => {
                        let buffer = state
                            .trackers
                            .buffers
                            .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDEX)
                            .unwrap();
                        assert!(buffer.usage.contains(wgt::BufferUsage::INDEX), "An invalid setIndexBuffer call has been made. The buffer usage is {:?} which does not contain required usage INDEX", buffer.usage);
                        // `WHOLE` binds up to the end of the buffer.
                        let end = if size != wgt::BufferSize::WHOLE {
                            offset + size.0
                        } else {
                            buffer.size
                        };
                        // Recorded lazily; flushed right before a draw.
                        state.index.set_buffer(buffer_id, offset..end);
                    }
                    RenderCommand::SetVertexBuffer {
                        slot,
                        buffer_id,
                        offset,
                        size,
                    } => {
                        let buffer = state
                            .trackers
                            .buffers
                            .use_extend(&*buffer_guard, buffer_id, (), BufferUse::VERTEX)
                            .unwrap();
                        assert!(
                            buffer.usage.contains(wgt::BufferUsage::VERTEX),
                            "An invalid setVertexBuffer call has been made. The buffer usage is {:?} which does not contain required usage VERTEX",
                            buffer.usage
                        );
                        let end = if size != wgt::BufferSize::WHOLE {
                            offset + size.0
                        } else {
                            buffer.size
                        };
                        state.vertex[slot as usize].set_buffer(buffer_id, offset..end);
                    }
                    RenderCommand::Draw {
                        vertex_count,
                        instance_count,
                        first_vertex,
                        first_instance,
                    } => {
                        let (vertex_limit, instance_limit) = state.vertex_limits();
                        assert!(
                            first_vertex + vertex_count <= vertex_limit,
                            "Vertex {} extends beyond limit {}",
                            first_vertex + vertex_count,
                            vertex_limit
                        );
                        assert!(
                            first_instance + instance_count <= instance_limit,
                            "Instance {} extends beyond limit {}",
                            first_instance + instance_count,
                            instance_limit
                        );
                        // Flush whatever lazy state this draw depends on.
                        commands.extend(state.flush_vertices());
                        commands.extend(state.flush_binds());
                        commands.push(command);
                    }
                    RenderCommand::DrawIndexed {
                        index_count,
                        instance_count,
                        first_index,
                        base_vertex: _,
                        first_instance,
                    } => {
                        //TODO: validate that base_vertex + max_index() is within the provided range
                        let (_, instance_limit) = state.vertex_limits();
                        let index_limit = state.index.limit();
                        assert!(
                            first_index + index_count <= index_limit,
                            "Index {} extends beyond limit {}",
                            first_index + index_count,
                            index_limit
                        );
                        assert!(
                            first_instance + instance_count <= instance_limit,
                            "Instance {} extends beyond limit {}",
                            first_instance + instance_count,
                            instance_limit
                        );
                        commands.extend(state.index.flush());
                        commands.extend(state.flush_vertices());
                        commands.extend(state.flush_binds());
                        commands.push(command);
                    }
                    RenderCommand::DrawIndirect {
                        buffer_id,
                        offset: _,
                    } => {
                        let buffer = state
                            .trackers
                            .buffers
                            .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT)
                            .unwrap();
                        assert!(
                            buffer.usage.contains(wgt::BufferUsage::INDIRECT),
                            "An invalid drawIndirect call has been made. The buffer usage is {:?} which does not contain required usage INDIRECT",
                            buffer.usage
                        );
                        commands.extend(state.flush_vertices());
                        commands.extend(state.flush_binds());
                        commands.push(command);
                    }
                    RenderCommand::DrawIndexedIndirect {
                        buffer_id,
                        offset: _,
                    } => {
                        let buffer = state
                            .trackers
                            .buffers
                            .use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT)
                            .unwrap();
                        assert!(
                            buffer.usage.contains(wgt::BufferUsage::INDIRECT),
                            "An invalid drawIndexedIndirect call has been made. The buffer usage is {:?} which does not contain required usage INDIRECT",
                            buffer.usage
                        );
                        commands.extend(state.index.flush());
                        commands.extend(state.flush_vertices());
                        commands.extend(state.flush_binds());
                        commands.push(command);
                    }
                    // `End` terminates the stream and is not kept in the
                    // normalized output.
                    RenderCommand::End => break,
                    RenderCommand::ExecuteBundle(_)
                    | RenderCommand::SetBlendColor(_)
                    | RenderCommand::SetStencilReference(_)
                    | RenderCommand::SetViewport { .. }
                    | RenderCommand::SetScissor(_) => {
                        unreachable!("not supported by a render bundle")
                    }
                }
            }
            log::debug!("Render bundle {:?} = {:#?}", id_in, state.trackers);
            let _ = desc.label; //TODO: actually use
            //TODO: check if the device is still alive
            RenderBundle {
                commands,
                dynamic_offsets: state.flat_dynamic_offsets,
                device_id: Stored {
                    value: device_id,
                    ref_count: device.life_guard.add_ref(),
                },
                used: state.trackers,
                context: bundle_encoder.context,
                life_guard: LifeGuard::new(),
            }
        };
        let ref_count = render_bundle.life_guard.add_ref();
        let id = hub
            .render_bundles
            .register_identity(id_in, render_bundle, &mut token);
        // Record the creation for API tracing, if enabled.
        #[cfg(feature = "trace")]
        match device.trace {
            Some(ref trace) => {
                use crate::device::trace;
                let (bundle_guard, _) = hub.render_bundles.read(&mut token);
                let bundle = &bundle_guard[id];
                trace.lock().add(trace::Action::CreateRenderBundle {
                    id,
                    desc: trace::RenderBundleDescriptor::new(desc.label, &bundle.context),
                    commands: bundle.commands.clone(),
                    dynamic_offsets: bundle.dynamic_offsets.clone(),
                });
            }
            None => {}
        }
        // Register the new bundle with the device's trackers.
        device
            .trackers
            .lock()
            .bundles
            .init(id, ref_count, PhantomData)
            .unwrap();
        id
    }
}
/// C-ABI entry points for recording commands into a `RenderBundleEncoder`.
/// Each function simply encodes one `RenderCommand` into the raw stream;
/// validation happens later, in `render_bundle_encoder_finish`.
pub mod bundle_ffi {
    use super::{super::PhantomSlice, RenderBundleEncoder, RenderCommand};
    use crate::{id, RawString};
    use std::{convert::TryInto, slice};
    use wgt::{BufferAddress, BufferSize, DynamicOffset};
    /// # Safety
    ///
    /// This function is unsafe as there is no guarantee that the given pointer is
    /// valid for `offset_length` elements.
    // TODO: There might be other safety issues, such as using the unsafe
    // `RawPass::encode` and `RawPass::encode_slice`.
    #[no_mangle]
    pub unsafe extern "C" fn wgpu_render_bundle_set_bind_group(
        bundle_encoder: &mut RenderBundleEncoder,
        index: u32,
        bind_group_id: id::BindGroupId,
        offsets: *const DynamicOffset,
        offset_length: usize,
    ) {
        bundle_encoder.raw.encode(&RenderCommand::SetBindGroup {
            index: index.try_into().unwrap(),
            num_dynamic_offsets: offset_length.try_into().unwrap(),
            bind_group_id,
            phantom_offsets: PhantomSlice::default(),
        });
        // The offsets are encoded inline, right after the command itself.
        bundle_encoder
            .raw
            .encode_slice(slice::from_raw_parts(offsets, offset_length));
    }
    #[no_mangle]
    pub unsafe extern "C" fn wgpu_render_bundle_set_pipeline(
        bundle_encoder: &mut RenderBundleEncoder,
        pipeline_id: id::RenderPipelineId,
    ) {
        bundle_encoder
            .raw
            .encode(&RenderCommand::SetPipeline(pipeline_id));
    }
    #[no_mangle]
    pub unsafe extern "C" fn wgpu_render_bundle_set_index_buffer(
        bundle_encoder: &mut RenderBundleEncoder,
        buffer_id: id::BufferId,
        offset: BufferAddress,
        size: BufferSize,
    ) {
        bundle_encoder.raw.encode(&RenderCommand::SetIndexBuffer {
            buffer_id,
            offset,
            size,
        });
    }
    #[no_mangle]
    pub unsafe extern "C" fn wgpu_render_bundle_set_vertex_buffer(
        bundle_encoder: &mut RenderBundleEncoder,
        slot: u32,
        buffer_id: id::BufferId,
        offset: BufferAddress,
        size: BufferSize,
    ) {
        bundle_encoder.raw.encode(&RenderCommand::SetVertexBuffer {
            slot,
            buffer_id,
            offset,
            size,
        });
    }
    #[no_mangle]
    pub unsafe extern "C" fn wgpu_render_bundle_draw(
        bundle_encoder: &mut RenderBundleEncoder,
        vertex_count: u32,
        instance_count: u32,
        first_vertex: u32,
        first_instance: u32,
    ) {
        bundle_encoder.raw.encode(&RenderCommand::Draw {
            vertex_count,
            instance_count,
            first_vertex,
            first_instance,
        });
    }
    #[no_mangle]
    pub unsafe extern "C" fn wgpu_render_bundle_draw_indexed(
        bundle_encoder: &mut RenderBundleEncoder,
        index_count: u32,
        instance_count: u32,
        first_index: u32,
        base_vertex: i32,
        first_instance: u32,
    ) {
        bundle_encoder.raw.encode(&RenderCommand::DrawIndexed {
            index_count,
            instance_count,
            first_index,
            base_vertex,
            first_instance,
        });
    }
    #[no_mangle]
    pub unsafe extern "C" fn wgpu_render_bundle_draw_indirect(
        bundle_encoder: &mut RenderBundleEncoder,
        buffer_id: id::BufferId,
        offset: BufferAddress,
    ) {
        bundle_encoder
            .raw
            .encode(&RenderCommand::DrawIndirect { buffer_id, offset });
    }
    // NOTE(review): the name is inconsistent with its siblings — it encodes
    // a bundle `DrawIndexedIndirect`, so presumably it should be called
    // `wgpu_render_bundle_draw_indexed_indirect`. The symbol is part of the
    // exported C ABI, so renaming must be coordinated with downstream users.
    #[no_mangle]
    pub unsafe extern "C" fn wgpu_render_pass_bundle_indexed_indirect(
        bundle_encoder: &mut RenderBundleEncoder,
        buffer_id: id::BufferId,
        offset: BufferAddress,
    ) {
        bundle_encoder
            .raw
            .encode(&RenderCommand::DrawIndexedIndirect { buffer_id, offset });
    }
    // Debug markers are accepted but not yet recorded.
    #[no_mangle]
    pub extern "C" fn wgpu_render_bundle_push_debug_group(
        _bundle_encoder: &mut RenderBundleEncoder,
        _label: RawString,
    ) {
        //TODO
    }
    #[no_mangle]
    pub extern "C" fn wgpu_render_bundle_pop_debug_group(
        _bundle_encoder: &mut RenderBundleEncoder,
    ) {
        //TODO
    }
    #[no_mangle]
    pub extern "C" fn wgpu_render_bundle_insert_debug_marker(
        _bundle_encoder: &mut RenderBundleEncoder,
        _label: RawString,
    ) {
        //TODO
    }
}

View File

@@ -51,9 +51,27 @@ impl Default for ComputeCommand {
}
}
impl super::RawPass {
impl super::RawPass<id::CommandEncoderId> {
pub unsafe fn new_compute(parent: id::CommandEncoderId) -> Self {
Self::from_vec(Vec::<ComputeCommand>::with_capacity(1), parent)
Self::new::<ComputeCommand>(parent)
}
pub unsafe fn fill_compute_commands(
&mut self,
commands: &[ComputeCommand],
mut offsets: &[DynamicOffset],
) {
for com in commands {
self.encode(com);
if let ComputeCommand::SetBindGroup {
num_dynamic_offsets,
..
} = *com
{
self.encode_slice(&offsets[..num_dynamic_offsets as usize]);
offsets = &offsets[num_dynamic_offsets as usize..];
}
}
}
pub unsafe fn finish_compute(mut self) -> (Vec<u8>, id::CommandEncoderId) {
@@ -84,6 +102,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let raw = cmb.raw.last_mut().unwrap();
let mut binder = Binder::new(cmb.limits.max_bind_groups);
let (_, mut token) = hub.render_bundles.read(&mut token);
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (pipeline_guard, mut token) = hub.compute_pipelines.read(&mut token);
@@ -291,14 +310,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
pub mod compute_ffi {
use super::{
super::{PhantomSlice, RawPass},
ComputeCommand,
};
use super::{super::PhantomSlice, ComputeCommand};
use crate::{id, RawString};
use std::{convert::TryInto, slice};
use wgt::{BufferAddress, DynamicOffset};
type RawPass = super::super::RawPass<id::CommandEncoderId>;
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is

View File

@@ -4,11 +4,13 @@
mod allocator;
mod bind;
mod bundle;
mod compute;
mod render;
mod transfer;
pub(crate) use self::allocator::CommandAllocator;
pub use self::bundle::*;
pub use self::compute::*;
pub use self::render::*;
pub use self::transfer::*;
@@ -57,23 +59,26 @@ impl<T> PhantomSlice<T> {
}
#[repr(C)]
pub struct RawPass {
#[derive(Debug)]
pub struct RawPass<P> {
data: *mut u8,
base: *mut u8,
capacity: usize,
parent: id::CommandEncoderId,
parent: P,
}
impl RawPass {
fn from_vec<T>(mut vec: Vec<T>, encoder_id: id::CommandEncoderId) -> Self {
impl<P: Copy> RawPass<P> {
fn new<T>(parent: P) -> Self {
let mut vec = Vec::<T>::with_capacity(1);
let ptr = vec.as_mut_ptr() as *mut u8;
let capacity = vec.capacity() * mem::size_of::<T>();
let capacity = mem::size_of::<T>();
assert_ne!(capacity, 0);
mem::forget(vec);
RawPass {
data: ptr,
base: ptr,
capacity,
parent: encoder_id,
parent,
}
}
@@ -94,15 +99,15 @@ impl RawPass {
}
/// Recover the data vector of the pass, consuming `self`.
unsafe fn into_vec(mut self) -> (Vec<u8>, id::CommandEncoderId) {
(self.invalidate(), self.parent)
unsafe fn into_vec(mut self) -> (Vec<u8>, P) {
self.invalidate()
}
/// Make pass contents invalid, return the contained data.
///
/// Any following access to the pass will result in a crash
/// for accessing address 0.
pub unsafe fn invalidate(&mut self) -> Vec<u8> {
pub unsafe fn invalidate(&mut self) -> (Vec<u8>, P) {
let size = self.size();
assert!(
size <= self.capacity,
@@ -114,7 +119,7 @@ impl RawPass {
self.data = ptr::null_mut();
self.base = ptr::null_mut();
self.capacity = 0;
vec
(vec, self.parent)
}
unsafe fn ensure_extra_size(&mut self, extra_size: usize) {
@@ -131,13 +136,13 @@ impl RawPass {
}
#[inline]
pub unsafe fn encode<C: peek_poke::Poke>(&mut self, command: &C) {
pub(crate) unsafe fn encode<C: peek_poke::Poke>(&mut self, command: &C) {
self.ensure_extra_size(C::max_size());
self.data = command.poke_into(self.data);
}
#[inline]
pub unsafe fn encode_slice<T: Copy>(&mut self, data: &[T]) {
pub(crate) unsafe fn encode_slice<T: Copy>(&mut self, data: &[T]) {
let align_offset = self.data.align_offset(mem::align_of::<T>());
let extra = align_offset + mem::size_of::<T>() * data.len();
self.ensure_extra_size(extra);
@@ -147,10 +152,6 @@ impl RawPass {
}
}
pub struct RenderBundle<B: hal::Backend> {
_raw: B::CommandBuffer,
}
#[derive(Debug)]
pub struct CommandBuffer<B: hal::Backend> {
pub(crate) raw: Vec<B::CommandBuffer>,
@@ -193,6 +194,7 @@ impl<B: GfxBackend> CommandBuffer<B> {
.merge_extend(&head.compute_pipes)
.unwrap();
base.render_pipes.merge_extend(&head.render_pipes).unwrap();
base.bundles.merge_extend(&head.bundles).unwrap();
let stages = all_buffer_stages() | all_image_stages();
unsafe {

View File

@@ -5,12 +5,13 @@
use crate::{
command::{
bind::{Binder, LayoutChange},
PassComponent, PhantomSlice, RawRenderPassColorAttachmentDescriptor,
PassComponent, PhantomSlice, RawPass, RawRenderPassColorAttachmentDescriptor,
RawRenderPassDepthStencilAttachmentDescriptor, RawRenderTargets,
},
conv,
device::{
FramebufferKey, RenderPassContext, RenderPassKey, MAX_COLOR_TARGETS, MAX_VERTEX_BUFFERS,
AttachmentData, FramebufferKey, RenderPassContext, RenderPassKey, MAX_COLOR_TARGETS,
MAX_VERTEX_BUFFERS,
},
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token},
id,
@@ -23,7 +24,6 @@ use crate::{
use arrayvec::ArrayVec;
use hal::command::CommandBuffer as _;
use peek_poke::{Peek, PeekPoke, Poke};
use smallvec::SmallVec;
use wgt::{
BufferAddress, BufferSize, BufferUsage, Color, DynamicOffset, IndexFormat, InputStepMode,
LoadOp, RenderPassColorAttachmentDescriptorBase,
@@ -116,6 +116,7 @@ pub enum RenderCommand {
buffer_id: id::BufferId,
offset: BufferAddress,
},
ExecuteBundle(id::RenderBundleId),
End,
}
@@ -126,9 +127,9 @@ impl Default for RenderCommand {
}
}
impl super::RawPass {
impl RawPass<id::CommandEncoderId> {
pub unsafe fn new_render(parent_id: id::CommandEncoderId, desc: &RenderPassDescriptor) -> Self {
let mut pass = Self::from_vec(Vec::<RenderCommand>::with_capacity(1), parent_id);
let mut pass = Self::new::<RenderCommand>(parent_id);
let mut targets: RawRenderTargets = mem::zeroed();
if let Some(ds) = desc.depth_stencil_attachment {
@@ -168,8 +169,28 @@ impl super::RawPass {
pass.encode(&targets);
pass
}
}
pub unsafe fn finish_render(mut self) -> (Vec<u8>, id::CommandEncoderId) {
impl<P: Copy> RawPass<P> {
pub unsafe fn fill_render_commands(
&mut self,
commands: &[RenderCommand],
mut offsets: &[DynamicOffset],
) {
for com in commands {
self.encode(com);
if let RenderCommand::SetBindGroup {
num_dynamic_offsets,
..
} = *com
{
self.encode_slice(&offsets[..num_dynamic_offsets as usize]);
offsets = &offsets[num_dynamic_offsets as usize..];
}
}
}
pub unsafe fn finish_render(mut self) -> (Vec<u8>, P) {
self.finish(RenderCommand::End);
self.into_vec()
}
@@ -213,8 +234,8 @@ impl fmt::Debug for DrawError {
}
}
#[derive(Debug)]
pub struct IndexState {
#[derive(Debug, Default)]
struct IndexState {
bound_buffer_view: Option<(id::BufferId, Range<BufferAddress>)>,
format: IndexFormat,
limit: u32,
@@ -233,10 +254,15 @@ impl IndexState {
None => 0,
}
}
fn reset(&mut self) {
self.bound_buffer_view = None;
self.limit = 0;
}
}
#[derive(Clone, Copy, Debug)]
pub struct VertexBufferState {
struct VertexBufferState {
total_size: BufferAddress,
stride: BufferAddress,
rate: InputStepMode,
@@ -250,9 +276,9 @@ impl VertexBufferState {
};
}
#[derive(Debug)]
pub struct VertexState {
inputs: SmallVec<[VertexBufferState; MAX_VERTEX_BUFFERS]>,
#[derive(Debug, Default)]
struct VertexState {
inputs: ArrayVec<[VertexBufferState; MAX_VERTEX_BUFFERS]>,
vertex_limit: u32,
instance_limit: u32,
}
@@ -272,6 +298,12 @@ impl VertexState {
}
}
}
fn reset(&mut self) {
self.inputs.clear();
self.vertex_limit = 0;
self.instance_limit = 0;
}
}
#[derive(Debug)]
@@ -305,6 +337,14 @@ impl State {
}
Ok(())
}
/// Reset the `RenderBundle`-related states.
fn reset_bundle(&mut self) {
self.binder.reset();
self.pipeline = OptionalState::Required;
self.index.reset();
self.vertex.reset();
}
}
// Common routines between render/compute
@@ -330,6 +370,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
raw.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
}
let (bundle_guard, mut token) = hub.render_bundles.read(&mut token);
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (pipeline_guard, mut token) = hub.render_pipelines.read(&mut token);
@@ -386,7 +427,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
// instead of the special read-only one, which would be `None`.
let mut is_ds_read_only = false;
let (context, sample_count) = {
let context = {
use hal::device::Device as _;
let samples_count_limit = device.hal_limits.framebuffer_color_sample_counts;
@@ -852,19 +893,22 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
);
}
let context = RenderPassContext {
colors: color_attachments
.iter()
.map(|at| view_guard[at.attachment].format)
.collect(),
resolves: color_attachments
.iter()
.filter_map(|at| at.resolve_target)
.map(|resolve| view_guard[resolve].format)
.collect(),
depth_stencil: depth_stencil_attachment.map(|at| view_guard[at.attachment].format),
};
(context, sample_count)
RenderPassContext {
attachments: AttachmentData {
colors: color_attachments
.iter()
.map(|at| view_guard[at.attachment].format)
.collect(),
resolves: color_attachments
.iter()
.filter_map(|at| at.resolve_target)
.map(|resolve| view_guard[resolve].format)
.collect(),
depth_stencil: depth_stencil_attachment
.map(|at| view_guard[at.attachment].format),
},
sample_count,
}
};
let mut state = State {
@@ -872,16 +916,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
blend_color: OptionalState::Unused,
stencil_reference: OptionalState::Unused,
pipeline: OptionalState::Required,
index: IndexState {
bound_buffer_view: None,
format: IndexFormat::Uint16,
limit: 0,
},
vertex: VertexState {
inputs: SmallVec::new(),
vertex_limit: 0,
instance_limit: 0,
},
index: IndexState::default(),
vertex: VertexState::default(),
};
let mut command = RenderCommand::Draw {
@@ -947,7 +983,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
);
unsafe {
raw.bind_graphics_descriptor_sets(
&&pipeline_layout_guard[pipeline_layout_id].raw,
&pipeline_layout_guard[pipeline_layout_id].raw,
index as usize,
bind_groups,
offsets
@@ -967,11 +1003,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
assert!(
context.compatible(&pipeline.pass_context),
"The render pipeline output formats do not match render pass attachment formats!"
);
assert_eq!(
pipeline.sample_count, sample_count,
"The render pipeline and renderpass have mismatching sample_count"
"The render pipeline output formats and sample count do not match render pass attachment formats!"
);
assert!(
!is_ds_read_only || pipeline.flags.contains(PipelineFlags::DEPTH_STENCIL_READ_ONLY),
@@ -1262,6 +1294,30 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
raw.draw_indexed_indirect(&buffer.raw, offset, 1, 0);
}
}
RenderCommand::ExecuteBundle(bundle_id) => {
let bundle = trackers
.bundles
.use_extend(&*bundle_guard, bundle_id, (), ())
.unwrap();
assert!(
context.compatible(&bundle.context),
"The render bundle output formats do not match render pass attachment formats!"
);
unsafe {
bundle.execute(
&mut raw,
&*pipeline_layout_guard,
&*bind_group_guard,
&*pipeline_guard,
&*buffer_guard,
)
};
trackers.merge_extend(&bundle.used);
state.reset_bundle();
}
RenderCommand::End => break,
}
}
@@ -1325,13 +1381,15 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub mod render_ffi {
use super::{
super::{PhantomSlice, RawPass, Rect},
super::{PhantomSlice, Rect},
RenderCommand,
};
use crate::{id, RawString};
use std::{convert::TryInto, slice};
use wgt::{BufferAddress, BufferSize, Color, DynamicOffset};
type RawPass = super::super::RawPass<id::CommandEncoderId>;
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is
@@ -1486,15 +1544,6 @@ pub mod render_ffi {
pass.encode(&RenderCommand::DrawIndexedIndirect { buffer_id, offset });
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_execute_bundles(
_pass: &mut RawPass,
_bundles: *const id::RenderBundleId,
_bundles_length: usize,
) {
unimplemented!()
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_push_debug_group(_pass: &mut RawPass, _label: RawString) {
//TODO
@@ -1510,6 +1559,17 @@ pub mod render_ffi {
//TODO
}
#[no_mangle]
pub unsafe fn wgpu_render_pass_execute_bundles(
pass: &mut RawPass,
render_bundle_ids: *const id::RenderBundleId,
render_bundle_ids_length: usize,
) {
for &bundle_id in slice::from_raw_parts(render_bundle_ids, render_bundle_ids_length) {
pass.encode(&RenderCommand::ExecuteBundle(bundle_id));
}
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_finish(
pass: &mut RawPass,

View File

@@ -5,7 +5,7 @@
#[cfg(feature = "trace")]
use crate::device::trace;
use crate::{
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token},
hub::{GfxBackend, GlobalIdentityHandlerFactory, Hub, Token},
id, resource,
track::TrackerSet,
FastHashMap, RefCount, Stored, SubmissionIndex,
@@ -33,10 +33,11 @@ pub struct SuspectedResources {
pub(crate) render_pipelines: Vec<id::RenderPipelineId>,
pub(crate) bind_group_layouts: Vec<Stored<id::BindGroupLayoutId>>,
pub(crate) pipeline_layouts: Vec<Stored<id::PipelineLayoutId>>,
pub(crate) render_bundles: Vec<id::RenderBundleId>,
}
impl SuspectedResources {
pub fn clear(&mut self) {
pub(crate) fn clear(&mut self) {
self.buffers.clear();
self.textures.clear();
self.texture_views.clear();
@@ -46,9 +47,10 @@ impl SuspectedResources {
self.render_pipelines.clear();
self.bind_group_layouts.clear();
self.pipeline_layouts.clear();
self.render_bundles.clear();
}
pub fn extend(&mut self, other: &Self) {
pub(crate) fn extend(&mut self, other: &Self) {
self.buffers.extend_from_slice(&other.buffers);
self.textures.extend_from_slice(&other.textures);
self.texture_views.extend_from_slice(&other.texture_views);
@@ -62,6 +64,18 @@ impl SuspectedResources {
.extend_from_slice(&other.bind_group_layouts);
self.pipeline_layouts
.extend_from_slice(&other.pipeline_layouts);
self.render_bundles.extend_from_slice(&other.render_bundles);
}
pub(crate) fn add_trackers(&mut self, trackers: &TrackerSet) {
self.buffers.extend(trackers.buffers.used());
self.textures.extend(trackers.textures.used());
self.texture_views.extend(trackers.views.used());
self.samplers.extend(trackers.samplers.used());
self.bind_groups.extend(trackers.bind_groups.used());
self.compute_pipelines.extend(trackers.compute_pipes.used());
self.render_pipelines.extend(trackers.render_pipes.used());
self.render_bundles.extend(trackers.bundles.used());
}
}
@@ -303,37 +317,38 @@ impl<B: hal::Backend> LifetimeTracker<B> {
impl<B: GfxBackend> LifetimeTracker<B> {
pub(crate) fn triage_suspected<G: GlobalIdentityHandlerFactory>(
&mut self,
global: &Global<G>,
hub: &Hub<B, G>,
trackers: &Mutex<TrackerSet>,
#[cfg(feature = "trace")] trace: Option<&Mutex<trace::Trace>>,
token: &mut Token<super::Device<B>>,
) {
let hub = B::hub(global);
if !self.suspected_resources.render_bundles.is_empty() {
let mut trackers = trackers.lock();
let (mut guard, _) = hub.render_bundles.write(token);
while let Some(id) = self.suspected_resources.render_bundles.pop() {
if trackers.bundles.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyRenderBundle(id)));
hub.render_bundles.free_id(id);
let res = guard.remove(id).unwrap();
self.suspected_resources.add_trackers(&res.used);
}
}
}
if !self.suspected_resources.bind_groups.is_empty() {
let mut trackers = trackers.lock();
let (mut guard, _) = hub.bind_groups.write(token);
for id in self.suspected_resources.bind_groups.drain(..) {
while let Some(id) = self.suspected_resources.bind_groups.pop() {
if trackers.bind_groups.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyBindGroup(id)));
hub.bind_groups.free_id(id);
let res = guard.remove(id).unwrap();
assert!(res.used.bind_groups.is_empty());
self.suspected_resources
.buffers
.extend(res.used.buffers.used());
self.suspected_resources
.textures
.extend(res.used.textures.used());
self.suspected_resources
.texture_views
.extend(res.used.views.used());
self.suspected_resources
.samplers
.extend(res.used.samplers.used());
self.suspected_resources.add_trackers(&res.used);
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
@@ -528,13 +543,13 @@ impl<B: GfxBackend> LifetimeTracker<B> {
pub(crate) fn triage_mapped<G: GlobalIdentityHandlerFactory>(
&mut self,
global: &Global<G>,
hub: &Hub<B, G>,
token: &mut Token<super::Device<B>>,
) {
if self.mapped.is_empty() {
return;
}
let (buffer_guard, _) = B::hub(global).buffers.read(token);
let (buffer_guard, _) = hub.buffers.read(token);
for stored in self.mapped.drain(..) {
let resource_id = stored.value;
@@ -558,11 +573,11 @@ impl<B: GfxBackend> LifetimeTracker<B> {
pub(crate) fn triage_framebuffers<G: GlobalIdentityHandlerFactory>(
&mut self,
global: &Global<G>,
hub: &Hub<B, G>,
framebuffers: &mut FastHashMap<super::FramebufferKey, B::Framebuffer>,
token: &mut Token<super::Device<B>>,
) {
let (texture_view_guard, _) = B::hub(global).texture_views.read(token);
let (texture_view_guard, _) = hub.texture_views.read(token);
let remove_list = framebuffers
.keys()
.filter_map(|key| {
@@ -624,7 +639,7 @@ impl<B: GfxBackend> LifetimeTracker<B> {
pub(crate) fn handle_mapping<G: GlobalIdentityHandlerFactory>(
&mut self,
global: &Global<G>,
hub: &Hub<B, G>,
raw: &B::Device,
trackers: &Mutex<TrackerSet>,
token: &mut Token<super::Device<B>>,
@@ -632,8 +647,7 @@ impl<B: GfxBackend> LifetimeTracker<B> {
if self.ready_to_map.is_empty() {
return Vec::new();
}
let hub = B::hub(global);
let (mut buffer_guard, _) = B::hub(global).buffers.write(token);
let (mut buffer_guard, _) = hub.buffers.write(token);
let mut pending_callbacks: Vec<super::BufferMapPendingCallback> =
Vec::with_capacity(self.ready_to_map.len());
let mut trackers = trackers.lock();

View File

@@ -4,7 +4,7 @@
use crate::{
binding_model, command, conv,
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Input, Token},
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Hub, Input, Token},
id, pipeline, resource, swap_chain,
track::{BufferState, TextureState, TrackerSet},
validation, FastHashMap, LifeGuard, PrivateFeatures, Stored, MAX_BIND_GROUPS,
@@ -101,17 +101,24 @@ impl<T> AttachmentData<T> {
}
}
pub(crate) type RenderPassKey = AttachmentData<(hal::pass::Attachment, hal::image::Layout)>;
pub(crate) type FramebufferKey = AttachmentData<id::TextureViewId>;
#[derive(Clone, Debug, Hash, PartialEq)]
pub(crate) struct RenderPassContext {
pub attachments: AttachmentData<TextureFormat>,
pub sample_count: u8,
}
impl RenderPassContext {
// Assumed the renderpass only contains one subpass
pub(crate) fn compatible(&self, other: &RenderPassContext) -> bool {
self.colors == other.colors && self.depth_stencil == other.depth_stencil
self.attachments.colors == other.attachments.colors
&& self.attachments.depth_stencil == other.attachments.depth_stencil
&& self.sample_count == other.sample_count
}
}
pub(crate) type RenderPassKey = AttachmentData<(hal::pass::Attachment, hal::image::Layout)>;
pub(crate) type FramebufferKey = AttachmentData<id::TextureViewId>;
pub(crate) type RenderPassContext = AttachmentData<TextureFormat>;
type BufferMapResult = Result<ptr::NonNull<u8>, hal::device::MapError>;
type BufferMapPendingCallback = (resource::BufferMapOperation, resource::BufferMapAsyncStatus);
@@ -172,7 +179,7 @@ pub struct Device<B: hal::Backend> {
pub(crate) com_allocator: command::CommandAllocator<B>,
mem_allocator: Mutex<Heaps<B>>,
desc_allocator: Mutex<DescriptorAllocator<B>>,
life_guard: LifeGuard,
pub(crate) life_guard: LifeGuard,
pub(crate) trackers: Mutex<TrackerSet>,
pub(crate) render_passes: Mutex<FastHashMap<RenderPassKey, B::RenderPass>>,
pub(crate) framebuffers: Mutex<FastHashMap<FramebufferKey, B::Framebuffer>>,
@@ -277,29 +284,89 @@ impl<B: GfxBackend> Device<B> {
fn maintain<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>(
&'this self,
global: &Global<G>,
hub: &Hub<B, G>,
force_wait: bool,
token: &mut Token<'token, Self>,
) -> Vec<BufferMapPendingCallback> {
let mut life_tracker = self.lock_life(token);
life_tracker.triage_suspected(
global,
hub,
&self.trackers,
#[cfg(feature = "trace")]
self.trace.as_ref(),
token,
);
life_tracker.triage_mapped(global, token);
life_tracker.triage_framebuffers(global, &mut *self.framebuffers.lock(), token);
life_tracker.triage_mapped(hub, token);
life_tracker.triage_framebuffers(hub, &mut *self.framebuffers.lock(), token);
let last_done = life_tracker.triage_submissions(&self.raw, force_wait);
let callbacks = life_tracker.handle_mapping(global, &self.raw, &self.trackers, token);
let callbacks = life_tracker.handle_mapping(hub, &self.raw, &self.trackers, token);
life_tracker.cleanup(&self.raw, &self.mem_allocator, &self.desc_allocator);
self.com_allocator.maintain(&self.raw, last_done);
callbacks
}
fn untrack<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>(
&'this mut self,
hub: &Hub<B, G>,
trackers: &TrackerSet,
mut token: &mut Token<'token, Self>,
) {
self.temp_suspected.clear();
// As the tracker is cleared/dropped, we need to consider all the resources
// that it references for destruction in the next GC pass.
{
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (compute_pipe_guard, mut token) = hub.compute_pipelines.read(&mut token);
let (render_pipe_guard, mut token) = hub.render_pipelines.read(&mut token);
let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, mut token) = hub.textures.read(&mut token);
let (texture_view_guard, mut token) = hub.texture_views.read(&mut token);
let (sampler_guard, _) = hub.samplers.read(&mut token);
for id in trackers.buffers.used() {
if buffer_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.buffers.push(id);
}
}
for id in trackers.textures.used() {
if texture_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.textures.push(id);
}
}
for id in trackers.views.used() {
if texture_view_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.texture_views.push(id);
}
}
for id in trackers.bind_groups.used() {
if bind_group_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.bind_groups.push(id);
}
}
for id in trackers.samplers.used() {
if sampler_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.samplers.push(id);
}
}
for id in trackers.compute_pipes.used() {
if compute_pipe_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.compute_pipelines.push(id);
}
}
for id in trackers.render_pipes.used() {
if render_pipe_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.render_pipelines.push(id);
}
}
}
self.lock_life(&mut token)
.suspected_resources
.extend(&self.temp_suspected);
}
fn create_buffer(
&self,
self_id: id::DeviceId,
@@ -1734,7 +1801,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn device_create_command_encoder<B: GfxBackend>(
&self,
device_id: id::DeviceId,
desc: &wgt::CommandEncoderDescriptor,
desc: &wgt::CommandEncoderDescriptor<Label>,
id_in: Input<G, id::CommandEncoderId>,
) -> id::CommandEncoderId {
let hub = B::hub(self);
@@ -1776,66 +1843,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let hub = B::hub(self);
let mut token = Token::root();
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let comb = {
let (mut command_buffer_guard, _) = hub.command_buffers.write(&mut token);
command_buffer_guard.remove(command_encoder_id).unwrap()
};
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let device = &mut device_guard[comb.device_id.value];
device.temp_suspected.clear();
// As the tracker is cleared/dropped, we need to consider all the resources
// that it references for destruction in the next GC pass.
{
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (compute_pipe_guard, mut token) = hub.compute_pipelines.read(&mut token);
let (render_pipe_guard, mut token) = hub.render_pipelines.read(&mut token);
let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, mut token) = hub.textures.read(&mut token);
let (texture_view_guard, mut token) = hub.texture_views.read(&mut token);
let (sampler_guard, _) = hub.samplers.read(&mut token);
for id in comb.trackers.buffers.used() {
if buffer_guard[id].life_guard.ref_count.is_none() {
device.temp_suspected.buffers.push(id);
}
}
for id in comb.trackers.textures.used() {
if texture_guard[id].life_guard.ref_count.is_none() {
device.temp_suspected.textures.push(id);
}
}
for id in comb.trackers.views.used() {
if texture_view_guard[id].life_guard.ref_count.is_none() {
device.temp_suspected.texture_views.push(id);
}
}
for id in comb.trackers.bind_groups.used() {
if bind_group_guard[id].life_guard.ref_count.is_none() {
device.temp_suspected.bind_groups.push(id);
}
}
for id in comb.trackers.samplers.used() {
if sampler_guard[id].life_guard.ref_count.is_none() {
device.temp_suspected.samplers.push(id);
}
}
for id in comb.trackers.compute_pipes.used() {
if compute_pipe_guard[id].life_guard.ref_count.is_none() {
device.temp_suspected.compute_pipelines.push(id);
}
}
for id in comb.trackers.render_pipes.used() {
if render_pipe_guard[id].life_guard.ref_count.is_none() {
device.temp_suspected.render_pipelines.push(id);
}
}
}
device
.lock_life(&mut token)
.suspected_resources
.extend(&device.temp_suspected);
device.untrack::<G>(&hub, &comb.trackers, &mut token);
device.com_allocator.discard(comb);
}
@@ -1843,6 +1858,41 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
self.command_encoder_destroy::<B>(command_buffer_id)
}
pub fn device_create_render_bundle_encoder(
&self,
device_id: id::DeviceId,
desc: &wgt::RenderBundleEncoderDescriptor,
) -> id::RenderBundleEncoderId {
let encoder = command::RenderBundleEncoder::new(desc, device_id);
Box::into_raw(Box::new(encoder))
}
pub fn render_bundle_encoder_destroy(
&self,
render_bundle_encoder: command::RenderBundleEncoder,
) {
render_bundle_encoder.destroy();
}
pub fn render_bundle_destroy<B: GfxBackend>(&self, render_bundle_id: id::RenderBundleId) {
let hub = B::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let device_id = {
let (mut bundle_guard, _) = hub.render_bundles.write(&mut token);
let bundle = &mut bundle_guard[render_bundle_id];
bundle.life_guard.ref_count.take();
bundle.device_id.value
};
device_guard[device_id]
.lock_life(&mut token)
.suspected_resources
.render_bundles
.push(render_bundle_id);
}
pub fn device_create_render_pipeline<B: GfxBackend>(
&self,
device_id: id::DeviceId,
@@ -2139,9 +2189,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
};
let pass_context = RenderPassContext {
colors: color_states.iter().map(|state| state.format).collect(),
resolves: ArrayVec::new(),
depth_stencil: depth_stencil_state.map(|state| state.format),
attachments: AttachmentData {
colors: color_states.iter().map(|state| state.format).collect(),
resolves: ArrayVec::new(),
depth_stencil: depth_stencil_state.map(|state| state.format),
},
sample_count: samples,
};
let mut flags = pipeline::PipelineFlags::empty();
@@ -2173,7 +2226,6 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
flags,
index_format: desc.vertex_state.index_format,
vertex_strides,
sample_count: samples,
life_guard: LifeGuard::new(),
};
@@ -2492,7 +2544,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (device_guard, mut token) = hub.devices.read(&mut token);
let device = &device_guard[device_id];
device.lock_life(&mut token).triage_suspected(
self,
&hub,
&device.trackers,
#[cfg(feature = "trace")]
None,
@@ -2505,7 +2557,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut token = Token::root();
let callbacks = {
let (device_guard, mut token) = hub.devices.read(&mut token);
device_guard[device_id].maintain(self, force_wait, &mut token)
device_guard[device_id].maintain(&hub, force_wait, &mut token)
};
fire_map_callbacks(callbacks);
}
@@ -2519,7 +2571,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
for (_, device) in device_guard.iter(B::VARIANT) {
let cbs = device.maintain(self, force_wait, &mut token);
let cbs = device.maintain(&hub, force_wait, &mut token);
callbacks.extend(cbs);
}
}

View File

@@ -532,7 +532,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.after_submit_internal(comb_raw, submit_index);
}
let callbacks = device.maintain(self, false, &mut token);
let callbacks = device.maintain(&hub, false, &mut token);
super::Device::lock_life_internal(&device.life_tracker, &mut token).track_submission(
submit_index,
fence,

View File

@@ -2,10 +2,7 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
command::{BufferCopyView, TextureCopyView},
id,
};
use crate::id;
#[cfg(feature = "trace")]
use std::io::Write as _;
use std::ops::Range;
@@ -92,6 +89,28 @@ pub struct RenderPipelineDescriptor {
pub alpha_to_coverage_enabled: bool,
}
#[derive(Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct RenderBundleDescriptor {
pub label: String,
pub color_formats: Vec<wgt::TextureFormat>,
pub depth_stencil_format: Option<wgt::TextureFormat>,
pub sample_count: u32,
}
#[cfg(feature = "trace")]
impl RenderBundleDescriptor {
pub(crate) fn new(label: super::Label, context: &super::RenderPassContext) -> Self {
RenderBundleDescriptor {
label: super::own_label(&label),
color_formats: context.attachments.colors.to_vec(),
depth_stencil_format: context.attachments.depth_stencil,
sample_count: context.sample_count as u32,
}
}
}
#[derive(Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
@@ -163,6 +182,13 @@ pub enum Action {
desc: RenderPipelineDescriptor,
},
DestroyRenderPipeline(id::RenderPipelineId),
CreateRenderBundle {
id: id::RenderBundleId,
desc: RenderBundleDescriptor,
commands: Vec<crate::command::RenderCommand>,
dynamic_offsets: Vec<wgt::DynamicOffset>,
},
DestroyRenderBundle(id::RenderBundleId),
WriteBuffer {
id: id::BufferId,
data: FileName,
@@ -170,7 +196,7 @@ pub enum Action {
queued: bool,
},
WriteTexture {
to: TextureCopyView,
to: crate::command::TextureCopyView,
data: FileName,
layout: wgt::TextureDataLayout,
size: wgt::Extent3d,
@@ -190,18 +216,18 @@ pub enum Command {
size: wgt::BufferAddress,
},
CopyBufferToTexture {
src: BufferCopyView,
dst: TextureCopyView,
src: crate::command::BufferCopyView,
dst: crate::command::TextureCopyView,
size: wgt::Extent3d,
},
CopyTextureToBuffer {
src: TextureCopyView,
dst: BufferCopyView,
src: crate::command::TextureCopyView,
dst: crate::command::BufferCopyView,
size: wgt::Extent3d,
},
CopyTextureToTexture {
src: TextureCopyView,
dst: TextureCopyView,
src: crate::command::TextureCopyView,
dst: crate::command::TextureCopyView,
size: wgt::Extent3d,
},
RunComputePass {

View File

@@ -5,12 +5,12 @@
use crate::{
backend,
binding_model::{BindGroup, BindGroupLayout, PipelineLayout},
command::CommandBuffer,
command::{CommandBuffer, RenderBundle},
device::Device,
id::{
AdapterId, BindGroupId, BindGroupLayoutId, BufferId, CommandBufferId, ComputePipelineId,
DeviceId, PipelineLayoutId, RenderPipelineId, SamplerId, ShaderModuleId, SurfaceId,
SwapChainId, TextureId, TextureViewId, TypedId,
DeviceId, PipelineLayoutId, RenderBundleId, RenderPipelineId, SamplerId, ShaderModuleId,
SurfaceId, SwapChainId, TextureId, TextureViewId, TypedId,
},
instance::{Adapter, Instance, Surface},
pipeline::{ComputePipeline, RenderPipeline, ShaderModule},
@@ -174,7 +174,7 @@ impl<B: hal::Backend> Access<SwapChain<B>> for Root {}
impl<B: hal::Backend> Access<SwapChain<B>> for Device<B> {}
impl<B: hal::Backend> Access<PipelineLayout<B>> for Root {}
impl<B: hal::Backend> Access<PipelineLayout<B>> for Device<B> {}
impl<B: hal::Backend> Access<PipelineLayout<B>> for CommandBuffer<B> {}
impl<B: hal::Backend> Access<PipelineLayout<B>> for RenderBundle {}
impl<B: hal::Backend> Access<BindGroupLayout<B>> for Root {}
impl<B: hal::Backend> Access<BindGroupLayout<B>> for Device<B> {}
impl<B: hal::Backend> Access<BindGroupLayout<B>> for PipelineLayout<B> {}
@@ -186,6 +186,8 @@ impl<B: hal::Backend> Access<BindGroup<B>> for CommandBuffer<B> {}
impl<B: hal::Backend> Access<CommandBuffer<B>> for Root {}
impl<B: hal::Backend> Access<CommandBuffer<B>> for Device<B> {}
impl<B: hal::Backend> Access<CommandBuffer<B>> for SwapChain<B> {}
impl<B: hal::Backend> Access<RenderBundle> for Device<B> {}
impl<B: hal::Backend> Access<RenderBundle> for CommandBuffer<B> {}
impl<B: hal::Backend> Access<ComputePipeline<B>> for Device<B> {}
impl<B: hal::Backend> Access<ComputePipeline<B>> for BindGroup<B> {}
impl<B: hal::Backend> Access<RenderPipeline<B>> for Device<B> {}
@@ -298,6 +300,7 @@ pub trait GlobalIdentityHandlerFactory:
+ IdentityHandlerFactory<BindGroupLayoutId>
+ IdentityHandlerFactory<BindGroupId>
+ IdentityHandlerFactory<CommandBufferId>
+ IdentityHandlerFactory<RenderBundleId>
+ IdentityHandlerFactory<RenderPipelineId>
+ IdentityHandlerFactory<ComputePipelineId>
+ IdentityHandlerFactory<BufferId>
@@ -405,6 +408,7 @@ pub struct Hub<B: hal::Backend, F: GlobalIdentityHandlerFactory> {
pub bind_group_layouts: Registry<BindGroupLayout<B>, BindGroupLayoutId, F>,
pub bind_groups: Registry<BindGroup<B>, BindGroupId, F>,
pub command_buffers: Registry<CommandBuffer<B>, CommandBufferId, F>,
pub render_bundles: Registry<RenderBundle, RenderBundleId, F>,
pub render_pipelines: Registry<RenderPipeline<B>, RenderPipelineId, F>,
pub compute_pipelines: Registry<ComputePipeline<B>, ComputePipelineId, F>,
pub buffers: Registry<Buffer<B>, BufferId, F>,
@@ -424,6 +428,7 @@ impl<B: GfxBackend, F: GlobalIdentityHandlerFactory> Hub<B, F> {
bind_group_layouts: Registry::new(B::VARIANT, factory, "BindGroupLayout"),
bind_groups: Registry::new(B::VARIANT, factory, "BindGroup"),
command_buffers: Registry::new(B::VARIANT, factory, "CommandBuffer"),
render_bundles: Registry::new(B::VARIANT, factory, "RenderBundle"),
render_pipelines: Registry::new(B::VARIANT, factory, "RenderPipeline"),
compute_pipelines: Registry::new(B::VARIANT, factory, "ComputePipeline"),
buffers: Registry::new(B::VARIANT, factory, "Buffer"),
@@ -657,3 +662,9 @@ impl GfxBackend for backend::Dx11 {
&mut surface.dx11
}
}
#[cfg(test)]
fn _test_send_sync(global: &Global<IdentityManagerFactory>) {
fn test_internal<T: Send + Sync>(_: T) {}
test_internal(global)
}

View File

@@ -164,11 +164,12 @@ pub type ShaderModuleId = Id<crate::pipeline::ShaderModule<Dummy>>;
pub type RenderPipelineId = Id<crate::pipeline::RenderPipeline<Dummy>>;
pub type ComputePipelineId = Id<crate::pipeline::ComputePipeline<Dummy>>;
// Command
pub type CommandBufferId = Id<crate::command::CommandBuffer<Dummy>>;
pub type CommandEncoderId = CommandBufferId;
pub type RenderPassId = *mut crate::command::RawPass;
pub type ComputePassId = *mut crate::command::RawPass;
pub type RenderBundleId = Id<crate::command::RenderBundle<Dummy>>;
pub type CommandBufferId = Id<crate::command::CommandBuffer<Dummy>>;
pub type RenderPassId = *mut crate::command::RawPass<CommandEncoderId>;
pub type ComputePassId = *mut crate::command::RawPass<CommandEncoderId>;
pub type RenderBundleEncoderId = *mut crate::command::RenderBundleEncoder;
pub type RenderBundleId = Id<crate::command::RenderBundle>;
// Swap chain
pub type SwapChainId = Id<crate::swap_chain::SwapChain<Dummy>>;

View File

@@ -127,7 +127,6 @@ pub struct RenderPipeline<B: hal::Backend> {
pub(crate) pass_context: RenderPassContext,
pub(crate) flags: PipelineFlags,
pub(crate) index_format: IndexFormat,
pub(crate) sample_count: u8,
pub(crate) vertex_strides: Vec<(BufferAddress, InputStepMode)>,
pub(crate) life_guard: LifeGuard,
}

View File

@@ -237,11 +237,6 @@ impl<S: ResourceState> ResourceTracker<S> {
self.map.clear();
}
/// Returns true if the tracker is empty.
pub fn is_empty(&self) -> bool {
self.map.is_empty()
}
/// Initialize a resource to be used.
///
/// Returns false if the resource is already registered.
@@ -444,6 +439,7 @@ pub(crate) struct TrackerSet {
pub samplers: ResourceTracker<PhantomData<id::SamplerId>>,
pub compute_pipes: ResourceTracker<PhantomData<id::ComputePipelineId>>,
pub render_pipes: ResourceTracker<PhantomData<id::RenderPipelineId>>,
pub bundles: ResourceTracker<PhantomData<id::RenderBundleId>>,
}
impl TrackerSet {
@@ -457,6 +453,7 @@ impl TrackerSet {
samplers: ResourceTracker::new(backend),
compute_pipes: ResourceTracker::new(backend),
render_pipes: ResourceTracker::new(backend),
bundles: ResourceTracker::new(backend),
}
}
@@ -469,6 +466,7 @@ impl TrackerSet {
self.samplers.clear();
self.compute_pipes.clear();
self.render_pipes.clear();
self.bundles.clear();
}
/// Try to optimize the tracking representation.
@@ -480,6 +478,7 @@ impl TrackerSet {
self.samplers.optimize();
self.compute_pipes.optimize();
self.render_pipes.optimize();
self.bundles.optimize();
}
/// Merge all the trackers of another instance by extending
@@ -494,6 +493,7 @@ impl TrackerSet {
.merge_extend(&other.compute_pipes)
.unwrap();
self.render_pipes.merge_extend(&other.render_pipes).unwrap();
self.bundles.merge_extend(&other.bundles).unwrap();
}
pub fn backend(&self) -> wgt::Backend {

View File

@@ -8,7 +8,9 @@ use peek_poke::PeekPoke;
use serde::Deserialize;
#[cfg(feature = "trace")]
use serde::Serialize;
use std::{io, ptr, slice};
use std::{io, slice};
pub type BufferAddress = u64;
/// Buffer-Texture copies on command encoders have to have the `bytes_per_row`
/// aligned to this number.
@@ -16,9 +18,9 @@ use std::{io, ptr, slice};
/// This doesn't apply to `Queue::write_texture`.
pub const COPY_BYTES_PER_ROW_ALIGNMENT: u32 = 256;
/// Bound uniform/storage buffer offsets must be aligned to this number.
pub const BIND_BUFFER_ALIGNMENT: u64 = 256;
pub const BIND_BUFFER_ALIGNMENT: BufferAddress = 256;
/// Buffer to buffer copy offsets and sizes must be aligned to this number
pub const COPY_BUFFER_ALIGNMENT: u64 = 4;
pub const COPY_BUFFER_ALIGNMENT: BufferAddress = 4;
#[repr(transparent)]
#[derive(Clone, Copy, Debug, PartialEq)]
@@ -33,7 +35,7 @@ pub const COPY_BUFFER_ALIGNMENT: u64 = 4;
derive(serde::Deserialize),
serde(from = "SerBufferSize")
)]
pub struct BufferSize(pub u64);
pub struct BufferSize(pub BufferAddress);
impl BufferSize {
pub const WHOLE: BufferSize = BufferSize(!0);
@@ -295,8 +297,6 @@ pub enum TextureViewDimension {
D3,
}
pub type BufferAddress = u64;
#[repr(C)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
#[cfg_attr(feature = "trace", derive(Serialize))]
@@ -545,6 +545,12 @@ pub enum IndexFormat {
Uint32 = 1,
}
impl Default for IndexFormat {
fn default() -> Self {
IndexFormat::Uint32
}
}
#[repr(C)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
#[cfg_attr(feature = "trace", derive(Serialize))]
@@ -715,17 +721,16 @@ impl<L> BufferDescriptor<L> {
}
#[repr(C)]
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct CommandEncoderDescriptor {
// MSVC doesn't allow zero-sized structs
// We can remove this when we actually have a field
// pub todo: u32,
pub label: *const std::os::raw::c_char,
/// Describes a command encoder at creation time.
///
/// Generic over the label type `L` so the same descriptor can carry
/// different label representations (e.g. a raw C string on the FFI
/// surface vs. an owned/borrowed string internally — see `map_label`,
/// which converts between them). TODO(review): confirm the concrete
/// `L` instantiations against the callers.
#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]
pub struct CommandEncoderDescriptor<L> {
    // Debug label attached to the encoder; type depends on the API layer.
    pub label: L,
}
impl Default for CommandEncoderDescriptor {
fn default() -> CommandEncoderDescriptor {
CommandEncoderDescriptor { label: ptr::null() }
impl<L> CommandEncoderDescriptor<L> {
    /// Builds a new descriptor whose label is produced by applying `fun`
    /// to a borrow of this descriptor's label. All other (non-existent)
    /// fields are unaffected; `self` is not consumed.
    pub fn map_label<K>(&self, fun: impl FnOnce(&L) -> K) -> CommandEncoderDescriptor<K> {
        let label = fun(&self.label);
        CommandEncoderDescriptor { label }
    }
}
@@ -1083,6 +1088,30 @@ pub struct CommandBufferDescriptor {
pub todo: u32,
}
/// Describes a render-bundle encoder at creation time.
///
/// A bundle records render commands for later replay, so the encoder must
/// know up front which pass formats it will be compatible with.
#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]
pub struct RenderBundleEncoderDescriptor<'a> {
    // Optional debug label for the encoder.
    pub label: Option<&'a str>,
    // Color attachment formats the bundle must be compatible with.
    pub color_formats: &'a [TextureFormat],
    // Depth/stencil attachment format, if the target pass has one.
    pub depth_stencil_format: Option<TextureFormat>,
    // Sample count of the target pass — presumably must match at replay
    // time; TODO(review): confirm validation site.
    pub sample_count: u32,
}
/// Describes a finished render bundle.
///
/// Generic over the label type `L`, mirroring `CommandEncoderDescriptor`:
/// `map_label` converts between label representations across API layers.
#[repr(C)]
#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "trace", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct RenderBundleDescriptor<L> {
    // Debug label attached to the bundle; type depends on the API layer.
    pub label: L,
}
impl<L> RenderBundleDescriptor<L> {
    /// Builds a new descriptor whose label is produced by applying `fun`
    /// to a borrow of this descriptor's label; `self` is not consumed.
    pub fn map_label<K>(&self, fun: impl FnOnce(&L) -> K) -> RenderBundleDescriptor<K> {
        let label = fun(&self.label);
        RenderBundleDescriptor { label }
    }
}
#[repr(C)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
#[cfg_attr(feature = "trace", derive(Serialize))]