226: Tracking Rewrite r=grovesNL a=kvark

Fixes #44

The idea is to support independent tracking of sub-resources. Today, this is needed for textures, which can have individual layers and mipmap levels in different states at the same time. Tomorrow, this will be needed for buffer sub-ranges.

What started as an attempt to hack this in grew into a complete rewrite of the tracker... The new approach is cleaner in a few places (e.g. `TrackPermit` is gone), but the implementation is obviously more complex. I tried to separate the levels from each other (see `ResourceState` and `RangedStates`) to fight the complexity, but making that solid requires a fair amount of testing infrastructure.

Also regresses #216 a bit (cc @arashikou): the tracker is a relatively complex structure, and I somehow doubt it's useful to look at in debug spew. We may need to implement `Debug` manually for it before re-adding `Debug` derives on passes and command buffers.

TODO:
  - [x] documentation of tracking types
  - [x] unit tests for tracking logic
  - [x] actual testing with existing apps, ensure no regressions
  - [x] write a mipmap generation example

Co-authored-by: Dzmitry Malyshau <kvarkus@gmail.com>
This commit is contained in:
bors[bot]
2019-06-16 14:53:26 +00:00
20 changed files with 1615 additions and 608 deletions

1
.gitignore vendored
View File

@@ -1,6 +1,7 @@
/target
**/*.rs.bk
#Cargo.lock
.DS_Store
.vscode
.vs
build

View File

@@ -57,6 +57,7 @@ before_install:
script:
- cargo test
- if [[ $TRAVIS_OS_NAME == "linux" ]]; then cargo check --release; fi
- if [[ $TRAVIS_RUST_VERSION == "nightly" ]]; then cargo +nightly install cbindgen; fi
- if [[ $TRAVIS_RUST_VERSION == "nightly" ]] && [[ $TRAVIS_OS_NAME == "windows" ]]; then
wget -nc -O glfw.zip https://github.com/glfw/glfw/archive/3.3.zip &&

View File

@@ -19,8 +19,6 @@ typedef struct WGPUClientFactory WGPUClientFactory;
typedef struct WGPUServer WGPUServer;
typedef struct WGPUTrackPermit WGPUTrackPermit;
typedef uint32_t WGPUIndex;
typedef uint32_t WGPUEpoch;
@@ -57,10 +55,6 @@ typedef struct {
const uint8_t *error;
} WGPUInfrastructure;
WGPUDeviceId wgpu_client_adapter_create_device(const WGPUClient *client,
WGPUAdapterId adapter_id,
const WGPUDeviceDescriptor *desc);

View File

@@ -11,6 +11,8 @@
#define WGPUMAX_COLOR_TARGETS 4
#define WGPUMAX_MIP_LEVELS 16
#define WGPUMAX_VERTEX_BUFFERS 8
typedef enum {
@@ -230,8 +232,6 @@ typedef enum {
WGPUVertexFormat_Int4 = 48,
} WGPUVertexFormat;
typedef struct WGPUTrackPermit WGPUTrackPermit;
typedef uint32_t WGPUIndex;
typedef uint32_t WGPUEpoch;
@@ -615,10 +615,6 @@ typedef struct {
uint32_t array_count;
} WGPUTextureViewDescriptor;
#if defined(WGPU_LOCAL)
WGPUDeviceId wgpu_adapter_request_device(WGPUAdapterId adapter_id,
const WGPUDeviceDescriptor *desc);

View File

@@ -105,7 +105,6 @@ pub struct BindGroupDescriptor {
pub bindings_length: usize,
}
#[derive(Debug)]
pub struct BindGroup<B: hal::Backend> {
pub(crate) raw: DescriptorSet<B>,
pub(crate) device_id: Stored<DeviceId>,

View File

@@ -24,7 +24,6 @@ impl<B: hal::Backend> CommandPool<B> {
}
}
#[derive(Debug)]
struct Inner<B: hal::Backend> {
pools: HashMap<thread::ThreadId, CommandPool<B>>,
pending: Vec<CommandBuffer<B>>,
@@ -42,7 +41,6 @@ impl<B: hal::Backend> Inner<B> {
}
}
#[derive(Debug)]
pub struct CommandAllocator<B: hal::Backend> {
queue_family: hal::queue::QueueFamilyId,
inner: Mutex<Inner<B>>,

View File

@@ -17,7 +17,6 @@ use hal::{self, command::RawCommandBuffer};
use std::{iter, slice};
#[derive(Debug)]
pub struct ComputePass<B: hal::Backend> {
raw: B::CommandBuffer,
cmb_id: Stored<CommandBufferId>,

View File

@@ -21,7 +21,7 @@ use crate::{
hub::{Storage, HUB},
resource::TexturePlacement,
swap_chain::{SwapChainLink, SwapImageEpoch},
track::{DummyUsage, Stitch, TrackerSet},
track::{Stitch, TrackerSet},
BufferHandle,
BufferId,
Color,
@@ -88,7 +88,6 @@ pub struct RenderPassDescriptor {
pub depth_stencil_attachment: *const RenderPassDepthStencilAttachmentDescriptor<TextureViewId>,
}
#[derive(Debug)]
pub struct CommandBuffer<B: hal::Backend> {
pub(crate) raw: Vec<B::CommandBuffer>,
is_recording: bool,
@@ -110,34 +109,29 @@ impl CommandBufferHandle {
) {
let buffer_barriers =
base.buffers
.consume_by_replace(&head.buffers, stitch)
.map(|(id, transit)| {
let b = &buffer_guard[id];
trace!("transit buffer {:?} {:?}", id, transit);
.merge_replace(&head.buffers, stitch)
.map(|pending| {
trace!("transit buffer {:?}", pending);
hal::memory::Barrier::Buffer {
states: conv::map_buffer_state(transit.start)
.. conv::map_buffer_state(transit.end),
target: &b.raw,
states: pending.to_states(),
target: &buffer_guard[pending.id].raw,
range: None .. None,
families: None,
}
});
let texture_barriers = base
.textures
.consume_by_replace(&head.textures, stitch)
.map(|(id, transit)| {
let t = &texture_guard[id];
trace!("transit texture {:?} {:?}", id, transit);
let aspects = t.full_range.aspects;
.merge_replace(&head.textures, stitch)
.map(|pending| {
trace!("transit texture {:?}", pending);
hal::memory::Barrier::Image {
states: conv::map_texture_state(transit.start, aspects)
.. conv::map_texture_state(transit.end, aspects),
target: &t.raw,
range: t.full_range.clone(), //TODO?
states: pending.to_states(),
target: &texture_guard[pending.id].raw,
range: pending.selector,
families: None,
}
});
base.views.consume_by_extend(&head.views).unwrap();
base.views.merge_extend(&head.views).unwrap();
let stages = all_buffer_stages() | all_image_stages();
unsafe {
@@ -175,6 +169,7 @@ pub fn command_encoder_begin_render_pass(
let mut cmb_guard = HUB.command_buffers.write();
let cmb = &mut cmb_guard[command_encoder_id];
let device = &device_guard[cmb.device_id.value];
let texture_guard = HUB.textures.read();
let view_guard = HUB.texture_views.read();
let mut current_comb = device.com_allocator.extend(cmb);
@@ -185,6 +180,7 @@ pub fn command_encoder_begin_render_pass(
);
}
let mut extent = None;
let mut barriers = Vec::new();
let color_attachments =
unsafe { slice::from_raw_parts(desc.color_attachments, desc.color_attachments_length) };
@@ -195,38 +191,63 @@ pub fn command_encoder_begin_render_pass(
let swap_chain_links = &mut cmb.swap_chain_links;
let depth_stencil = depth_stencil_attachment.map(|at| {
let view = &view_guard[at.attachment];
let view = trackers.views
.use_extend(&*view_guard, at.attachment, (), ())
.unwrap();
if let Some(ex) = extent {
assert_eq!(ex, view.extent);
} else {
extent = Some(view.extent);
}
trackers
.views
.query(at.attachment, &view.life_guard.ref_count, DummyUsage);
let query = trackers.textures.query(
let old_layout = match trackers.textures.query(
view.texture_id.value,
&view.texture_id.ref_count,
TextureUsage::empty(),
);
let (_, layout) = conv::map_texture_state(
query.usage,
hal::format::Aspects::DEPTH | hal::format::Aspects::STENCIL,
);
view.range.clone(),
) {
Some(usage) => {
conv::map_texture_state(
usage,
hal::format::Aspects::DEPTH | hal::format::Aspects::STENCIL,
).1
}
None => {
// Required sub-resources have inconsistent states, we need to
// issue individual barriers instead of relying on the render pass.
let (texture, pending) = trackers.textures.use_replace(
&*texture_guard,
view.texture_id.value,
view.range.clone(),
TextureUsage::OUTPUT_ATTACHMENT,
);
barriers.extend(pending.map(|pending| hal::memory::Barrier::Image {
states: pending.to_states(),
target: &texture.raw,
families: None,
range: pending.selector,
}));
hal::image::Layout::DepthStencilAttachmentOptimal
}
};
hal::pass::Attachment {
format: Some(conv::map_texture_format(view.format)),
samples: view.samples,
ops: conv::map_load_store_ops(at.depth_load_op, at.depth_store_op),
stencil_ops: conv::map_load_store_ops(at.stencil_load_op, at.stencil_store_op),
layouts: layout .. layout,
layouts: old_layout .. hal::image::Layout::DepthStencilAttachmentOptimal,
}
});
let color_keys = color_attachments.iter().map(|at| {
let view = &view_guard[at.attachment];
let view = trackers.views
.use_extend(&*view_guard, at.attachment, (), ())
.unwrap();
if let Some(ex) = extent {
assert_eq!(ex, view.extent);
} else {
extent = Some(view.extent);
}
if view.is_owned_by_swap_chain {
let link = match HUB.textures.read()[view.texture_id.value].placement {
let link = match texture_guard[view.texture_id.value].placement {
TexturePlacement::SwapChain(ref link) => SwapChainLink {
swap_chain_id: link.swap_chain_id.clone(),
epoch: *link.epoch.lock(),
@@ -237,26 +258,37 @@ pub fn command_encoder_begin_render_pass(
swap_chain_links.push(link);
}
if let Some(ex) = extent {
assert_eq!(ex, view.extent);
} else {
extent = Some(view.extent);
}
trackers
.views
.query(at.attachment, &view.life_guard.ref_count, DummyUsage);
let query = trackers.textures.query(
let old_layout = match trackers.textures.query(
view.texture_id.value,
&view.texture_id.ref_count,
TextureUsage::empty(),
);
let (_, layout) = conv::map_texture_state(query.usage, hal::format::Aspects::COLOR);
view.range.clone(),
) {
Some(usage) => {
conv::map_texture_state(usage, hal::format::Aspects::COLOR).1
}
None => {
// Required sub-resources have inconsistent states, we need to
// issue individual barriers instead of relying on the render pass.
let (texture, pending) = trackers.textures.use_replace(
&*texture_guard,
view.texture_id.value,
view.range.clone(),
TextureUsage::OUTPUT_ATTACHMENT,
);
barriers.extend(pending.map(|pending| hal::memory::Barrier::Image {
states: pending.to_states(),
target: &texture.raw,
families: None,
range: pending.selector,
}));
hal::image::Layout::ColorAttachmentOptimal
}
};
hal::pass::Attachment {
format: Some(conv::map_texture_format(view.format)),
samples: view.samples,
ops: conv::map_load_store_ops(at.load_op, at.store_op),
stencil_ops: hal::pass::AttachmentOps::DONT_CARE,
layouts: layout .. layout,
layouts: old_layout .. hal::image::Layout::ColorAttachmentOptimal,
}
});
@@ -266,6 +298,16 @@ pub fn command_encoder_begin_render_pass(
}
};
if !barriers.is_empty() {
unsafe {
current_comb.pipeline_barrier(
all_image_stages() .. all_image_stages(),
hal::memory::Dependencies::empty(),
barriers,
);
}
}
let mut render_pass_cache = device.render_passes.lock();
let render_pass = match render_pass_cache.entry(rp_key.clone()) {
Entry::Occupied(e) => e.into_mut(),

View File

@@ -109,7 +109,6 @@ impl VertexState {
}
}
#[derive(Debug)]
pub struct RenderPass<B: hal::Backend> {
raw: B::CommandBuffer,
cmb_id: Stored<CommandBufferId>,
@@ -177,6 +176,7 @@ pub extern "C" fn wgpu_render_pass_end_pass(pass_id: RenderPassId) -> CommandBuf
unsafe {
pass.raw.end_render_pass();
}
pass.trackers.optimize();
let cmb = &mut cmb_guard[pass.cmb_id.value];
match cmb.raw.last_mut() {
@@ -192,7 +192,7 @@ pub extern "C" fn wgpu_render_pass_end_pass(pass_id: RenderPassId) -> CommandBuf
unsafe { last.finish() };
}
None => {
cmb.trackers.consume_by_extend(&pass.trackers);
cmb.trackers.merge_extend(&pass.trackers);
}
}
@@ -232,7 +232,7 @@ pub extern "C" fn wgpu_render_pass_set_bind_group(
}
}
pass.trackers.consume_by_extend(&bind_group.used);
pass.trackers.merge_extend(&bind_group.used);
if let Some((pipeline_layout_id, follow_up)) =
pass.binder
@@ -282,7 +282,7 @@ pub extern "C" fn wgpu_render_pass_set_index_buffer(
let buffer = pass
.trackers
.buffers
.get_with_extended_usage(&*buffer_guard, buffer_id, BufferUsage::INDEX)
.use_extend(&*buffer_guard, buffer_id, (), BufferUsage::INDEX)
.unwrap();
let range = offset .. buffer.size;
@@ -316,7 +316,7 @@ pub extern "C" fn wgpu_render_pass_set_vertex_buffers(
for (vbs, (&id, &offset)) in pass.vertex_state.inputs.iter_mut().zip(buffers.iter().zip(offsets)) {
let buffer = pass.trackers
.buffers
.get_with_extended_usage(&*buffer_guard, id, BufferUsage::VERTEX)
.use_extend(&*buffer_guard, id, (), BufferUsage::VERTEX)
.unwrap();
vbs.total_size = buffer.size - offset;
}
@@ -450,7 +450,7 @@ pub extern "C" fn wgpu_render_pass_set_pipeline(
let buffer = pass
.trackers
.buffers
.get_with_extended_usage(&*buffer_guard, buffer_id, BufferUsage::INDEX)
.use_extend(&*buffer_guard, buffer_id, (), BufferUsage::INDEX)
.unwrap();
let view = hal::buffer::IndexBufferView {

View File

@@ -39,6 +39,31 @@ pub struct TextureCopyView {
pub origin: Origin3d,
}
impl TextureCopyView {
//TODO: we currently access each texture twice for a transfer,
// once only to get the aspect flags, which is unfortunate.
fn to_selector(&self, aspects: hal::format::Aspects) -> hal::image::SubresourceRange {
let level = self.mip_level as hal::image::Level;
let layer = self.array_layer as hal::image::Layer;
hal::image::SubresourceRange {
aspects,
levels: level .. level + 1,
layers: layer .. layer + 1,
}
}
fn to_sub_layers(
&self, aspects: hal::format::Aspects
) -> hal::image::SubresourceLayers {
let layer = self.array_layer as hal::image::Layer;
hal::image::SubresourceLayers {
aspects,
level: self.mip_level as hal::image::Level,
layers: layer .. layer + 1,
}
}
}
#[no_mangle]
pub extern "C" fn wgpu_command_buffer_copy_buffer_to_buffer(
command_buffer_id: CommandBufferId,
@@ -51,30 +76,31 @@ pub extern "C" fn wgpu_command_buffer_copy_buffer_to_buffer(
let mut cmb_guard = HUB.command_buffers.write();
let cmb = &mut cmb_guard[command_buffer_id];
let buffer_guard = HUB.buffers.read();
// we can't hold both src_pending and dst_pending in scope because they
// borrow the buffer tracker mutably...
let mut barriers = Vec::new();
let (src_buffer, src_usage) = cmb
let (src_buffer, src_pending) = cmb
.trackers
.buffers
.get_with_replaced_usage(&*buffer_guard, src, BufferUsage::TRANSFER_SRC)
.unwrap();
let src_barrier = src_usage.map(|old| hal::memory::Barrier::Buffer {
states: conv::map_buffer_state(old) .. hal::buffer::Access::TRANSFER_READ,
.use_replace(&*buffer_guard, src, (), BufferUsage::TRANSFER_SRC);
barriers.extend(src_pending.map(|pending| hal::memory::Barrier::Buffer {
states: pending.to_states(),
target: &src_buffer.raw,
families: None,
range: None .. None,
});
}));
let (dst_buffer, dst_usage) = cmb
let (dst_buffer, dst_pending) = cmb
.trackers
.buffers
.get_with_replaced_usage(&*buffer_guard, dst, BufferUsage::TRANSFER_DST)
.unwrap();
let dst_barrier = dst_usage.map(|old| hal::memory::Barrier::Buffer {
states: conv::map_buffer_state(old) .. hal::buffer::Access::TRANSFER_WRITE,
.use_replace(&*buffer_guard, dst, (), BufferUsage::TRANSFER_DST);
barriers.extend(dst_pending.map(|pending| hal::memory::Barrier::Buffer {
states: pending.to_states(),
target: &dst_buffer.raw,
families: None,
range: None .. None,
});
}));
let region = hal::command::BufferCopy {
src: src_offset,
@@ -86,7 +112,7 @@ pub extern "C" fn wgpu_command_buffer_copy_buffer_to_buffer(
cmb_raw.pipeline_barrier(
all_buffer_stages() .. all_buffer_stages(),
hal::memory::Dependencies::empty(),
src_barrier.into_iter().chain(dst_barrier),
barriers,
);
cmb_raw.copy_buffer(&src_buffer.raw, &dst_buffer.raw, iter::once(region));
}
@@ -103,35 +129,30 @@ pub extern "C" fn wgpu_command_buffer_copy_buffer_to_texture(
let cmb = &mut cmb_guard[command_buffer_id];
let buffer_guard = HUB.buffers.read();
let texture_guard = HUB.textures.read();
let aspects = texture_guard[destination.texture].full_range.aspects;
let (src_buffer, src_usage) = cmb
let (src_buffer, src_pending) = cmb
.trackers
.buffers
.get_with_replaced_usage(&*buffer_guard, source.buffer, BufferUsage::TRANSFER_SRC)
.unwrap();
let src_barrier = src_usage.map(|old| hal::memory::Barrier::Buffer {
states: conv::map_buffer_state(old) .. hal::buffer::Access::TRANSFER_READ,
.use_replace(&*buffer_guard, source.buffer, (), BufferUsage::TRANSFER_SRC);
let src_barriers = src_pending.map(|pending| hal::memory::Barrier::Buffer {
states: pending.to_states(),
target: &src_buffer.raw,
families: None,
range: None .. None,
});
let (dst_texture, dst_usage) = cmb
.trackers
.textures
.get_with_replaced_usage(
&*texture_guard,
destination.texture,
TextureUsage::TRANSFER_DST,
)
.unwrap();
let aspects = dst_texture.full_range.aspects;
let dst_texture_state = conv::map_texture_state(TextureUsage::TRANSFER_DST, aspects);
let dst_barrier = dst_usage.map(|old| hal::memory::Barrier::Image {
states: conv::map_texture_state(old, aspects) .. dst_texture_state,
let (dst_texture, dst_pending) = cmb.trackers.textures.use_replace(
&*texture_guard,
destination.texture,
destination.to_selector(aspects),
TextureUsage::TRANSFER_DST,
);
let dst_barriers = dst_pending.map(|pending| hal::memory::Barrier::Image {
states: pending.to_states(),
target: &dst_texture.raw,
families: None,
range: dst_texture.full_range.clone(),
range: pending.selector,
});
if let TexturePlacement::SwapChain(ref link) = dst_texture.placement {
@@ -142,6 +163,7 @@ pub extern "C" fn wgpu_command_buffer_copy_buffer_to_texture(
});
}
let aspects = dst_texture.full_range.aspects;
let bytes_per_texel = conv::map_texture_format(dst_texture.format)
.surface_desc()
.bits as u32
@@ -152,11 +174,7 @@ pub extern "C" fn wgpu_command_buffer_copy_buffer_to_texture(
buffer_offset: source.offset,
buffer_width,
buffer_height: source.image_height,
image_layers: hal::image::SubresourceLayers {
aspects, //TODO
level: destination.mip_level as hal::image::Level,
layers: destination.array_layer as u16 .. destination.array_layer as u16 + 1,
},
image_layers: destination.to_sub_layers(aspects),
image_offset: conv::map_origin(destination.origin),
image_extent: conv::map_extent(copy_size),
};
@@ -166,12 +184,12 @@ pub extern "C" fn wgpu_command_buffer_copy_buffer_to_texture(
cmb_raw.pipeline_barrier(
stages .. stages,
hal::memory::Dependencies::empty(),
src_barrier.into_iter().chain(dst_barrier),
src_barriers.chain(dst_barriers),
);
cmb_raw.copy_buffer_to_image(
&src_buffer.raw,
&dst_texture.raw,
dst_texture_state.1,
hal::image::Layout::TransferDstOptimal,
iter::once(region),
);
}
@@ -188,19 +206,19 @@ pub extern "C" fn wgpu_command_buffer_copy_texture_to_buffer(
let cmb = &mut cmb_guard[command_buffer_id];
let buffer_guard = HUB.buffers.read();
let texture_guard = HUB.textures.read();
let aspects = texture_guard[source.texture].full_range.aspects;
let (src_texture, src_usage) = cmb
.trackers
.textures
.get_with_replaced_usage(&*texture_guard, source.texture, TextureUsage::TRANSFER_SRC)
.unwrap();
let aspects = src_texture.full_range.aspects;
let src_texture_state = conv::map_texture_state(TextureUsage::TRANSFER_SRC, aspects);
let src_barrier = src_usage.map(|old| hal::memory::Barrier::Image {
states: conv::map_texture_state(old, aspects) .. src_texture_state,
let (src_texture, src_pending) = cmb.trackers.textures.use_replace(
&*texture_guard,
source.texture,
source.to_selector(aspects),
TextureUsage::TRANSFER_SRC,
);
let src_barriers = src_pending.map(|pending| hal::memory::Barrier::Image {
states: pending.to_states(),
target: &src_texture.raw,
families: None,
range: src_texture.full_range.clone(),
range: pending.selector,
});
match src_texture.placement {
TexturePlacement::SwapChain(_) => unimplemented!(),
@@ -208,22 +226,20 @@ pub extern "C" fn wgpu_command_buffer_copy_texture_to_buffer(
TexturePlacement::Memory(_) => (),
}
let (dst_buffer, dst_usage) = cmb
.trackers
.buffers
.get_with_replaced_usage(
&*buffer_guard,
destination.buffer,
BufferUsage::TRANSFER_DST,
)
.unwrap();
let dst_barrier = dst_usage.map(|old| hal::memory::Barrier::Buffer {
states: conv::map_buffer_state(old) .. hal::buffer::Access::TRANSFER_WRITE,
let (dst_buffer, dst_barriers) = cmb.trackers.buffers.use_replace(
&*buffer_guard,
destination.buffer,
(),
BufferUsage::TRANSFER_DST,
);
let dst_barrier = dst_barriers.map(|pending| hal::memory::Barrier::Buffer {
states: pending.to_states(),
target: &dst_buffer.raw,
families: None,
range: None .. None,
});
let aspects = src_texture.full_range.aspects;
let bytes_per_texel = conv::map_texture_format(src_texture.format)
.surface_desc()
.bits as u32
@@ -234,11 +250,7 @@ pub extern "C" fn wgpu_command_buffer_copy_texture_to_buffer(
buffer_offset: destination.offset,
buffer_width,
buffer_height: destination.image_height,
image_layers: hal::image::SubresourceLayers {
aspects, //TODO
level: source.mip_level as hal::image::Level,
layers: source.array_layer as u16 .. source.array_layer as u16 + 1,
},
image_layers: source.to_sub_layers(aspects),
image_offset: conv::map_origin(source.origin),
image_extent: conv::map_extent(copy_size),
};
@@ -248,11 +260,11 @@ pub extern "C" fn wgpu_command_buffer_copy_texture_to_buffer(
cmb_raw.pipeline_barrier(
stages .. stages,
hal::memory::Dependencies::empty(),
src_barrier.into_iter().chain(dst_barrier),
src_barriers.chain(dst_barrier),
);
cmb_raw.copy_image_to_buffer(
&src_texture.raw,
src_texture_state.1,
hal::image::Layout::TransferSrcOptimal,
&dst_buffer.raw,
iter::once(region),
);
@@ -269,38 +281,37 @@ pub extern "C" fn wgpu_command_buffer_copy_texture_to_texture(
let mut cmb_guard = HUB.command_buffers.write();
let cmb = &mut cmb_guard[command_buffer_id];
let texture_guard = HUB.textures.read();
// we can't hold both src_pending and dst_pending in scope because they
// borrow the buffer tracker mutably...
let mut barriers = Vec::new();
let aspects = texture_guard[source.texture].full_range.aspects &
texture_guard[destination.texture].full_range.aspects;
let (src_texture, src_usage) = cmb
.trackers
.textures
.get_with_replaced_usage(&*texture_guard, source.texture, TextureUsage::TRANSFER_SRC)
.unwrap();
let (dst_texture, dst_usage) = cmb
.trackers
.textures
.get_with_replaced_usage(
&*texture_guard,
destination.texture,
TextureUsage::TRANSFER_DST,
)
.unwrap();
let aspects = src_texture.full_range.aspects & dst_texture.full_range.aspects;
let src_texture_state = conv::map_texture_state(TextureUsage::TRANSFER_SRC, aspects);
let dst_texture_state = conv::map_texture_state(TextureUsage::TRANSFER_DST, aspects);
let src_barrier = src_usage.map(|old| hal::memory::Barrier::Image {
states: conv::map_texture_state(old, aspects) .. src_texture_state,
let (src_texture, src_pending) = cmb.trackers.textures.use_replace(
&*texture_guard,
source.texture,
source.to_selector(aspects),
TextureUsage::TRANSFER_SRC,
);
barriers.extend(src_pending.map(|pending| hal::memory::Barrier::Image {
states: pending.to_states(),
target: &src_texture.raw,
families: None,
range: src_texture.full_range.clone(),
});
let dst_barrier = dst_usage.map(|old| hal::memory::Barrier::Image {
states: conv::map_texture_state(old, aspects) .. dst_texture_state,
range: pending.selector,
}));
let (dst_texture, dst_pending) = cmb.trackers.textures.use_replace(
&*texture_guard,
destination.texture,
destination.to_selector(aspects),
TextureUsage::TRANSFER_DST,
);
barriers.extend(dst_pending.map(|pending| hal::memory::Barrier::Image {
states: pending.to_states(),
target: &dst_texture.raw,
families: None,
range: dst_texture.full_range.clone(),
});
range: pending.selector,
}));
if let TexturePlacement::SwapChain(ref link) = dst_texture.placement {
cmb.swap_chain_links.alloc().init(SwapChainLink {
@@ -310,18 +321,11 @@ pub extern "C" fn wgpu_command_buffer_copy_texture_to_texture(
});
}
let aspects = src_texture.full_range.aspects & dst_texture.full_range.aspects;
let region = hal::command::ImageCopy {
src_subresource: hal::image::SubresourceLayers {
aspects,
level: source.mip_level as hal::image::Level,
layers: source.array_layer as u16 .. source.array_layer as u16 + 1,
},
src_subresource: source.to_sub_layers(aspects),
src_offset: conv::map_origin(source.origin),
dst_subresource: hal::image::SubresourceLayers {
aspects,
level: destination.mip_level as hal::image::Level,
layers: destination.array_layer as u16 .. destination.array_layer as u16 + 1,
},
dst_subresource: destination.to_sub_layers(aspects),
dst_offset: conv::map_origin(destination.origin),
extent: conv::map_extent(copy_size),
};
@@ -330,13 +334,13 @@ pub extern "C" fn wgpu_command_buffer_copy_texture_to_texture(
cmb_raw.pipeline_barrier(
all_image_stages() .. all_image_stages(),
hal::memory::Dependencies::empty(),
src_barrier.into_iter().chain(dst_barrier),
barriers,
);
cmb_raw.copy_image(
&src_texture.raw,
src_texture_state.1,
hal::image::Layout::TransferSrcOptimal,
&dst_texture.raw,
dst_texture_state.1,
hal::image::Layout::TransferDstOptimal,
iter::once(region),
);
}

View File

@@ -6,7 +6,7 @@ use crate::{
pipeline,
resource,
swap_chain,
track::{DummyUsage, Stitch, TrackPermit, TrackerSet, Tracktion},
track::{Stitch, TrackerSet},
AdapterId,
BindGroupId,
BufferAddress,
@@ -58,6 +58,7 @@ use std::{collections::hash_map::Entry, ffi, iter, ptr, ops::Range, slice, sync:
const CLEANUP_WAIT_MS: u64 = 5000;
pub const MAX_COLOR_TARGETS: usize = 4;
pub const MAX_MIP_LEVELS: usize = 16;
pub const MAX_VERTEX_BUFFERS: usize = 8;
/// Bound uniform/storage buffer offsets must be aligned to this number.
@@ -604,12 +605,12 @@ pub fn device_track_buffer(
ref_count: RefCount,
flags: resource::BufferUsage,
) {
let query = HUB.devices.read()[device_id]
let ok = HUB.devices.read()[device_id]
.trackers
.lock()
.buffers
.query(buffer_id, &ref_count, flags);
assert!(query.initialized);
.init(buffer_id, &ref_count, (), flags);
assert!(ok);
}
#[cfg(feature = "local")]
@@ -686,6 +687,8 @@ pub fn device_create_texture(
let device_guard = HUB.devices.read();
let device = &device_guard[device_id];
assert!((desc.mip_level_count as usize) < MAX_MIP_LEVELS);
let mut image = unsafe {
device.raw.create_image(
kind,
@@ -734,17 +737,23 @@ pub fn device_create_texture(
}
}
pub fn device_track_texture(device_id: DeviceId, texture_id: TextureId, ref_count: RefCount) {
let query = HUB.devices.read()[device_id]
pub fn device_track_texture(
device_id: DeviceId,
texture_id: TextureId,
ref_count: RefCount,
full_range: hal::image::SubresourceRange,
) {
let ok = HUB.devices.read()[device_id]
.trackers
.lock()
.textures
.query(
.init(
texture_id,
&ref_count,
full_range,
resource::TextureUsage::UNINITIALIZED,
);
assert!(query.initialized);
assert!(ok);
}
#[cfg(feature = "local")]
@@ -755,8 +764,9 @@ pub extern "C" fn wgpu_device_create_texture(
) -> TextureId {
let texture = device_create_texture(device_id, desc);
let ref_count = texture.life_guard.ref_count.clone();
let range = texture.full_range.clone();
let id = HUB.textures.register_local(texture);
device_track_texture(device_id, id, ref_count);
device_track_texture(device_id, id, ref_count, range);
id
}
@@ -778,7 +788,7 @@ pub fn texture_create_view(
view_kind,
conv::map_texture_format(format),
hal::format::Swizzle::NO,
range,
range.clone(),
)
.unwrap()
};
@@ -790,8 +800,9 @@ pub fn texture_create_view(
ref_count: texture.life_guard.ref_count.clone(),
},
format: texture.format,
extent: texture.kind.extent(),
extent: texture.kind.extent().at_level(range.levels.start),
samples: texture.kind.num_samples(),
range,
is_owned_by_swap_chain: false,
life_guard: LifeGuard::new(),
}
@@ -799,12 +810,12 @@ pub fn texture_create_view(
pub fn device_track_view(texture_id: TextureId, view_id: TextureViewId, ref_count: RefCount) {
let device_id = HUB.textures.read()[texture_id].device_id.value;
let query = HUB.devices.read()[device_id]
let ok = HUB.devices.read()[device_id]
.trackers
.lock()
.views
.query(view_id, &ref_count, DummyUsage);
assert!(query.initialized);
.init(view_id, &ref_count, (), ());
assert!(ok);
}
#[cfg(feature = "local")]
@@ -1064,7 +1075,7 @@ pub fn device_create_bind_group(
);
let buffer = used
.buffers
.get_with_extended_usage(&*buffer_guard, bb.buffer, usage)
.use_extend(&*buffer_guard, bb.buffer, (), usage)
.unwrap();
let range = Some(bb.offset) .. Some(bb.offset + bb.size);
hal::pso::Descriptor::Buffer(&buffer.raw, range)
@@ -1086,14 +1097,15 @@ pub fn device_create_bind_group(
),
_ => panic!("Mismatched texture binding for {:?}", decl),
};
let view = &texture_view_guard[id];
used.views.query(id, &view.life_guard.ref_count, DummyUsage);
let view = used.views
.use_extend(&*texture_view_guard, id, (), ())
.unwrap();
used.textures
.transit(
.change_extend(
view.texture_id.value,
&view.texture_id.ref_count,
view.range.clone(),
usage,
TrackPermit::EXTEND,
)
.unwrap();
hal::pso::Descriptor::Image(&view.raw, image_layout)
@@ -1126,15 +1138,15 @@ pub fn device_create_bind_group(
pub fn device_track_bind_group(
device_id: DeviceId,
buffer_id: BindGroupId,
bind_group_id: BindGroupId,
ref_count: RefCount,
) {
let query = HUB.devices.read()[device_id]
let ok = HUB.devices.read()[device_id]
.trackers
.lock()
.bind_groups
.query(buffer_id, &ref_count, DummyUsage);
assert!(query.initialized);
.init(bind_group_id, &ref_count, (), ());
assert!(ok);
}
#[cfg(feature = "local")]
@@ -1271,6 +1283,9 @@ pub extern "C" fn wgpu_queue_submit(
}
}
// optimize the tracked states
comb.trackers.optimize();
// update submission IDs
for id in comb.trackers.buffers.used() {
let buffer = &buffer_guard[id];
@@ -1785,6 +1800,7 @@ pub fn swap_chain_populate_textures(
for (i, mut texture) in textures.into_iter().enumerate() {
let format = texture.format;
let kind = texture.kind;
let range = texture.full_range.clone();
let view_raw = unsafe {
device
@@ -1794,7 +1810,7 @@ pub fn swap_chain_populate_textures(
hal::image::ViewKind::D2,
conv::map_texture_format(format),
hal::format::Swizzle::NO,
texture.full_range.clone(),
range.clone(),
)
.unwrap()
};
@@ -1807,9 +1823,10 @@ pub fn swap_chain_populate_textures(
ref_count: texture.life_guard.ref_count.clone(),
value: HUB.textures.register_local(texture),
};
trackers.textures.query(
trackers.textures.init(
texture_id.value,
&texture_id.ref_count,
range.clone(),
resource::TextureUsage::UNINITIALIZED,
);
@@ -1819,6 +1836,7 @@ pub fn swap_chain_populate_textures(
format,
extent: kind.extent(),
samples: kind.num_samples(),
range,
is_owned_by_swap_chain: true,
life_guard: LifeGuard::new(),
};
@@ -1828,7 +1846,7 @@ pub fn swap_chain_populate_textures(
};
trackers
.views
.query(view_id.value, &view_id.ref_count, DummyUsage);
.init(view_id.value, &view_id.ref_count, (), ());
swap_chain.frames.alloc().init(swap_chain::Frame {
texture_id,
@@ -1899,19 +1917,11 @@ pub extern "C" fn wgpu_buffer_map_read_async(
let device_guard = HUB.devices.read();
let device = &device_guard[device_id];
let usage = resource::BufferUsage::MAP_READ;
match device
device
.trackers
.lock()
.buffers
.transit(buffer_id, &ref_count, usage, TrackPermit::REPLACE)
{
Ok(Tracktion::Keep) => {}
Ok(Tracktion::Replace { .. }) => {
//TODO: launch a memory barrier into `HOST_READ` access?
}
other => panic!("Invalid mapping transition {:?}", other),
}
.change_replace(buffer_id, &ref_count, (), resource::BufferUsage::MAP_READ);
device.pending.lock().map(buffer_id, ref_count);
}
@@ -1942,19 +1952,11 @@ pub extern "C" fn wgpu_buffer_map_write_async(
let device_guard = HUB.devices.read();
let device = &device_guard[device_id];
let usage = resource::BufferUsage::MAP_WRITE;
match device
device
.trackers
.lock()
.buffers
.transit(buffer_id, &ref_count, usage, TrackPermit::REPLACE)
{
Ok(Tracktion::Keep) => {}
Ok(Tracktion::Replace { .. }) => {
//TODO: launch a memory barrier into `HOST_WRITE` access?
}
other => panic!("Invalid mapping transition {:?}", other),
}
.change_replace(buffer_id, &ref_count, (), resource::BufferUsage::MAP_WRITE);
device.pending.lock().map(buffer_id, ref_count);
}

View File

@@ -1,6 +1,5 @@
use crate::{
binding_model::MAX_BIND_GROUPS,
device::BIND_BUFFER_ALIGNMENT,
hub::HUB,
AdapterHandle,
AdapterId,
@@ -9,7 +8,7 @@ use crate::{
SurfaceHandle,
};
#[cfg(feature = "local")]
use crate::{DeviceId, SurfaceId};
use crate::{device::BIND_BUFFER_ALIGNMENT, DeviceId, SurfaceId};
#[cfg(feature = "local")]
use log::info;

View File

@@ -34,7 +34,7 @@ mod track;
pub use self::binding_model::*;
pub use self::command::*;
pub use self::device::*;
#[cfg(feature = "remote")]
#[cfg(not(feature = "local"))]
pub use self::hub::{IdentityManager, Registry, HUB};
pub use self::instance::*;
pub use self::pipeline::*;

View File

@@ -181,7 +181,7 @@ pub struct TextureDescriptor {
#[derive(Debug)]
pub(crate) enum TexturePlacement<B: hal::Backend> {
#[cfg_attr(feature = "remote", allow(unused))]
#[cfg_attr(not(feature = "local"), allow(unused))]
SwapChain(SwapChainLink<Mutex<SwapImageEpoch>>),
Memory(MemoryBlock<B>),
Void,
@@ -255,11 +255,18 @@ pub struct TextureView<B: hal::Backend> {
pub(crate) format: TextureFormat,
pub(crate) extent: hal::image::Extent,
pub(crate) samples: hal::image::NumSamples,
pub(crate) range: hal::image::SubresourceRange,
pub(crate) is_owned_by_swap_chain: bool,
#[cfg_attr(not(feature = "local"), allow(dead_code))]
pub(crate) life_guard: LifeGuard,
}
impl<B: hal::Backend> Borrow<RefCount> for TextureView<B> {
fn borrow(&self) -> &RefCount {
&self.life_guard.ref_count
}
}
#[repr(C)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub enum AddressMode {

View File

@@ -3,7 +3,6 @@ use crate::{
device::all_image_stages,
hub::HUB,
resource,
track::TrackPermit,
DeviceId,
Extent3d,
Stored,
@@ -231,27 +230,23 @@ pub extern "C" fn wgpu_swap_chain_present(swap_chain_id: SwapChainId) {
//TODO: support for swapchain being sampled or read by the shader?
trace!("transit {:?} to present", frame.texture_id.value);
let barrier = device
.trackers
.lock()
.textures
.transit(
let mut trackers = device.trackers.lock();
let barriers = trackers.textures
.change_replace(
frame.texture_id.value,
&texture.life_guard.ref_count,
texture.full_range.clone(),
resource::TextureUsage::UNINITIALIZED,
TrackPermit::REPLACE,
)
.unwrap()
.into_source()
.map(|old| hal::memory::Barrier::Image {
states: conv::map_texture_state(old, hal::format::Aspects::COLOR)
.map(|pending| hal::memory::Barrier::Image {
states: conv::map_texture_state(pending.usage.start, hal::format::Aspects::COLOR)
.. (
hal::image::Access::COLOR_ATTACHMENT_WRITE,
hal::image::Layout::Present,
),
target: &texture.raw,
families: None,
range: texture.full_range.clone(),
range: pending.selector,
});
let err = unsafe {
@@ -259,7 +254,7 @@ pub extern "C" fn wgpu_swap_chain_present(swap_chain_id: SwapChainId) {
frame.comb.pipeline_barrier(
all_image_stages() .. hal::pso::PipelineStage::COLOR_ATTACHMENT_OUTPUT,
hal::memory::Dependencies::empty(),
barrier,
barriers,
);
frame.comb.finish();

View File

@@ -1,368 +0,0 @@
use crate::{
hub::Storage,
resource::{BufferUsage, TextureUsage},
BufferId,
Epoch,
Index,
RefCount,
TextureId,
TextureViewId,
TypedId,
BindGroupId,
};
use bitflags::bitflags;
use hal::backend::FastHashMap;
use std::{
borrow::Borrow,
collections::hash_map::{Entry, Iter},
marker::PhantomData,
mem,
ops::{BitOr, Range},
};
#[derive(Clone, Debug, PartialEq)]
#[allow(unused)]
pub enum Tracktion<T> {
Init,
Keep,
Extend { old: T },
Replace { old: T },
}
impl<T> Tracktion<T> {
pub fn into_source(self) -> Option<T> {
match self {
Tracktion::Init | Tracktion::Keep => None,
Tracktion::Extend { old } | Tracktion::Replace { old } => Some(old),
}
}
}
#[derive(Clone, Debug, PartialEq)]
pub struct Query<T> {
pub usage: T,
pub initialized: bool,
}
bitflags! {
pub struct TrackPermit: u32 {
/// Allow extension of the current usage. This is useful during render pass
/// recording, where the usage has to stay constant, but we can defer the
/// decision on what it is until the end of the pass.
const EXTEND = 1;
/// Allow replacing the current usage with the new one. This is useful when
/// recording a command buffer live, and the current usage is already been set.
const REPLACE = 2;
}
}
pub trait GenericUsage {
fn is_exclusive(&self) -> bool;
}
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct DummyUsage;
impl BitOr for DummyUsage {
type Output = Self;
fn bitor(self, other: Self) -> Self {
other
}
}
impl GenericUsage for BufferUsage {
fn is_exclusive(&self) -> bool {
BufferUsage::WRITE_ALL.intersects(*self)
}
}
impl GenericUsage for TextureUsage {
fn is_exclusive(&self) -> bool {
TextureUsage::WRITE_ALL.intersects(*self)
}
}
impl GenericUsage for DummyUsage {
fn is_exclusive(&self) -> bool {
false
}
}
#[derive(Clone, Debug)]
struct Track<U> {
ref_count: RefCount,
init: U,
last: U,
epoch: Epoch,
}
//TODO: consider having `I` as an associated type of `U`?
#[derive(Debug)]
pub struct Tracker<I, U> {
map: FastHashMap<Index, Track<U>>,
_phantom: PhantomData<I>,
}
pub type BufferTracker = Tracker<BufferId, BufferUsage>;
pub type TextureTracker = Tracker<TextureId, TextureUsage>;
pub type TextureViewTracker = Tracker<TextureViewId, DummyUsage>;
pub type BindGroupTracker = Tracker<BindGroupId, DummyUsage>;
//TODO: make this a generic parameter.
/// Mode of stitching two states together.
#[derive(Clone, Copy, Debug)]
pub enum Stitch {
/// Stitch to the init state of the other resource.
Init,
/// Stitch to the last state of the other resource.
Last,
}
//TODO: consider rewriting this without any iterators that have side effects.
#[derive(Debug)]
pub struct ConsumeIterator<'a, I: TypedId, U: Copy + PartialEq> {
src: Iter<'a, Index, Track<U>>,
dst: &'a mut FastHashMap<Index, Track<U>>,
stitch: Stitch,
_marker: PhantomData<I>,
}
impl<'a, I: TypedId, U: Copy + PartialEq> Iterator for ConsumeIterator<'a, I, U> {
type Item = (I, Range<U>);
fn next(&mut self) -> Option<Self::Item> {
loop {
let (&index, new) = self.src.next()?;
match self.dst.entry(index) {
Entry::Vacant(e) => {
e.insert(new.clone());
}
Entry::Occupied(mut e) => {
assert_eq!(e.get().epoch, new.epoch);
let old = mem::replace(&mut e.get_mut().last, new.last);
if old != new.init {
let state = match self.stitch {
Stitch::Init => new.init,
Stitch::Last => new.last,
};
return Some((I::new(index, new.epoch), old .. state))
}
}
}
}
}
}
// Make sure to finish all side effects on drop
impl<'a, I: TypedId, U: Copy + PartialEq> Drop for ConsumeIterator<'a, I, U> {
fn drop(&mut self) {
self.for_each(drop)
}
}
#[derive(Debug)]
pub struct TrackerSet {
pub buffers: BufferTracker,
pub textures: TextureTracker,
pub views: TextureViewTracker,
pub bind_groups: BindGroupTracker,
//TODO: samplers
}
impl TrackerSet {
pub fn new() -> Self {
TrackerSet {
buffers: BufferTracker::new(),
textures: TextureTracker::new(),
views: TextureViewTracker::new(),
bind_groups: BindGroupTracker::new(),
}
}
pub fn clear(&mut self) {
self.buffers.clear();
self.textures.clear();
self.views.clear();
self.bind_groups.clear();
}
pub fn consume_by_extend(&mut self, other: &Self) {
self.buffers.consume_by_extend(&other.buffers).unwrap();
self.textures.consume_by_extend(&other.textures).unwrap();
self.views.consume_by_extend(&other.views).unwrap();
self.bind_groups.consume_by_extend(&other.bind_groups).unwrap();
}
}
impl<I: TypedId, U: Copy + GenericUsage + BitOr<Output = U> + PartialEq> Tracker<I, U> {
pub fn new() -> Self {
Tracker {
map: FastHashMap::default(),
_phantom: PhantomData,
}
}
/// Remove an id from the tracked map.
pub(crate) fn remove(&mut self, id: I) -> bool {
match self.map.remove(&id.index()) {
Some(track) => {
assert_eq!(track.epoch, id.epoch());
true
}
None => false,
}
}
/// Get the last usage on a resource.
pub(crate) fn query(&mut self, id: I, ref_count: &RefCount, default: U) -> Query<U> {
match self.map.entry(id.index()) {
Entry::Vacant(e) => {
e.insert(Track {
ref_count: ref_count.clone(),
init: default,
last: default,
epoch: id.epoch(),
});
Query {
usage: default,
initialized: true,
}
}
Entry::Occupied(e) => {
assert_eq!(e.get().epoch, id.epoch());
Query {
usage: e.get().last,
initialized: false,
}
}
}
}
/// Transit a specified resource into a different usage.
pub(crate) fn transit(
&mut self,
id: I,
ref_count: &RefCount,
usage: U,
permit: TrackPermit,
) -> Result<Tracktion<U>, U> {
match self.map.entry(id.index()) {
Entry::Vacant(e) => {
e.insert(Track {
ref_count: ref_count.clone(),
init: usage,
last: usage,
epoch: id.epoch(),
});
Ok(Tracktion::Init)
}
Entry::Occupied(mut e) => {
assert_eq!(e.get().epoch, id.epoch());
let old = e.get().last;
if usage == old {
Ok(Tracktion::Keep)
} else if permit.contains(TrackPermit::EXTEND) && !(old | usage).is_exclusive() {
e.get_mut().last = old | usage;
Ok(Tracktion::Extend { old })
} else if permit.contains(TrackPermit::REPLACE) {
e.get_mut().last = usage;
Ok(Tracktion::Replace { old })
} else {
Err(old)
}
}
}
}
/// Consume another tracker, adding its transitions to `self`.
/// Transitions the current usage to the new one.
pub fn consume_by_replace<'a>(
&'a mut self,
other: &'a Self,
stitch: Stitch,
) -> ConsumeIterator<'a, I, U> {
ConsumeIterator {
src: other.map.iter(),
dst: &mut self.map,
stitch,
_marker: PhantomData,
}
}
/// Consume another tracker, adding its transitions to `self`.
/// Extends the current usage without doing any transitions.
pub fn consume_by_extend<'a>(&'a mut self, other: &'a Self) -> Result<(), (I, Range<U>)> {
for (&index, new) in other.map.iter() {
match self.map.entry(index) {
Entry::Vacant(e) => {
e.insert(new.clone());
}
Entry::Occupied(mut e) => {
assert_eq!(e.get().epoch, new.epoch);
let old = e.get().last;
if old != new.last {
let extended = old | new.last;
if extended.is_exclusive() {
let id = I::new(index, new.epoch);
return Err((id, old .. new.last));
}
e.get_mut().last = extended;
}
}
}
}
Ok(())
}
/// Return an iterator over used resources keys.
pub fn used<'a>(&'a self) -> impl 'a + Iterator<Item = I> {
self.map
.iter()
.map(|(&index, track)| I::new(index, track.epoch))
}
}
impl<I: TypedId + Copy, U: Copy + GenericUsage + BitOr<Output = U> + PartialEq> Tracker<I, U> {
fn clear(&mut self) {
self.map.clear();
}
fn _get_with_usage<'a, T: 'a + Borrow<RefCount>>(
&mut self,
storage: &'a Storage<T, I>,
id: I,
usage: U,
permit: TrackPermit,
) -> Result<(&'a T, Tracktion<U>), U> {
let item = &storage[id];
self.transit(id, item.borrow(), usage, permit)
.map(|tracktion| (item, tracktion))
}
pub(crate) fn get_with_extended_usage<'a, T: 'a + Borrow<RefCount>>(
&mut self,
storage: &'a Storage<T, I>,
id: I,
usage: U,
) -> Result<&'a T, U> {
let item = &storage[id];
self.transit(id, item.borrow(), usage, TrackPermit::EXTEND)
.map(|_tracktion| item)
}
pub(crate) fn get_with_replaced_usage<'a, T: 'a + Borrow<RefCount>>(
&mut self,
storage: &'a Storage<T, I>,
id: I,
usage: U,
) -> Result<(&'a T, Option<U>), U> {
let item = &storage[id];
self.transit(id, item.borrow(), usage, TrackPermit::REPLACE)
.map(|tracktion| {
(
item,
match tracktion {
Tracktion::Init | Tracktion::Keep => None,
Tracktion::Extend { .. } => unreachable!(),
Tracktion::Replace { old } => Some(old),
},
)
})
}
}

View File

@@ -0,0 +1,125 @@
use crate::{
conv,
resource::BufferUsage,
BufferId,
};
use super::{PendingTransition, ResourceState, Stitch, Unit};
use std::ops::Range;
//TODO: store `hal::buffer::State` here to avoid extra conversions
pub type BufferState = Unit<BufferUsage>;
impl PendingTransition<BufferState> {
    /// Map the tracked usage range into the matching pair of gfx-hal
    /// buffer states, preserving the `start .. end` direction of the
    /// transition.
    pub fn to_states(&self) -> Range<hal::buffer::State> {
        let from = conv::map_buffer_state(self.usage.start);
        let to = conv::map_buffer_state(self.usage.end);
        from .. to
    }
}
impl Default for BufferState {
    /// A freshly tracked buffer starts out with no usage at all:
    /// both the initial and the last usage are empty.
    fn default() -> Self {
        let unused = BufferUsage::empty();
        BufferState {
            init: unused,
            last: unused,
        }
    }
}
impl ResourceState for BufferState {
    type Id = BufferId;
    // Buffers are tracked as a whole: there is no sub-resource selector.
    type Selector = ();
    type Usage = BufferUsage;

    /// A whole-buffer query is always consistent: report the last usage.
    fn query(
        &self,
        _selector: Self::Selector,
    ) -> Option<Self::Usage> {
        Some(self.last)
    }

    /// Change the last usage of the buffer.
    ///
    /// With `output` provided, the old usage is replaced and the transition
    /// is recorded. Without it, the usage is extended (OR-ed in), which is
    /// only legal when no write usage gets combined with anything else.
    fn change(
        &mut self,
        id: Self::Id,
        _selector: Self::Selector,
        usage: Self::Usage,
        output: Option<&mut Vec<PendingTransition<Self>>>,
    ) -> Result<(), PendingTransition<Self>> {
        let old = self.last;
        if usage != old {
            // Build the would-be transition up front; it is either pushed
            // to `output` or returned as the conflict error.
            let pending = PendingTransition {
                id,
                selector: (),
                usage: old .. usage,
            };
            self.last = match output {
                Some(transitions) => {
                    // Replace mode: record the transition, adopt the new usage.
                    transitions.push(pending);
                    usage
                }
                None => {
                    // Extend mode: combining with a write usage is a conflict
                    // (an empty old state is fine - nothing to conflict with).
                    if !old.is_empty() && BufferUsage::WRITE_ALL.intersects(old | usage) {
                        return Err(pending);
                    }
                    old | usage
                }
            };
        }
        Ok(())
    }

    /// Merge the state tracked by `other` into `self`, following the same
    /// replace/extend rules as `change()`. `stitch` picks which end of
    /// `other` the generated transition connects to.
    fn merge(
        &mut self,
        id: Self::Id,
        other: &Self,
        stitch: Stitch,
        output: Option<&mut Vec<PendingTransition<Self>>>,
    ) -> Result<(), PendingTransition<Self>> {
        let old = self.last;
        let new = other.select(stitch);
        self.last = if old == new {
            // No transition needed; still advance to `other`'s final state.
            other.last
        } else {
            let pending = PendingTransition {
                id,
                selector: (),
                usage: old .. new,
            };
            match output {
                Some(transitions) => {
                    transitions.push(pending);
                    other.last
                }
                None => {
                    // Extend mode: same write-conflict rule as in `change()`.
                    if !old.is_empty() && BufferUsage::WRITE_ALL.intersects(old | new) {
                        return Err(pending);
                    }
                    old | new
                }
            }
        };
        Ok(())
    }

    /// Whole-buffer state has nothing to compact.
    fn optimize(&mut self) {
    }
}
#[cfg(test)]
mod test {
    use crate::TypedId;
    use super::*;

    // Exercises the extend-vs-replace behavior of `BufferState::change`.
    #[test]
    fn change() {
        // Start from a state whose last usage is STORAGE (a write usage).
        let mut bs = Unit {
            init: BufferUsage::INDEX,
            last: BufferUsage::STORAGE,
        };
        let id = TypedId::new(0, 0);
        // Extending a write usage with anything else is a conflict.
        assert!(bs.change(id, (), BufferUsage::VERTEX, None).is_err());
        // Replacing (output vector provided) always succeeds.
        bs.change(id, (), BufferUsage::VERTEX, Some(&mut Vec::new())).unwrap();
        // VERTEX and INDEX are both read-only, so extension is allowed.
        bs.change(id, (), BufferUsage::INDEX, None).unwrap();
        assert_eq!(bs.last, BufferUsage::VERTEX | BufferUsage::INDEX);
    }
}

View File

@@ -0,0 +1,454 @@
mod buffer;
mod range;
mod texture;
use crate::{
hub::Storage,
Epoch,
Index,
RefCount,
TextureViewId,
TypedId,
BindGroupId,
};
use hal::backend::FastHashMap;
use std::{
borrow::Borrow,
collections::hash_map::Entry,
fmt::Debug,
marker::PhantomData,
ops::Range,
vec::Drain,
};
use buffer::BufferState;
use texture::TextureState;
/// A single unit of state tracking. It keeps an initial
/// usage as well as the last/current one, similar to `Range`.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct Unit<U> {
    init: U,
    last: U,
}

impl<U: Copy> Unit<U> {
    /// Construct a unit whose initial and current usages coincide.
    fn new(usage: U) -> Self {
        Unit {
            init: usage,
            last: usage,
        }
    }

    /// Pick one of the two tracked usages according to `stitch`.
    ///
    /// When merging two trackers A and B for a resource, we sometimes
    /// want to connect A to the initial state of B (`Stitch::Init`),
    /// and sometimes to its final state (`Stitch::Last`).
    fn select(&self, stitch: Stitch) -> U {
        match stitch {
            Stitch::Init => self.init,
            Stitch::Last => self.last,
        }
    }
}

/// Mode of stitching two states together.
#[derive(Clone, Copy, Debug)]
pub enum Stitch {
    /// Stitch to the init state of the other resource.
    Init,
    /// Stitch to the last state of the other resource.
    Last,
}
/// The main trait that abstracts away the tracking logic of
/// a particular resource type, like a buffer or a texture.
pub trait ResourceState: Clone + Default {
    /// Corresponding `HUB` identifier.
    type Id: Copy + Debug + TypedId;
    /// A type specifying the sub-resources.
    type Selector: Debug;
    /// Usage type for a `Unit` of a sub-resource.
    type Usage: Debug;

    /// Check if all the selected sub-resources have the same
    /// usage, and return it.
    ///
    /// Returns `None` if no sub-resources
    /// are intersecting with the selector, or their usage
    /// isn't consistent.
    fn query(
        &self,
        selector: Self::Selector,
    ) -> Option<Self::Usage>;

    /// Change the last usage of the selected sub-resources.
    ///
    /// If `output` is specified, it's filled with the
    /// `PendingTransition` objects corresponding to smaller
    /// sub-resource transitions. The old usage is replaced by
    /// the new one.
    ///
    /// If `output` is `None`, the old usage is extended with
    /// the new usage. The error is returned if it's not possible,
    /// specifying the conflicting transition. Extension can only
    /// be done for read-only usages.
    fn change(
        &mut self,
        id: Self::Id,
        selector: Self::Selector,
        usage: Self::Usage,
        output: Option<&mut Vec<PendingTransition<Self>>>,
    ) -> Result<(), PendingTransition<Self>>;

    /// Merge the state of this resource tracked by a different instance
    /// with the current one.
    ///
    /// Same rules for `output` apply as with `change()`: last usage state
    /// is either replaced (when `output` is provided) with a
    /// `PendingTransition` pushed to this vector, or extended with the
    /// other read-only usage, unless there is a usage conflict, and
    /// the error is generated (returning the conflict).
    ///
    /// `stitch` only defines the end points of generated transitions.
    /// Last states of `self` are nevertheless updated to the *last* states
    /// of `other`, if `output` is provided.
    fn merge(
        &mut self,
        id: Self::Id,
        other: &Self,
        stitch: Stitch,
        output: Option<&mut Vec<PendingTransition<Self>>>,
    ) -> Result<(), PendingTransition<Self>>;

    /// Try to optimize the internal representation.
    fn optimize(&mut self);
}
/// Structure wrapping the abstract tracking state with the relevant resource
/// data, such as the reference count and the epoch.
#[derive(Clone)]
#[cfg_attr(debug_assertions, derive(Debug))]
struct Resource<S> {
    // Keeps the resource alive while it's being tracked.
    ref_count: RefCount,
    // The abstract per-type tracking state (see `ResourceState`).
    state: S,
    // Epoch of the id; checked against incoming ids to detect stale handles.
    epoch: Epoch,
}

/// A structure containing all the information about a particular resource
/// transition. User code should be able to generate a pipeline barrier
/// based on the contents.
#[derive(Debug)]
pub struct PendingTransition<S: ResourceState> {
    pub id: S::Id,
    pub selector: S::Selector,
    pub usage: Range<S::Usage>,
}
/// A tracker for all resources of a given type.
#[cfg_attr(debug_assertions, derive(Debug))]
pub struct ResourceTracker<S: ResourceState> {
    /// An association of known resource indices with their tracked states.
    map: FastHashMap<Index, Resource<S>>,
    /// Temporary storage for collecting transitions.
    /// Reused across calls so `change_replace`/`merge_replace` don't allocate.
    temp: Vec<PendingTransition<S>>,
}
impl<S: ResourceState> ResourceTracker<S> {
    /// Create a new empty tracker.
    pub fn new() -> Self {
        ResourceTracker {
            map: FastHashMap::default(),
            temp: Vec::new(),
        }
    }

    /// Remove an id from the tracked map.
    ///
    /// Returns `true` if the id was present. Panics if the id's epoch
    /// does not match the tracked one (stale handle).
    pub fn remove(&mut self, id: S::Id) -> bool {
        match self.map.remove(&id.index()) {
            Some(resource) => {
                assert_eq!(resource.epoch, id.epoch());
                true
            }
            None => false,
        }
    }

    /// Try to optimize the internal representation.
    pub fn optimize(&mut self) {
        for resource in self.map.values_mut() {
            resource.state.optimize();
        }
    }

    /// Return an iterator over used resources keys.
    pub fn used<'a>(&'a self) -> impl 'a + Iterator<Item = S::Id> {
        self.map
            .iter()
            .map(|(&index, resource)| S::Id::new(index, resource.epoch))
    }

    /// Clear the tracked contents.
    fn clear(&mut self) {
        self.map.clear();
    }

    /// Initialize a resource to be used.
    ///
    /// Returns `false` if the resource is already tracked.
    pub fn init(
        &mut self,
        id: S::Id,
        ref_count: &RefCount,
        selector: S::Selector,
        default: S::Usage,
    ) -> bool {
        // Build the initial state by applying the default usage in
        // extend mode; the result is ignored since a fresh default
        // state can't conflict.
        let mut state = S::default();
        let _ = state.change(
            id,
            selector,
            default,
            None,
        );
        self.map
            .insert(id.index(), Resource {
                ref_count: ref_count.clone(),
                state,
                epoch: id.epoch(),
            })
            .is_none()
    }

    /// Query the usage of a resource selector.
    ///
    /// Returns `Some(Usage)` only if this usage is consistent
    /// across the given selector.
    pub fn query(
        &mut self,
        id: S::Id,
        selector: S::Selector,
    ) -> Option<S::Usage> {
        let res = self.map.get(&id.index())?;
        assert_eq!(res.epoch, id.epoch());
        res.state.query(selector)
    }

    /// Make sure that a resource is tracked, and return a mutable
    /// reference to it.
    ///
    /// An associated function (not a method) so callers can keep
    /// borrowing `self.temp` while holding the returned reference.
    fn get_or_insert<'a>(
        map: &'a mut FastHashMap<Index, Resource<S>>,
        id: S::Id,
        ref_count: &RefCount,
    ) -> &'a mut Resource<S> {
        match map.entry(id.index()) {
            Entry::Vacant(e) => {
                e.insert(Resource {
                    ref_count: ref_count.clone(),
                    state: S::default(),
                    epoch: id.epoch(),
                })
            }
            Entry::Occupied(e) => {
                assert_eq!(e.get().epoch, id.epoch());
                e.into_mut()
            }
        }
    }

    /// Extend the usage of a specified resource.
    ///
    /// Returns conflicting transition as an error.
    pub fn change_extend(
        &mut self,
        id: S::Id,
        ref_count: &RefCount,
        selector: S::Selector,
        usage: S::Usage,
    ) -> Result<(), PendingTransition<S>> {
        Self::get_or_insert(&mut self.map, id, ref_count)
            .state.change(id, selector, usage, None)
    }

    /// Replace the usage of a specified resource.
    ///
    /// Returns an iterator over the transitions that were generated
    /// (drained from the shared `temp` buffer).
    pub fn change_replace(
        &mut self,
        id: S::Id,
        ref_count: &RefCount,
        selector: S::Selector,
        usage: S::Usage,
    ) -> Drain<PendingTransition<S>> {
        let res = Self::get_or_insert(&mut self.map, id, ref_count);
        // With an output vector, `change` is not expected to fail.
        res.state.change(id, selector, usage, Some(&mut self.temp))
            .ok(); //TODO: unwrap?
        self.temp.drain(..)
    }

    /// Merge another tracker into `self` by extending the current states
    /// without any transitions.
    pub fn merge_extend(
        &mut self, other: &Self,
    ) -> Result<(), PendingTransition<S>> {
        for (&index, new) in other.map.iter() {
            match self.map.entry(index) {
                Entry::Vacant(e) => {
                    // Unknown resource: adopt the other tracker's state as-is.
                    e.insert(new.clone());
                }
                Entry::Occupied(e) => {
                    assert_eq!(e.get().epoch, new.epoch);
                    let id = S::Id::new(index, new.epoch);
                    e.into_mut().state.merge(id, &new.state, Stitch::Last, None)?;
                }
            }
        }
        Ok(())
    }

    /// Merge another tracker, adding its transitions to `self`.
    /// Transitions the current usage to the new one.
    pub fn merge_replace<'a>(
        &'a mut self,
        other: &'a Self,
        stitch: Stitch,
    ) -> Drain<PendingTransition<S>> {
        for (&index, new) in other.map.iter() {
            match self.map.entry(index) {
                Entry::Vacant(e) => {
                    e.insert(new.clone());
                }
                Entry::Occupied(e) => {
                    assert_eq!(e.get().epoch, new.epoch);
                    let id = S::Id::new(index, new.epoch);
                    // With an output vector, `merge` is not expected to fail.
                    e.into_mut().state
                        .merge(id, &new.state, stitch, Some(&mut self.temp))
                        .ok(); //TODO: unwrap?
                }
            }
        }
        self.temp.drain(..)
    }

    /// Use a given resource provided by an `Id` with the specified usage.
    /// Combines storage access by `Id` with the transition that extends
    /// the last read-only usage, if possible.
    ///
    /// Returns the old usage as an error if there is a conflict.
    pub fn use_extend<'a, T: 'a + Borrow<RefCount>>(
        &mut self,
        storage: &'a Storage<T, S::Id>,
        id: S::Id,
        selector: S::Selector,
        usage: S::Usage,
    ) -> Result<&'a T, S::Usage> {
        let item = &storage[id];
        self.change_extend(id, item.borrow(), selector, usage)
            .map(|()| item)
            .map_err(|pending| pending.usage.start)
    }

    /// Use a given resource provided by an `Id` with the specified usage.
    /// Combines storage access by `Id` with the transition that replaces
    /// the last usage with a new one, returning an iterator over these
    /// transitions.
    pub fn use_replace<'a, T: 'a + Borrow<RefCount>>(
        &mut self,
        storage: &'a Storage<T, S::Id>,
        id: S::Id,
        selector: S::Selector,
        usage: S::Usage,
    ) -> (&'a T, Drain<PendingTransition<S>>) {
        let item = &storage[id];
        let drain = self.change_replace(id, item.borrow(), selector, usage);
        (item, drain)
    }
}
// A no-op tracking state for resources that carry no usage transitions
// of their own (used for texture views and bind groups in `TrackerSet`):
// every query succeeds and every change/merge is trivially accepted.
impl<I: Copy + Debug + TypedId> ResourceState for PhantomData<I> {
    type Id = I;
    type Selector = ();
    type Usage = ();

    fn query(
        &self,
        _selector: Self::Selector,
    ) -> Option<Self::Usage> {
        Some(())
    }

    fn change(
        &mut self,
        _id: Self::Id,
        _selector: Self::Selector,
        _usage: Self::Usage,
        _output: Option<&mut Vec<PendingTransition<Self>>>,
    ) -> Result<(), PendingTransition<Self>> {
        Ok(())
    }

    fn merge(
        &mut self,
        _id: Self::Id,
        _other: &Self,
        _stitch: Stitch,
        _output: Option<&mut Vec<PendingTransition<Self>>>,
    ) -> Result<(), PendingTransition<Self>> {
        Ok(())
    }

    fn optimize(&mut self) {
    }
}
/// A set of trackers for all relevant resources.
#[cfg_attr(debug_assertions, derive(Debug))]
pub struct TrackerSet {
    pub buffers: ResourceTracker<BufferState>,
    pub textures: ResourceTracker<TextureState>,
    // Views and bind groups use the no-op `PhantomData` state:
    // they are tracked only for liveness, not for usage transitions.
    pub views: ResourceTracker<PhantomData<TextureViewId>>,
    pub bind_groups: ResourceTracker<PhantomData<BindGroupId>>,
    //TODO: samplers
}

impl TrackerSet {
    /// Create an empty set.
    pub fn new() -> Self {
        TrackerSet {
            buffers: ResourceTracker::new(),
            textures: ResourceTracker::new(),
            views: ResourceTracker::new(),
            bind_groups: ResourceTracker::new(),
        }
    }

    /// Clear all the trackers.
    pub fn clear(&mut self) {
        self.buffers.clear();
        self.textures.clear();
        self.views.clear();
        self.bind_groups.clear();
    }

    /// Try to optimize the tracking representation.
    pub fn optimize(&mut self) {
        self.buffers.optimize();
        self.textures.optimize();
        self.views.optimize();
        self.bind_groups.optimize();
    }

    /// Merge all the trackers of another instance by extending
    /// the usage. Panics on a conflict.
    pub fn merge_extend(&mut self, other: &Self) {
        self.buffers.merge_extend(&other.buffers).unwrap();
        self.textures.merge_extend(&other.textures).unwrap();
        self.views.merge_extend(&other.views).unwrap();
        self.bind_groups.merge_extend(&other.bind_groups).unwrap();
    }
}

View File

@@ -0,0 +1,479 @@
use std::{
cmp::Ordering,
fmt::Debug,
iter::Peekable,
ops::Range,
slice::Iter,
};
/// Structure that keeps track of an `I -> T` mapping,
/// optimized for the case where keys with the same value
/// are often grouped together linearly.
#[derive(Clone, Debug)]
pub struct RangedStates<I, T> {
    /// List of ranges, each associated with a single value.
    /// Ranges of keys have to be non-intersecting and ordered.
    ranges: Vec<(Range<I>, T)>,
}

impl<I, T> Default for RangedStates<I, T> {
    /// Start out with no ranges tracked at all.
    fn default() -> Self {
        let ranges = Vec::new();
        RangedStates { ranges }
    }
}
impl<I: Copy + PartialOrd, T: Copy + PartialEq> RangedStates<I, T> {
    /// Construct a new instance from a slice of ranges.
    #[cfg(test)]
    pub fn new(values: &[(Range<I>, T)]) -> Self {
        RangedStates {
            ranges: values.to_vec(),
        }
    }

    /// Clear all the ranges.
    pub fn clear(&mut self) {
        self.ranges.clear();
    }

    /// Append a range.
    ///
    /// Assumes that the object is being constructed from a set of
    /// ranges, and they are given in the ascending order of their keys.
    pub fn append(&mut self, index: Range<I>, value: T) {
        if let Some(last) = self.ranges.last() {
            debug_assert!(last.0.end <= index.start);
        }
        self.ranges.push((index, value));
    }

    /// Check that all the ranges are non-intersecting and ordered.
    /// Panics otherwise.
    #[cfg(test)]
    fn check_sanity(&self) {
        // Each range must be non-empty.
        for a in self.ranges.iter() {
            assert!(a.0.start < a.0.end);
        }
        // Consecutive ranges must not overlap and must be ordered.
        for (a, b) in self.ranges.iter().zip(self.ranges[1..].iter()) {
            assert!(a.0.end <= b.0.start);
        }
    }

    /// Merge the neighboring ranges together, where possible.
    pub fn coalesce(&mut self) {
        let mut num_removed = 0;
        let mut iter = self.ranges.iter_mut();
        let mut cur = match iter.next() {
            Some(elem) => elem,
            None => return,
        };
        while let Some(next) = iter.next() {
            if cur.0.end == next.0.start && cur.1 == next.1 {
                // Adjacent ranges with equal values: grow `cur` and mark
                // `next` as empty so it can be filtered out below.
                num_removed += 1;
                cur.0.end = next.0.end;
                next.0.end = next.0.start;
            } else {
                cur = next;
            }
        }
        if num_removed != 0 {
            // Drop the ranges that were emptied during merging.
            self.ranges.retain(|pair| pair.0.start != pair.0.end);
        }
    }

    /// Check if all intersecting ranges have the same value, which is returned.
    ///
    /// Returns `None` if no intersections are detected.
    /// Returns `Some(Err)` if the intersected values are inconsistent.
    pub fn query<U: PartialEq>(
        &self, index: &Range<I>, fun: impl Fn(&T) -> U
    ) -> Option<Result<U, ()>> {
        let mut result = None;
        for &(ref range, ref value) in self.ranges.iter() {
            // Half-open range intersection test.
            if range.end > index.start && range.start < index.end {
                let old = result.replace(fun(value));
                if old.is_some() && old != result {
                    return Some(Err(()))
                }
            }
        }
        result.map(Ok)
    }

    /// Split the storage ranges in such a way that there is a linear subset of
    /// them occupying exactly `index` range, which is returned mutably.
    ///
    /// Gaps in the ranges are filled with `default` value.
    pub fn isolate(&mut self, index: &Range<I>, default: T) -> &mut [(Range<I>, T)] {
        //TODO: implement this in 2 passes:
        // 1. scan the ranges to figure out how many extra ones need to be inserted
        // 2. go through the ranges by moving them to the right and inserting the missing ones
        let mut start_pos = match self.ranges
            .iter()
            .position(|pair| pair.0.end > index.start)
        {
            Some(pos) => pos,
            None => {
                // Everything tracked ends before `index` starts:
                // just append the whole requested range.
                let pos = self.ranges.len();
                self.ranges.push((index.clone(), default));
                return &mut self.ranges[pos ..];
            }
        };

        {
            // If the first intersecting range starts before `index`,
            // split off its head so the isolated part starts exactly
            // at `index.start`.
            let (range, value) = self.ranges[start_pos].clone();
            if range.start < index.start {
                self.ranges[start_pos].0.start = index.start;
                self.ranges.insert(start_pos, (range.start .. index.start, value));
                start_pos += 1;
            }
        }
        // Walk forward, filling gaps with `default` and splitting the
        // tail of the last intersecting range if it extends past `index`.
        let mut pos = start_pos;
        let mut range_pos = index.start;
        loop {
            let (range, value) = self.ranges[pos].clone();
            if range.start >= index.end {
                // Gap covers the rest of `index`; fill and stop.
                self.ranges.insert(pos, (range_pos .. index.end, default));
                pos += 1;
                break;
            }
            if range.start > range_pos {
                // Fill the gap before the current range.
                self.ranges.insert(pos, (range_pos .. range.start, default));
                pos += 1;
                range_pos = range.start;
            }
            if range.end >= index.end {
                if range.end != index.end {
                    // Split off the tail that lies beyond `index.end`.
                    self.ranges[pos].0.start = index.end;
                    self.ranges.insert(pos, (range_pos .. index.end, value));
                }
                pos += 1;
                break;
            }
            pos += 1;
            range_pos = range.end;
            if pos == self.ranges.len() {
                // Ran out of tracked ranges; fill the remainder.
                self.ranges.push((range_pos .. index.end, default));
                pos += 1;
                break;
            }
        }

        &mut self.ranges[start_pos .. pos]
    }

    /// Helper method for isolation that checks the sanity of the results.
    #[cfg(test)]
    pub fn sanely_isolated(&self, index: Range<I>, default: T) -> Vec<(Range<I>, T)> {
        let mut clone = self.clone();
        let result = clone.isolate(&index, default).to_vec();
        clone.check_sanity();
        result
    }

    /// Produce an iterator that merges two instances together.
    ///
    /// Each range in the returned iterator is a subset of a range in either
    /// `self` or `other`, and the value returned as a `Range` from `self` to `other`.
    pub fn merge<'a>(&'a self, other: &'a Self, base: I) -> Merge<'a, I, T> {
        Merge {
            base,
            sa: self.ranges.iter().peekable(),
            sb: other.ranges.iter().peekable(),
        }
    }
}
/// A custom iterator that goes through two `RangedStates` and process a merge.
pub struct Merge<'a, I, T> {
    /// The key up to which both streams have already been consumed;
    /// every yielded range starts at the current `base`.
    base: I,
    /// Peekable stream of ranges from the first (`self`) instance.
    sa: Peekable<Iter<'a, (Range<I>, T)>>,
    /// Peekable stream of ranges from the second (`other`) instance.
    sb: Peekable<Iter<'a, (Range<I>, T)>>,
}

impl<'a, I: Copy + Debug + Ord, T: Copy + Debug> Iterator for Merge<'a, I, T> {
    // Yields a key range plus the pair of values (left .. right),
    // where `None` means that side has no range covering the keys.
    type Item = (Range<I>, Range<Option<T>>);
    fn next(&mut self) -> Option<Self::Item> {
        match (self.sa.peek(), self.sb.peek()) {
            // we have both streams
            (Some(&(ref ra, va)), Some(&(ref rb, vb))) => {
                let (range, usage) = if ra.start < self.base { // in the middle of the left stream
                    if self.base == rb.start { // right stream is starting
                        debug_assert!(self.base < ra.end);
                        (self.base .. ra.end.min(rb.end), Some(*va) .. Some(*vb))
                    } else { // right hasn't started yet
                        debug_assert!(self.base < rb.start);
                        (self.base .. rb.start, Some(*va) .. None)
                    }
                } else if rb.start < self.base { // in the middle of the right stream
                    if self.base == ra.start { // left stream is starting
                        debug_assert!(self.base < rb.end);
                        (self.base .. ra.end.min(rb.end), Some(*va) .. Some(*vb))
                    } else { // left hasn't started yet
                        debug_assert!(self.base < ra.start);
                        (self.base .. ra.start, None .. Some(*vb))
                    }
                } else { // no active streams
                    match ra.start.cmp(&rb.start) {
                        // both are starting
                        Ordering::Equal => (ra.start .. ra.end.min(rb.end), Some(*va) .. Some(*vb)),
                        // only left is starting
                        Ordering::Less => (ra.start .. rb.start.min(ra.end), Some(*va) .. None),
                        // only right is starting
                        Ordering::Greater => (rb.start .. ra.start.min(rb.end), None .. Some(*vb)),
                    }
                };
                self.base = range.end;
                // Advance whichever stream(s) we fully consumed.
                if ra.end == range.end {
                    let _ = self.sa.next();
                }
                if rb.end == range.end {
                    let _ = self.sb.next();
                }
                Some((range, usage))
            }
            // only right stream
            (None, Some(&(ref rb, vb))) => {
                let range = self.base.max(rb.start) .. rb.end;
                self.base = rb.end;
                let _ = self.sb.next();
                Some((range, None .. Some(*vb)))
            }
            // only left stream
            (Some(&(ref ra, va)), None) => {
                let range = self.base.max(ra.start) .. ra.end;
                self.base = ra.end;
                let _ = self.sa.next();
                Some((range, Some(*va) .. None))
            }
            // done
            (None, None) => None,
        }
    }
}
#[cfg(test)]
mod test {
    //TODO: randomized/fuzzy testing
    use super::RangedStates;
    use std::{ fmt::Debug, ops::Range };

    // Merge two plain range lists starting from base key 0 and collect
    // the resulting (range, left value .. right value) items.
    fn easy_merge<T: PartialEq + Copy + Debug>(
        ra: Vec<(Range<usize>, T)>, rb: Vec<(Range<usize>, T)>
    ) -> Vec<(Range<usize>, Range<Option<T>>)> {
        RangedStates { ranges: ra }.merge(&RangedStates { ranges: rb }, 0).collect()
    }

    // Adjacent, ordered, non-empty ranges pass the sanity check.
    #[test]
    fn sane_good() {
        let rs = RangedStates { ranges: vec![
            (1..4, 9u8),
            (4..5, 9),
        ]};
        rs.check_sanity();
    }

    // An empty range (5..5) must be rejected.
    #[test]
    #[should_panic]
    fn sane_empty() {
        let rs = RangedStates { ranges: vec![
            (1..4, 9u8),
            (5..5, 9),
        ]};
        rs.check_sanity();
    }

    // Overlapping ranges (1..4 and 3..5) must be rejected.
    #[test]
    #[should_panic]
    fn sane_intersect() {
        let rs = RangedStates { ranges: vec![
            (1..4, 9u8),
            (3..5, 9),
        ]};
        rs.check_sanity();
    }

    // Adjacent equal-valued ranges merge; a gap (7..8) blocks merging.
    #[test]
    fn coalesce() {
        let mut rs = RangedStates { ranges: vec![
            (1..4, 9u8),
            (4..5, 9),
            (5..7, 1),
            (8..9, 1),
        ]};
        rs.coalesce();
        rs.check_sanity();
        assert_eq!(rs.ranges, vec![
            (1..5, 9),
            (5..7, 1),
            (8..9, 1),
        ]);
    }

    // No intersection -> None; consistent -> Some(Ok); mixed -> Some(Err).
    #[test]
    fn query() {
        let rs = RangedStates { ranges: vec![
            (1..4, 1u8),
            (5..7, 2),
        ]};
        assert_eq!(rs.query(&(0..1), |v| *v), None);
        assert_eq!(rs.query(&(1..3), |v| *v), Some(Ok(1)));
        assert_eq!(rs.query(&(1..6), |v| *v), Some(Err(())));
    }

    // Isolation splits existing ranges at the request boundaries and
    // fills gaps with the provided default value.
    #[test]
    fn isolate() {
        let rs = RangedStates { ranges: vec![
            (1..4, 9u8),
            (4..5, 9),
            (5..7, 1),
            (8..9, 1),
        ]};
        assert_eq!(&rs.sanely_isolated(4..5, 0), &[
            (4..5, 9u8),
        ]);
        assert_eq!(&rs.sanely_isolated(0..6, 0), &[
            (0..1, 0),
            (1..4, 9u8),
            (4..5, 9),
            (5..6, 1),
        ]);
        assert_eq!(&rs.sanely_isolated(8..10, 1), &[
            (8..9, 1),
            (9..10, 1),
        ]);
        assert_eq!(&rs.sanely_isolated(6..9, 0), &[
            (6..7, 1),
            (7..8, 0),
            (8..9, 1),
        ]);
    }

    // Identical key ranges on both sides yield a single paired range.
    #[test]
    fn merge_same() {
        assert_eq!(
            easy_merge(
                vec![
                    (1..4, 0u8),
                ],
                vec![
                    (1..4, 2u8),
                ],
            ),
            vec![
                (1..4, Some(0)..Some(2)),
            ]
        );
    }

    // A one-sided merge reports None for the missing side.
    #[test]
    fn merge_empty() {
        assert_eq!(
            easy_merge(
                vec![
                    (1..2, 0u8),
                ],
                vec![
                ],
            ),
            vec![
                (1..2, Some(0)..None),
            ]
        );
        assert_eq!(
            easy_merge(
                vec![
                ],
                vec![
                    (3..4, 1u8),
                ],
            ),
            vec![
                (3..4, None..Some(1)),
            ]
        );
    }

    // Disjoint ranges from either side appear individually, in key order.
    #[test]
    fn merge_separate() {
        assert_eq!(
            easy_merge(
                vec![
                    (1..2, 0u8),
                    (5..6, 1u8),
                ],
                vec![
                    (2..4, 2u8),
                ],
            ),
            vec![
                (1..2, Some(0)..None),
                (2..4, None..Some(2)),
                (5..6, Some(1)..None),
            ]
        );
    }

    // A range fully contained in the other side's range gets split into
    // before / overlap / after pieces.
    #[test]
    fn merge_subset() {
        assert_eq!(
            easy_merge(
                vec![
                    (1..6, 0u8),
                ],
                vec![
                    (2..4, 2u8),
                ],
            ),
            vec![
                (1..2, Some(0)..None),
                (2..4, Some(0)..Some(2)),
                (4..6, Some(0)..None),
            ]
        );
        assert_eq!(
            easy_merge(
                vec![
                    (2..4, 0u8),
                ],
                vec![
                    (1..4, 2u8),
                ],
            ),
            vec![
                (1..2, None..Some(2)),
                (2..4, Some(0)..Some(2)),
            ]
        );
    }

    // General case: partially overlapping ranges on both sides.
    #[test]
    fn merge_all() {
        assert_eq!(
            easy_merge(
                vec![
                    (1..4, 0u8),
                    (5..8, 1u8),
                ],
                vec![
                    (2..6, 2u8),
                    (7..9, 3u8),
                ],
            ),
            vec![
                (1..2, Some(0)..None),
                (2..4, Some(0)..Some(2)),
                (4..5, None..Some(2)),
                (5..6, Some(1)..Some(2)),
                (6..7, Some(1)..None),
                (7..8, Some(1)..Some(3)),
                (8..9, None..Some(3)),
            ]
        );
    }
}

View File

@@ -0,0 +1,280 @@
use crate::{
conv,
device::MAX_MIP_LEVELS,
resource::TextureUsage,
TextureId,
};
use super::{range::RangedStates, PendingTransition, ResourceState, Stitch, Unit};
use arrayvec::ArrayVec;
use std::ops::Range;
/// Layer tracking for a single plane (aspect) of one mip level:
/// maps ranges of array layers to their usage units.
type PlaneStates = RangedStates<hal::image::Layer, Unit<TextureUsage>>;
//TODO: store `hal::image::State` here to avoid extra conversions
/// Tracked usage state of a single mip level, split by format aspect.
#[derive(Clone, Debug, Default)]
struct MipState {
    /// Layer states of the color plane.
    color: PlaneStates,
    /// Layer states of the depth plane.
    depth: PlaneStates,
    /// Layer states of the stencil plane.
    stencil: PlaneStates,
}
/// Usage-tracking state of a whole texture.
#[derive(Clone, Debug, Default)]
pub struct TextureState {
    /// One entry per tracked mip level. Levels are appended lazily
    /// (up to `MAX_MIP_LEVELS`) as they are first touched by `change`.
    mips: ArrayVec<[MipState; MAX_MIP_LEVELS]>,
}
impl PendingTransition<TextureState> {
    /// Produce the gfx-hal image states corresponding to the transition.
    pub fn to_states(&self) -> Range<hal::image::State> {
        let aspects = self.selector.aspects;
        conv::map_texture_state(self.usage.start, aspects) ..
            conv::map_texture_state(self.usage.end, aspects)
    }

    //TODO: make this less awkward!
    /// Check for the validity of `self` with regards to the presence of `output`.
    ///
    /// Return the end usage if the `output` is provided and pushes self to it.
    /// Otherwise, return the extended usage, or an error if extension is impossible.
    ///
    /// When a transition is generated, returns the specified `replace` usage.
    fn record(
        self, output: Option<&mut &mut Vec<Self>>, replace: TextureUsage
    ) -> Result<TextureUsage, Self> {
        let usage = self.usage.clone();
        if let Some(transitions) = output {
            // A transition list is available: emit the transition
            // and continue with the replacement usage.
            transitions.push(self);
            return Ok(replace);
        }
        // No transition list: try to extend the usage instead. Combining
        // is only allowed when no write usage is involved (or when the
        // starting usage is empty).
        if !usage.start.is_empty() && TextureUsage::WRITE_ALL.intersects(usage.start | usage.end) {
            Err(self)
        } else {
            Ok(usage.start | usage.end)
        }
    }
}
impl ResourceState for TextureState {
    type Id = TextureId;
    type Selector = hal::image::SubresourceRange;
    type Usage = TextureUsage;

    /// Try to distill the usage of all the selected sub-resources into a
    /// single value, returning `None` if they are untracked or disagree.
    fn query(
        &self,
        selector: Self::Selector,
    ) -> Option<Self::Usage> {
        let mut result = None;
        // Levels beyond what we track are simply not initialized yet;
        // clamp the selection to the stored mip count.
        let num_levels = self.mips.len();
        let mip_start = num_levels.min(selector.levels.start as usize);
        let mip_end = num_levels.min(selector.levels.end as usize);
        for mip in self.mips[mip_start .. mip_end].iter() {
            for &(aspect, plane_states) in &[
                (hal::format::Aspects::COLOR, &mip.color),
                (hal::format::Aspects::DEPTH, &mip.depth),
                (hal::format::Aspects::STENCIL, &mip.stencil),
            ] {
                if !selector.aspects.contains(aspect) {
                    continue
                }
                match plane_states.query(&selector.layers, |unit| unit.last) {
                    // nothing tracked for these layers on this plane
                    None => {}
                    // consistent with what was seen so far
                    Some(Ok(usage)) if result == Some(usage) => {}
                    // first usage encountered
                    Some(Ok(usage)) if result.is_none() => {
                        result = Some(usage);
                    }
                    // mismatch, either across planes/levels or within
                    // the queried layer range itself
                    Some(Ok(_)) |
                    Some(Err(())) => return None,
                }
            }
        }
        result
    }

    /// Switch the selected sub-resources to `usage`.
    ///
    /// If `output` is provided, the required transitions are recorded there.
    /// Otherwise, the existing usage is extended, which fails (returning the
    /// pending transition) when a write usage would be combined — see
    /// `PendingTransition::record`.
    fn change(
        &mut self,
        id: Self::Id,
        selector: Self::Selector,
        usage: Self::Usage,
        mut output: Option<&mut Vec<PendingTransition<Self>>>,
    ) -> Result<(), PendingTransition<Self>> {
        // Lazily initialize mip levels touched for the first time.
        while self.mips.len() < selector.levels.end as usize {
            self.mips.push(MipState::default());
        }
        for (mip_id, mip) in self
            .mips[selector.levels.start as usize .. selector.levels.end as usize]
            .iter_mut()
            .enumerate()
        {
            let level = selector.levels.start + mip_id as hal::image::Level;
            for &mut (aspect, ref mut plane_states) in &mut [
                (hal::format::Aspects::COLOR, &mut mip.color),
                (hal::format::Aspects::DEPTH, &mut mip.depth),
                (hal::format::Aspects::STENCIL, &mut mip.stencil),
            ] {
                if !selector.aspects.contains(aspect) {
                    continue
                }
                let layers = plane_states.isolate(&selector.layers, Unit::new(usage));
                for &mut (ref range, ref mut unit) in layers {
                    if unit.last == usage {
                        continue
                    }
                    let pending = PendingTransition {
                        id,
                        selector: hal::image::SubresourceRange {
                            // Fix: carry the aspect of the plane actually
                            // being transitioned (this used to hard-code
                            // COLOR), consistent with `merge` below.
                            aspects: aspect,
                            levels: level .. level + 1,
                            layers: range.clone(),
                        },
                        usage: unit.last .. usage,
                    };
                    unit.last = pending.record(output.as_mut(), usage)?;
                }
            }
        }
        Ok(())
    }

    /// Merge the tracked state of `other` into `self`, generating
    /// transitions (or extending usages) where the two sides disagree.
    fn merge(
        &mut self,
        id: Self::Id,
        other: &Self,
        stitch: Stitch,
        mut output: Option<&mut Vec<PendingTransition<Self>>>,
    ) -> Result<(), PendingTransition<Self>> {
        let mut temp = Vec::new();
        // Make sure `self` tracks at least as many levels as `other`.
        while self.mips.len() < other.mips.len() {
            self.mips.push(MipState::default());
        }
        for (mip_id, (mip_self, mip_other)) in self.mips
            .iter_mut()
            .zip(&other.mips)
            .enumerate()
        {
            let level = mip_id as hal::image::Level;
            for &mut (aspects, ref mut planes_self, planes_other) in &mut [
                (hal::format::Aspects::COLOR, &mut mip_self.color, &mip_other.color),
                (hal::format::Aspects::DEPTH, &mut mip_self.depth, &mip_other.depth),
                (hal::format::Aspects::STENCIL, &mut mip_self.stencil, &mip_other.stencil),
            ] {
                temp.extend(planes_self.merge(planes_other, 0));
                planes_self.clear();
                for (layers, states) in temp.drain(..) {
                    let unit = match states {
                        // the merge iterator never yields a fully empty pair
                        Range { start: None, end: None } => unreachable!(),
                        // only tracked by `self`: keep as-is
                        Range { start: Some(start), end: None } => start,
                        // only tracked by `other`: stitch it in
                        Range { start: None, end: Some(end) } => Unit::new(end.select(stitch)),
                        // tracked by both: transition if the usages differ
                        Range { start: Some(start), end: Some(end) } => {
                            let mut final_usage = end.select(stitch);
                            if start.last != final_usage {
                                let pending = PendingTransition {
                                    id,
                                    selector: hal::image::SubresourceRange {
                                        aspects,
                                        levels: level .. level + 1,
                                        layers: layers.clone(),
                                    },
                                    usage: start.last .. final_usage,
                                };
                                final_usage = pending.record(output.as_mut(), end.last)?;
                            }
                            Unit {
                                init: start.init,
                                last: final_usage,
                            }
                        }
                    };
                    planes_self.append(layers, unit);
                }
            }
        }
        Ok(())
    }

    /// Compact the tracking data by coalescing adjacent equal ranges.
    fn optimize(&mut self) {
        for mip in self.mips.iter_mut() {
            mip.color.coalesce();
            mip.depth.coalesce();
            mip.stencil.coalesce();
        }
    }
}
#[cfg(test)]
mod test {
    //TODO: change() and merge() tests
    //use crate::TypedId;
    use super::*;
    use hal::{
        format::Aspects,
        image::SubresourceRange,
    };

    #[test]
    fn query() {
        let mut state = TextureState::default();
        state.mips.push(MipState::default());
        state.mips.push(MipState::default());
        // Mip 1 color plane: layers 1..5 SAMPLED, layer 5 STORAGE.
        state.mips[1].color = PlaneStates::new(&[
            (1..3, Unit::new(TextureUsage::SAMPLED)),
            (3..5, Unit::new(TextureUsage::SAMPLED)),
            (5..6, Unit::new(TextureUsage::STORAGE)),
        ]);

        let query = |aspects, levels, layers| state.query(SubresourceRange {
            aspects,
            levels,
            layers,
        });

        // level 1 matches
        assert_eq!(query(Aspects::COLOR, 1..2, 2..5), Some(TextureUsage::SAMPLED));
        // no depth found
        assert_eq!(query(Aspects::DEPTH, 1..2, 2..5), None);
        // level 0 is empty, level 1 matches
        assert_eq!(query(Aspects::COLOR, 0..2, 2..5), Some(TextureUsage::SAMPLED));
        // level 1 matches with gaps
        assert_eq!(query(Aspects::COLOR, 1..2, 1..5), Some(TextureUsage::SAMPLED));
        // level 1 doesn't match
        assert_eq!(query(Aspects::COLOR, 1..2, 4..6), None);
    }
}