356: Add generic ranges to buffer mapping api and make it safe. r=kvark a=lachlansneff

[Rendered](https://charted.space/notes/wgpu-rs/wgpu/struct.Buffer.html)

The safety issue with the current API (being able to unmap a buffer while still holding a slice into the mapped data) is fixed by having `get_mapped_range` and `get_mapped_range_mut` return `BufferView` and `BufferViewMut`, which notify the buffer when they are dropped that their ranges are no longer in use. `Buffer::unmap` asserts that the list of mapped ranges is empty, so it can never pull the memory out from under a live view.
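
For reference, a minimal sketch of the new usage pattern, assembled from the updated examples below (it assumes the `device`, `queue`, and `staging_buffer` setup from those examples, and elides error handling):

```rust
// Mapping now goes through a slice rather than the buffer itself.
let buffer_slice = staging_buffer.slice(..);
let buffer_future = buffer_slice.map_async(wgpu::MapMode::Read);

// The future only resolves once the device is polled.
device.poll(wgpu::Maintain::Wait);

if let Ok(()) = buffer_future.await {
    // `get_mapped_range` returns a `BufferView`, which derefs to `&[u8]`.
    let data = buffer_slice.get_mapped_range();
    let first_word = u32::from_ne_bytes(data[..4].try_into().unwrap());

    // Dropping the view tells the buffer this range is no longer in use...
    drop(data);

    // ...which makes the assertion inside `unmap` pass.
    staging_buffer.unmap();
}
```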

Co-authored-by: Lachlan Sneff <lachlan.sneff@gmail.com>
Committed by bors[bot] via GitHub on 2020-06-10 17:46:36 +00:00
3 changed files with 167 additions and 35 deletions

File 1 of 3:

```diff
@@ -118,7 +118,8 @@ async fn run() {
     queue.submit(Some(command_buffer));
 
     // Note that we're not calling `.await` here.
-    let buffer_future = output_buffer.map_async(wgpu::MapMode::Read, 0, wgt::BufferSize::WHOLE);
+    let buffer_slice = output_buffer.slice(..);
+    let buffer_future = buffer_slice.map_async(wgpu::MapMode::Read);
 
     // Poll the device in a blocking manner so that our future resolves.
     // In an actual application, `device.poll(...)` should
@@ -132,7 +133,7 @@ async fn run() {
     }
 
     if let Ok(()) = buffer_future.await {
-        let padded_buffer = output_buffer.get_mapped_range(0, wgt::BufferSize::WHOLE);
+        let padded_buffer = buffer_slice.get_mapped_range();
 
         let mut png_encoder = png::Encoder::new(
             File::create("red.png").unwrap(),
```
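
A hypothetical snippet (reusing `output_buffer` and `buffer_slice` from the example above) of the misuse this PR turns from silent unsoundness into a loud failure, via the assertion that `MapContext::reset` now performs:

```rust
let view = buffer_slice.get_mapped_range();
// output_buffer.unmap(); // would panic: a mapped view is still alive
drop(view);               // the view's Drop removes its range from the MapContext
output_buffer.unmap();    // succeeds: the sub-range list is empty again
```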

File 2 of 3:

```diff
@@ -109,7 +109,8 @@ async fn execute_gpu(numbers: Vec<u32>) -> Vec<u32> {
     queue.submit(Some(encoder.finish()));
 
     // Note that we're not calling `.await` here.
-    let buffer_future = staging_buffer.map_async(wgpu::MapMode::Read, 0, wgt::BufferSize::WHOLE);
+    let buffer_slice = staging_buffer.slice(..);
+    let buffer_future = buffer_slice.map_async(wgpu::MapMode::Read);
 
     // Poll the device in a blocking manner so that our future resolves.
     // In an actual application, `device.poll(...)` should
@@ -117,7 +118,7 @@ async fn execute_gpu(numbers: Vec<u32>) -> Vec<u32> {
     device.poll(wgpu::Maintain::Wait);
 
     if let Ok(()) = buffer_future.await {
-        let data = staging_buffer.get_mapped_range(0, wgt::BufferSize::WHOLE);
+        let data = buffer_slice.get_mapped_range();
         let result = data
             .chunks_exact(4)
             .map(|b| u32::from_ne_bytes(b.try_into().unwrap()))
```

File 3 of 3:

```diff
@@ -375,7 +375,11 @@ impl MapContext {
     fn reset(&mut self) {
         self.initial_range = 0..0;
-        self.sub_ranges.clear();
+        assert!(
+            self.sub_ranges.is_empty(),
+            "You cannot unmap a buffer that still has accessible mapped views"
+        );
     }
 
     fn add(&mut self, offset: BufferAddress, size: BufferSize) -> BufferAddress {
@@ -395,6 +399,22 @@ impl MapContext {
         self.sub_ranges.push(offset..end);
         end
     }
+
+    fn remove(&mut self, offset: BufferAddress, size: BufferSize) {
+        let end = if size == BufferSize::WHOLE {
+            self.initial_range.end
+        } else {
+            offset + size.0
+        };
+        // Switch this out with `Vec::remove_item` once that stabilizes.
+        let index = self
+            .sub_ranges
+            .iter()
+            .position(|r| *r == (offset..end))
+            .expect("unable to remove range from map context");
+        self.sub_ranges.swap_remove(index);
+    }
 }
 
 /// A handle to a GPU-accessible buffer.
@@ -402,9 +422,11 @@ pub struct Buffer {
     context: Arc<C>,
     id: <C as Context>::BufferId,
     map_context: Mutex<MapContext>,
+    usage: BufferUsage,
 }
 
 /// A description of what portion of a buffer to use
+#[derive(Copy, Clone)]
 pub struct BufferSlice<'a> {
     buffer: &'a Buffer,
     offset: BufferAddress,
@@ -1123,6 +1145,7 @@ impl Device {
             context: Arc::clone(&self.context),
             id: Context::device_create_buffer(&*self.context, &self.id, desc),
             map_context: Mutex::new(map_context),
+            usage: desc.usage,
         }
     }
@@ -1190,20 +1213,84 @@ pub enum MapMode {
     Write,
 }
 
+fn range_to_offset_size<S: RangeBounds<BufferAddress>>(bounds: S) -> (BufferAddress, BufferSize) {
+    let offset = match bounds.start_bound() {
+        Bound::Included(&bound) => bound,
+        Bound::Excluded(&bound) => bound + 1,
+        Bound::Unbounded => 0,
+    };
+
+    let size = match bounds.end_bound() {
+        Bound::Included(&bound) => BufferSize(bound + 1 - offset),
+        Bound::Excluded(&bound) => BufferSize(bound - offset),
+        Bound::Unbounded => BufferSize::WHOLE,
+    };
+
+    (offset, size)
+}
+
+pub struct BufferView<'a> {
+    slice: BufferSlice<'a>,
+    data: &'a [u8],
+}
+
+pub struct BufferViewMut<'a> {
+    slice: BufferSlice<'a>,
+    data: &'a mut [u8],
+    readable: bool,
+}
+
+impl std::ops::Deref for BufferView<'_> {
+    type Target = [u8];
+
+    fn deref(&self) -> &[u8] {
+        self.data
+    }
+}
+
+impl std::ops::Deref for BufferViewMut<'_> {
+    type Target = [u8];
+
+    fn deref(&self) -> &[u8] {
+        assert!(
+            self.readable,
+            "Attempting to read a write-only mapping for buffer {:?}",
+            self.slice.buffer.id
+        );
+        self.data
+    }
+}
+
+impl std::ops::DerefMut for BufferViewMut<'_> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        self.data
+    }
+}
+
+impl Drop for BufferView<'_> {
+    fn drop(&mut self) {
+        self.slice
+            .buffer
+            .map_context
+            .lock()
+            .remove(self.slice.offset, self.slice.size);
+    }
+}
+
+impl Drop for BufferViewMut<'_> {
+    fn drop(&mut self) {
+        self.slice
+            .buffer
+            .map_context
+            .lock()
+            .remove(self.slice.offset, self.slice.size);
+    }
+}
+
 impl Buffer {
     /// Use only a portion of this Buffer for a given operation. Choosing a range with 0 size will
     /// return a slice that extends to the end of the buffer.
     pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice {
-        let offset = match bounds.start_bound() {
-            Bound::Included(&bound) => bound,
-            Bound::Excluded(&bound) => bound + 1,
-            Bound::Unbounded => 0,
-        };
-        let size = match bounds.end_bound() {
-            Bound::Included(&bound) => BufferSize(bound + 1 - offset),
-            Bound::Excluded(&bound) => BufferSize(bound - offset),
-            Bound::Unbounded => BufferSize::WHOLE,
-        };
+        let (offset, size) = range_to_offset_size(bounds);
         BufferSlice {
             buffer: self,
             offset,
@@ -1211,6 +1298,38 @@ impl Buffer {
         }
     }
 
+    /// Flushes any pending write operations and unmaps the buffer from host memory.
+    pub fn unmap(&self) {
+        self.map_context.lock().reset();
+        Context::buffer_unmap(&*self.context, &self.id);
+    }
+}
+
+impl BufferSlice<'_> {
+    pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> Self {
+        let (sub_offset, sub_size) = range_to_offset_size(bounds);
+        let new_offset = self.offset + sub_offset;
+        let new_size = if sub_size == BufferSize::WHOLE {
+            BufferSize(
+                self.size
+                    .0
+                    .checked_sub(sub_offset)
+                    .expect("underflow when slicing `BufferSlice`"),
+            )
+        } else {
+            assert!(
+                new_offset + sub_size.0 <= self.offset + self.size.0,
+                "offset and size must stay within the bounds of the parent slice"
+            );
+            sub_size
+        };
+
+        Self {
+            buffer: self.buffer,
+            offset: new_offset,
+            size: new_size,
+        }
+    }
+
     /// Map the buffer. Buffer is ready to map once the future is resolved.
     ///
     /// For the future to complete, `device.poll(...)` must be called elsewhere in the runtime, possibly integrated
@@ -1222,42 +1341,53 @@ impl Buffer {
     pub fn map_async(
         &self,
         mode: MapMode,
-        offset: BufferAddress,
-        size: BufferSize,
     ) -> impl Future<Output = Result<(), BufferAsyncError>> + Send {
         let end = {
-            let mut mc = self.map_context.lock();
+            let mut mc = self.buffer.map_context.lock();
             assert_eq!(
                 mc.initial_range,
                 0..0,
                 "Buffer {:?} is already mapped",
-                self.id
+                self.buffer.id
             );
-            let end = if size == BufferSize::WHOLE {
+            let end = if self.size == BufferSize::WHOLE {
                 mc.total_size
             } else {
-                offset + size.0
+                self.offset + self.size.0
             };
-            mc.initial_range = offset..end;
+            mc.initial_range = self.offset..end;
             end
         };
-        Context::buffer_map_async(&*self.context, &self.id, mode, offset..end)
+        Context::buffer_map_async(
+            &*self.buffer.context,
+            &self.buffer.id,
+            mode,
+            self.offset..end,
+        )
     }
 
-    pub fn get_mapped_range(&self, offset: BufferAddress, size: BufferSize) -> &[u8] {
-        let end = self.map_context.lock().add(offset, size);
-        Context::buffer_get_mapped_range(&*self.context, &self.id, offset..end)
+    pub fn get_mapped_range(&self) -> BufferView {
+        let end = self.buffer.map_context.lock().add(self.offset, self.size);
+        let data = Context::buffer_get_mapped_range(
+            &*self.buffer.context,
+            &self.buffer.id,
+            self.offset..end,
+        );
+        BufferView { slice: *self, data }
     }
 
-    pub fn get_mapped_range_mut(&self, offset: BufferAddress, size: BufferSize) -> &mut [u8] {
-        let end = self.map_context.lock().add(offset, size);
-        Context::buffer_get_mapped_range_mut(&*self.context, &self.id, offset..end)
-    }
-
-    /// Flushes any pending write operations and unmaps the buffer from host memory.
-    pub fn unmap(&self) {
-        self.map_context.lock().reset();
-        Context::buffer_unmap(&*self.context, &self.id);
+    pub fn get_mapped_range_mut(&self) -> BufferViewMut {
+        let end = self.buffer.map_context.lock().add(self.offset, self.size);
+        let data = Context::buffer_get_mapped_range_mut(
+            &*self.buffer.context,
+            &self.buffer.id,
+            self.offset..end,
+        );
+        BufferViewMut {
+            slice: *self,
+            data,
+            readable: self.buffer.usage.contains(BufferUsage::MAP_READ),
+        }
     }
 }
```
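
Since the slicing API is built on `range_to_offset_size`, a small self-contained sketch may help pin down the range semantics. `BufferAddress` and `BufferSize` here are simplified stand-ins, and the concrete value of the `WHOLE` sentinel is an assumption for illustration; the real definitions live in `wgt`:

```rust
use std::ops::{Bound, RangeBounds};

type BufferAddress = u64;

#[derive(Copy, Clone, PartialEq, Debug)]
struct BufferSize(u64);

impl BufferSize {
    // Stand-in sentinel meaning "extends to the end of the buffer";
    // the real value is whatever wgt::BufferSize::WHOLE defines.
    const WHOLE: BufferSize = BufferSize(u64::MAX);
}

// Mirrors the helper added in this commit.
fn range_to_offset_size<S: RangeBounds<BufferAddress>>(bounds: S) -> (BufferAddress, BufferSize) {
    let offset = match bounds.start_bound() {
        Bound::Included(&bound) => bound,
        Bound::Excluded(&bound) => bound + 1,
        Bound::Unbounded => 0,
    };
    let size = match bounds.end_bound() {
        Bound::Included(&bound) => BufferSize(bound + 1 - offset),
        Bound::Excluded(&bound) => BufferSize(bound - offset),
        Bound::Unbounded => BufferSize::WHOLE,
    };
    (offset, size)
}

fn main() {
    assert_eq!(range_to_offset_size(..), (0, BufferSize::WHOLE));    // whole buffer
    assert_eq!(range_to_offset_size(16..), (16, BufferSize::WHOLE)); // from 16 to the end
    assert_eq!(range_to_offset_size(16..64), (16, BufferSize(48)));  // half-open range
    assert_eq!(range_to_offset_size(16..=63), (16, BufferSize(48))); // inclusive end, same span
}
```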