#281: The Context trait (r=grovesNL, a=kvark)

The main motivation here is to avoid blocking the wgpu-core updates by `wgpu-native`. Instead, `wgpu-native` becomes a branch, and the dependency of `wgpu-rs` -> `wgpu-native` starts adhering to the contract/API of the standard webgpu-native headers.

The biggest change is the introduction of the Context trait. I recall us discussing 2 downsides to having this trait:
  1. inconvenient for the users to include. This is a non-issue here, since it's private.
  2. more code to maintain. This is less of an issue if we aim to have 3 backends.

What this gives in return is a well established contract with the backends. Unlike gfx-rs, the backend code is right here, a part of the crate, so the contract is only for internal use.

Fixes #156: the "direct" implementation of it goes straight to wgpu-core. What this gives us is less overhead for command recording, since there is no longer an extra indirection on every command, and no heap allocation at the end of a render pass.

The downside of this PR is one extra `Arc` (with addref) per object.

This commit also has small improvements:
- consuming command buffers on submit (Fixes #267)
- Instance type
- proper call to destructors
- fallible `request_device`

Co-authored-by: Dzmitry Malyshau <kvarkus@gmail.com>
This commit is contained in:
bors[bot]
2020-04-27 04:12:38 +00:00
committed by GitHub
12 changed files with 2597 additions and 1999 deletions

View File

@@ -21,28 +21,23 @@ exclude = ["etc/**/*", "examples/**/*", "tests/**/*", "Cargo.lock", "target/**/*
[features]
default = []
# Make Vulkan backend available on platforms where it is by default not, e.g. macOS
vulkan = ["wgn/vulkan-portability"]
[target.'cfg(not(target_arch = "wasm32"))'.dependencies.wgn]
package = "wgpu-native"
version = "0.5"
git = "https://github.com/gfx-rs/wgpu"
rev = "49dbe08f37f8396cff0d6672667a48116ec487f5"
vulkan = ["wgc/gfx-backend-vulkan"]
[target.'cfg(not(target_arch = "wasm32"))'.dependencies.wgc]
package = "wgpu-core"
version = "0.5"
git = "https://github.com/gfx-rs/wgpu"
rev = "49dbe08f37f8396cff0d6672667a48116ec487f5"
rev = "5c172dd4756aa152b4f3350e624d7b1b5d24ddda"
[dependencies.wgt]
package = "wgpu-types"
version = "0.5"
git = "https://github.com/gfx-rs/wgpu"
rev = "49dbe08f37f8396cff0d6672667a48116ec487f5"
rev = "5c172dd4756aa152b4f3350e624d7b1b5d24ddda"
[dependencies]
arrayvec = "0.5"
futures = "0.3"
smallvec = "1"
raw-window-handle = "0.3"
parking_lot = "0.10"
@@ -54,7 +49,6 @@ png = "0.15"
winit = { version = "0.22.1", features = ["web-sys"] }
rand = { version = "0.7.2", features = ["wasm-bindgen"] }
bytemuck = "1"
futures = "0.3"
[[example]]
name="hello-compute"
@@ -64,7 +58,6 @@ test = true
[patch.crates-io]
#wgpu-types = { version = "0.5.0", path = "../wgpu/wgpu-types" }
#wgpu-core = { version = "0.5.0", path = "../wgpu/wgpu-core" }
#wgpu-native = { version = "0.5.0", path = "../wgpu/wgpu-native" }
#gfx-hal = { version = "0.5.0", path = "../gfx/src/hal" }
#gfx-backend-empty = { version = "0.5.0", path = "../gfx/src/backend/empty" }
#gfx-backend-vulkan = { version = "0.5.0", path = "../gfx/src/backend/vulkan" }
@@ -78,6 +71,9 @@ wasm-bindgen-futures = { git = "https://github.com/rustwasm/wasm-bindgen" }
web-sys = { git = "https://github.com/rustwasm/wasm-bindgen" }
js-sys = { git = "https://github.com/rustwasm/wasm-bindgen" }
[target.'cfg(target_os = "macos")'.dependencies]
objc = "0.2.7"
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
env_logger = "0.7"

View File

@@ -8,7 +8,7 @@
wgpu-rs is an idiomatic Rust wrapper over [wgpu-core](https://github.com/gfx-rs/wgpu). It's designed to be suitable for general purpose graphics and computation needs of the Rust community.
Currently wgpu-rs works on native platforms, but [WASM support is currently being added](https://github.com/gfx-rs/wgpu-rs/issues/101) as well.
wgpu-rs can target both the natively supported backends and WASM directly.
## Gallery

View File

@@ -5,15 +5,16 @@ use std::fs::File;
use std::mem::size_of;
async fn run() {
let adapter = wgpu::Adapter::request(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: None,
},
wgpu::BackendBit::PRIMARY,
)
.await
.unwrap();
let adapter = wgpu::Instance::new()
.request_adapter(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: None,
},
wgpu::BackendBit::PRIMARY,
)
.await
.unwrap();
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor {
@@ -22,7 +23,8 @@ async fn run() {
},
limits: wgpu::Limits::default(),
})
.await;
.await
.unwrap();
// Rendered image is 256×256 with 32-bit RGBA color
let size = 256u32;
@@ -86,7 +88,7 @@ async fn run() {
encoder.finish()
};
queue.submit(&[command_buffer]);
queue.submit(Some(command_buffer));
// Note that we're not calling `.await` here.
let buffer_future = output_buffer.map_read(0, (size * size) as u64 * size_of::<u32>() as u64);

View File

@@ -1,14 +1,15 @@
/// This example shows how to describe the adapter in use.
async fn run() {
let adapter = wgpu::Adapter::request(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: None,
},
wgpu::BackendBit::PRIMARY,
)
.await
.unwrap();
let adapter = wgpu::Instance::new()
.request_adapter(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: None,
},
wgpu::BackendBit::PRIMARY,
)
.await
.unwrap();
#[cfg(not(target_arch = "wasm32"))]
println!("{:?}", adapter.get_info())

View File

@@ -49,21 +49,23 @@ pub trait Example: 'static + Sized {
async fn run_async<E: Example>(event_loop: EventLoop<()>, window: Window) {
log::info!("Initializing the surface...");
let (size, surface) = {
let instance = wgpu::Instance::new();
let (size, surface) = unsafe {
let size = window.inner_size();
let surface = wgpu::Surface::create(&window);
let surface = instance.create_surface(&window);
(size, surface)
};
let adapter = wgpu::Adapter::request(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
wgpu::BackendBit::PRIMARY,
)
.await
.unwrap();
let adapter = instance
.request_adapter(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
wgpu::BackendBit::PRIMARY,
)
.await
.unwrap();
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor {
@@ -72,7 +74,8 @@ async fn run_async<E: Example>(event_loop: EventLoop<()>, window: Window) {
},
limits: wgpu::Limits::default(),
})
.await;
.await
.unwrap();
let mut sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@@ -90,8 +93,8 @@ async fn run_async<E: Example>(event_loop: EventLoop<()>, window: Window) {
log::info!("Initializing the example...");
let (mut example, init_command_buf) = E::init(&sc_desc, &device);
if let Some(command_buf) = init_command_buf {
queue.submit(&[command_buf]);
if init_command_buf.is_some() {
queue.submit(init_command_buf);
}
log::info!("Entering render loop...");
@@ -112,8 +115,8 @@ async fn run_async<E: Example>(event_loop: EventLoop<()>, window: Window) {
sc_desc.height = size.height;
swap_chain = device.create_swap_chain(&surface, &sc_desc);
let command_buf = example.resize(&sc_desc, &device);
if let Some(command_buf) = command_buf {
queue.submit(&[command_buf]);
if command_buf.is_some() {
queue.submit(command_buf);
}
}
event::Event::WindowEvent { event, .. } => match event {
@@ -138,7 +141,7 @@ async fn run_async<E: Example>(event_loop: EventLoop<()>, window: Window) {
.get_next_texture()
.expect("Timeout when acquiring next swap chain texture");
let command_buf = example.render(&frame, &device);
queue.submit(&[command_buf]);
queue.submit(Some(command_buf));
}
_ => {}
}

View File

@@ -20,15 +20,17 @@ async fn execute_gpu(numbers: Vec<u32>) -> Vec<u32> {
let slice_size = numbers.len() * std::mem::size_of::<u32>();
let size = slice_size as wgpu::BufferAddress;
let adapter = wgpu::Adapter::request(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: None,
},
wgpu::BackendBit::PRIMARY,
)
.await
.unwrap();
let instance = wgpu::Instance::new();
let adapter = instance
.request_adapter(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: None,
},
wgpu::BackendBit::PRIMARY,
)
.await
.unwrap();
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor {
@@ -37,7 +39,8 @@ async fn execute_gpu(numbers: Vec<u32>) -> Vec<u32> {
},
limits: wgpu::Limits::default(),
})
.await;
.await
.unwrap();
let cs = include_bytes!("shader.comp.spv");
let cs_module =
@@ -103,7 +106,7 @@ async fn execute_gpu(numbers: Vec<u32>) -> Vec<u32> {
}
encoder.copy_buffer_to_buffer(&storage_buffer, 0, &staging_buffer, 0, size);
queue.submit(&[encoder.finish()]);
queue.submit(Some(encoder.finish()));
// Note that we're not calling `.await` here.
let buffer_future = staging_buffer.map_read(0, size);

View File

@@ -6,17 +6,18 @@ use winit::{
async fn run(event_loop: EventLoop<()>, window: Window, swapchain_format: wgpu::TextureFormat) {
let size = window.inner_size();
let surface = wgpu::Surface::create(&window);
let adapter = wgpu::Adapter::request(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
wgpu::BackendBit::PRIMARY,
)
.await
.unwrap();
let instance = wgpu::Instance::new();
let surface = unsafe { instance.create_surface(&window) };
let adapter = instance
.request_adapter(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
wgpu::BackendBit::PRIMARY,
)
.await
.unwrap();
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor {
@@ -25,7 +26,8 @@ async fn run(event_loop: EventLoop<()>, window: Window, swapchain_format: wgpu::
},
limits: wgpu::Limits::default(),
})
.await;
.await
.unwrap();
let vs = include_bytes!("shader.vert.spv");
let vs_module =
@@ -116,7 +118,7 @@ async fn run(event_loop: EventLoop<()>, window: Window, swapchain_format: wgpu::
rpass.draw(0..3, 0..1);
}
queue.submit(&[encoder.finish()]);
queue.submit(Some(encoder.finish()));
}
Event::WindowEvent {
event: WindowEvent::CloseRequested,
@@ -150,10 +152,6 @@ fn main() {
.ok()
})
.expect("couldn't append canvas to document body");
wasm_bindgen_futures::spawn_local(run(
event_loop,
window,
wgpu::TextureFormat::Bgra8Unorm,
));
wasm_bindgen_futures::spawn_local(run(event_loop, window, wgpu::TextureFormat::Bgra8Unorm));
}
}

1011
src/backend/direct.rs Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -1,13 +1,13 @@
#[cfg(target_arch = "wasm32")]
mod web;
#[cfg(target_arch = "wasm32")]
pub use web::*;
pub(crate) use web::Context;
#[cfg(not(target_arch = "wasm32"))]
mod native;
mod direct;
#[cfg(not(target_arch = "wasm32"))]
pub use native::*;
pub(crate) use direct::Context;
#[cfg(not(target_arch = "wasm32"))]
mod native_gpu_future;

View File

@@ -1,941 +0,0 @@
use wgn;
use crate::{
backend::native_gpu_future, BindGroupDescriptor, BindGroupLayoutDescriptor, BindingResource,
BindingType, BufferDescriptor, CommandEncoderDescriptor, ComputePipelineDescriptor,
PipelineLayoutDescriptor, RenderPipelineDescriptor, SamplerDescriptor, TextureDescriptor,
TextureViewDescriptor, TextureViewDimension,
};
use arrayvec::ArrayVec;
use smallvec::SmallVec;
use std::{ffi::CString, future::Future, ops::Range, ptr, slice};
pub type AdapterId = wgc::id::AdapterId;
pub type DeviceId = wgc::id::DeviceId;
pub type QueueId = wgc::id::QueueId;
pub type ShaderModuleId = wgc::id::ShaderModuleId;
pub type BindGroupLayoutId = wgc::id::BindGroupLayoutId;
pub type BindGroupId = wgc::id::BindGroupId;
pub type TextureViewId = wgc::id::TextureViewId;
pub type SamplerId = wgc::id::SamplerId;
pub type BufferId = wgc::id::BufferId;
pub type TextureId = wgc::id::TextureId;
pub type PipelineLayoutId = wgc::id::PipelineLayoutId;
pub type RenderPipelineId = wgc::id::RenderPipelineId;
pub type ComputePipelineId = wgc::id::ComputePipelineId;
pub type CommandEncoderId = wgc::id::CommandEncoderId;
pub type ComputePassId = wgc::id::ComputePassId;
pub type CommandBufferId = wgc::id::CommandBufferId;
pub type SurfaceId = wgc::id::SurfaceId;
pub type SwapChainId = wgc::id::SwapChainId;
pub type RenderPassEncoderId = wgc::id::RenderPassId;
fn map_buffer_copy_view(view: crate::BufferCopyView<'_>) -> wgc::command::BufferCopyView {
wgc::command::BufferCopyView {
buffer: view.buffer.id,
offset: view.offset,
bytes_per_row: view.bytes_per_row,
rows_per_image: view.rows_per_image,
}
}
fn map_texture_copy_view<'a>(view: crate::TextureCopyView<'a>) -> wgc::command::TextureCopyView {
wgc::command::TextureCopyView {
texture: view.texture.id,
mip_level: view.mip_level,
array_layer: view.array_layer,
origin: view.origin,
}
}
pub(crate) async fn request_adapter(
options: &crate::RequestAdapterOptions<'_>,
backends: wgt::BackendBit,
) -> Option<AdapterId> {
unsafe extern "C" fn adapter_callback(
id: Option<wgc::id::AdapterId>,
user_data: *mut std::ffi::c_void,
) {
*(user_data as *mut Option<wgc::id::AdapterId>) = id;
}
let mut id_maybe = None;
unsafe {
wgn::wgpu_request_adapter_async(
Some(&wgc::instance::RequestAdapterOptions {
power_preference: options.power_preference,
compatible_surface: options.compatible_surface.map(|surface| surface.id),
}),
backends,
adapter_callback,
&mut id_maybe as *mut _ as *mut std::ffi::c_void,
)
};
id_maybe
}
pub(crate) async fn request_device_and_queue(
adapter: &AdapterId,
desc: Option<&wgt::DeviceDescriptor>,
) -> (DeviceId, QueueId) {
let device_id = wgn::wgpu_adapter_request_device(*adapter, desc);
(device_id, wgn::wgpu_device_get_default_queue(device_id))
}
pub(crate) fn create_shader_module(device: &DeviceId, spv: &[u32]) -> ShaderModuleId {
let desc = wgc::pipeline::ShaderModuleDescriptor {
code: wgc::U32Array {
bytes: spv.as_ptr(),
length: spv.len(),
},
};
wgn::wgpu_device_create_shader_module(*device, &desc)
}
pub(crate) fn create_bind_group_layout(
device: &DeviceId,
desc: &BindGroupLayoutDescriptor,
) -> BindGroupLayoutId {
use wgc::binding_model as bm;
let temp_layouts = desc
.bindings
.iter()
.map(|bind| bm::BindGroupLayoutEntry {
binding: bind.binding,
visibility: bind.visibility,
ty: match bind.ty {
BindingType::UniformBuffer { .. } => bm::BindingType::UniformBuffer,
BindingType::StorageBuffer {
readonly: false, ..
} => bm::BindingType::StorageBuffer,
BindingType::StorageBuffer { readonly: true, .. } => {
bm::BindingType::ReadonlyStorageBuffer
}
BindingType::Sampler { comparison: false } => bm::BindingType::Sampler,
BindingType::Sampler { .. } => bm::BindingType::ComparisonSampler,
BindingType::SampledTexture { .. } => bm::BindingType::SampledTexture,
BindingType::StorageTexture { readonly: true, .. } => {
bm::BindingType::ReadonlyStorageTexture
}
BindingType::StorageTexture { .. } => bm::BindingType::WriteonlyStorageTexture,
},
has_dynamic_offset: match bind.ty {
BindingType::UniformBuffer { dynamic }
| BindingType::StorageBuffer { dynamic, .. } => dynamic,
_ => false,
},
multisampled: match bind.ty {
BindingType::SampledTexture { multisampled, .. } => multisampled,
_ => false,
},
view_dimension: match bind.ty {
BindingType::SampledTexture { dimension, .. }
| BindingType::StorageTexture { dimension, .. } => dimension,
_ => TextureViewDimension::D2,
},
texture_component_type: match bind.ty {
BindingType::SampledTexture { component_type, .. }
| BindingType::StorageTexture { component_type, .. } => component_type,
_ => wgt::TextureComponentType::Float,
},
storage_texture_format: match bind.ty {
BindingType::StorageTexture { format, .. } => format,
_ => wgt::TextureFormat::Rgb10a2Unorm, // doesn't matter
},
})
.collect::<Vec<_>>();
let owned_label = OwnedLabel::new(desc.label.as_deref());
wgn::wgpu_device_create_bind_group_layout(
*device,
&bm::BindGroupLayoutDescriptor {
entries: temp_layouts.as_ptr(),
entries_length: temp_layouts.len(),
label: owned_label.as_ptr(),
},
)
}
pub(crate) fn create_bind_group(device: &DeviceId, desc: &BindGroupDescriptor) -> BindGroupId {
use wgc::binding_model as bm;
let bindings = desc
.bindings
.iter()
.map(|binding| bm::BindGroupEntry {
binding: binding.binding,
resource: match binding.resource {
BindingResource::Buffer {
ref buffer,
ref range,
} => bm::BindingResource::Buffer(bm::BufferBinding {
buffer: buffer.id,
offset: range.start,
size: range.end - range.start,
}),
BindingResource::Sampler(ref sampler) => bm::BindingResource::Sampler(sampler.id),
BindingResource::TextureView(ref texture_view) => {
bm::BindingResource::TextureView(texture_view.id)
}
},
})
.collect::<Vec<_>>();
let owned_label = OwnedLabel::new(desc.label.as_deref());
wgn::wgpu_device_create_bind_group(
*device,
&bm::BindGroupDescriptor {
layout: desc.layout.id,
entries: bindings.as_ptr(),
entries_length: bindings.len(),
label: owned_label.as_ptr(),
},
)
}
pub(crate) fn create_pipeline_layout(
device: &DeviceId,
desc: &PipelineLayoutDescriptor,
) -> PipelineLayoutId {
//TODO: avoid allocation here
let temp_layouts = desc
.bind_group_layouts
.iter()
.map(|bgl| bgl.id)
.collect::<Vec<_>>();
wgn::wgpu_device_create_pipeline_layout(
*device,
&wgc::binding_model::PipelineLayoutDescriptor {
bind_group_layouts: temp_layouts.as_ptr(),
bind_group_layouts_length: temp_layouts.len(),
},
)
}
pub(crate) fn create_render_pipeline(
device: &DeviceId,
desc: &RenderPipelineDescriptor,
) -> RenderPipelineId {
use wgc::pipeline as pipe;
let vertex_entry_point = CString::new(desc.vertex_stage.entry_point).unwrap();
let vertex_stage = pipe::ProgrammableStageDescriptor {
module: desc.vertex_stage.module.id,
entry_point: vertex_entry_point.as_ptr(),
};
let (_fragment_entry_point, fragment_stage) = if let Some(fragment_stage) = &desc.fragment_stage
{
let fragment_entry_point = CString::new(fragment_stage.entry_point).unwrap();
let fragment_stage = pipe::ProgrammableStageDescriptor {
module: fragment_stage.module.id,
entry_point: fragment_entry_point.as_ptr(),
};
(fragment_entry_point, Some(fragment_stage))
} else {
(CString::default(), None)
};
let temp_color_states = desc.color_states.to_vec();
let temp_vertex_buffers = desc
.vertex_state
.vertex_buffers
.iter()
.map(|vbuf| pipe::VertexBufferLayoutDescriptor {
array_stride: vbuf.stride,
step_mode: vbuf.step_mode,
attributes: vbuf.attributes.as_ptr(),
attributes_length: vbuf.attributes.len(),
})
.collect::<Vec<_>>();
wgn::wgpu_device_create_render_pipeline(
*device,
&pipe::RenderPipelineDescriptor {
layout: desc.layout.id,
vertex_stage,
fragment_stage: fragment_stage
.as_ref()
.map_or(ptr::null(), |fs| fs as *const _),
rasterization_state: desc
.rasterization_state
.as_ref()
.map_or(ptr::null(), |p| p as *const _),
primitive_topology: desc.primitive_topology,
color_states: temp_color_states.as_ptr(),
color_states_length: temp_color_states.len(),
depth_stencil_state: desc
.depth_stencil_state
.as_ref()
.map_or(ptr::null(), |p| p as *const _),
vertex_state: pipe::VertexStateDescriptor {
index_format: desc.vertex_state.index_format,
vertex_buffers: temp_vertex_buffers.as_ptr(),
vertex_buffers_length: temp_vertex_buffers.len(),
},
sample_count: desc.sample_count,
sample_mask: desc.sample_mask,
alpha_to_coverage_enabled: desc.alpha_to_coverage_enabled,
},
)
}
pub(crate) fn create_compute_pipeline(
device: &DeviceId,
desc: &ComputePipelineDescriptor,
) -> ComputePipelineId {
use wgc::pipeline as pipe;
let entry_point = CString::new(desc.compute_stage.entry_point).unwrap();
wgn::wgpu_device_create_compute_pipeline(
*device,
&pipe::ComputePipelineDescriptor {
layout: desc.layout.id,
compute_stage: pipe::ProgrammableStageDescriptor {
module: desc.compute_stage.module.id,
entry_point: entry_point.as_ptr(),
},
},
)
}
pub(crate) type CreateBufferMappedDetail = BufferDetail;
pub(crate) fn device_create_buffer_mapped<'a>(
device: &DeviceId,
desc: &BufferDescriptor,
) -> crate::CreateBufferMapped<'a> {
let owned_label = OwnedLabel::new(desc.label.as_deref());
let mut data_ptr: *mut u8 = std::ptr::null_mut();
unsafe {
let id = wgn::wgpu_device_create_buffer_mapped(
*device,
&wgt::BufferDescriptor {
label: owned_label.as_ptr(),
size: desc.size,
usage: desc.usage,
},
&mut data_ptr as *mut *mut u8,
);
let mapped_data = std::slice::from_raw_parts_mut(data_ptr as *mut u8, desc.size as usize);
crate::CreateBufferMapped {
id,
mapped_data,
detail: CreateBufferMappedDetail { device_id: *device },
}
}
}
#[derive(Debug, Hash, PartialEq)]
pub(crate) struct BufferDetail {
/// On native we need to track the device in order to later destroy the
/// buffer.
device_id: DeviceId,
}
pub(crate) fn device_create_buffer_mapped_finish(
create_buffer_mapped: crate::CreateBufferMapped<'_>,
) -> crate::Buffer {
buffer_unmap(&create_buffer_mapped.id);
crate::Buffer {
id: create_buffer_mapped.id,
detail: BufferDetail {
device_id: create_buffer_mapped.detail.device_id,
},
}
}
pub(crate) fn buffer_unmap(buffer: &BufferId) {
wgn::wgpu_buffer_unmap(*buffer);
}
pub(crate) fn buffer_drop(buffer: &BufferId) {
wgn::wgpu_buffer_destroy(*buffer);
}
pub(crate) fn device_create_buffer(device: &DeviceId, desc: &BufferDescriptor) -> crate::Buffer {
let owned_label = OwnedLabel::new(desc.label.as_deref());
crate::Buffer {
id: wgn::wgpu_device_create_buffer(
*device,
&wgt::BufferDescriptor {
label: owned_label.as_ptr(),
size: desc.size,
usage: desc.usage,
},
),
detail: BufferDetail { device_id: *device },
}
}
pub(crate) fn device_create_texture(device: &DeviceId, desc: &TextureDescriptor) -> TextureId {
let owned_label = OwnedLabel::new(desc.label.as_deref());
wgn::wgpu_device_create_texture(
*device,
&wgt::TextureDescriptor {
label: owned_label.as_ptr(),
size: desc.size,
mip_level_count: desc.mip_level_count,
sample_count: desc.sample_count,
dimension: desc.dimension,
format: desc.format,
usage: desc.usage,
},
)
}
pub(crate) fn device_create_sampler(device: &DeviceId, desc: &SamplerDescriptor) -> SamplerId {
wgn::wgpu_device_create_sampler(*device, desc)
}
pub(crate) fn create_command_encoder(
device: &DeviceId,
desc: &CommandEncoderDescriptor,
) -> CommandEncoderId {
let owned_label = OwnedLabel::new(desc.label.as_deref());
wgn::wgpu_device_create_command_encoder(
*device,
Some(&wgt::CommandEncoderDescriptor {
label: owned_label.as_ptr(),
}),
)
}
pub(crate) fn command_encoder_copy_buffer_to_buffer(
command_encoder: &CommandEncoderId,
source: &crate::Buffer,
source_offset: wgt::BufferAddress,
destination: &crate::Buffer,
destination_offset: wgt::BufferAddress,
copy_size: wgt::BufferAddress,
) {
wgn::wgpu_command_encoder_copy_buffer_to_buffer(
*command_encoder,
source.id,
source_offset,
destination.id,
destination_offset,
copy_size,
);
}
pub(crate) fn command_encoder_copy_buffer_to_texture(
command_encoder: &CommandEncoderId,
source: crate::BufferCopyView,
destination: crate::TextureCopyView,
copy_size: wgt::Extent3d,
) {
wgn::wgpu_command_encoder_copy_buffer_to_texture(
*command_encoder,
&map_buffer_copy_view(source),
&map_texture_copy_view(destination),
copy_size,
);
}
pub(crate) fn command_encoder_copy_texture_to_buffer(
command_encoder: &CommandEncoderId,
source: crate::TextureCopyView,
destination: crate::BufferCopyView,
copy_size: wgt::Extent3d,
) {
wgn::wgpu_command_encoder_copy_texture_to_buffer(
*command_encoder,
&map_texture_copy_view(source),
&map_buffer_copy_view(destination),
copy_size,
);
}
pub(crate) fn command_encoder_copy_texture_to_texture(
command_encoder: &CommandEncoderId,
source: crate::TextureCopyView,
destination: crate::TextureCopyView,
copy_size: wgt::Extent3d,
) {
wgn::wgpu_command_encoder_copy_texture_to_texture(
*command_encoder,
&map_texture_copy_view(source),
&map_texture_copy_view(destination),
copy_size,
);
}
pub(crate) fn begin_compute_pass(command_encoder: &CommandEncoderId) -> ComputePassId {
unsafe { wgn::wgpu_command_encoder_begin_compute_pass(*command_encoder, None) }
}
pub(crate) fn compute_pass_set_pipeline(
compute_pass: &ComputePassId,
pipeline: &ComputePipelineId,
) {
unsafe {
wgn::wgpu_compute_pass_set_pipeline(compute_pass.as_mut().unwrap(), *pipeline);
}
}
pub(crate) fn compute_pass_set_bind_group<'a>(
compute_pass: &ComputePassId,
index: u32,
bind_group: &BindGroupId,
offsets: &[wgt::DynamicOffset],
) {
unsafe {
wgn::wgpu_compute_pass_set_bind_group(
compute_pass.as_mut().unwrap(),
index,
*bind_group,
offsets.as_ptr(),
offsets.len(),
);
}
}
pub(crate) fn compute_pass_dispatch(compute_pass: &ComputePassId, x: u32, y: u32, z: u32) {
unsafe {
wgn::wgpu_compute_pass_dispatch(compute_pass.as_mut().unwrap(), x, y, z);
}
}
pub(crate) fn compute_pass_dispatch_indirect(
compute_pass: &ComputePassId,
indirect_buffer: &BufferId,
indirect_offset: wgt::BufferAddress,
) {
unsafe {
wgn::wgpu_compute_pass_dispatch_indirect(
compute_pass.as_mut().unwrap(),
*indirect_buffer,
indirect_offset,
);
}
}
pub(crate) fn compute_pass_end_pass(compute_pass: &ComputePassId) {
unsafe {
wgn::wgpu_compute_pass_end_pass(*compute_pass);
}
}
pub(crate) fn command_encoder_finish(command_encoder: &CommandEncoderId) -> CommandBufferId {
wgn::wgpu_command_encoder_finish(*command_encoder, None)
}
pub(crate) fn queue_submit(queue: &QueueId, command_buffers: &[crate::CommandBuffer]) {
let temp_command_buffers = command_buffers
.iter()
.map(|cb| cb.id)
.collect::<SmallVec<[_; 4]>>();
unsafe { wgn::wgpu_queue_submit(*queue, temp_command_buffers.as_ptr(), command_buffers.len()) };
}
pub(crate) fn buffer_map_read(
buffer: &crate::Buffer,
start: wgt::BufferAddress,
size: wgt::BufferAddress,
) -> impl Future<Output = Result<crate::BufferReadMapping, crate::BufferAsyncErr>> {
let (future, completion) = native_gpu_future::new_gpu_future(buffer.id, size);
extern "C" fn buffer_map_read_future_wrapper(
status: wgc::resource::BufferMapAsyncStatus,
data: *const u8,
user_data: *mut u8,
) {
let completion =
unsafe { native_gpu_future::GpuFutureCompletion::from_raw(user_data as _) };
let (buffer_id, size) = completion.get_buffer_info();
if let wgc::resource::BufferMapAsyncStatus::Success = status {
completion.complete(Ok(crate::BufferReadMapping {
detail: BufferReadMappingDetail {
data,
size: size as usize,
buffer_id,
},
}));
} else {
completion.complete(Err(crate::BufferAsyncErr));
}
}
wgn::wgpu_buffer_map_read_async(
buffer.id,
start,
size,
buffer_map_read_future_wrapper,
completion.to_raw() as _,
);
future
}
pub(crate) fn buffer_map_write(
buffer: &crate::Buffer,
start: wgt::BufferAddress,
size: wgt::BufferAddress,
) -> impl Future<Output = Result<crate::BufferWriteMapping, crate::BufferAsyncErr>> {
let (future, completion) = native_gpu_future::new_gpu_future(buffer.id, size);
extern "C" fn buffer_map_write_future_wrapper(
status: wgc::resource::BufferMapAsyncStatus,
data: *mut u8,
user_data: *mut u8,
) {
let completion =
unsafe { native_gpu_future::GpuFutureCompletion::from_raw(user_data as _) };
let (buffer_id, size) = completion.get_buffer_info();
if let wgc::resource::BufferMapAsyncStatus::Success = status {
completion.complete(Ok(crate::BufferWriteMapping {
detail: BufferWriteMappingDetail {
data,
size: size as usize,
buffer_id,
},
}));
} else {
completion.complete(Err(crate::BufferAsyncErr));
}
}
wgn::wgpu_buffer_map_write_async(
buffer.id,
start,
size,
buffer_map_write_future_wrapper,
completion.to_raw() as _,
);
future
}
pub(crate) struct BufferReadMappingDetail {
pub(crate) buffer_id: BufferId,
data: *const u8,
size: usize,
}
impl BufferReadMappingDetail {
pub(crate) fn as_slice(&self) -> &[u8] {
unsafe { slice::from_raw_parts(self.data as *const u8, self.size) }
}
}
pub(crate) struct BufferWriteMappingDetail {
pub(crate) buffer_id: BufferId,
data: *mut u8,
size: usize,
}
impl BufferWriteMappingDetail {
pub(crate) fn as_slice(&mut self) -> &mut [u8] {
unsafe { slice::from_raw_parts_mut(self.data as *mut u8, self.size) }
}
}
pub(crate) fn device_create_surface<W: raw_window_handle::HasRawWindowHandle>(
window: &W,
) -> SurfaceId {
wgn::wgpu_create_surface(window.raw_window_handle())
}
pub(crate) fn device_create_swap_chain(
device: &DeviceId,
surface: &SurfaceId,
desc: &wgt::SwapChainDescriptor,
) -> SwapChainId {
wgn::wgpu_device_create_swap_chain(*device, *surface, desc)
}
pub(crate) fn device_drop(device: &DeviceId) {
#[cfg(not(target_arch = "wasm32"))]
wgn::wgpu_device_poll(*device, true);
//TODO: make this work in general
#[cfg(not(target_arch = "wasm32"))]
#[cfg(feature = "metal-auto-capture")]
wgn::wgpu_device_destroy(*device);
}
pub(crate) fn swap_chain_get_next_texture(
swap_chain: &SwapChainId,
) -> Result<crate::SwapChainOutput, crate::TimeOut> {
match wgn::wgpu_swap_chain_get_next_texture(*swap_chain).view_id {
Some(id) => Ok(crate::SwapChainOutput {
view: crate::TextureView { id, owned: false },
detail: SwapChainOutputDetail {
swap_chain_id: *swap_chain,
},
}),
None => Err(crate::TimeOut),
}
}
#[derive(Debug)]
pub(crate) struct SwapChainOutputDetail {
swap_chain_id: SwapChainId,
}
pub(crate) fn command_encoder_begin_render_pass<'a>(
command_encoder: &CommandEncoderId,
desc: &crate::RenderPassDescriptor<'a, '_>,
) -> RenderPassEncoderId {
let colors = desc
.color_attachments
.iter()
.map(|ca| wgc::command::RenderPassColorAttachmentDescriptor {
attachment: ca.attachment.id,
resolve_target: ca.resolve_target.map(|rt| rt.id),
load_op: ca.load_op,
store_op: ca.store_op,
clear_color: ca.clear_color,
})
.collect::<ArrayVec<[_; 4]>>();
let depth_stencil = desc.depth_stencil_attachment.as_ref().map(|dsa| {
wgc::command::RenderPassDepthStencilAttachmentDescriptor {
attachment: dsa.attachment.id,
depth_load_op: dsa.depth_load_op,
depth_store_op: dsa.depth_store_op,
clear_depth: dsa.clear_depth,
stencil_load_op: dsa.stencil_load_op,
stencil_store_op: dsa.stencil_store_op,
clear_stencil: dsa.clear_stencil,
}
});
unsafe {
wgn::wgpu_command_encoder_begin_render_pass(
*command_encoder,
&wgc::command::RenderPassDescriptor {
color_attachments: colors.as_ptr(),
color_attachments_length: colors.len(),
depth_stencil_attachment: depth_stencil.as_ref(),
},
)
}
}
pub(crate) fn render_pass_set_pipeline(
render_pass: &RenderPassEncoderId,
pipeline: &RenderPipelineId,
) {
unsafe {
wgn::wgpu_render_pass_set_pipeline(render_pass.as_mut().unwrap(), *pipeline);
}
}
pub(crate) fn render_pass_set_blend_color(render_pass: &RenderPassEncoderId, color: wgt::Color) {
unsafe {
wgn::wgpu_render_pass_set_blend_color(render_pass.as_mut().unwrap(), &color);
}
}
pub(crate) fn render_pass_set_bind_group(
render_pass: &RenderPassEncoderId,
index: u32,
bind_group: &BindGroupId,
offsets: &[wgt::DynamicOffset],
) {
unsafe {
wgn::wgpu_render_pass_set_bind_group(
render_pass.as_mut().unwrap(),
index,
*bind_group,
offsets.as_ptr(),
offsets.len(),
);
}
}
pub(crate) fn render_pass_set_index_buffer<'a>(
render_pass: &RenderPassEncoderId,
buffer: &'a crate::Buffer,
offset: wgt::BufferAddress,
size: wgt::BufferAddress,
) {
unsafe {
wgn::wgpu_render_pass_set_index_buffer(
render_pass.as_mut().unwrap(),
buffer.id,
offset,
size,
);
}
}
pub(crate) fn render_pass_set_vertex_buffer<'a>(
render_pass: &RenderPassEncoderId,
slot: u32,
buffer: &'a crate::Buffer,
offset: wgt::BufferAddress,
size: wgt::BufferAddress,
) {
unsafe {
wgn::wgpu_render_pass_set_vertex_buffer(
render_pass.as_mut().unwrap(),
slot,
buffer.id,
offset,
size,
)
};
}
pub(crate) fn render_pass_set_scissor_rect(
render_pass: &RenderPassEncoderId,
x: u32,
y: u32,
width: u32,
height: u32,
) {
unsafe {
wgn::wgpu_render_pass_set_scissor_rect(render_pass.as_mut().unwrap(), x, y, width, height);
}
}
pub(crate) fn render_pass_set_viewport(
render_pass: &RenderPassEncoderId,
x: f32,
y: f32,
width: f32,
height: f32,
min_depth: f32,
max_depth: f32,
) {
unsafe {
wgn::wgpu_render_pass_set_viewport(
render_pass.as_mut().unwrap(),
x,
y,
width,
height,
min_depth,
max_depth,
);
}
}
pub(crate) fn render_pass_set_stencil_reference(render_pass: &RenderPassEncoderId, reference: u32) {
unsafe {
wgn::wgpu_render_pass_set_stencil_reference(render_pass.as_mut().unwrap(), reference);
}
}
pub(crate) fn render_pass_draw(
render_pass: &RenderPassEncoderId,
vertices: Range<u32>,
instances: Range<u32>,
) {
unsafe {
wgn::wgpu_render_pass_draw(
render_pass.as_mut().unwrap(),
vertices.end - vertices.start,
instances.end - instances.start,
vertices.start,
instances.start,
);
}
}
pub(crate) fn render_pass_draw_indexed(
render_pass: &RenderPassEncoderId,
indices: Range<u32>,
base_vertex: i32,
instances: Range<u32>,
) {
unsafe {
wgn::wgpu_render_pass_draw_indexed(
render_pass.as_mut().unwrap(),
indices.end - indices.start,
instances.end - instances.start,
indices.start,
base_vertex,
instances.start,
);
}
}
pub(crate) fn render_pass_draw_indirect<'a>(
render_pass: &RenderPassEncoderId,
indirect_buffer: &'a crate::Buffer,
indirect_offset: wgt::BufferAddress,
) {
unsafe {
wgn::wgpu_render_pass_draw_indirect(
render_pass.as_mut().unwrap(),
indirect_buffer.id,
indirect_offset,
);
}
}
pub(crate) fn render_pass_draw_indexed_indirect<'a>(
render_pass: &RenderPassEncoderId,
indirect_buffer: &'a crate::Buffer,
indirect_offset: wgt::BufferAddress,
) {
unsafe {
wgn::wgpu_render_pass_draw_indexed_indirect(
render_pass.as_mut().unwrap(),
indirect_buffer.id,
indirect_offset,
);
}
}
pub(crate) fn render_pass_end_pass(render_pass: &RenderPassEncoderId) {
unsafe {
wgn::wgpu_render_pass_end_pass(*render_pass);
}
}
pub(crate) fn texture_create_view(
texture: &TextureId,
desc: Option<&TextureViewDescriptor>,
) -> TextureViewId {
wgn::wgpu_texture_create_view(*texture, desc)
}
pub(crate) fn texture_drop(texture: &TextureId) {
wgn::wgpu_texture_destroy(*texture);
}
pub(crate) fn texture_view_drop(texture_view: &TextureViewId) {
wgn::wgpu_texture_view_destroy(*texture_view);
}
pub(crate) fn bind_group_drop(bind_group: &BindGroupId) {
wgn::wgpu_bind_group_destroy(*bind_group);
}
pub(crate) fn swap_chain_present(swap_chain_output: &crate::SwapChainOutput) {
wgn::wgpu_swap_chain_present(swap_chain_output.detail.swap_chain_id);
}
pub(crate) fn device_poll(device: &DeviceId, maintain: crate::Maintain) {
wgn::wgpu_device_poll(
*device,
match maintain {
crate::Maintain::Poll => false,
crate::Maintain::Wait => true,
},
);
}
struct OwnedLabel(Option<CString>);
impl OwnedLabel {
fn new(text: Option<&str>) -> Self {
Self(text.map(|t| CString::new(t).expect("invalid label")))
}
fn as_ptr(&self) -> *const std::os::raw::c_char {
match self.0 {
Some(ref c_string) => c_string.as_ptr(),
None => ptr::null(),
}
}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff