hal/mtl: most of the command encoding

This commit is contained in:
Dzmitry Malyshau
2021-06-10 02:31:31 -04:00
parent bdaf57dbed
commit 3109b1b63d
10 changed files with 731 additions and 181 deletions

View File

@@ -841,7 +841,7 @@ impl<A: HalApi> Device<A> {
};
let raw = match unsafe { self.raw.create_shader_module(&hal_desc, hal_shader) } {
Ok(raw) => raw,
Err((error, _shader)) => {
Err(error) => {
return Err(match error {
hal::ShaderError::Device(error) => {
pipeline::CreateShaderModuleError::Device(error.into())

View File

@@ -631,8 +631,6 @@ impl NumericType {
| Tf::Bc7RgbaUnormSrgb
| Tf::Etc2RgbA1Unorm
| Tf::Etc2RgbA1UnormSrgb
| Tf::Etc2RgbA8Unorm
| Tf::Etc2RgbA8UnormSrgb
| Tf::Astc4x4RgbaUnorm
| Tf::Astc4x4RgbaUnormSrgb
| Tf::Astc5x4RgbaUnorm

View File

@@ -236,13 +236,8 @@ impl crate::CommandBuffer<Api> for Encoder {
unsafe fn fill_buffer(&mut self, buffer: &Resource, range: crate::MemoryRange, value: u8) {}
unsafe fn copy_buffer_to_buffer<T>(&mut self, src: &Resource, dst: &Resource, regions: T)
where
T: Iterator<Item = crate::BufferCopy>,
{
}
unsafe fn copy_buffer_to_buffer<T>(&mut self, src: &Resource, dst: &Resource, regions: T) {}
/// Note: `dst` current usage has to be `TextureUse::COPY_DST`.
unsafe fn copy_texture_to_texture<T>(
&mut self,
src: &Resource,
@@ -252,7 +247,6 @@ impl crate::CommandBuffer<Api> for Encoder {
) {
}
/// Note: `dst` current usage has to be `TextureUse::COPY_DST`.
unsafe fn copy_buffer_to_texture<T>(&mut self, src: &Resource, dst: &Resource, regions: T) {}
unsafe fn copy_texture_to_buffer<T>(

View File

@@ -63,6 +63,8 @@ pub const MAX_BIND_GROUPS: usize = 8;
pub const MAX_VERTEX_BUFFERS: usize = 16;
pub const MAX_COLOR_TARGETS: usize = 4;
pub const MAX_MIP_LEVELS: u32 = 16;
/// Size of a single occlusion/timestamp query, when copied into a buffer, in bytes.
pub const QUERY_SIZE: wgt::BufferAddress = 8;
pub type Label<'a> = Option<&'a str>;
pub type MemoryRange = Range<wgt::BufferAddress>;

View File

@@ -17,15 +17,12 @@ impl crate::Adapter<super::Api> for super::Adapter {
&self,
features: wgt::Features,
) -> Result<crate::OpenDevice<super::Api>, crate::DeviceError> {
let raw_device = self.shared.device.lock();
Ok(crate::OpenDevice {
device: super::Device {
shared: Arc::clone(&self.shared),
features,
},
queue: super::Queue {
},
queue: super::Queue {},
})
}
@@ -199,7 +196,9 @@ impl crate::Adapter<super::Api> for super::Adapter {
| Tf::Bc4RUnorm
| Tf::Bc4RSnorm
| Tf::Bc5RgUnorm
| Tf::Bc5RgSnorm
| Tf::Bc6hRgbSfloat
| Tf::Bc6hRgbUfloat
| Tf::Bc7RgbaUnorm
| Tf::Bc7RgbaUnormSrgb => {
if pc.format_bc {
@@ -946,7 +945,9 @@ impl super::PrivateCapabilities {
Tf::Bc4RUnorm => BC4_RUnorm,
Tf::Bc4RSnorm => BC4_RSnorm,
Tf::Bc5RgUnorm => BC5_RGUnorm,
Tf::Bc5RgSnorm => BC5_RGSnorm,
Tf::Bc6hRgbSfloat => BC6H_RGBFloat,
Tf::Bc6hRgbUfloat => BC6H_RGBUfloat,
Tf::Bc7RgbaUnorm => BC7_RGBAUnorm,
Tf::Bc7RgbaUnormSrgb => BC7_RGBAUnorm_sRGB,
Tf::Etc2RgbUnorm => ETC2_RGB8,

View File

@@ -1,98 +1,475 @@
use super::{conv, AsNative};
use std::{mem, ops::Range};
impl super::CommandBuffer {
    /// Returns the active blit command encoder, opening one if no encoder is
    /// currently active on this command buffer.
    fn enter_blit(&mut self) -> &mtl::BlitCommandEncoderRef {
        if self.blit.is_none() {
            // Only one encoder may be open on a command buffer at a time.
            debug_assert!(self.render.is_none() && self.compute.is_none());
            self.blit = Some(self.raw.new_blit_command_encoder().to_owned());
        }
        self.blit.as_ref().unwrap()
    }

    /// Ends the blit encoder, if one is open. Called before starting a
    /// render/compute pass and when finishing the command buffer.
    fn leave_blit(&mut self) {
        if let Some(encoder) = self.blit.take() {
            encoder.end_encoding();
        }
    }

    /// Returns whichever encoder is currently open (render first, then
    /// compute), falling back to opening a blit encoder. Used by the debug
    /// marker methods, which can target any encoder kind.
    fn enter_any(&mut self) -> &mtl::CommandEncoderRef {
        if let Some(ref encoder) = self.render {
            encoder
        } else if let Some(ref encoder) = self.compute {
            encoder
        } else {
            // Same logic as `enter_blit`; repeated inline here so the result
            // can be returned as a plain `CommandEncoderRef`.
            if self.blit.is_none() {
                debug_assert!(self.render.is_none() && self.compute.is_none());
                self.blit = Some(self.raw.new_blit_command_encoder().to_owned());
            }
            self.blit.as_ref().unwrap()
        }
    }
}
impl crate::CommandBuffer<super::Api> for super::CommandBuffer {
unsafe fn finish(&mut self) {}
unsafe fn finish(&mut self) {
self.leave_blit();
}
unsafe fn transition_buffers<'a, T>(&mut self, barriers: T)
unsafe fn transition_buffers<'a, T>(&mut self, _barriers: T)
where
T: Iterator<Item = crate::BufferBarrier<'a, super::Api>>,
{
}
unsafe fn transition_textures<'a, T>(&mut self, barriers: T)
unsafe fn transition_textures<'a, T>(&mut self, _barriers: T)
where
T: Iterator<Item = crate::TextureBarrier<'a, super::Api>>,
{
}
/// Fills `range` of `buffer` with the byte `value` on a blit encoder.
unsafe fn fill_buffer(&mut self, buffer: &super::Buffer, range: crate::MemoryRange, value: u8) {
    let encoder = self.enter_blit();
    encoder.fill_buffer(&buffer.raw, conv::map_range(&range), value);
}
unsafe fn copy_buffer_to_buffer<T>(&mut self, src: &Resource, dst: &Resource, regions: T)
where
unsafe fn copy_buffer_to_buffer<T>(
&mut self,
src: &super::Buffer,
dst: &super::Buffer,
regions: T,
) where
T: Iterator<Item = crate::BufferCopy>,
{
let encoder = self.enter_blit();
for copy in regions {
encoder.copy_from_buffer(
&src.raw,
copy.src_offset,
&dst.raw,
copy.dst_offset,
copy.size.get(),
);
}
}
/// Note: `dst` current usage has to be `TextureUse::COPY_DST`.
unsafe fn copy_texture_to_texture<T>(
&mut self,
src: &Resource,
src_usage: crate::TextureUse,
dst: &Resource,
src: &super::Texture,
_src_usage: crate::TextureUse,
dst: &super::Texture,
regions: T,
) {
) where
T: Iterator<Item = crate::TextureCopy>,
{
let encoder = self.enter_blit();
for copy in regions {
let (src_slice, src_origin) = conv::map_origin(&copy.src_base.origin, src.raw_type);
let (dst_slice, dst_origin) = conv::map_origin(&copy.dst_base.origin, dst.raw_type);
let (slice_count, extent) = conv::map_extent(&copy.size, src.raw_type);
for slice in 0..slice_count {
encoder.copy_from_texture(
&src.raw,
src_slice + slice,
copy.src_base.mip_level as u64,
src_origin,
extent,
&dst.raw,
dst_slice + slice,
copy.dst_base.mip_level as u64,
dst_origin,
);
}
}
}
/// Note: `dst` current usage has to be `TextureUse::COPY_DST`.
unsafe fn copy_buffer_to_texture<T>(
    &mut self,
    src: &super::Buffer,
    dst: &super::Texture,
    regions: T,
) where
    T: Iterator<Item = crate::BufferTextureCopy>,
{
    let encoder = self.enter_blit();
    for copy in regions {
        let (dst_slice, dst_origin) = conv::map_origin(&copy.texture_base.origin, dst.raw_type);
        let (slice_count, extent) = conv::map_extent(&copy.size, dst.raw_type);
        // Missing layout values mean "tightly packed": treat as 0 per Metal convention.
        let bytes_per_row = copy
            .buffer_layout
            .bytes_per_row
            .map_or(0, |v| v.get() as u64);
        let bytes_per_image = copy
            .buffer_layout
            .rows_per_image
            .map_or(0, |v| v.get() as u64 * bytes_per_row);
        for slice in 0..slice_count {
            let offset = copy.buffer_layout.offset + bytes_per_image * slice;
            encoder.copy_from_buffer_to_texture(
                &src.raw,
                offset,
                bytes_per_row,
                bytes_per_image,
                extent,
                &dst.raw,
                // Fix: was `dst_slice + slice_count`, which wrote every layer
                // to the same wrong slice; mirror `copy_texture_to_buffer`,
                // which correctly uses the per-iteration `slice`.
                dst_slice + slice,
                copy.texture_base.mip_level as u64,
                dst_origin,
                mtl::MTLBlitOption::empty(),
            );
        }
    }
}
unsafe fn copy_texture_to_buffer<T>(
&mut self,
src: &Resource,
src_usage: crate::TextureUse,
dst: &Resource,
src: &super::Texture,
_src_usage: crate::TextureUse,
dst: &super::Buffer,
regions: T,
) {
) where
T: Iterator<Item = crate::BufferTextureCopy>,
{
let encoder = self.enter_blit();
for copy in regions {
let (src_slice, src_origin) = conv::map_origin(&copy.texture_base.origin, src.raw_type);
let (slice_count, extent) = conv::map_extent(&copy.size, src.raw_type);
let bytes_per_row = copy
.buffer_layout
.bytes_per_row
.map_or(0, |v| v.get() as u64);
let bytes_per_image = copy
.buffer_layout
.rows_per_image
.map_or(0, |v| v.get() as u64 * bytes_per_row);
for slice in 0..slice_count {
let offset = copy.buffer_layout.offset + bytes_per_image * slice;
encoder.copy_from_texture_to_buffer(
&src.raw,
src_slice + slice,
copy.texture_base.mip_level as u64,
src_origin,
extent,
&dst.raw,
offset,
bytes_per_row,
bytes_per_image,
mtl::MTLBlitOption::empty(),
);
}
}
}
unsafe fn begin_query(&mut self, set: &Resource, index: u32) {}
unsafe fn end_query(&mut self, set: &Resource, index: u32) {}
unsafe fn write_timestamp(&mut self, set: &Resource, index: u32) {}
unsafe fn reset_queries(&mut self, set: &Resource, range: Range<u32>) {}
unsafe fn begin_query(&mut self, set: &super::QuerySet, index: u32) {
match set.ty {
wgt::QueryType::Occlusion => {
self.render.as_ref().unwrap().set_visibility_result_mode(
mtl::MTLVisibilityResultMode::Boolean,
index as u64 * crate::QUERY_SIZE,
);
}
_ => {}
}
}
/// Ends an occlusion query by disabling visibility-result writes on the
/// current render encoder. Other query types are ignored here.
unsafe fn end_query(&mut self, set: &super::QuerySet, _index: u32) {
    if let wgt::QueryType::Occlusion = set.ty {
        let encoder = self.render.as_ref().unwrap();
        encoder.set_visibility_result_mode(mtl::MTLVisibilityResultMode::Disabled, 0);
    }
}
/// No-op: timestamp query sets are rejected in `create_query_set`.
unsafe fn write_timestamp(&mut self, _set: &super::QuerySet, _index: u32) {}
/// Resets queries by zero-filling their region of the query set's backing
/// buffer; each query occupies `QUERY_SIZE` bytes.
unsafe fn reset_queries(&mut self, set: &super::QuerySet, range: Range<u32>) {
    let encoder = self.enter_blit();
    let raw_range = mtl::NSRange {
        location: range.start as u64 * crate::QUERY_SIZE,
        length: (range.end - range.start) as u64 * crate::QUERY_SIZE,
    };
    encoder.fill_buffer(&set.raw_buffer, raw_range, 0);
}
unsafe fn copy_query_results(
&mut self,
set: &Resource,
set: &super::QuerySet,
range: Range<u32>,
buffer: &Resource,
buffer: &super::Buffer,
offset: wgt::BufferAddress,
) {
let encoder = self.enter_blit();
let size = (range.end - range.start) as u64 * crate::QUERY_SIZE;
encoder.copy_from_buffer(
&set.raw_buffer,
range.start as u64 * crate::QUERY_SIZE,
&buffer.raw,
offset,
size,
);
}
// render
unsafe fn begin_render_pass(&mut self, desc: &crate::RenderPassDescriptor<super::Api>) {}
unsafe fn end_render_pass(&mut self) {}
unsafe fn begin_render_pass(&mut self, desc: &crate::RenderPassDescriptor<super::Api>) {
self.leave_blit();
let descriptor = mtl::RenderPassDescriptor::new();
//TODO: set visibility results buffer
for (i, at) in desc.color_attachments.iter().enumerate() {
let at_descriptor = descriptor.color_attachments().object_at(i as u64).unwrap();
at_descriptor.set_texture(Some(&at.target.view.raw));
if let Some(ref resolve) = at.resolve_target {
//Note: the selection of levels and slices is already handled by `TextureView`
at_descriptor.set_resolve_texture(Some(&resolve.view.raw));
}
let load_action = if at.ops.contains(crate::AttachmentOp::LOAD) {
mtl::MTLLoadAction::Load
} else {
at_descriptor.set_clear_color(conv::map_clear_color(&at.clear_value));
mtl::MTLLoadAction::Clear
};
let store_action = conv::map_store_action(
at.ops.contains(crate::AttachmentOp::STORE),
at.resolve_target.is_some(),
);
at_descriptor.set_load_action(load_action);
at_descriptor.set_store_action(store_action);
}
if let Some(ref at) = desc.depth_stencil_attachment {
if at.target.view.aspects.contains(crate::FormatAspect::DEPTH) {
let at_descriptor = descriptor.depth_attachment().unwrap();
at_descriptor.set_texture(Some(&at.target.view.raw));
let load_action = if at.depth_ops.contains(crate::AttachmentOp::LOAD) {
mtl::MTLLoadAction::Load
} else {
at_descriptor.set_clear_depth(at.clear_value.0 as f64);
mtl::MTLLoadAction::Clear
};
let store_action = if at.depth_ops.contains(crate::AttachmentOp::STORE) {
mtl::MTLStoreAction::Store
} else {
mtl::MTLStoreAction::DontCare
};
at_descriptor.set_load_action(load_action);
at_descriptor.set_store_action(store_action);
}
if at
.target
.view
.aspects
.contains(crate::FormatAspect::STENCIL)
{
let at_descriptor = descriptor.stencil_attachment().unwrap();
at_descriptor.set_texture(Some(&at.target.view.raw));
let load_action = if at.depth_ops.contains(crate::AttachmentOp::LOAD) {
mtl::MTLLoadAction::Load
} else {
at_descriptor.set_clear_stencil(at.clear_value.1);
mtl::MTLLoadAction::Clear
};
let store_action = if at.depth_ops.contains(crate::AttachmentOp::STORE) {
mtl::MTLStoreAction::Store
} else {
mtl::MTLStoreAction::DontCare
};
at_descriptor.set_load_action(load_action);
at_descriptor.set_store_action(store_action);
}
}
let encoder = self.raw.new_render_command_encoder(descriptor);
if let Some(label) = desc.label {
encoder.set_label(label);
}
self.render = Some(encoder.to_owned());
}
/// Ends the current render pass, closing its encoder.
unsafe fn end_render_pass(&mut self) {
    let encoder = self.render.take().unwrap();
    encoder.end_encoding();
}
/// Binds a bind group's buffers/samplers/textures onto whichever encoder is
/// active, using the per-stage base indices recorded in the pipeline layout.
unsafe fn set_bind_group(
    &mut self,
    layout: &super::PipelineLayout,
    index: u32,
    group: &super::BindGroup,
    dynamic_offsets: &[wgt::DynamicOffset],
) {
    let bg_info = &layout.bind_group_infos[index as usize];

    if let Some(ref encoder) = self.render {
        // Resources inside `group` are laid out VS first, then FS, then CS.
        for index in 0..group.counters.vs.buffers {
            let buf = &group.buffers[index as usize];
            let mut offset = buf.offset;
            if let Some(dyn_index) = buf.dynamic_index {
                offset += dynamic_offsets[dyn_index as usize] as wgt::BufferAddress;
            }
            encoder.set_vertex_buffer(
                (bg_info.base_resource_indices.vs.buffers + index) as u64,
                Some(buf.ptr.as_native()),
                offset,
            );
        }
        for index in 0..group.counters.fs.buffers {
            let buf = &group.buffers[(group.counters.vs.buffers + index) as usize];
            let mut offset = buf.offset;
            if let Some(dyn_index) = buf.dynamic_index {
                offset += dynamic_offsets[dyn_index as usize] as wgt::BufferAddress;
            }
            encoder.set_fragment_buffer(
                (bg_info.base_resource_indices.fs.buffers + index) as u64,
                Some(buf.ptr.as_native()),
                offset,
            );
        }
        for index in 0..group.counters.vs.samplers {
            let res = group.samplers[index as usize];
            encoder.set_vertex_sampler_state(
                (bg_info.base_resource_indices.vs.samplers + index) as u64,
                Some(res.as_native()),
            );
        }
        for index in 0..group.counters.fs.samplers {
            let res = group.samplers[(group.counters.vs.samplers + index) as usize];
            encoder.set_fragment_sampler_state(
                (bg_info.base_resource_indices.fs.samplers + index) as u64,
                Some(res.as_native()),
            );
        }
        for index in 0..group.counters.vs.textures {
            let res = group.textures[index as usize];
            encoder.set_vertex_texture(
                (bg_info.base_resource_indices.vs.textures + index) as u64,
                Some(res.as_native()),
            );
        }
        for index in 0..group.counters.fs.textures {
            let res = group.textures[(group.counters.vs.textures + index) as usize];
            encoder.set_fragment_texture(
                (bg_info.base_resource_indices.fs.textures + index) as u64,
                Some(res.as_native()),
            );
        }
    }

    if let Some(ref encoder) = self.compute {
        // CS resources follow the VS and FS ranges within the group's arrays.
        let index_base = super::ResourceData {
            buffers: group.counters.vs.buffers + group.counters.fs.buffers,
            samplers: group.counters.vs.samplers + group.counters.fs.samplers,
            textures: group.counters.vs.textures + group.counters.fs.textures,
        };
        for index in 0..group.counters.cs.buffers {
            let buf = &group.buffers[(index_base.buffers + index) as usize];
            let mut offset = buf.offset;
            if let Some(dyn_index) = buf.dynamic_index {
                offset += dynamic_offsets[dyn_index as usize] as wgt::BufferAddress;
            }
            encoder.set_buffer(
                (bg_info.base_resource_indices.cs.buffers + index) as u64,
                Some(buf.ptr.as_native()),
                offset,
            );
        }
        for index in 0..group.counters.cs.samplers {
            let res = group.samplers[(index_base.samplers + index) as usize];
            encoder.set_sampler_state(
                (bg_info.base_resource_indices.cs.samplers + index) as u64,
                Some(res.as_native()),
            );
        }
        for index in 0..group.counters.cs.textures {
            let res = group.textures[(index_base.textures + index) as usize];
            encoder.set_texture(
                (bg_info.base_resource_indices.cs.textures + index) as u64,
                Some(res.as_native()),
            );
        }
    }
}
unsafe fn set_push_constants(
&mut self,
layout: &Resource,
stages: wgt::ShaderStage,
offset: u32,
data: &[u32],
_layout: &super::PipelineLayout,
_stages: wgt::ShaderStage,
_offset: u32,
_data: &[u32],
) {
//TODO
}
/// Debug markers target whichever encoder is active (see `enter_any`).
unsafe fn insert_debug_marker(&mut self, label: &str) {
    self.enter_any().insert_debug_signpost(label);
}
unsafe fn begin_debug_marker(&mut self, group_label: &str) {
    self.enter_any().push_debug_group(group_label);
}
unsafe fn end_debug_marker(&mut self) {
    self.enter_any().pop_debug_group();
}
unsafe fn set_render_pipeline(&mut self, pipeline: &Resource) {}
unsafe fn set_render_pipeline(&mut self, pipeline: &super::RenderPipeline) {
self.raw_primitive_type = pipeline.raw_primitive_type;
let encoder = self.render.as_ref().unwrap();
encoder.set_render_pipeline_state(&pipeline.raw);
encoder.set_front_facing_winding(pipeline.raw_front_winding);
encoder.set_cull_mode(pipeline.raw_cull_mode);
if let Some(depth_clip) = pipeline.raw_depth_clip_mode {
encoder.set_depth_clip_mode(depth_clip);
}
if let Some((ref state, bias)) = pipeline.depth_stencil {
encoder.set_depth_stencil_state(state);
encoder.set_depth_bias(bias.constant as f32, bias.slope_scale, bias.clamp);
}
}
/// Records the index-buffer binding. Metal has no persistent index-buffer
/// state on the encoder; the stashed `IndexState` is passed to each indexed
/// draw call instead.
unsafe fn set_index_buffer<'a>(
    &mut self,
    binding: crate::BufferBinding<'a, super::Api>,
    format: wgt::IndexFormat,
) {
    let (stride, raw_type) = match format {
        wgt::IndexFormat::Uint16 => (2, mtl::MTLIndexType::UInt16),
        wgt::IndexFormat::Uint32 => (4, mtl::MTLIndexType::UInt32),
    };
    self.index_state = Some(super::IndexState {
        buffer_ptr: AsNative::from(binding.buffer.raw.as_ref()),
        offset: binding.offset,
        stride,
        raw_type,
    });
}
unsafe fn set_vertex_buffer<'a>(&mut self, index: u32, binding: crate::BufferBinding<'a, super::Api>) {
unsafe fn set_vertex_buffer<'a>(
&mut self,
index: u32,
binding: crate::BufferBinding<'a, super::Api>,
) {
}
unsafe fn set_viewport(&mut self, rect: &crate::Rect<f32>, depth_range: Range<f32>) {}
unsafe fn set_scissor_rect(&mut self, rect: &crate::Rect<u32>) {}
@@ -106,7 +483,31 @@ impl crate::CommandBuffer<super::Api> for super::CommandBuffer {
start_instance: u32,
instance_count: u32,
) {
let encoder = self.render.as_ref().unwrap();
if start_instance != 0 {
encoder.draw_primitives_instanced_base_instance(
self.raw_primitive_type,
start_vertex as _,
vertex_count as _,
instance_count as _,
start_instance as _,
);
} else if instance_count != 1 {
encoder.draw_primitives_instanced(
self.raw_primitive_type,
start_vertex as _,
vertex_count as _,
instance_count as _,
);
} else {
encoder.draw_primitives(
self.raw_primitive_type,
start_vertex as _,
vertex_count as _,
);
}
}
unsafe fn draw_indexed(
&mut self,
start_index: u32,
@@ -115,47 +516,124 @@ impl crate::CommandBuffer<super::Api> for super::CommandBuffer {
start_instance: u32,
instance_count: u32,
) {
let encoder = self.render.as_ref().unwrap();
let index = self.index_state.as_ref().unwrap();
let offset = index.offset + index.stride * start_index as wgt::BufferAddress;
if base_vertex != 0 || start_instance != 0 {
encoder.draw_indexed_primitives_instanced_base_instance(
self.raw_primitive_type,
index_count as _,
index.raw_type,
index.buffer_ptr.as_native(),
offset,
instance_count as _,
base_vertex as _,
start_instance as _,
);
} else if instance_count != 1 {
encoder.draw_indexed_primitives_instanced(
self.raw_primitive_type,
index_count as _,
index.raw_type,
index.buffer_ptr.as_native(),
offset,
instance_count as _,
);
} else {
encoder.draw_indexed_primitives(
self.raw_primitive_type,
index_count as _,
index.raw_type,
index.buffer_ptr.as_native(),
offset,
);
}
}
unsafe fn draw_indirect(
&mut self,
buffer: &Resource,
offset: wgt::BufferAddress,
buffer: &super::Buffer,
mut offset: wgt::BufferAddress,
draw_count: u32,
) {
let encoder = self.render.as_ref().unwrap();
for _ in 0..draw_count {
encoder.draw_primitives_indirect(self.raw_primitive_type, &buffer.raw, offset);
offset += mem::size_of::<wgt::DrawIndirectArgs>() as wgt::BufferAddress;
}
}
unsafe fn draw_indexed_indirect(
&mut self,
buffer: &Resource,
offset: wgt::BufferAddress,
buffer: &super::Buffer,
mut offset: wgt::BufferAddress,
draw_count: u32,
) {
let encoder = self.render.as_ref().unwrap();
let index = self.index_state.as_ref().unwrap();
for _ in 0..draw_count {
encoder.draw_indexed_primitives_indirect(
self.raw_primitive_type,
index.raw_type,
index.buffer_ptr.as_native(),
index.offset,
&buffer.raw,
offset,
);
offset += mem::size_of::<wgt::DrawIndexedIndirectArgs>() as wgt::BufferAddress;
}
}
unsafe fn draw_indirect_count(
&mut self,
buffer: &Resource,
offset: wgt::BufferAddress,
count_buffer: &Resource,
count_offset: wgt::BufferAddress,
max_count: u32,
_buffer: &super::Buffer,
_offset: wgt::BufferAddress,
_count_buffer: &super::Buffer,
_count_offset: wgt::BufferAddress,
_max_count: u32,
) {
//TODO
}
unsafe fn draw_indexed_indirect_count(
&mut self,
buffer: &Resource,
offset: wgt::BufferAddress,
count_buffer: &Resource,
count_offset: wgt::BufferAddress,
max_count: u32,
_buffer: &super::Buffer,
_offset: wgt::BufferAddress,
_count_buffer: &super::Buffer,
_count_offset: wgt::BufferAddress,
_max_count: u32,
) {
//TODO
}
// compute

/// Ends any open blit work and opens a compute command encoder.
unsafe fn begin_compute_pass(&mut self) {
    self.leave_blit();
    let encoder = self.raw.new_compute_command_encoder();
    self.compute = Some(encoder.to_owned());
}
/// Ends the current compute pass, closing its encoder.
unsafe fn end_compute_pass(&mut self) {
    self.compute.take().unwrap().end_encoding();
}
unsafe fn set_compute_pipeline(&mut self, pipeline: &Resource) {}
unsafe fn set_compute_pipeline(&mut self, pipeline: &super::ComputePipeline) {
self.raw_wg_size = pipeline.work_group_size;
let encoder = self.compute.as_ref().unwrap();
encoder.set_compute_pipeline_state(&pipeline.raw);
}
unsafe fn dispatch(&mut self, count: [u32; 3]) {}
unsafe fn dispatch_indirect(&mut self, buffer: &Resource, offset: wgt::BufferAddress) {}
unsafe fn dispatch(&mut self, count: [u32; 3]) {
let encoder = self.compute.as_ref().unwrap();
let raw_count = mtl::MTLSize {
width: count[0] as u64,
height: count[1] as u64,
depth: count[2] as u64,
};
encoder.dispatch_thread_groups(raw_count, self.raw_wg_size);
}
unsafe fn dispatch_indirect(&mut self, buffer: &super::Buffer, offset: wgt::BufferAddress) {
let encoder = self.compute.as_ref().unwrap();
encoder.dispatch_thread_groups_indirect(&buffer.raw, offset, self.raw_wg_size);
}
}

View File

@@ -212,6 +212,7 @@ pub fn map_vertex_format(format: wgt::VertexFormat) -> mtl::MTLVertexFormat {
Vf::Uint32x4 => UInt4,
Vf::Sint32x4 => Int4,
Vf::Float32x4 => Float4,
Vf::Float64 | Vf::Float64x2 | Vf::Float64x3 | Vf::Float64x4 => unimplemented!(),
}
}
@@ -252,3 +253,59 @@ pub fn map_cull_mode(face: Option<wgt::Face>) -> mtl::MTLCullMode {
Some(wgt::Face::Back) => mtl::MTLCullMode::Back,
}
}
/// Converts a half-open buffer memory range into a Metal `NSRange`
/// (start location plus length).
pub fn map_range(range: &crate::MemoryRange) -> mtl::NSRange {
    let location = range.start;
    let length = range.end - range.start;
    mtl::NSRange { location, length }
}
/// Splits a `wgt::Extent3d` into `(array_layer_count, MTLSize)`.
/// For 3D textures the third dimension is depth with a single layer;
/// for every other texture type it is the array layer count with depth 1.
pub fn map_extent(extent: &wgt::Extent3d, raw_type: mtl::MTLTextureType) -> (u64, mtl::MTLSize) {
    let (depth, array_layers) = match raw_type {
        mtl::MTLTextureType::D3 => (extent.depth_or_array_layers as u64, 1),
        _ => (1, extent.depth_or_array_layers as u64),
    };
    (
        array_layers,
        mtl::MTLSize {
            width: extent.width as u64,
            height: extent.height as u64,
            depth,
        },
    )
}
/// Splits a `wgt::Origin3d` into `(starting_array_slice, MTLOrigin)`.
/// For 3D textures `z` is a real depth coordinate; for every other texture
/// type `z` selects the starting array slice and the origin's z is 0.
pub fn map_origin(origin: &wgt::Origin3d, raw_type: mtl::MTLTextureType) -> (u64, mtl::MTLOrigin) {
    let (z, slice) = match raw_type {
        mtl::MTLTextureType::D3 => (origin.z as u64, 0),
        _ => (0, origin.z as u64),
    };
    (
        slice,
        mtl::MTLOrigin {
            x: origin.x as u64,
            y: origin.y as u64,
            z,
        },
    )
}
/// Picks the Metal store action from an attachment's store/resolve flags.
pub fn map_store_action(store: bool, resolve: bool) -> mtl::MTLStoreAction {
    use mtl::MTLStoreAction::*;
    match (store, resolve) {
        (false, false) => DontCare,
        (true, false) => Store,
        (false, true) => MultisampleResolve,
        (true, true) => StoreAndMultisampleResolve,
    }
}
/// Converts a `wgt::Color` (f64 RGBA components) into an `MTLClearColor`.
pub fn map_clear_color(color: &wgt::Color) -> mtl::MTLClearColor {
    mtl::MTLClearColor {
        red: color.r,
        green: color.g,
        blue: color.b,
        alpha: color.a,
    }
}

View File

@@ -280,7 +280,8 @@ impl crate::Device<super::Api> for super::Device {
)
};
Ok(super::TextureView { raw })
let aspects = crate::FormatAspect::from(desc.format);
Ok(super::TextureView { raw, aspects })
}
unsafe fn destroy_texture_view(&self, _view: super::TextureView) {}
@@ -336,8 +337,20 @@ impl crate::Device<super::Api> for super::Device {
&self,
desc: &crate::CommandBufferDescriptor,
) -> DeviceResult<super::CommandBuffer> {
let raw = self.shared.create_command_buffer().to_owned();
Ok(super::CommandBuffer { raw })
let raw = self.shared.create_command_buffer();
if let Some(label) = desc.label {
raw.set_label(label);
}
Ok(super::CommandBuffer {
raw,
blit: None,
render: None,
compute: None,
raw_primitive_type: mtl::MTLPrimitiveType::Point,
index_state: None,
raw_wg_size: mtl::MTLSize::new(0, 0, 0),
})
}
unsafe fn destroy_command_buffer(&self, _cmd_buf: super::CommandBuffer) {}
@@ -411,33 +424,29 @@ impl crate::Device<super::Api> for super::Device {
let base_resource_indices = stage_data.map(|info| info.counters.clone());
for entry in bgl.entries.iter() {
match entry.ty {
wgt::BindingType::Buffer {
ty,
has_dynamic_offset,
min_binding_size: _,
} => {
if has_dynamic_offset {
dynamic_buffers.push(stage_data.map(|info| {
if entry.visibility.contains(map_naga_stage(info.stage)) {
info.counters.buffers
} else {
!0
}
}));
}
match ty {
wgt::BufferBindingType::Storage { .. } => {
sized_buffer_bindings.push((entry.binding, entry.visibility));
for info in stage_data.iter_mut() {
if entry.visibility.contains(map_naga_stage(info.stage)) {
info.sizes_count += 1;
}
}
if let wgt::BindingType::Buffer {
ty,
has_dynamic_offset,
min_binding_size: _,
} = entry.ty
{
if has_dynamic_offset {
dynamic_buffers.push(stage_data.map(|info| {
if entry.visibility.contains(map_naga_stage(info.stage)) {
info.counters.buffers
} else {
!0
}
}));
}
if let wgt::BufferBindingType::Storage { .. } = ty {
sized_buffer_bindings.push((entry.binding, entry.visibility));
for info in stage_data.iter_mut() {
if entry.visibility.contains(map_naga_stage(info.stage)) {
info.sizes_count += 1;
}
}
}
_ => {}
}
for info in stage_data.iter_mut() {
@@ -486,7 +495,7 @@ impl crate::Device<super::Api> for super::Device {
bind_group_infos.push(super::BindGroupLayoutInfo {
base_resource_indices,
dynamic_buffers,
//dynamic_buffers,
sized_buffer_bindings,
});
}
@@ -558,16 +567,31 @@ impl crate::Device<super::Api> for super::Device {
let mut bg = super::BindGroup::default();
for (&stage, counter) in super::NAGA_STAGES.iter().zip(bg.counters.iter_mut()) {
let stage_bit = map_naga_stage(stage);
let mut dynamic_offsets_count = 0u32;
for (entry, layout) in desc.entries.iter().zip(desc.layout.entries.iter()) {
if let wgt::BindingType::Buffer {
has_dynamic_offset: true,
..
} = layout.ty
{
dynamic_offsets_count += 1;
}
if !layout.visibility.contains(stage_bit) {
continue;
}
match layout.ty {
wgt::BindingType::Buffer { .. } => {
wgt::BindingType::Buffer {
has_dynamic_offset, ..
} => {
let source = &desc.buffers[entry.resource_index as usize];
bg.buffers.push(super::BufferResource {
ptr: source.buffer.as_raw(),
offset: source.offset,
dynamic_index: if has_dynamic_offset {
Some(dynamic_offsets_count - 1)
} else {
None
},
});
counter.buffers += 1;
}
@@ -592,7 +616,7 @@ impl crate::Device<super::Api> for super::Device {
unsafe fn create_shader_module(
&self,
desc: &crate::ShaderModuleDescriptor,
_desc: &crate::ShaderModuleDescriptor,
shader: crate::NagaShader,
) -> Result<super::ShaderModule, crate::ShaderError> {
Ok(super::ShaderModule { raw: shader })
@@ -660,7 +684,7 @@ impl crate::Device<super::Api> for super::Device {
}
}
let (raw_depth_stencil, depth_bias) = match desc.depth_stencil {
let depth_stencil = match desc.depth_stencil {
Some(ref ds) => {
let raw_format = self.shared.private_caps.map_format(ds.format);
let aspects = crate::FormatAspect::from(ds.format);
@@ -677,9 +701,9 @@ impl crate::Device<super::Api> for super::Device {
.device
.lock()
.new_depth_stencil_state(&ds_descriptor);
(Some(raw), ds.bias)
Some((raw, ds.bias))
}
None => (None, wgt::DepthBiasState::default()),
None => None,
};
if desc.layout.total_counters.vs.buffers + (desc.vertex_buffers.len() as u32)
@@ -703,9 +727,11 @@ impl crate::Device<super::Api> for super::Device {
buffer_desc.set_stride(vb.array_stride);
buffer_desc.set_step_function(conv::map_step_mode(vb.step_mode));
for (j, at) in vb.attributes.iter().enumerate() {
let attribute_desc =
vertex_descriptor.attributes().object_at(i as u64).unwrap();
for at in vb.attributes {
let attribute_desc = vertex_descriptor
.attributes()
.object_at(at.shader_location as u64)
.unwrap();
attribute_desc.set_format(conv::map_vertex_format(at.format));
attribute_desc.set_buffer_index(buffer_index);
attribute_desc.set_offset(at.offset);
@@ -763,8 +789,7 @@ impl crate::Device<super::Api> for super::Device {
} else {
None
},
raw_depth_stencil,
depth_bias,
depth_stencil,
})
}
unsafe fn destroy_render_pipeline(&self, _pipeline: super::RenderPipeline) {}
@@ -817,12 +842,15 @@ impl crate::Device<super::Api> for super::Device {
) -> DeviceResult<super::QuerySet> {
match desc.ty {
wgt::QueryType::Occlusion => {
let size = desc.count as u64 * 8;
let size = desc.count as u64 * crate::QUERY_SIZE;
let options = mtl::MTLResourceOptions::empty();
//TODO: HazardTrackingModeUntracked
let raw_buffer = self.shared.device.lock().new_buffer(size, options);
raw_buffer.set_label("_QuerySet");
Ok(super::QuerySet { raw_buffer })
Ok(super::QuerySet {
raw_buffer,
ty: desc.ty.clone(),
})
}
wgt::QueryType::Timestamp | wgt::QueryType::PipelineStatistics(_) => {
Err(crate::DeviceError::OutOfMemory)

View File

@@ -68,7 +68,9 @@ impl crate::Instance<Api> for Instance {
}
}
unsafe fn destroy_surface(&self, surface: Surface) {}
unsafe fn destroy_surface(&self, surface: Surface) {
surface.dispose();
}
unsafe fn enumerate_adapters(&self) -> Vec<crate::ExposedAdapter<Api>> {
let devices = mtl::Device::all();
@@ -224,30 +226,31 @@ impl AdapterShared {
}
}
fn create_command_buffer(&self) -> &mtl::CommandBufferRef {
fn create_command_buffer(&self) -> mtl::CommandBuffer {
let queue = self.queue.lock();
objc::rc::autoreleasepool(|| {
if self.settings.retain_command_buffer_references {
objc::rc::autoreleasepool(move || {
let cmd_buf_ref = if self.settings.retain_command_buffer_references {
queue.new_command_buffer()
} else {
queue.new_command_buffer_with_unretained_references()
}
};
cmd_buf_ref.to_owned()
})
}
}
struct Adapter {
pub struct Adapter {
shared: Arc<AdapterShared>,
}
struct Queue {}
pub struct Queue {}
struct Device {
pub struct Device {
shared: Arc<AdapterShared>,
features: wgt::Features,
}
struct Surface {
pub struct Surface {
view: Option<NonNull<objc::runtime::Object>>,
render_layer: Mutex<mtl::MetalLayer>,
raw_swapchain_format: mtl::MTLPixelFormat,
@@ -258,7 +261,7 @@ struct Surface {
}
#[derive(Debug)]
struct SurfaceTexture {
pub struct SurfaceTexture {
texture: Texture,
drawable: mtl::MetalDrawable,
present_with_transaction: bool,
@@ -321,6 +324,7 @@ unsafe impl Sync for Texture {}
#[derive(Debug)]
pub struct TextureView {
raw: mtl::Texture,
aspects: crate::FormatAspect,
}
unsafe impl Send for TextureView {}
@@ -408,7 +412,7 @@ type MultiStageResourceCounters = MultiStageData<ResourceData<ResourceIndex>>;
#[derive(Debug)]
struct BindGroupLayoutInfo {
base_resource_indices: MultiStageResourceCounters,
dynamic_buffers: Vec<MultiStageData<ResourceIndex>>,
//dynamic_buffers: Vec<MultiStageData<ResourceIndex>>,
sized_buffer_bindings: Vec<(u32, wgt::ShaderStage)>,
}
@@ -435,7 +439,6 @@ trait AsNative {
type BufferPtr = NonNull<mtl::MTLBuffer>;
type TexturePtr = NonNull<mtl::MTLTexture>;
type SamplerPtr = NonNull<mtl::MTLSamplerState>;
type ResourcePtr = NonNull<mtl::MTLResource>;
impl AsNative for BufferPtr {
type Native = mtl::BufferRef;
@@ -477,6 +480,7 @@ impl AsNative for SamplerPtr {
struct BufferResource {
ptr: BufferPtr,
offset: wgt::BufferAddress,
dynamic_index: Option<u32>,
}
#[derive(Debug, Default)]
@@ -502,21 +506,7 @@ struct PipelineStageInfo {
sized_bindings: Vec<naga::ResourceBinding>,
}
impl PipelineStageInfo {
fn clear(&mut self) {
self.push_constants = None;
self.sizes_slot = None;
self.sized_bindings.clear();
}
fn assign_from(&mut self, other: &Self) {
self.push_constants = other.push_constants;
self.sizes_slot = other.sizes_slot;
self.sized_bindings.clear();
self.sized_bindings.extend_from_slice(&other.sized_bindings);
}
}
#[allow(dead_code)] // silence xx_lib and xx_info warnings
pub struct RenderPipeline {
raw: mtl::RenderPipelineState,
vs_lib: mtl::Library,
@@ -527,10 +517,10 @@ pub struct RenderPipeline {
raw_front_winding: mtl::MTLWinding,
raw_cull_mode: mtl::MTLCullMode,
raw_depth_clip_mode: Option<mtl::MTLDepthClipMode>,
raw_depth_stencil: Option<mtl::DepthStencilState>,
depth_bias: wgt::DepthBiasState,
depth_stencil: Option<(mtl::DepthStencilState, wgt::DepthBiasState)>,
}
#[allow(dead_code)] // silence xx_lib and xx_info warnings
pub struct ComputePipeline {
raw: mtl::ComputePipelineState,
cs_lib: mtl::Library,
@@ -541,6 +531,7 @@ pub struct ComputePipeline {
#[derive(Debug)]
pub struct QuerySet {
raw_buffer: mtl::Buffer,
ty: wgt::QueryType,
}
unsafe impl Send for QuerySet {}
@@ -555,6 +546,19 @@ pub struct Fence {
unsafe impl Send for Fence {}
unsafe impl Sync for Fence {}
struct IndexState {
buffer_ptr: BufferPtr,
offset: wgt::BufferAddress,
stride: wgt::BufferAddress,
raw_type: mtl::MTLIndexType,
}
pub struct CommandBuffer {
raw: mtl::CommandBuffer,
blit: Option<mtl::BlitCommandEncoder>,
render: Option<mtl::RenderCommandEncoder>,
compute: Option<mtl::ComputeCommandEncoder>,
raw_primitive_type: mtl::MTLPrimitiveType,
index_state: Option<IndexState>,
raw_wg_size: mtl::MTLSize,
}

View File

@@ -21,13 +21,6 @@ pub struct CGPoint {
pub y: mtl::CGFloat,
}
impl CGPoint {
#[inline]
pub fn new(x: mtl::CGFloat, y: mtl::CGFloat) -> CGPoint {
CGPoint { x, y }
}
}
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
pub struct CGRect {
@@ -35,13 +28,6 @@ pub struct CGRect {
pub size: mtl::CGSize,
}
impl CGRect {
#[inline]
pub fn new(origin: CGPoint, size: mtl::CGSize) -> CGRect {
CGRect { origin, size }
}
}
impl super::Surface {
fn new(view: Option<NonNull<Object>>, layer: mtl::MetalLayer) -> Self {
Self {
@@ -53,6 +39,12 @@ impl super::Surface {
}
}
pub unsafe fn dispose(self) {
if let Some(view) = self.view {
let () = msg_send![view.as_ptr(), release];
}
}
#[cfg(target_os = "ios")]
pub unsafe fn from_uiview(uiview: *mut c_void) -> Self {
let view: cocoa_foundation::base::id = mem::transmute(uiview);
@@ -200,36 +192,33 @@ impl crate::Surface<super::Api> for super::Surface {
}
let device_raw = device.shared.device.lock();
unsafe {
// On iOS, unless the user supplies a view with a CAMetalLayer, we
// create one as a sublayer. However, when the view changes size,
// its sublayers are not automatically resized, and we must resize
// it here. The drawable size and the layer size don't correlate
#[cfg(target_os = "ios")]
{
if let Some(view) = self.view {
let main_layer: *mut Object = msg_send![view.as_ptr(), layer];
let bounds: CGRect = msg_send![main_layer, bounds];
let () = msg_send![*render_layer, setFrame: bounds];
}
// On iOS, unless the user supplies a view with a CAMetalLayer, we
// create one as a sublayer. However, when the view changes size,
// its sublayers are not automatically resized, and we must resize
// it here. The drawable size and the layer size don't correlate
#[cfg(target_os = "ios")]
{
if let Some(view) = self.view {
let main_layer: *mut Object = msg_send![view.as_ptr(), layer];
let bounds: CGRect = msg_send![main_layer, bounds];
let () = msg_send![*render_layer, setFrame: bounds];
}
render_layer.set_device(&*device_raw);
render_layer.set_pixel_format(mtl_format);
render_layer.set_framebuffer_only(framebuffer_only);
render_layer.set_presents_with_transaction(self.present_with_transaction);
}
render_layer.set_device(&*device_raw);
render_layer.set_pixel_format(mtl_format);
render_layer.set_framebuffer_only(framebuffer_only);
render_layer.set_presents_with_transaction(self.present_with_transaction);
// this gets ignored on iOS for certain OS/device combinations (iphone5s iOS 10.3)
let () =
msg_send![*render_layer, setMaximumDrawableCount: config.swap_chain_size as u64];
// this gets ignored on iOS for certain OS/device combinations (iphone5s iOS 10.3)
let () = msg_send![*render_layer, setMaximumDrawableCount: config.swap_chain_size as u64];
render_layer.set_drawable_size(drawable_size);
if caps.can_set_next_drawable_timeout {
let () = msg_send![*render_layer, setAllowsNextDrawableTimeout:false];
}
if caps.can_set_display_sync {
let () = msg_send![*render_layer, setDisplaySyncEnabled: display_sync];
}
};
render_layer.set_drawable_size(drawable_size);
if caps.can_set_next_drawable_timeout {
let () = msg_send![*render_layer, setAllowsNextDrawableTimeout:false];
}
if caps.can_set_display_sync {
let () = msg_send![*render_layer, setDisplaySyncEnabled: display_sync];
}
Ok(())
}
@@ -247,7 +236,6 @@ impl crate::Surface<super::Api> for super::Surface {
let drawable = render_layer.next_drawable().unwrap();
(drawable.to_owned(), drawable.texture().to_owned())
});
let size = render_layer.drawable_size();
let suf_texture = super::SurfaceTexture {
texture: super::Texture {