hal/dx12: texture and view creation

This commit is contained in:
Dzmitry Malyshau
2021-07-06 00:45:47 -04:00
committed by Dzmitry Malyshau
parent c01f6a557e
commit 23615aa55d
8 changed files with 525 additions and 91 deletions

1
Cargo.lock generated
View File

@@ -1992,6 +1992,7 @@ version = "0.9.0"
dependencies = [
"arrayvec",
"ash",
"bit-set",
"bitflags",
"block",
"core-graphics-types",

View File

@@ -16,7 +16,7 @@ default = []
metal = ["naga/msl-out", "block", "foreign-types"]
vulkan = ["naga/spv-out", "ash", "gpu-alloc", "gpu-descriptor", "libloading", "inplace_it", "renderdoc-sys"]
gles = ["naga/glsl-out", "glow", "egl", "libloading"]
dx12 = ["naga/hlsl-out", "native", "winapi/d3d12", "winapi/d3d12shader", "winapi/d3d12sdklayers", "winapi/dxgi1_6"]
dx12 = ["naga/hlsl-out", "native", "bit-set", "winapi/d3d12", "winapi/d3d12shader", "winapi/d3d12sdklayers", "winapi/dxgi1_6"]
[dependencies]
bitflags = "1.0"
@@ -41,8 +41,8 @@ renderdoc-sys = { version = "0.7.1", optional = true }
# backend: Gles
glow = { git = "https://github.com/grovesNL/glow", rev = "0864897a28bbdd43f89f4fd8fdd4ed781b719f8a", optional = true }
# backend: Dx12
bit-set = { version = "0.5", optional = true }
native = { package = "d3d12", version = "0.4", features = ["libloading"], optional = true }
#winapi = { version = "0.3", features = ["basetsd","d3dcommon","d3dcompiler","dxgi1_2","dxgi1_3","dxgi1_4","dxgi1_5","dxgi1_6","dxgidebug","dxgiformat","dxgitype","handleapi","minwindef","synchapi","unknwnbase","winbase","winerror","winnt"] }
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
egl = { package = "khronos-egl", version = "4.1", features = ["dynamic"], optional = true }

View File

@@ -3,7 +3,6 @@ use std::{mem, sync::Arc};
use winapi::{
shared::{dxgi, dxgi1_2, dxgi1_5, minwindef, windef, winerror},
um::{d3d12, winuser},
Interface,
};
impl Drop for super::Adapter {
@@ -136,6 +135,7 @@ impl super::Adapter {
let mut features = wgt::Features::empty()
| wgt::Features::DEPTH_CLAMPING
| wgt::Features::MAPPABLE_PRIMARY_BUFFERS
//TODO: Naga part
//| wgt::Features::TEXTURE_BINDING_ARRAY
//| wgt::Features::BUFFER_BINDING_ARRAY
@@ -240,25 +240,9 @@ impl crate::Adapter<super::Api> for super::Adapter {
)
.to_device_result("Queue creation")?;
let mut idle_fence = native::Fence::null();
let hr = self.device.CreateFence(
0,
d3d12::D3D12_FENCE_FLAG_NONE,
&d3d12::ID3D12Fence::uuidof(),
idle_fence.mut_void(),
);
hr.to_device_result("Idle fence creation")?;
let device = super::Device::new(self.device, queue, self.private_caps)?;
Ok(crate::OpenDevice {
device: super::Device {
raw: self.device,
present_queue: queue,
idler: super::Idler {
fence: idle_fence,
event: native::Event::create(false, false),
},
private_caps: self.private_caps,
},
device,
queue: super::Queue { raw: queue },
})
}

View File

@@ -36,9 +36,9 @@ impl crate::CommandEncoder<super::Api> for super::CommandEncoder {
unsafe fn copy_texture_to_texture<T>(
&mut self,
src: &Resource,
src: &super::Texture,
src_usage: crate::TextureUses,
dst: &Resource,
dst: &super::Texture,
regions: T,
) {
}
@@ -46,14 +46,14 @@ impl crate::CommandEncoder<super::Api> for super::CommandEncoder {
unsafe fn copy_buffer_to_texture<T>(
&mut self,
src: &super::Buffer,
dst: &Resource,
dst: &super::Texture,
regions: T,
) {
}
unsafe fn copy_texture_to_buffer<T>(
&mut self,
src: &Resource,
src: &super::Texture,
src_usage: crate::TextureUses,
dst: &super::Buffer,
regions: T,

View File

@@ -128,3 +128,34 @@ pub fn map_buffer_usage_to_resource_flags(usage: crate::BufferUses) -> d3d12::D3
}
flags
}
pub fn map_texture_dimension(dim: wgt::TextureDimension) -> d3d12::D3D12_RESOURCE_DIMENSION {
match dim {
wgt::TextureDimension::D1 => d3d12::D3D12_RESOURCE_DIMENSION_TEXTURE1D,
wgt::TextureDimension::D2 => d3d12::D3D12_RESOURCE_DIMENSION_TEXTURE2D,
wgt::TextureDimension::D3 => d3d12::D3D12_RESOURCE_DIMENSION_TEXTURE3D,
}
}
/// Derives the `D3D12_RESOURCE_FLAGS` bits for a texture from its HAL
/// usage set.
pub fn map_texture_usage_to_resource_flags(
    usage: crate::TextureUses,
) -> d3d12::D3D12_RESOURCE_FLAGS {
    let mut flags = 0;

    // Attachment usages map 1:1 onto "ALLOW_*" flags.
    if usage.contains(crate::TextureUses::COLOR_TARGET) {
        flags |= d3d12::D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET;
    }
    let depth_stencil =
        crate::TextureUses::DEPTH_STENCIL_READ | crate::TextureUses::DEPTH_STENCIL_WRITE;
    if usage.intersects(depth_stencil) {
        flags |= d3d12::D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL;
    }
    if usage.contains(crate::TextureUses::STORAGE_STORE) {
        flags |= d3d12::D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
    }

    // If no usage ever reads the texture from a shader, say so: D3D12 can
    // optimize resources that deny shader access.
    let shader_readable = crate::TextureUses::SAMPLED | crate::TextureUses::STORAGE_LOAD;
    if !usage.intersects(shader_readable) {
        flags |= d3d12::D3D12_RESOURCE_FLAG_DENY_SHADER_RESOURCE;
    }

    flags
}

View File

@@ -0,0 +1,170 @@
use bit_set::BitSet;
use std::fmt;
const HEAP_SIZE_FIXED: usize = 64;
/// A CPU + GPU descriptor address pair referring to the same slot of a
/// shader-visible descriptor heap.
#[derive(Copy, Clone)]
pub(super) struct DualHandle {
// CPU-side (write) address of the descriptor.
cpu: native::CpuDescriptor,
// GPU-side (shader-visible) address of the descriptor.
gpu: native::GpuDescriptor,
/// How large the block allocated to this handle is.
size: u64,
}
type DescriptorIndex = u64;
/// A descriptor heap addressed linearly by descriptor index, yielding
/// paired CPU/GPU handles.
struct LinearHeap {
raw: native::DescriptorHeap,
// Size in bytes of one descriptor, i.e. the stride between slots.
handle_size: u64,
// Total number of descriptors the heap can hold; used for bounds checks.
total_handles: u64,
// CPU/GPU addresses of the heap's first descriptor.
start: DualHandle,
}
impl LinearHeap {
    /// Returns the descriptor pair at `index`, tagged with the given
    /// allocation block `size`.
    pub(super) fn at(&self, index: DescriptorIndex, size: u64) -> DualHandle {
        assert!(index < self.total_handles);
        let cpu = self.cpu_descriptor_at(index);
        let gpu = self.gpu_descriptor_at(index);
        DualHandle { cpu, gpu, size }
    }

    /// CPU-side address of the descriptor at `index`.
    pub(super) fn cpu_descriptor_at(&self, index: u64) -> native::CpuDescriptor {
        let offset = (self.handle_size * index) as usize;
        native::CpuDescriptor {
            ptr: self.start.cpu.ptr + offset,
        }
    }

    /// GPU-side address of the descriptor at `index`.
    pub(super) fn gpu_descriptor_at(&self, index: u64) -> native::GpuDescriptor {
        let offset = self.handle_size * index;
        native::GpuDescriptor {
            ptr: self.start.gpu.ptr + offset,
        }
    }
}
/// Fixed-size free-list allocator for CPU descriptors.
struct FixedSizeHeap {
raw: native::DescriptorHeap,
/// Bit flag representation of available handles in the heap.
///
/// 0 - Occupied
/// 1 - free
availability: u64,
handle_size: usize,
start: native::CpuDescriptor,
}
impl FixedSizeHeap {
    /// Creates a CPU-only descriptor heap of `HEAP_SIZE_FIXED` slots,
    /// with every slot initially free.
    fn new(device: native::Device, ty: native::DescriptorHeapType) -> Self {
        let (heap, _hr) = device.create_descriptor_heap(
            HEAP_SIZE_FIXED as _,
            ty,
            native::DescriptorHeapFlags::empty(),
            0,
        );

        Self {
            handle_size: device.get_descriptor_increment_size(ty) as _,
            availability: !0, // every bit set: all slots free
            start: heap.start_cpu_descriptor(),
            raw: heap,
        }
    }

    /// Claims the lowest free slot and returns its CPU descriptor.
    ///
    /// Panics if the heap is full — callers are expected to consult
    /// `is_full` first.
    fn alloc_handle(&mut self) -> native::CpuDescriptor {
        let slot = self.availability.trailing_zeros() as usize;
        assert!(slot < HEAP_SIZE_FIXED);

        // Clear the bit: the slot is now occupied.
        self.availability &= !(1 << slot);

        native::CpuDescriptor {
            ptr: self.start.ptr + self.handle_size * slot,
        }
    }

    /// Returns a previously allocated handle to the free list.
    fn free_handle(&mut self, handle: native::CpuDescriptor) {
        // Recover the slot index from the pointer offset.
        let slot = (handle.ptr - self.start.ptr) / self.handle_size;
        assert!(slot < HEAP_SIZE_FIXED);
        // The slot must currently be occupied (double-free guard).
        assert_eq!(self.availability & (1 << slot), 0);
        self.availability |= 1 << slot;
    }

    fn is_full(&self) -> bool {
        self.availability == 0
    }

    unsafe fn destroy(&self) {
        self.raw.destroy();
    }
}
/// A CPU descriptor plus the index of the `FixedSizeHeap` it came from,
/// so `CpuPool::free_handle` can return it to the right heap.
#[derive(Clone, Copy)]
pub(super) struct Handle {
pub raw: native::CpuDescriptor,
heap_index: usize,
}
// Manual `Debug` impl — presumably because `native::CpuDescriptor`
// does not implement `Debug` itself (TODO confirm); prints the raw
// pointer value instead.
impl fmt::Debug for Handle {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Handle")
.field("ptr", &self.raw.ptr)
.field("heap_index", &self.heap_index)
.finish()
}
}
/// Pool of CPU-only (non-shader-visible) descriptors of one heap type.
///
/// Grows by appending `FixedSizeHeap`s on demand; freed handles return
/// to their originating heap.
pub(super) struct CpuPool {
    device: native::Device,
    ty: native::DescriptorHeapType,
    heaps: Vec<FixedSizeHeap>,
    /// Indices into `heaps` that still have at least one free slot.
    // Fixed typo: was `avaliable_heap_indices`.
    available_heap_indices: BitSet,
}

impl CpuPool {
    pub(super) fn new(device: native::Device, ty: native::DescriptorHeapType) -> Self {
        Self {
            device,
            ty,
            heaps: Vec::new(),
            available_heap_indices: BitSet::new(),
        }
    }

    /// Allocates a descriptor, creating a new backing heap if no
    /// existing heap has room.
    pub(super) fn alloc_handle(&mut self) -> Handle {
        let heap_index = self
            .available_heap_indices
            .iter()
            .next()
            .unwrap_or_else(|| {
                // Every existing heap is full (or none exist yet):
                // allocate a fresh one and mark it available.
                let id = self.heaps.len();
                self.heaps.push(FixedSizeHeap::new(self.device, self.ty));
                self.available_heap_indices.insert(id);
                id
            });

        let heap = &mut self.heaps[heap_index];
        let handle = Handle {
            raw: heap.alloc_handle(),
            heap_index,
        };
        // A heap that just filled up must stop being offered.
        if heap.is_full() {
            self.available_heap_indices.remove(heap_index);
        }

        handle
    }

    /// Returns `handle` to its originating heap, which is then
    /// guaranteed to have at least one free slot.
    pub(super) fn free_handle(&mut self, handle: Handle) {
        self.heaps[handle.heap_index].free_handle(handle.raw);
        self.available_heap_indices.insert(handle.heap_index);
    }

    /// # Safety
    /// Must only be called once no descriptor from this pool is in use.
    pub(super) unsafe fn destroy(&self) {
        for heap in &self.heaps {
            heap.destroy();
        }
    }
}

View File

@@ -1,5 +1,6 @@
use super::{conv, HResult as _};
use std::ptr;
use super::{conv, descriptor, HResult as _};
use parking_lot::Mutex;
use std::{iter, mem, ptr};
use winapi::{
shared::{dxgiformat, dxgitype},
um::{d3d12, d3d12sdklayers, synchapi, winbase},
@@ -11,7 +12,52 @@ use super::Resource;
type DeviceResult<T> = Result<T, crate::DeviceError>;
const D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING: u32 = 0x1688;
fn wide_cstr(name: &str) -> Vec<u16> {
name.encode_utf16().chain(iter::once(0)).collect()
}
impl super::Device {
/// Builds the HAL device around a raw `ID3D12Device`.
///
/// Creates the fence + event used by `wait_idle`, and the three
/// CPU-only descriptor pools (RTV, DSV, SRV/UAV).
///
/// Returns `DeviceError` if the idle fence cannot be created.
pub(super) fn new(
raw: native::Device,
present_queue: native::CommandQueue,
private_caps: super::PrivateCapabilities,
) -> Result<Self, crate::DeviceError> {
let mut idle_fence = native::Fence::null();
let hr = unsafe {
raw.CreateFence(
0,
d3d12::D3D12_FENCE_FLAG_NONE,
&d3d12::ID3D12Fence::uuidof(),
idle_fence.mut_void(),
)
};
hr.to_device_result("Idle fence creation")?;
Ok(super::Device {
raw,
present_queue,
idler: super::Idler {
fence: idle_fence,
// Auto-reset = false, initially signaled = false.
event: native::Event::create(false, false),
},
private_caps,
rtv_pool: Mutex::new(descriptor::CpuPool::new(
raw,
native::DescriptorHeapType::Rtv,
)),
dsv_pool: Mutex::new(descriptor::CpuPool::new(
raw,
native::DescriptorHeapType::Dsv,
)),
srv_uav_pool: Mutex::new(descriptor::CpuPool::new(
raw,
native::DescriptorHeapType::CbvSrvUav,
)),
})
}
pub(super) unsafe fn wait_idle(&self) -> Result<(), crate::DeviceError> {
let value = self.idler.fence.get_value() + 1;
log::info!("Waiting for idle with value {}", value);
@@ -24,15 +70,125 @@ impl super::Device {
synchapi::WaitForSingleObject(self.idler.event.0, winbase::INFINITE);
Ok(())
}
/// Creates a shader-resource view of `texture` and returns a CPU
/// descriptor allocated from `srv_uav_pool`.
///
/// The returned handle must eventually be freed back to the pool
/// (see `destroy_texture_view`).
unsafe fn view_texture_as_shader_resource(
&self,
texture: &super::Texture,
desc: &crate::TextureViewDescriptor,
) -> descriptor::Handle {
let mut raw_desc = d3d12::D3D12_SHADER_RESOURCE_VIEW_DESC {
Format: conv::map_texture_format(desc.format),
// `ViewDimension` and the matching arm of the `u` union are
// filled in by the `match` below.
ViewDimension: 0,
Shader4ComponentMapping: D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING,
u: mem::zeroed(),
};
// D3D12-style field names so the values can be written into the raw
// structs below verbatim.
#[allow(non_snake_case)]
let MipLevels = match desc.range.mip_level_count {
Some(count) => count.get(),
// `!0` selects all remaining mip levels.
None => !0,
};
#[allow(non_snake_case)]
let ArraySize = match desc.range.array_layer_count {
Some(count) => count.get(),
// All layers from the base layer to the end of the texture.
None => texture.size.depth_or_array_layers - desc.range.base_array_layer,
};
match desc.dimension {
wgt::TextureViewDimension::D1 => {
raw_desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE1D;
*raw_desc.u.Texture1D_mut() = d3d12::D3D12_TEX1D_SRV {
MostDetailedMip: desc.range.base_mip_level,
MipLevels,
ResourceMinLODClamp: 0.0,
}
}
/*
wgt::TextureViewDimension::D1Array => {
raw_desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE1DARRAY;
*raw_desc.u.Texture1DArray_mut() = d3d12::D3D12_TEX1D_ARRAY_SRV {
MostDetailedMip: desc.range.base_mip_level,
MipLevels,
FirstArraySlice: desc.range.base_array_layer,
ArraySize,
ResourceMinLODClamp: 0.0,
}
}*/
// Multisampled 2D textures require the dedicated MS view kinds.
wgt::TextureViewDimension::D2 if texture.sample_count > 1 => {
raw_desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE2DMS;
*raw_desc.u.Texture2DMS_mut() = d3d12::D3D12_TEX2DMS_SRV {
UnusedField_NothingToDefine: 0,
}
}
wgt::TextureViewDimension::D2 => {
raw_desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE2D;
*raw_desc.u.Texture2D_mut() = d3d12::D3D12_TEX2D_SRV {
MostDetailedMip: desc.range.base_mip_level,
MipLevels,
PlaneSlice: 0,
ResourceMinLODClamp: 0.0,
}
}
wgt::TextureViewDimension::D2Array if texture.sample_count > 1 => {
raw_desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE2DMSARRAY;
*raw_desc.u.Texture2DMSArray_mut() = d3d12::D3D12_TEX2DMS_ARRAY_SRV {
FirstArraySlice: desc.range.base_array_layer,
ArraySize,
}
}
wgt::TextureViewDimension::D2Array => {
raw_desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE2DARRAY;
*raw_desc.u.Texture2DArray_mut() = d3d12::D3D12_TEX2D_ARRAY_SRV {
MostDetailedMip: desc.range.base_mip_level,
MipLevels,
FirstArraySlice: desc.range.base_array_layer,
ArraySize,
PlaneSlice: 0,
ResourceMinLODClamp: 0.0,
}
}
wgt::TextureViewDimension::D3 => {
raw_desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE3D;
*raw_desc.u.Texture3D_mut() = d3d12::D3D12_TEX3D_SRV {
MostDetailedMip: desc.range.base_mip_level,
MipLevels,
ResourceMinLODClamp: 0.0,
}
}
wgt::TextureViewDimension::Cube => {
raw_desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURECUBE;
*raw_desc.u.TextureCube_mut() = d3d12::D3D12_TEXCUBE_SRV {
MostDetailedMip: desc.range.base_mip_level,
MipLevels,
ResourceMinLODClamp: 0.0,
}
}
wgt::TextureViewDimension::CubeArray => {
raw_desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURECUBEARRAY;
*raw_desc.u.TextureCubeArray_mut() = d3d12::D3D12_TEXCUBE_ARRAY_SRV {
MostDetailedMip: desc.range.base_mip_level,
MipLevels,
First2DArrayFace: desc.range.base_array_layer,
// Cube arrays are counted in whole cubes of 6 layers each.
NumCubes: ArraySize / 6,
ResourceMinLODClamp: 0.0,
}
}
}
let handle = self.srv_uav_pool.lock().alloc_handle();
self.raw
.CreateShaderResourceView(texture.resource.as_mut_ptr(), &raw_desc, handle.raw);
handle
}
}
impl crate::Device<super::Api> for super::Device {
unsafe fn exit(self) {
//self.heap_srv_cbv_uav.0.destroy();
//self.samplers.destroy();
//self.rtv_pool.lock().destroy();
//self.dsv_pool.lock().destroy();
//self.srv_uav_pool.lock().destroy();
self.rtv_pool.into_inner().destroy();
self.dsv_pool.into_inner().destroy();
self.srv_uav_pool.into_inner().destroy();
//self.descriptor_updater.lock().destroy();
@@ -76,10 +232,10 @@ impl crate::Device<super::Api> for super::Device {
let heap_properties = d3d12::D3D12_HEAP_PROPERTIES {
Type: d3d12::D3D12_HEAP_TYPE_CUSTOM,
CPUPageProperty: if is_cpu_write {
d3d12::D3D12_CPU_PAGE_PROPERTY_WRITE_COMBINE
} else if is_cpu_read {
CPUPageProperty: if is_cpu_read {
d3d12::D3D12_CPU_PAGE_PROPERTY_WRITE_BACK
} else if is_cpu_write {
d3d12::D3D12_CPU_PAGE_PROPERTY_WRITE_COMBINE
} else {
d3d12::D3D12_CPU_PAGE_PROPERTY_NOT_AVAILABLE
},
@@ -113,27 +269,132 @@ impl crate::Device<super::Api> for super::Device {
&self,
buffer: &super::Buffer,
range: crate::MemoryRange,
) -> DeviceResult<crate::BufferMapping> {
Err(crate::DeviceError::Lost)
) -> Result<crate::BufferMapping, crate::DeviceError> {
let mut ptr = ptr::null_mut();
let hr = (*buffer.resource).Map(0, &d3d12::D3D12_RANGE { Begin: 0, End: 0 }, &mut ptr);
hr.to_device_result("Map buffer")?;
Ok(crate::BufferMapping {
ptr: ptr::NonNull::new(ptr.offset(range.start as isize) as *mut _).unwrap(),
//TODO: double-check this. Documentation is a bit misleading -
// it implies that Map/Unmap is needed to invalidate/flush memory.
is_coherent: true,
})
}
unsafe fn unmap_buffer(&self, buffer: &super::Buffer) -> DeviceResult<()> {
unsafe fn unmap_buffer(&self, buffer: &super::Buffer) -> Result<(), crate::DeviceError> {
// NOTE(review): an empty written range (Begin == End == 0) tells the
// runtime the CPU wrote nothing; verify this is intended for buffers
// mapped for writing (a null range would mean "whole resource").
(*buffer.resource).Unmap(0, &d3d12::D3D12_RANGE { Begin: 0, End: 0 });
Ok(())
}
unsafe fn flush_mapped_ranges<I>(&self, buffer: &super::Buffer, ranges: I) {}
unsafe fn invalidate_mapped_ranges<I>(&self, buffer: &super::Buffer, ranges: I) {}
// No-ops: `map_buffer` reports mappings as coherent (`is_coherent: true`),
// so there is nothing to flush or invalidate on this backend.
unsafe fn flush_mapped_ranges<I>(&self, _buffer: &super::Buffer, ranges: I) {}
unsafe fn invalidate_mapped_ranges<I>(&self, _buffer: &super::Buffer, ranges: I) {}
unsafe fn create_texture(&self, desc: &crate::TextureDescriptor) -> DeviceResult<Resource> {
Ok(Resource)
/// Creates a committed texture resource in GPU-local memory.
///
/// Returns `DeviceError` if `CreateCommittedResource` fails.
unsafe fn create_texture(
    &self,
    desc: &crate::TextureDescriptor,
) -> Result<super::Texture, crate::DeviceError> {
    let mut resource = native::Resource::null();

    let raw_desc = d3d12::D3D12_RESOURCE_DESC {
        Dimension: conv::map_texture_dimension(desc.dimension),
        Alignment: 0,
        Width: desc.size.width as u64,
        Height: desc.size.height,
        DepthOrArraySize: desc.size.depth_or_array_layers as u16,
        MipLevels: desc.mip_level_count as u16,
        //TODO: map to surface format to allow view casting
        Format: conv::map_texture_format(desc.format),
        SampleDesc: dxgitype::DXGI_SAMPLE_DESC {
            Count: desc.sample_count,
            Quality: 0,
        },
        Layout: d3d12::D3D12_TEXTURE_LAYOUT_64KB_UNDEFINED_SWIZZLE,
        Flags: conv::map_texture_usage_to_resource_flags(desc.usage),
    };

    // Textures live in GPU-local memory: pool L1 on discrete (non-unified)
    // adapters, L0 on unified-memory adapters.
    let heap_properties = d3d12::D3D12_HEAP_PROPERTIES {
        Type: d3d12::D3D12_HEAP_TYPE_CUSTOM,
        CPUPageProperty: d3d12::D3D12_CPU_PAGE_PROPERTY_NOT_AVAILABLE,
        MemoryPoolPreference: match self.private_caps.memory_architecture {
            super::MemoryArchitecture::NonUnified => d3d12::D3D12_MEMORY_POOL_L1,
            super::MemoryArchitecture::Unified { .. } => d3d12::D3D12_MEMORY_POOL_L0,
        },
        CreationNodeMask: 0,
        VisibleNodeMask: 0,
    };

    let hr = self.raw.CreateCommittedResource(
        &heap_properties,
        d3d12::D3D12_HEAP_FLAG_NONE,
        &raw_desc,
        d3d12::D3D12_RESOURCE_STATE_COMMON,
        ptr::null(),
        &d3d12::ID3D12Resource::uuidof(),
        resource.mut_void(),
    );
    // Check the HRESULT *before* touching the resource: on failure
    // `resource` is still null, and the original code called `SetName`
    // on it first — a null-pointer dereference.
    hr.to_device_result("Texture creation")?;

    if let Some(label) = desc.label {
        let cwstr = wide_cstr(label);
        resource.SetName(cwstr.as_ptr());
    }

    Ok(super::Texture {
        resource,
        size: desc.size,
        sample_count: desc.sample_count,
    })
}
unsafe fn destroy_texture(&self, texture: Resource) {}
unsafe fn destroy_texture(&self, texture: super::Texture) {
// Releases the underlying COM resource.
// NOTE(review): assumes the GPU has finished using it — confirm the
// HAL contract makes that the caller's responsibility.
texture.resource.destroy();
}
unsafe fn create_texture_view(
&self,
texture: &Resource,
texture: &super::Texture,
desc: &crate::TextureViewDescriptor,
) -> DeviceResult<Resource> {
Ok(Resource)
) -> Result<super::TextureView, crate::DeviceError> {
Ok(super::TextureView {
handle_srv: if desc
.usage
.intersects(crate::TextureUses::SAMPLED | crate::TextureUses::STORAGE_LOAD)
{
Some(self.view_texture_as_shader_resource(texture, desc))
} else {
None
},
handle_rtv: if desc.usage.intersects(crate::TextureUses::COLOR_TARGET) {
unimplemented!()
} else {
None
},
handle_dsv: if desc.usage.intersects(
crate::TextureUses::DEPTH_STENCIL_READ | crate::TextureUses::DEPTH_STENCIL_WRITE,
) {
unimplemented!()
} else {
None
},
handle_uav: if desc.usage.intersects(crate::TextureUses::STORAGE_STORE) {
unimplemented!()
} else {
None
},
})
}
unsafe fn destroy_texture_view(&self, view: Resource) {}
/// Returns all of the view's CPU descriptors to their pools.
unsafe fn destroy_texture_view(&self, view: super::TextureView) {
    // SRV and UAV handles come from the same pool — lock it once for
    // both instead of acquiring the mutex twice.
    if view.handle_srv.is_some() || view.handle_uav.is_some() {
        let mut pool = self.srv_uav_pool.lock();
        if let Some(handle) = view.handle_srv {
            pool.free_handle(handle);
        }
        if let Some(handle) = view.handle_uav {
            pool.free_handle(handle);
        }
    }
    if let Some(handle) = view.handle_rtv {
        self.rtv_pool.lock().free_handle(handle);
    }
    if let Some(handle) = view.handle_dsv {
        self.dsv_pool.lock().free_handle(handle);
    }
}
unsafe fn create_sampler(&self, desc: &crate::SamplerDescriptor) -> DeviceResult<Resource> {
Ok(Resource)
}

View File

@@ -10,8 +10,10 @@
mod adapter;
mod command;
mod conv;
mod descriptor;
mod device;
use parking_lot::Mutex;
use std::{borrow::Cow, ptr, sync::Arc};
use winapi::{
shared::{dxgi, dxgi1_2, dxgi1_4, dxgi1_6, dxgitype, windef, winerror},
@@ -36,9 +38,9 @@ impl crate::Api for Api {
type CommandBuffer = Resource;
type Buffer = Buffer;
type Texture = Resource;
type SurfaceTexture = Resource;
type TextureView = Resource;
type Texture = Texture;
type SurfaceTexture = Texture;
type TextureView = TextureView;
type Sampler = Resource;
type QuerySet = Resource;
type Fence = Resource;
@@ -107,46 +109,6 @@ impl Drop for Instance {
unsafe impl Send for Instance {}
unsafe impl Sync for Instance {}
#[derive(Copy, Clone)]
struct DualHandle {
cpu: native::CpuDescriptor,
gpu: native::GpuDescriptor,
/// How large the block allocated to this handle is.
size: u64,
}
type DescriptorIndex = u64;
struct DescriptorHeap {
raw: native::DescriptorHeap,
handle_size: u64,
total_handles: u64,
start: DualHandle,
}
impl DescriptorHeap {
fn at(&self, index: DescriptorIndex, size: u64) -> DualHandle {
assert!(index < self.total_handles);
DualHandle {
cpu: self.cpu_descriptor_at(index),
gpu: self.gpu_descriptor_at(index),
size,
}
}
fn cpu_descriptor_at(&self, index: u64) -> native::CpuDescriptor {
native::CpuDescriptor {
ptr: self.start.cpu.ptr + (self.handle_size * index) as usize,
}
}
fn gpu_descriptor_at(&self, index: u64) -> native::GpuDescriptor {
native::GpuDescriptor {
ptr: self.start.gpu.ptr + self.handle_size * index,
}
}
}
struct SwapChain {
raw: native::WeakPtr<dxgi1_4::IDXGISwapChain3>,
// need to associate raw image pointers with the swapchain so they can be properly released
@@ -206,6 +168,10 @@ pub struct Device {
present_queue: native::CommandQueue,
idler: Idler,
private_caps: PrivateCapabilities,
// CPU only pools
rtv_pool: Mutex<descriptor::CpuPool>,
dsv_pool: Mutex<descriptor::CpuPool>,
srv_uav_pool: Mutex<descriptor::CpuPool>,
}
unsafe impl Send for Device {}
@@ -218,6 +184,8 @@ pub struct Queue {
unsafe impl Send for Queue {}
unsafe impl Sync for Queue {}
pub struct CommandEncoder {}
#[derive(Debug)]
pub struct Buffer {
resource: native::Resource,
@@ -226,7 +194,26 @@ pub struct Buffer {
unsafe impl Send for Buffer {}
unsafe impl Sync for Buffer {}
pub struct CommandEncoder {}
/// A D3D12 texture resource plus the metadata view creation needs
/// (extent for default layer counts, sample count for MS view kinds).
#[derive(Debug)]
pub struct Texture {
resource: native::Resource,
size: wgt::Extent3d,
sample_count: u32,
}
// SAFETY: presumably sound because the wrapped COM pointer is only a
// handle and D3D12 devices/resources are free-threaded — TODO confirm.
unsafe impl Send for Texture {}
unsafe impl Sync for Texture {}
/// Per-usage CPU descriptors for one texture view; each is `Some` only
/// if the view's usage requires that kind of descriptor.
#[derive(Debug)]
pub struct TextureView {
handle_srv: Option<descriptor::Handle>,
handle_rtv: Option<descriptor::Handle>,
handle_dsv: Option<descriptor::Handle>,
handle_uav: Option<descriptor::Handle>,
}
// SAFETY: presumably sound because the handles are plain CPU descriptor
// addresses with no interior mutability — TODO confirm.
unsafe impl Send for TextureView {}
unsafe impl Sync for TextureView {}
impl crate::Instance<Api> for Instance {
unsafe fn init(desc: &crate::InstanceDescriptor) -> Result<Self, crate::InstanceError> {
@@ -530,7 +517,7 @@ impl crate::Surface<Api> for Surface {
) -> Result<Option<crate::AcquiredSurfaceTexture<Api>>, crate::SurfaceError> {
Ok(None)
}
unsafe fn discard_texture(&mut self, texture: Resource) {}
unsafe fn discard_texture(&mut self, texture: Texture) {}
}
impl crate::Queue<Api> for Queue {
@@ -544,7 +531,7 @@ impl crate::Queue<Api> for Queue {
unsafe fn present(
&mut self,
surface: &mut Surface,
texture: Resource,
texture: Texture,
) -> Result<(), crate::SurfaceError> {
Ok(())
}