149: Remove wgpu_buffer_set_sub_data function r=kvark a=Atul9

closes #145 

Co-authored-by: Atul Bhosale <atul1bhosale@gmail.com>
This commit is contained in:
bors[bot]
2019-05-03 13:50:39 +00:00
3 changed files with 5 additions and 86 deletions

View File

@@ -648,11 +648,6 @@ void wgpu_buffer_map_write_async(WGPUBufferId buffer_id,
WGPUBufferMapWriteCallback callback,
uint8_t *userdata);
void wgpu_buffer_set_sub_data(WGPUBufferId buffer_id,
uint32_t start,
uint32_t count,
const uint8_t *data);
void wgpu_buffer_unmap(WGPUBufferId buffer_id);
void wgpu_command_buffer_copy_buffer_to_buffer(WGPUCommandBufferId command_buffer_id,

View File

@@ -856,7 +856,6 @@ pub extern "C" fn wgpu_texture_create_default_view(texture_id: TextureId) -> Tex
id
}
#[no_mangle]
pub extern "C" fn wgpu_texture_destroy(texture_id: TextureId) {
let texture_guard = HUB.textures.read();
@@ -956,8 +955,8 @@ pub fn device_create_bind_group_layout(
dynamic_count: bindings
.iter()
.filter(|b| match b.ty {
binding_model::BindingType::UniformBufferDynamic |
binding_model::BindingType::StorageBufferDynamic => true,
binding_model::BindingType::UniformBufferDynamic
| binding_model::BindingType::StorageBufferDynamic => true,
_ => false,
})
.count(),
@@ -1051,8 +1050,9 @@ pub fn device_create_bind_group(
device.limits.min_storage_buffer_offset_alignment
}
binding_model::BindingType::Sampler
| binding_model::BindingType::SampledTexture =>
panic!("Mismatched buffer binding for {:?}", decl),
| binding_model::BindingType::SampledTexture => {
panic!("Mismatched buffer binding for {:?}", decl)
}
};
assert_eq!(
bb.offset as hal::buffer::Offset % alignment,
@@ -1816,78 +1816,6 @@ pub extern "C" fn wgpu_device_create_swap_chain(
surface_id
}
#[no_mangle]
// C-callable entry point: copies `count` bytes from `data` into `buffer_id`
// starting at byte offset `start`, by recording and submitting a one-off
// transfer command buffer on the device's first queue.
//
// Safety (caller contract, not checked here): `data` must point to at least
// `count` readable bytes for the duration of the call.
pub extern "C" fn wgpu_buffer_set_sub_data(
buffer_id: BufferId,
start: u32,
count: u32,
data: *const u8,
) {
// Look up the buffer, then its owning device, through the global hub.
let buffer_guard = HUB.buffers.read();
let buffer = &buffer_guard[buffer_id];
let mut device_guard = HUB.devices.write();
let device = &mut device_guard[buffer.device_id.value];
//Note: this is just doing `update_buffer`, which is limited to 64KB
trace!("transit {:?} to transfer dst", buffer_id);
// Transition the buffer's tracked state to TRANSFER_DST; if it was in some
// other state, `into_source` yields the old state so we can emit a barrier.
let barrier = device
.trackers
.lock()
.buffers
.transit(
buffer_id,
&buffer.life_guard.ref_count,
resource::BufferUsageFlags::TRANSFER_DST,
TrackPermit::REPLACE,
)
.unwrap()
.into_source()
.map(|old| hal::memory::Barrier::Buffer {
states: conv::map_buffer_state(old)..hal::buffer::State::TRANSFER_WRITE,
target: &buffer.raw,
families: None,
range: None..None, //TODO: could be partial
});
// Note: this is not pretty. If we need one-time service command buffers,
// we'll need to have some internal abstractions for them to be safe.
let mut comb = device
.com_allocator
.allocate(buffer.device_id.clone(), &device.raw);
// mark as used by the next submission, conservatively
let submit_index = 1 + device.life_guard.submission_index.load(Ordering::Acquire);
unsafe {
// Record: barrier (if any) -> inline buffer update -> finish, then submit
// immediately on queue 0 with no semaphores or fence.
let raw = comb.raw.last_mut().unwrap();
raw.begin(
hal::command::CommandBufferFlags::ONE_TIME_SUBMIT,
hal::command::CommandBufferInheritanceInfo::default(),
);
raw.pipeline_barrier(
all_buffer_stages()..hal::pso::PipelineStage::TRANSFER,
hal::memory::Dependencies::empty(),
barrier,
);
raw.update_buffer(
&buffer.raw,
start as hal::buffer::Offset,
// SAFETY: relies on the caller contract above — `data` is valid for
// `count` bytes.
slice::from_raw_parts(data, count as usize),
);
raw.finish();
let submission = hal::queue::Submission {
command_buffers: iter::once(&*raw),
wait_semaphores: None,
signal_semaphores: None,
};
device.queue_group.queues[0]
.as_raw_mut()
.submit::<_, _, <back::Backend as hal::Backend>::Semaphore, _, _>(submission, None);
}
// Hand the command buffer back for recycling once `submit_index` retires.
// NOTE(review): submission is unfenced, so completion is presumably observed
// via the device's normal maintenance path — confirm against `maintain`.
device.com_allocator.after_submit(comb, submit_index);
}
#[no_mangle]
pub extern "C" fn wgpu_device_poll(device_id: DeviceId, force_wait: bool) {
HUB.devices.read()[device_id].maintain(force_wait);

View File

@@ -574,10 +574,6 @@ where
}
impl Buffer {
/// Writes the bytes of `data` into this buffer starting at byte `offset`,
/// by delegating to the native `wgpu_buffer_set_sub_data` entry point.
pub fn set_sub_data(&self, offset: u32, data: &[u8]) {
    let byte_count = data.len() as u32;
    let src_ptr = data.as_ptr();
    wgn::wgpu_buffer_set_sub_data(self.id, offset, byte_count, src_ptr);
}
pub fn map_read_async<T, F>(&self, start: u32, size: u32, callback: F)
where
T: 'static + Copy,