159: Add GPU timeout, fix active submission processing order r=kvark a=kvark

Fixes #156 

The timeout values will be made configurable in a later change.

Co-authored-by: Dzmitry Malyshau <kvarkus@gmail.com>
This commit is contained in:
bors[bot]
2019-05-10 12:00:47 +00:00
3 changed files with 33 additions and 20 deletions

View File

@@ -63,6 +63,7 @@ use std::{
use std::sync::atomic::AtomicBool;
const CLEANUP_WAIT_MS: u64 = 5000;
pub const MAX_COLOR_TARGETS: usize = 4;
pub fn all_buffer_stages() -> hal::pso::PipelineStage {
@@ -148,6 +149,7 @@ struct PendingResources<B: hal::Backend> {
referenced: Vec<(ResourceId, RefCount)>,
/// Resources that are not referenced any more but still used by GPU.
/// Grouped by submissions associated with a fence and a submission index.
/// The active submissions have to be stored in FIFO order: oldest come first.
active: Vec<ActiveSubmission<B>>,
/// Resources that are neither referenced or used, just pending
/// actual deletion.
@@ -173,29 +175,37 @@ impl<B: hal::Backend> PendingResources<B> {
/// Returns the last submission index that is done.
fn cleanup(&mut self, device: &B::Device, force_wait: bool) -> SubmissionIndex {
let mut last_done = 0;
if force_wait {
unsafe {
let status = unsafe {
device.wait_for_fences(
self.active.iter().map(|a| &a.fence),
hal::device::WaitFor::All,
!0,
CLEANUP_WAIT_MS * 1_000_000,
)
}
.unwrap();
};
assert_eq!(status, Ok(true), "GPU got stuck :(");
}
for i in (0..self.active.len()).rev() {
if force_wait || unsafe { device.get_fence_status(&self.active[i].fence).unwrap() } {
let a = self.active.swap_remove(i);
trace!("Active submission {} is done", a.index);
last_done = last_done.max(a.index);
self.free.extend(a.resources.into_iter().map(|(_, r)| r));
self.ready_to_map.extend(a.mapped);
unsafe {
device.destroy_fence(a.fence);
}
//TODO: enable when `is_sorted_by_key` is stable
//debug_assert!(self.active.is_sorted_by_key(|a| a.index));
let done_count = self.active
.iter()
.position(|a| unsafe {
!device.get_fence_status(&a.fence).unwrap()
})
.unwrap_or(self.active.len());
let last_done = if done_count != 0 {
self.active[done_count - 1].index
} else {
0
};
for a in self.active.drain(..done_count) {
trace!("Active submission {} is done", a.index);
self.free.extend(a.resources.into_iter().map(|(_, r)| r));
self.ready_to_map.extend(a.mapped);
unsafe {
device.destroy_fence(a.fence);
}
}

View File

@@ -216,7 +216,7 @@ macro_rules! typed_id {
fn epoch(&self) -> Epoch {
(self.raw()).1
}
}
}
)
}

View File

@@ -22,6 +22,8 @@ use std::{
pub type SwapImageEpoch = u16;
const FRAME_TIMEOUT_MS: u64 = 1000;
pub(crate) struct SwapChainLink<E> {
pub swap_chain_id: SwapChainId, //TODO: strongly
pub epoch: E,
@@ -152,9 +154,10 @@ pub extern "C" fn wgpu_swap_chain_get_next_texture(swap_chain_id: SwapChainId) -
swap_chain.acquired.push(image_index);
let frame = &mut swap_chain.frames[image_index as usize];
unsafe {
device.raw.wait_for_fence(&frame.fence, !0).unwrap();
}
let status = unsafe {
device.raw.wait_for_fence(&frame.fence, FRAME_TIMEOUT_MS * 1_000_000)
};
assert_eq!(status, Ok(true), "GPU got stuck on a frame (image {}) :(", image_index);
mem::swap(&mut frame.sem_available, &mut swap_chain.sem_available);
frame.need_waiting.store(true, Ordering::Release);