Reformat comments in wgpu-core. (#3102)

This commit is contained in:
Jim Blandy
2022-10-13 11:34:44 -07:00
committed by GitHub
parent fa4d8401e8
commit 215884184b
22 changed files with 525 additions and 289 deletions

View File

@@ -309,8 +309,9 @@ struct PushConstantChange {
enable: bool,
}
/// Break up possibly overlapping push constant ranges into a set of non-overlapping ranges
/// which contain all the stage flags of the original ranges. This allows us to zero out (or write any value)
/// Break up possibly overlapping push constant ranges into a set of
/// non-overlapping ranges which contain all the stage flags of the
/// original ranges. This allows us to zero out (or write any value)
/// to every possible value.
pub fn compute_nonoverlapping_ranges(
ranges: &[wgt::PushConstantRange],

View File

@@ -110,18 +110,28 @@ use hal::CommandEncoder as _;
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct RenderBundleEncoderDescriptor<'a> {
/// Debug label of the render bundle encoder. This will show up in graphics debuggers for easy identification.
/// Debug label of the render bundle encoder.
///
/// This will show up in graphics debuggers for easy identification.
pub label: Label<'a>,
/// The formats of the color attachments that this render bundle is capable to rendering to. This
/// must match the formats of the color attachments in the renderpass this render bundle is executed in.
/// The formats of the color attachments that this render bundle is capable
/// of rendering to.
///
/// This must match the formats of the color attachments in the
/// renderpass this render bundle is executed in.
pub color_formats: Cow<'a, [Option<wgt::TextureFormat>]>,
/// Information about the depth attachment that this render bundle is capable to rendering to. The format
/// must match the format of the depth attachments in the renderpass this render bundle is executed in.
/// Information about the depth attachment that this render bundle is
/// capable of rendering to.
///
/// The format must match the format of the depth attachments in the
/// renderpass this render bundle is executed in.
pub depth_stencil: Option<wgt::RenderBundleDepthStencil>,
/// Sample count this render bundle is capable of rendering to. This must match the pipelines and
/// the renderpasses it is used in.
/// Sample count this render bundle is capable of rendering to.
///
/// This must match the pipelines and the renderpasses it is used in.
pub sample_count: u32,
/// If this render bundle will rendering to multiple array layers in the attachments at the same time.
/// If this render bundle will render to multiple array layers in the
/// attachments at the same time.
pub multiview: Option<NonZeroU32>,
}

View File

@@ -266,12 +266,19 @@ pub(crate) fn clear_texture<A: HalApi>(
layers: range.layer_range.clone(),
};
// If we're in a texture-init usecase, we know that the texture is already tracked since whatever caused the init requirement,
// will have caused the usage tracker to be aware of the texture. Meaning, that it is safe to call call change_replace_tracked if the life_guard is already gone
// (i.e. the user no longer holds on to this texture).
// On the other hand, when coming via command_encoder_clear_texture, the life_guard is still there since in order to call it a texture object is needed.
// If we're in a texture-init usecase, we know that the texture is already
// tracked since whatever caused the init requirement, will have caused the
// usage tracker to be aware of the texture. Meaning, that it is safe to
// call change_replace_tracked if the life_guard is already gone (i.e.
// the user no longer holds on to this texture).
//
// We could in theory distinguish these two scenarios in the internal clear_texture api in order to remove this check and call the cheaper change_replace_tracked whenever possible.
// On the other hand, when coming via command_encoder_clear_texture, the
// life_guard is still there since in order to call it a texture object is
// needed.
//
// We could in theory distinguish these two scenarios in the internal
// clear_texture api in order to remove this check and call the cheaper
// change_replace_tracked whenever possible.
let dst_barrier = texture_tracker
.set_single(storage, dst_texture_id.0, selector, clear_usage)
.unwrap()
@@ -332,8 +339,13 @@ fn clear_texture_via_buffer_copies<A: hal::Api>(
// round down to a multiple of rows needed by the texture format
let max_rows_per_copy = max_rows_per_copy / format_desc.block_dimensions.1 as u32
* format_desc.block_dimensions.1 as u32;
assert!(max_rows_per_copy > 0, "Zero buffer size is too small to fill a single row of a texture with format {:?} and desc {:?}",
texture_desc.format, texture_desc.size);
assert!(
max_rows_per_copy > 0,
"Zero buffer size is too small to fill a single row \
of a texture with format {:?} and desc {:?}",
texture_desc.format,
texture_desc.size
);
let z_range = 0..(if texture_desc.dimension == wgt::TextureDimension::D3 {
mip_size.depth_or_array_layers
@@ -344,7 +356,8 @@ fn clear_texture_via_buffer_copies<A: hal::Api>(
for array_layer in range.layer_range.clone() {
// TODO: Only doing one layer at a time for volume textures right now.
for z in z_range.clone() {
// May need multiple copies for each subresource! However, we assume that we never need to split a row.
// May need multiple copies for each subresource! However, we
// assume that we never need to split a row.
let mut num_rows_left = mip_size.height;
while num_rows_left > 0 {
let num_rows = num_rows_left.min(max_rows_per_copy);
@@ -400,7 +413,8 @@ fn clear_texture_via_render_passes<A: hal::Api>(
for mip_level in range.mip_range {
let extent = extent_base.mip_level_size(mip_level, is_3d_texture);
let layer_or_depth_range = if dst_texture.desc.dimension == wgt::TextureDimension::D3 {
// TODO: We assume that we're allowed to do clear operations on volume texture slices, this is not properly specified.
// TODO: We assume that we're allowed to do clear operations on
// volume texture slices, this is not properly specified.
0..extent.depth_or_array_layers
} else {
range.layer_range.clone()

View File

@@ -268,7 +268,8 @@ impl<A: HalApi> State<A> {
Ok(())
}
// `extra_buffer` is there to represent the indirect buffer that is also part of the usage scope.
// `extra_buffer` is there to represent the indirect buffer that is also
// part of the usage scope.
fn flush_states(
&mut self,
raw_encoder: &mut A::CommandEncoder,
@@ -391,7 +392,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
raw.begin_compute_pass(&hal_desc);
}
// Immediate texture inits required because of prior discards. Need to be inserted before texture reads.
// Immediate texture inits required because of prior discards. Need to
// be inserted before texture reads.
let mut pending_discard_init_fixups = SurfacesInDiscardState::new();
for command in base.commands {
@@ -763,8 +765,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
cmd_buf.status = CommandEncoderStatus::Recording;
// There can be entries left in pending_discard_init_fixups if a bind group was set, but not used (i.e. no Dispatch occurred)
// However, we already altered the discard/init_action state on this cmd_buf, so we need to apply the promised changes.
// There can be entries left in pending_discard_init_fixups if a bind
// group was set, but not used (i.e. no Dispatch occurred)
//
// However, we already altered the discard/init_action state on this
// cmd_buf, so we need to apply the promised changes.
fixup_discarded_surfaces(
pending_discard_init_fixups.into_iter(),
raw,

View File

@@ -27,10 +27,12 @@ pub(crate) type SurfacesInDiscardState = Vec<TextureSurfaceDiscard>;
#[derive(Default)]
pub(crate) struct CommandBufferTextureMemoryActions {
// init actions describe the tracker actions that we need to be executed before the command buffer is executed
/// The tracker actions that we need to be executed before the command
/// buffer is executed.
init_actions: Vec<TextureInitTrackerAction>,
// discards describe all the discards that haven't been followed by init again within the command buffer
// i.e. everything in this list resets the texture init state *after* the command buffer execution
/// All the discards that haven't been followed by init again within the
/// command buffer i.e. everything in this list resets the texture init
/// state *after* the command buffer execution
discards: Vec<TextureSurfaceDiscard>,
}
@@ -54,19 +56,22 @@ impl CommandBufferTextureMemoryActions {
) -> SurfacesInDiscardState {
let mut immediately_necessary_clears = SurfacesInDiscardState::new();
// Note that within a command buffer we may stack arbitrary memory init actions on the same texture
// Since we react to them in sequence, they are going to be dropped again at queue submit
// Note that within a command buffer we may stack arbitrary memory init
// actions on the same texture. Since we react to them in sequence, they
// are going to be dropped again at queue submit.
//
// We don't need to add MemoryInitKind::NeedsInitializedMemory to init_actions if a surface is part of the discard list.
// But that would mean splitting up the action which is more than we'd win here.
// We don't need to add MemoryInitKind::NeedsInitializedMemory to
// init_actions if a surface is part of the discard list. But that would
// mean splitting up the action which is more than we'd win here.
self.init_actions
.extend(match texture_guard.get(action.id) {
Ok(texture) => texture.initialization_status.check_action(action),
Err(_) => return immediately_necessary_clears, // texture no longer exists
});
// We expect very few discarded surfaces at any point in time which is why a simple linear search is likely best.
// (i.e. most of the time self.discards is empty!)
// We expect very few discarded surfaces at any point in time which is
// why a simple linear search is likely best. (i.e. most of the time
// self.discards is empty!)
let init_actions = &mut self.init_actions;
self.discards.retain(|discarded_surface| {
if discarded_surface.texture == action.id
@@ -79,7 +84,9 @@ impl CommandBufferTextureMemoryActions {
if let MemoryInitKind::NeedsInitializedMemory = action.kind {
immediately_necessary_clears.push(discarded_surface.clone());
// Mark surface as implicitly initialized (this is relevant because it might have been uninitialized prior to discarding
// Mark surface as implicitly initialized (this is relevant
// because it might have been uninitialized prior to
// discarding
init_actions.push(TextureInitTrackerAction {
id: discarded_surface.texture,
range: TextureInitRange {
@@ -99,7 +106,8 @@ impl CommandBufferTextureMemoryActions {
immediately_necessary_clears
}
// Shortcut for register_init_action when it is known that the action is an implicit init, not requiring any immediate resource init.
// Shortcut for register_init_action when it is known that the action is an
// implicit init, not requiring any immediate resource init.
pub(crate) fn register_implicit_init<A: hal::Api>(
&mut self,
id: id::Valid<TextureId>,
@@ -118,7 +126,9 @@ impl CommandBufferTextureMemoryActions {
}
}
// Utility function that takes discarded surfaces from (several calls to) register_init_action and initializes them on the spot.
// Utility function that takes discarded surfaces from (several calls to)
// register_init_action and initializes them on the spot.
//
// Takes care of barriers as well!
pub(crate) fn fixup_discarded_surfaces<
A: HalApi,
@@ -148,14 +158,16 @@ pub(crate) fn fixup_discarded_surfaces<
}
impl<A: HalApi> BakedCommands<A> {
// inserts all buffer initializations that are going to be needed for executing the commands and updates resource init states accordingly
// inserts all buffer initializations that are going to be needed for
// executing the commands and updates resource init states accordingly
pub(crate) fn initialize_buffer_memory(
&mut self,
device_tracker: &mut Tracker<A>,
buffer_guard: &mut Storage<Buffer<A>, id::BufferId>,
) -> Result<(), DestroyedBufferError> {
// Gather init ranges for each buffer so we can collapse them.
// It is not possible to do this at an earlier point since previously executed command buffer change the resource init state.
// It is not possible to do this at an earlier point since previously
// executed command buffers change the resource init state.
let mut uninitialized_ranges_per_buffer = FastHashMap::default();
for buffer_use in self.buffer_memory_init_actions.drain(..) {
let buffer = buffer_guard
@@ -194,15 +206,19 @@ impl<A: HalApi> BakedCommands<A> {
// Collapse touching ranges.
ranges.sort_by_key(|r| r.start);
for i in (1..ranges.len()).rev() {
assert!(ranges[i - 1].end <= ranges[i].start); // The memory init tracker made sure of this!
// The memory init tracker made sure of this!
assert!(ranges[i - 1].end <= ranges[i].start);
if ranges[i].start == ranges[i - 1].end {
ranges[i - 1].end = ranges[i].end;
ranges.swap_remove(i); // Ordering not important at this point
}
}
// Don't do use_replace since the buffer may already no longer have a ref_count.
// However, we *know* that it is currently in use, so the tracker must already know about it.
// Don't do use_replace since the buffer may already no longer have
// a ref_count.
//
// However, we *know* that it is currently in use, so the tracker
// must already know about it.
let transition = device_tracker
.buffers
.set_single(buffer_guard, buffer_id, hal::BufferUses::COPY_DST)
@@ -223,8 +239,20 @@ impl<A: HalApi> BakedCommands<A> {
}
for range in ranges.iter() {
assert!(range.start % wgt::COPY_BUFFER_ALIGNMENT == 0, "Buffer {:?} has an uninitialized range with a start not aligned to 4 (start was {})", raw_buf, range.start);
assert!(range.end % wgt::COPY_BUFFER_ALIGNMENT == 0, "Buffer {:?} has an uninitialized range with an end not aligned to 4 (end was {})", raw_buf, range.end);
assert!(
range.start % wgt::COPY_BUFFER_ALIGNMENT == 0,
"Buffer {:?} has an uninitialized range with a start \
not aligned to 4 (start was {})",
raw_buf,
range.start
);
assert!(
range.end % wgt::COPY_BUFFER_ALIGNMENT == 0,
"Buffer {:?} has an uninitialized range with an end \
not aligned to 4 (end was {})",
raw_buf,
range.end
);
unsafe {
self.encoder.clear_buffer(raw_buf, range.clone());
@@ -234,8 +262,10 @@ impl<A: HalApi> BakedCommands<A> {
Ok(())
}
// inserts all texture initializations that are going to be needed for executing the commands and updates resource init states accordingly
// any textures that are left discarded by this command buffer will be marked as uninitialized
// inserts all texture initializations that are going to be needed for
// executing the commands and updates resource init states accordingly. Any
// textures that are left discarded by this command buffer will be marked as
// uninitialized
pub(crate) fn initialize_texture_memory(
&mut self,
device_tracker: &mut Tracker<A>,
@@ -290,7 +320,9 @@ impl<A: HalApi> BakedCommands<A> {
}
}
// Now that all buffers/textures have the proper init state for before cmdbuf start, we discard init states for textures it left discarded after its execution.
// Now that all buffers/textures have the proper init state for before
// cmdbuf start, we discard init states for textures it left discarded
// after its execution.
for surface_discard in self.texture_memory_actions.discards.iter() {
let texture = texture_guard
.get_mut(surface_discard.texture)

View File

@@ -511,7 +511,8 @@ impl BindGroupStateChange {
) -> bool {
// For now never deduplicate bind groups with dynamic offsets.
if offset_length == 0 {
// If this get returns None, that means we're well over the limit, so let the call through to get a proper error
// If this get returns None, that means we're well over the limit,
// so let the call through to get a proper error
if let Some(current_bind_group) = self.last_states.get_mut(index as usize) {
// Bail out if we're binding the same bind group.
if current_bind_group.set_and_check_redundant(bind_group_id) {

View File

@@ -180,7 +180,8 @@ impl<A: HalApi> QuerySet<A> {
query_index: u32,
reset_state: Option<&mut QueryResetMap<A>>,
) -> Result<&A::QuerySet, QueryUseError> {
// We need to defer our resets because we are in a renderpass, add the usage to the reset map.
// We need to defer our resets because we are in a renderpass,
// add the usage to the reset map.
if let Some(reset) = reset_state {
let used = reset.use_query_set(query_set_id, self, query_index);
if used {

View File

@@ -63,7 +63,9 @@ pub enum LoadOp {
#[cfg_attr(any(feature = "serial-pass", feature = "replay"), derive(Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "kebab-case"))]
pub enum StoreOp {
/// Discards the content of the render target. If you don't care about the contents of the target, this can be faster.
/// Discards the content of the render target.
///
/// If you don't care about the contents of the target, this can be faster.
Discard = 0,
/// Store the result of the renderpass.
Store = 1,
@@ -75,15 +77,20 @@ pub enum StoreOp {
#[cfg_attr(any(feature = "serial-pass", feature = "trace"), derive(Serialize))]
#[cfg_attr(any(feature = "serial-pass", feature = "replay"), derive(Deserialize))]
pub struct PassChannel<V> {
/// Operation to perform to the output attachment at the start of a renderpass. This must be clear if it
/// is the first renderpass rendering to a swap chain image.
/// Operation to perform to the output attachment at the start of a
/// renderpass.
///
/// This must be clear if it is the first renderpass rendering to a swap
/// chain image.
pub load_op: LoadOp,
/// Operation to perform to the output attachment at the end of a renderpass.
pub store_op: StoreOp,
/// If load_op is [`LoadOp::Clear`], the attachment will be cleared to this color.
/// If load_op is [`LoadOp::Clear`], the attachment will be cleared to this
/// color.
pub clear_value: V,
/// If true, the relevant channel is not changed by a renderpass, and the corresponding attachment
/// can be used inside the pass by other read-only usages.
/// If true, the relevant channel is not changed by a renderpass, and the
/// corresponding attachment can be used inside the pass by other read-only
/// usages.
pub read_only: bool,
}
@@ -596,7 +603,8 @@ type AttachmentDataVec<T> = ArrayVec<T, MAX_TOTAL_ATTACHMENTS>;
struct RenderPassInfo<'a, A: HalApi> {
context: RenderPassContext,
usage_scope: UsageScope<A>,
render_attachments: AttachmentDataVec<RenderAttachment<'a>>, // All render attachments, including depth/stencil
/// All render attachments, including depth/stencil
render_attachments: AttachmentDataVec<RenderAttachment<'a>>,
is_depth_read_only: bool,
is_stencil_read_only: bool,
extent: wgt::Extent3d,
@@ -634,8 +642,9 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
);
}
if channel.store_op == StoreOp::Discard {
// the discard happens at the *end* of a pass
// but recording the discard right away be alright since the texture can't be used during the pass anyways
// the discard happens at the *end* of a pass, but recording the
// discard right away should be alright since the texture can't be used
// during the pass anyways
texture_memory_actions.discard(TextureSurfaceDiscard {
texture: view.parent_id.value.0,
mip_level: view.selector.mips.start,
@@ -770,15 +779,27 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
&mut pending_discard_init_fixups,
);
} else {
// This is the only place (anywhere in wgpu) where Stencil & Depth init state can diverge.
// To safe us the overhead of tracking init state of texture aspects everywhere,
// we're going to cheat a little bit in order to keep the init state of both Stencil and Depth aspects in sync.
// The expectation is that we hit this path extremely rarely!
// This is the only place (anywhere in wgpu) where Stencil &
// Depth init state can diverge.
//
// To save us the overhead of tracking init state of texture
// aspects everywhere, we're going to cheat a little bit in
// order to keep the init state of both Stencil and Depth
// aspects in sync. The expectation is that we hit this path
// extremely rarely!
//
// Diverging LoadOp, i.e. Load + Clear:
// Record MemoryInitKind::NeedsInitializedMemory for the entire surface, a bit wasteful on unit but no negative effect!
// Rationale: If the loaded channel is uninitialized it needs clearing, the cleared channel doesn't care. (If everything is already initialized nothing special happens)
// (possible minor optimization: Clear caused by NeedsInitializedMemory should know that it doesn't need to clear the aspect that was set to C)
//
// Record MemoryInitKind::NeedsInitializedMemory for the entire
// surface, a bit wasteful on unit but no negative effect!
//
// Rationale: If the loaded channel is uninitialized it needs
// clearing, the cleared channel doesn't care. (If everything is
// already initialized nothing special happens)
//
// (possible minor optimization: Clear caused by
// NeedsInitializedMemory should know that it doesn't need to
// clear the aspect that was set to C)
let need_init_beforehand =
at.depth.load_op == LoadOp::Load || at.stencil.load_op == LoadOp::Load;
if need_init_beforehand {
@@ -795,8 +816,12 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
}
// Diverging Store, i.e. Discard + Store:
// Immediately zero out channel that is set to discard after we're done with the render pass.
// This allows us to set the entire surface to MemoryInitKind::ImplicitlyInitialized (if it isn't already set to NeedsInitializedMemory).
//
// Immediately zero out channel that is set to discard after
// we're done with the render pass. This allows us to set the
// entire surface to MemoryInitKind::ImplicitlyInitialized (if
// it isn't already set to NeedsInitializedMemory).
//
// (possible optimization: Delay and potentially drop this zeroing)
if at.depth.store_op != at.stencil.store_op {
if !need_init_beforehand {
@@ -1026,10 +1051,15 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
};
}
// If either only stencil or depth was discarded, we put in a special clear pass to keep the init status of the aspects in sync.
// We do this so we don't need to track init state for depth/stencil aspects individually.
// Note that we don't go the usual route of "brute force" initializing the texture when need arises here,
// since this path is actually something a user may genuinely want (where as the other cases are more seen along the lines as gracefully handling a user error).
// If either only stencil or depth was discarded, we put in a special
// clear pass to keep the init status of the aspects in sync. We do this
// so we don't need to track init state for depth/stencil aspects
// individually.
//
// Note that we don't go the usual route of "brute force" initializing
// the texture when need arises here, since this path is actually
// something a user may genuinely want (whereas the other cases are
// more seen along the lines of gracefully handling a user error).
if let Some((aspect, view)) = self.divergent_discarded_depth_stencil_aspect {
let (depth_ops, stencil_ops) = if aspect == wgt::TextureAspect::DepthOnly {
(
@@ -1631,7 +1661,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
};
state.is_ready(indexed).map_pass_err(scope)?;
//TODO: validate that base_vertex + max_index() is within the provided range
//TODO: validate that base_vertex + max_index() is
// within the provided range
let last_index = first_index + index_count;
let index_limit = state.index.limit;
if last_index > index_limit {

View File

@@ -201,8 +201,15 @@ pub(crate) fn extract_texture_selector<A: hal::Api>(
Ok((selector, base, format))
}
/// Function copied with some modifications from webgpu standard <https://gpuweb.github.io/gpuweb/#copy-between-buffer-texture>
/// If successful, returns (number of buffer bytes required for this copy, number of bytes between array layers).
/// WebGPU's [validating linear texture data][vltd] algorithm.
///
/// Copied with some modifications from WebGPU standard.
///
/// If successful, returns a pair `(bytes, stride)`, where:
/// - `bytes` is the number of buffer bytes required for this copy, and
/// - `stride` is the number of bytes between array layers.
///
/// [vltd]: https://gpuweb.github.io/gpuweb/#abstract-opdef-validating-linear-texture-data
pub(crate) fn validate_linear_texture_data(
layout: &wgt::ImageDataLayout,
format: wgt::TextureFormat,
@@ -292,8 +299,13 @@ pub(crate) fn validate_linear_texture_data(
Ok((required_bytes_in_copy, bytes_per_image))
}
/// Function copied with minor modifications from webgpu standard <https://gpuweb.github.io/gpuweb/#valid-texture-copy-range>
/// WebGPU's [validating texture copy range][vtcr] algorithm.
///
/// Copied with minor modifications from WebGPU standard.
///
/// Returns the HAL copy extent and the layer count.
///
/// [vtcr]: https://gpuweb.github.io/gpuweb/#valid-texture-copy-range
pub(crate) fn validate_texture_copy_range(
texture_copy_view: &ImageCopyTexture,
desc: &wgt::TextureDescriptor<()>,
@@ -445,7 +457,10 @@ fn handle_texture_init<A: HalApi>(
}
}
// Ensures the source texture of a transfer is in the right initialization state and records the state for after the transfer operation.
/// Prepare a transfer's source texture.
///
/// Ensure the source texture of a transfer is in the right initialization
/// state, and record the state for after the transfer operation.
fn handle_src_texture_init<A: HalApi>(
cmd_buf: &mut CommandBuffer<A>,
device: &Device<A>,
@@ -468,7 +483,10 @@ fn handle_src_texture_init<A: HalApi>(
Ok(())
}
// Ensures the destination texture of a transfer is in the right initialization state and records the state for after the transfer operation.
/// Prepare a transfer's destination texture.
///
/// Ensure the destination texture of a transfer is in the right initialization
/// state, and record the state for after the transfer operation.
fn handle_dst_texture_init<A: HalApi>(
cmd_buf: &mut CommandBuffer<A>,
device: &Device<A>,
@@ -480,8 +498,10 @@ fn handle_dst_texture_init<A: HalApi>(
.get(destination.texture)
.map_err(|_| TransferError::InvalidTexture(destination.texture))?;
// Attention: If we don't write full texture subresources, we need to a full clear first since we don't track subrects.
// This means that in rare cases even a *destination* texture of a transfer may need an immediate texture init.
// Attention: If we don't write full texture subresources, we need to do a full
// clear first since we don't track subrects. This means that in rare cases
// even a *destination* texture of a transfer may need an immediate texture
// init.
let dst_init_kind = if has_copy_partial_init_tracker_coverage(
copy_size,
destination.mip_level,
@@ -667,7 +687,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (dst_range, dst_base, _) =
extract_texture_selector(destination, copy_size, &*texture_guard)?;
// Handle texture init *before* dealing with barrier transitions so we have an easier time inserting "immediate-inits" that may be required by prior discards in rare cases.
// Handle texture init *before* dealing with barrier transitions so we
// have an easier time inserting "immediate-inits" that may be required
// by prior discards in rare cases.
handle_dst_texture_init(cmd_buf, device, destination, copy_size, &texture_guard)?;
let (src_buffer, src_pending) = cmd_buf
@@ -794,7 +816,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (src_range, src_base, _) =
extract_texture_selector(source, copy_size, &*texture_guard)?;
// Handle texture init *before* dealing with barrier transitions so we have an easier time inserting "immediate-inits" that may be required by prior discards in rare cases.
// Handle texture init *before* dealing with barrier transitions so we
// have an easier time inserting "immediate-inits" that may be required
// by prior discards in rare cases.
handle_src_texture_init(cmd_buf, device, source, copy_size, &texture_guard)?;
let (src_texture, src_pending) = cmd_buf
@@ -956,7 +980,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
return Err(TransferError::MismatchedAspects.into());
}
// Handle texture init *before* dealing with barrier transitions so we have an easier time inserting "immediate-inits" that may be required by prior discards in rare cases.
// Handle texture init *before* dealing with barrier transitions so we
// have an easier time inserting "immediate-inits" that may be required
// by prior discards in rare cases.
handle_src_texture_init(cmd_buf, device, source, copy_size, &texture_guard)?;
handle_dst_texture_init(cmd_buf, device, destination, copy_size, &texture_guard)?;