[naga hlsl-out] Avoid undefined behaviour for integer division, modulo, negation, and abs

Emit helper functions for MathFunction::Abs and UnaryOperator::Negate
with a signed integer scalar or vector operand, and for
BinaryOperator::Divide and BinaryOperator::Modulo with signed or
unsigned integer scalar or vector operands.

Abs and Negate are written to avoid signed integer overflow when the
operand equals INT_MIN. This is achieved by bitcasting the value to
unsigned, using the negation operator, then bitcasting the result back
to signed. As HLSL's bitcast functions asint() and asuint() only work
for 32-bit types, we only use this workaround in such cases.

Division and Modulo avoid undefined behaviour for INT_MIN / -1 and
divide-by-zero by using 1 for the divisor instead. Additionally we
avoid undefined behaviour when using the modulo operator on operands
whose signs differ by using the equation from the WGSL spec (based on
division, subtraction and multiplication) rather than HLSL's modulus
operator.
This commit is contained in:
Jamie Nicol
2025-01-24 12:49:13 +00:00
committed by Jim Blandy
parent 88f6e9b8f8
commit b6186ba332
8 changed files with 384 additions and 48 deletions

View File

@@ -28,7 +28,10 @@ int dim_1d = NagaDimensions1D(image_1d);
use super::{
super::FunctionCtx,
writer::{EXTRACT_BITS_FUNCTION, INSERT_BITS_FUNCTION},
writer::{
ABS_FUNCTION, DIV_FUNCTION, EXTRACT_BITS_FUNCTION, INSERT_BITS_FUNCTION, MOD_FUNCTION,
NEG_FUNCTION,
},
BackendResult,
};
use crate::{arena::Handle, proc::NameKey, ScalarKind};
@@ -75,6 +78,23 @@ pub(super) struct WrappedZeroValue {
pub(super) ty: Handle<crate::Type>,
}
/// Key identifying a wrapped unary-operator helper function, used to ensure
/// each distinct helper is emitted at most once per output module.
#[derive(Clone, Copy, Debug, Hash, Eq, Ord, PartialEq, PartialOrd)]
pub(super) struct WrappedUnaryOp {
    /// The operator being wrapped. Currently only `Negate` applied to 32-bit
    /// signed integers is wrapped by the writer.
    pub(super) op: crate::UnaryOperator,
    // This can only represent scalar or vector types. If we ever need to wrap
    // unary ops with other types, we'll need a better representation.
    /// Operand type as (vector size, scalar); `None` means a plain scalar.
    pub(super) ty: (Option<crate::VectorSize>, crate::Scalar),
}
/// Key identifying a wrapped binary-operator helper function (`naga_div` /
/// `naga_mod` overloads), used to ensure each distinct helper is emitted at
/// most once per output module.
#[derive(Clone, Copy, Debug, Hash, Eq, Ord, PartialEq, PartialOrd)]
pub(super) struct WrappedBinaryOp {
    /// The operator being wrapped. The writer wraps `Divide` and `Modulo`
    /// with integer operands.
    pub(super) op: crate::BinaryOperator,
    // This can only represent scalar or vector types. If we ever need to wrap
    // binary ops with other types, we'll need a better representation.
    /// Left/right operand types as (vector size, scalar); `None` vector size
    /// means a plain scalar. Kept separate because mixed scalar/vector
    /// operands produce distinct helper overloads.
    pub(super) left_ty: (Option<crate::VectorSize>, crate::Scalar),
    pub(super) right_ty: (Option<crate::VectorSize>, crate::Scalar),
}
/// HLSL backend requires its own `ImageQuery` enum.
///
/// It is used inside `WrappedImageQuery` and should be unique per ImageQuery function.
@@ -1105,6 +1125,202 @@ impl<W: Write> super::Writer<'_, W> {
// End of function body
writeln!(self.out, "}}")?;
}
crate::MathFunction::Abs
if matches!(
func_ctx.resolve_type(arg, &module.types).scalar(),
Some(crate::Scalar {
kind: ScalarKind::Sint,
width: 4,
})
) =>
{
let arg_ty = func_ctx.resolve_type(arg, &module.types);
let scalar = arg_ty.scalar().unwrap();
let components = arg_ty.components();
let wrapped = WrappedMath {
fun,
scalar,
components,
};
if !self.wrapped.math.insert(wrapped) {
continue;
}
self.write_value_type(module, arg_ty)?;
write!(self.out, " {ABS_FUNCTION}(")?;
self.write_value_type(module, arg_ty)?;
writeln!(self.out, " val) {{")?;
let level = crate::back::Level(1);
writeln!(
self.out,
"{level}return val >= 0 ? val : asint(-asuint(val));"
)?;
writeln!(self.out, "}}")?;
writeln!(self.out)?;
}
_ => {}
}
}
}
Ok(())
}
/// Scans the function's expressions and emits a `naga_neg` helper for every
/// distinct operand type that needs one.
///
/// Only `Negate` on 32-bit signed integer scalars/vectors is wrapped: the
/// helper negates via unsigned arithmetic (`asint(-asuint(val))`) so that
/// negating `INT_MIN` wraps instead of hitting signed-overflow undefined
/// behaviour. Each helper is emitted at most once, tracked via
/// `self.wrapped.unary_op`.
pub(super) fn write_wrapped_unary_ops(
    &mut self,
    module: &crate::Module,
    func_ctx: &FunctionCtx,
) -> BackendResult {
    for (_, expression) in func_ctx.expressions.iter() {
        // Only unary expressions are of interest here.
        let crate::Expression::Unary { op, expr } = *expression else {
            continue;
        };
        let expr_ty = func_ctx.resolve_type(expr, &module.types);
        // Only scalar/vector operand types can be represented by
        // `WrappedUnaryOp`; anything else needs no wrapping.
        let Some((vector_size, scalar)) = expr_ty.vector_size_and_scalar() else {
            continue;
        };
        // The workaround relies on asint()/asuint(), which only exist for
        // 32-bit types, so restrict it to 4-byte signed integers.
        if !matches!(
            (op, scalar),
            (
                crate::UnaryOperator::Negate,
                crate::Scalar {
                    kind: ScalarKind::Sint,
                    width: 4,
                }
            )
        ) {
            continue;
        }
        let wrapped = WrappedUnaryOp {
            op,
            ty: (vector_size, scalar),
        };
        // Skip types whose helper has already been written.
        if !self.wrapped.unary_op.insert(wrapped) {
            continue;
        }
        // e.g. `int2 naga_neg(int2 val) {`
        self.write_value_type(module, expr_ty)?;
        write!(self.out, " {NEG_FUNCTION}(")?;
        self.write_value_type(module, expr_ty)?;
        writeln!(self.out, " val) {{")?;
        let indent = crate::back::Level(1);
        writeln!(self.out, "{indent}return asint(-asuint(val));")?;
        writeln!(self.out, "}}")?;
        writeln!(self.out)?;
    }
    Ok(())
}
pub(super) fn write_wrapped_binary_ops(
&mut self,
module: &crate::Module,
func_ctx: &FunctionCtx,
) -> BackendResult {
for (expr_handle, expression) in func_ctx.expressions.iter() {
if let crate::Expression::Binary { op, left, right } = *expression {
let expr_ty = func_ctx.resolve_type(expr_handle, &module.types);
let left_ty = func_ctx.resolve_type(left, &module.types);
let right_ty = func_ctx.resolve_type(right, &module.types);
match (op, expr_ty.scalar()) {
(
crate::BinaryOperator::Divide,
Some(
scalar @ crate::Scalar {
kind: ScalarKind::Sint | ScalarKind::Uint,
..
},
),
) => {
let Some(left_wrapped_ty) = left_ty.vector_size_and_scalar() else {
continue;
};
let Some(right_wrapped_ty) = right_ty.vector_size_and_scalar() else {
continue;
};
let wrapped = WrappedBinaryOp {
op,
left_ty: left_wrapped_ty,
right_ty: right_wrapped_ty,
};
if !self.wrapped.binary_op.insert(wrapped) {
continue;
}
self.write_value_type(module, expr_ty)?;
write!(self.out, " {DIV_FUNCTION}(")?;
self.write_value_type(module, left_ty)?;
write!(self.out, " lhs, ")?;
self.write_value_type(module, right_ty)?;
writeln!(self.out, " rhs) {{")?;
let level = crate::back::Level(1);
match scalar.kind {
ScalarKind::Sint => {
let min = -1i64 << (scalar.width as u32 * 8 - 1);
writeln!(self.out, "{level}return lhs / (((lhs == {min} & rhs == -1) | (rhs == 0)) ? 1 : rhs);")?
}
ScalarKind::Uint => {
writeln!(self.out, "{level}return lhs / (rhs == 0u ? 1u : rhs);")?
}
_ => unreachable!(),
}
writeln!(self.out, "}}")?;
writeln!(self.out)?;
}
(
crate::BinaryOperator::Modulo,
Some(
scalar @ crate::Scalar {
kind: ScalarKind::Sint | ScalarKind::Uint,
..
},
),
) => {
let Some(left_wrapped_ty) = left_ty.vector_size_and_scalar() else {
continue;
};
let Some(right_wrapped_ty) = right_ty.vector_size_and_scalar() else {
continue;
};
let wrapped = WrappedBinaryOp {
op,
left_ty: left_wrapped_ty,
right_ty: right_wrapped_ty,
};
if !self.wrapped.binary_op.insert(wrapped) {
continue;
}
self.write_value_type(module, expr_ty)?;
write!(self.out, " {MOD_FUNCTION}(")?;
self.write_value_type(module, left_ty)?;
write!(self.out, " lhs, ")?;
self.write_value_type(module, right_ty)?;
writeln!(self.out, " rhs) {{")?;
let level = crate::back::Level(1);
match scalar.kind {
ScalarKind::Sint => {
let min = -1i64 << (scalar.width as u32 * 8 - 1);
write!(self.out, "{level}")?;
self.write_value_type(module, right_ty)?;
writeln!(self.out, " divisor = ((lhs == {min} & rhs == -1) | (rhs == 0)) ? 1 : rhs;")?;
writeln!(
self.out,
"{level}return lhs - (lhs / divisor) * divisor;"
)?
}
ScalarKind::Uint => {
writeln!(self.out, "{level}return lhs % (rhs == 0u ? 1u : rhs);")?
}
_ => unreachable!(),
}
writeln!(self.out, "}}")?;
writeln!(self.out)?;
}
_ => {}
}
}
@@ -1120,6 +1336,8 @@ impl<W: Write> super::Writer<'_, W> {
func_ctx: &FunctionCtx,
) -> BackendResult {
self.write_wrapped_math_functions(module, func_ctx)?;
self.write_wrapped_unary_ops(module, func_ctx)?;
self.write_wrapped_binary_ops(module, func_ctx)?;
self.write_wrapped_expression_functions(module, func_ctx.expressions, Some(func_ctx))?;
self.write_wrapped_zero_value_functions(module, func_ctx.expressions)?;

View File

@@ -822,6 +822,10 @@ pub const RESERVED: &[&str] = &[
super::writer::INSERT_BITS_FUNCTION,
super::writer::SAMPLER_HEAP_VAR,
super::writer::COMPARISON_SAMPLER_HEAP_VAR,
super::writer::ABS_FUNCTION,
super::writer::DIV_FUNCTION,
super::writer::MOD_FUNCTION,
super::writer::NEG_FUNCTION,
];
// DXC scalar types, from https://github.com/microsoft/DirectXShaderCompiler/blob/18c9e114f9c314f93e68fbc72ce207d4ed2e65ae/tools/clang/lib/AST/ASTContextHLSL.cpp#L48-L254

View File

@@ -365,6 +365,8 @@ struct Wrapped {
struct_matrix_access: crate::FastHashSet<help::WrappedStructMatrixAccess>,
mat_cx2s: crate::FastHashSet<help::WrappedMatCx2>,
math: crate::FastHashSet<help::WrappedMath>,
unary_op: crate::FastHashSet<help::WrappedUnaryOp>,
binary_op: crate::FastHashSet<help::WrappedBinaryOp>,
/// If true, the sampler heaps have been written out.
sampler_heaps: bool,
// Mapping from SamplerIndexBufferKey to the name the namer returned.
@@ -379,6 +381,8 @@ impl Wrapped {
self.struct_matrix_access.clear();
self.mat_cx2s.clear();
self.math.clear();
self.unary_op.clear();
self.binary_op.clear();
}
}

View File

@@ -27,6 +27,10 @@ pub(crate) const EXTRACT_BITS_FUNCTION: &str = "naga_extractBits";
pub(crate) const INSERT_BITS_FUNCTION: &str = "naga_insertBits";
pub(crate) const SAMPLER_HEAP_VAR: &str = "nagaSamplerHeap";
pub(crate) const COMPARISON_SAMPLER_HEAP_VAR: &str = "nagaComparisonSamplerHeap";
pub(crate) const ABS_FUNCTION: &str = "naga_abs";
pub(crate) const DIV_FUNCTION: &str = "naga_div";
pub(crate) const MOD_FUNCTION: &str = "naga_mod";
pub(crate) const NEG_FUNCTION: &str = "naga_neg";
struct EpStructMember {
name: String,
@@ -2800,19 +2804,39 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
write!(self.out, ")")?;
}
// TODO: handle undefined behavior of BinaryOperator::Modulo
//
// sint:
// if right == 0 return 0
// if left == min(type_of(left)) && right == -1 return 0
// if sign(left) != sign(right) return result as defined by WGSL
//
// uint:
// if right == 0 return 0
//
// float:
// if right == 0 return ? see https://github.com/gpuweb/gpuweb/issues/2798
Expression::Binary {
op: crate::BinaryOperator::Divide,
left,
right,
} if matches!(
func_ctx.resolve_type(expr, &module.types).scalar_kind(),
Some(ScalarKind::Sint | ScalarKind::Uint)
) =>
{
write!(self.out, "{DIV_FUNCTION}(")?;
self.write_expr(module, left, func_ctx)?;
write!(self.out, ", ")?;
self.write_expr(module, right, func_ctx)?;
write!(self.out, ")")?;
}
Expression::Binary {
op: crate::BinaryOperator::Modulo,
left,
right,
} if matches!(
func_ctx.resolve_type(expr, &module.types).scalar_kind(),
Some(ScalarKind::Sint | ScalarKind::Uint)
) =>
{
write!(self.out, "{MOD_FUNCTION}(")?;
self.write_expr(module, left, func_ctx)?;
write!(self.out, ", ")?;
self.write_expr(module, right, func_ctx)?;
write!(self.out, ")")?;
}
// TODO: if right == 0 return ? see https://github.com/gpuweb/gpuweb/issues/2798
// While HLSL supports float operands with the % operator it is only
// defined in cases where both sides are either positive or negative.
Expression::Binary {
@@ -3312,7 +3336,15 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
Expression::Unary { op, expr } => {
// https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/dx-graphics-hlsl-operators#unary-operators
let op_str = match op {
crate::UnaryOperator::Negate => "-",
crate::UnaryOperator::Negate => {
match func_ctx.resolve_type(expr, &module.types).scalar() {
Some(Scalar {
kind: ScalarKind::Sint,
width: 4,
}) => NEG_FUNCTION,
_ => "-",
}
}
crate::UnaryOperator::LogicalNot => "!",
crate::UnaryOperator::BitwiseNot => "~",
};
@@ -3411,7 +3443,13 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
let fun = match fun {
// comparison
Mf::Abs => Function::Regular("abs"),
Mf::Abs => match func_ctx.resolve_type(arg, &module.types).scalar() {
Some(Scalar {
kind: ScalarKind::Sint,
width: 4,
}) => Function::Regular(ABS_FUNCTION),
_ => Function::Regular("abs"),
},
Mf::Min => Function::Regular("min"),
Mf::Max => Function::Regular("max"),
Mf::Clamp => Function::Regular("clamp"),

View File

@@ -1,5 +1,13 @@
RWByteAddressBuffer v_indices : register(u0);
// Wrapped modulo: substitutes 1 when the divisor is 0 to avoid undefined
// behaviour from division by zero.
uint naga_mod(uint lhs, uint rhs) {
return lhs % (rhs == 0u ? 1u : rhs);
}
// Wrapped division: substitutes 1 when the divisor is 0.
uint naga_div(uint lhs, uint rhs) {
return lhs / (rhs == 0u ? 1u : rhs);
}
uint collatz_iterations(uint n_base)
{
uint n = (uint)0;
@@ -17,9 +25,9 @@ uint collatz_iterations(uint n_base)
}
{
uint _e7 = n;
if (((_e7 % 2u) == 0u)) {
if ((naga_mod(_e7, 2u) == 0u)) {
uint _e12 = n;
n = (_e12 / 2u);
n = naga_div(_e12, 2u);
} else {
uint _e16 = n;
n = ((3u * _e16) + 1u);

View File

@@ -24,6 +24,11 @@ Texture2D<float> image_2d_depth : register(t2, space1);
Texture2DArray<float> image_2d_array_depth : register(t3, space1);
TextureCube<float> image_cube_depth : register(t4, space1);
// Modulo that is well defined for INT_MIN % -1, a zero divisor, and operands
// whose signs differ, using the WGSL formula lhs - (lhs / divisor) * divisor.
int2 naga_mod(int2 lhs, int2 rhs) {
int2 divisor = ((lhs == -2147483648 & rhs == -1) | (rhs == 0)) ? 1 : rhs;
return lhs - (lhs / divisor) * divisor;
}
uint2 NagaRWDimensions2D(RWTexture2D<uint4> tex)
{
uint4 ret;
@@ -35,7 +40,7 @@ uint2 NagaRWDimensions2D(RWTexture2D<uint4> tex)
void main(uint3 local_id : SV_GroupThreadID)
{
uint2 dim = NagaRWDimensions2D(image_storage_src);
int2 itc = (int2((dim * local_id.xy)) % int2(int(10), int(20)));
int2 itc = naga_mod(int2((dim * local_id.xy)), int2(int(10), int(20)));
uint4 value1_ = image_mipmapped_src.Load(int3(itc, int(local_id.z)));
uint4 value2_ = image_multisampled_src.Load(itc, int(local_id.z));
uint4 value4_ = image_storage_src.Load(itc);
@@ -57,7 +62,7 @@ void main(uint3 local_id : SV_GroupThreadID)
void depth_load(uint3 local_id_1 : SV_GroupThreadID)
{
uint2 dim_1 = NagaRWDimensions2D(image_storage_src);
int2 itc_1 = (int2((dim_1 * local_id_1.xy)) % int2(int(10), int(20)));
int2 itc_1 = naga_mod(int2((dim_1 * local_id_1.xy)), int2(int(10), int(20)));
float val = image_depth_multisampled_src.Load(itc_1, int(local_id_1.z)).x;
image_dst[itc_1.x] = (uint(val)).xxxx;
return;

View File

@@ -16,10 +16,15 @@ float4 builtins()
return (((((float4(asint(asuint((s1_).xxxx) + asuint(v_i32_zero))) + s2_) + m1_) + m2_) + (b1_).xxxx) + b2_);
}
// Modulo that is well defined for INT_MIN % -1, a zero divisor, and operands
// whose signs differ, using the WGSL formula lhs - (lhs / divisor) * divisor.
int4 naga_mod(int4 lhs, int4 rhs) {
int4 divisor = ((lhs == -2147483648 & rhs == -1) | (rhs == 0)) ? 1 : rhs;
return lhs - (lhs / divisor) * divisor;
}
float4 splat(float m, int n)
{
float2 a_2 = ((((2.0).xx + (m).xx) - (4.0).xx) / (8.0).xx);
int4 b = ((n).xxxx % (int(2)).xxxx);
int4 b = naga_mod((n).xxxx, (int(2)).xxxx);
return (a_2.xyxy + float4(b));
}
@@ -56,6 +61,52 @@ void logical()
return;
}
// Negation via unsigned arithmetic so that negating INT_MIN wraps instead of
// triggering signed-overflow undefined behaviour.
int2 naga_neg(int2 val) {
return asint(-asuint(val));
}
// Division helpers: the divisor is replaced with 1 when it is 0, or (for
// signed types) when the division would be INT_MIN / -1.
int naga_div(int lhs, int rhs) {
return lhs / (((lhs == -2147483648 & rhs == -1) | (rhs == 0)) ? 1 : rhs);
}
uint naga_div(uint lhs, uint rhs) {
return lhs / (rhs == 0u ? 1u : rhs);
}
int2 naga_div(int2 lhs, int2 rhs) {
return lhs / (((lhs == -2147483648 & rhs == -1) | (rhs == 0)) ? 1 : rhs);
}
uint3 naga_div(uint3 lhs, uint3 rhs) {
return lhs / (rhs == 0u ? 1u : rhs);
}
// Modulo helpers: signed variants use the WGSL formula
// lhs - (lhs / divisor) * divisor, which is well defined for operands whose
// signs differ; unsigned variants only need the zero-divisor guard.
int naga_mod(int lhs, int rhs) {
int divisor = ((lhs == -2147483648 & rhs == -1) | (rhs == 0)) ? 1 : rhs;
return lhs - (lhs / divisor) * divisor;
}
uint naga_mod(uint lhs, uint rhs) {
return lhs % (rhs == 0u ? 1u : rhs);
}
int2 naga_mod(int2 lhs, int2 rhs) {
int2 divisor = ((lhs == -2147483648 & rhs == -1) | (rhs == 0)) ? 1 : rhs;
return lhs - (lhs / divisor) * divisor;
}
uint3 naga_mod(uint3 lhs, uint3 rhs) {
return lhs % (rhs == 0u ? 1u : rhs);
}
uint2 naga_div(uint2 lhs, uint2 rhs) {
return lhs / (rhs == 0u ? 1u : rhs);
}
uint2 naga_mod(uint2 lhs, uint2 rhs) {
return lhs % (rhs == 0u ? 1u : rhs);
}
// Returns a zero-initialized float3x3 (naga's zero-value helper).
float3x3 ZeroValuefloat3x3() {
return (float3x3)0;
}
@@ -71,7 +122,7 @@ float3x4 ZeroValuefloat3x4() {
void arithmetic()
{
float neg0_1 = -(1.0);
int2 neg1_1 = -((int(1)).xx);
int2 neg1_1 = naga_neg((int(1)).xx);
float2 neg2_ = -((1.0).xx);
int add0_ = asint(asuint(int(2)) + asuint(int(1)));
uint add1_ = (2u + 1u);
@@ -91,17 +142,17 @@ void arithmetic()
int2 mul3_ = asint(asuint((int(2)).xx) * asuint((int(1)).xx));
uint3 mul4_ = ((2u).xxx * (1u).xxx);
float4 mul5_ = ((2.0).xxxx * (1.0).xxxx);
int div0_ = (int(2) / int(1));
uint div1_ = (2u / 1u);
int div0_ = naga_div(int(2), int(1));
uint div1_ = naga_div(2u, 1u);
float div2_ = (2.0 / 1.0);
int2 div3_ = ((int(2)).xx / (int(1)).xx);
uint3 div4_ = ((2u).xxx / (1u).xxx);
int2 div3_ = naga_div((int(2)).xx, (int(1)).xx);
uint3 div4_ = naga_div((2u).xxx, (1u).xxx);
float4 div5_ = ((2.0).xxxx / (1.0).xxxx);
int rem0_ = (int(2) % int(1));
uint rem1_ = (2u % 1u);
int rem0_ = naga_mod(int(2), int(1));
uint rem1_ = naga_mod(2u, 1u);
float rem2_ = fmod(2.0, 1.0);
int2 rem3_ = ((int(2)).xx % (int(1)).xx);
uint3 rem4_ = ((2u).xxx % (1u).xxx);
int2 rem3_ = naga_mod((int(2)).xx, (int(1)).xx);
uint3 rem4_ = naga_mod((2u).xxx, (1u).xxx);
float4 rem5_ = fmod((2.0).xxxx, (1.0).xxxx);
{
int2 add0_1 = asint(asuint((int(2)).xx) + asuint((int(1)).xx));
@@ -122,16 +173,16 @@ void arithmetic()
uint2 mul3_1 = (2u * (1u).xx);
float2 mul4_1 = ((2.0).xx * 1.0);
float2 mul5_1 = (2.0 * (1.0).xx);
int2 div0_1 = ((int(2)).xx / (int(1)).xx);
int2 div1_1 = ((int(2)).xx / (int(1)).xx);
uint2 div2_1 = ((2u).xx / (1u).xx);
uint2 div3_1 = ((2u).xx / (1u).xx);
int2 div0_1 = naga_div((int(2)).xx, (int(1)).xx);
int2 div1_1 = naga_div((int(2)).xx, (int(1)).xx);
uint2 div2_1 = naga_div((2u).xx, (1u).xx);
uint2 div3_1 = naga_div((2u).xx, (1u).xx);
float2 div4_1 = ((2.0).xx / (1.0).xx);
float2 div5_1 = ((2.0).xx / (1.0).xx);
int2 rem0_1 = ((int(2)).xx % (int(1)).xx);
int2 rem1_1 = ((int(2)).xx % (int(1)).xx);
uint2 rem2_1 = ((2u).xx % (1u).xx);
uint2 rem3_1 = ((2u).xx % (1u).xx);
int2 rem0_1 = naga_mod((int(2)).xx, (int(1)).xx);
int2 rem1_1 = naga_mod((int(2)).xx, (int(1)).xx);
uint2 rem2_1 = naga_mod((2u).xx, (1u).xx);
uint2 rem3_1 = naga_mod((2u).xx, (1u).xx);
float2 rem4_1 = fmod((2.0).xx, (1.0).xx);
float2 rem5_1 = fmod((2.0).xx, (1.0).xx);
}
@@ -234,9 +285,9 @@ void assignment()
a_1 = asint(asuint(_e10) * asuint(_e9));
int _e12 = a_1;
int _e13 = a_1;
a_1 = (_e13 / _e12);
a_1 = naga_div(_e13, _e12);
int _e15 = a_1;
a_1 = (_e15 % int(1));
a_1 = naga_mod(_e15, int(1));
int _e17 = a_1;
a_1 = (_e17 & int(0));
int _e19 = a_1;
@@ -258,16 +309,20 @@ void assignment()
return;
}
// Negation via unsigned arithmetic so that negating INT_MIN wraps instead of
// triggering signed-overflow undefined behaviour.
int naga_neg(int val) {
return asint(-asuint(val));
}
void negation_avoids_prefix_decrement()
{
int i0_ = -(int(1));
int i1_ = -(-(int(1)));
int i2_ = -(-(int(1)));
int i3_ = -(-(int(1)));
int i4_ = -(-(-(int(1))));
int i5_ = -(-(-(-(int(1)))));
int i6_ = -(-(-(-(-(int(1))))));
int i7_ = -(-(-(-(-(int(1))))));
int i0_ = naga_neg(int(1));
int i1_ = naga_neg(naga_neg(int(1)));
int i2_ = naga_neg(naga_neg(int(1)));
int i3_ = naga_neg(naga_neg(int(1)));
int i4_ = naga_neg(naga_neg(naga_neg(int(1))));
int i5_ = naga_neg(naga_neg(naga_neg(naga_neg(int(1)))));
int i6_ = naga_neg(naga_neg(naga_neg(naga_neg(naga_neg(int(1))))));
int i7_ = naga_neg(naga_neg(naga_neg(naga_neg(naga_neg(int(1))))));
float f0_ = -(1.0);
float f1_ = -(-(1.0));
float f2_ = -(-(1.0));

View File

@@ -32,6 +32,10 @@ struct FragmentInput_fs_main {
float4 position_1 : SV_Position;
};
// Division guarded against a zero divisor and against INT_MIN / -1, both of
// which are undefined; the divisor is replaced with 1 in those cases.
int naga_div(int lhs, int rhs) {
return lhs / (((lhs == -2147483648 & rhs == -1) | (rhs == 0)) ? 1 : rhs);
}
VertexOutput ConstructVertexOutput(float4 arg0, float3 arg1) {
VertexOutput ret = (VertexOutput)0;
ret.position = arg0;
@@ -44,7 +48,7 @@ VertexOutput_vs_main vs_main(uint vertex_index : SV_VertexID)
int tmp1_ = (int)0;
int tmp2_ = (int)0;
tmp1_ = (int((_NagaConstants.first_vertex + vertex_index)) / int(2));
tmp1_ = naga_div(int((_NagaConstants.first_vertex + vertex_index)), int(2));
tmp2_ = (int((_NagaConstants.first_vertex + vertex_index)) & int(1));
int _e9 = tmp1_;
int _e15 = tmp2_;