375: Add water example.  r=kvark,cwfitzgerald a=OptimisticPeach

Solves #329, water example requested by @kvark.

I tuned it to my personal preference of visuals, however it might be different for you. 

Note: I used https://github.com/ashima/webgl-noise for 3D open simplex noise.

I've added comments explaining what most of the things on the Rust side of the example mean. However, I'm not 100% sure I did the best job of giving a brief overview, and wouldn't mind someone making sure my terminology/definitions are correct.

Thanks!
Patrik

Co-authored-by: OptimisticPeach <patrikbuhring@yahoo.com>
This commit is contained in:
bors[bot]
2020-06-17 02:02:43 +00:00
committed by GitHub
13 changed files with 1445 additions and 0 deletions

View File

@@ -57,6 +57,7 @@ png = "0.16"
winit = { version = "0.22.1", features = ["web-sys"] }
rand = { version = "0.7.2", features = ["wasm-bindgen"] }
bytemuck = "1"
noise = "0.6.0"
[[example]]
name="hello-compute"

View File

@@ -0,0 +1,34 @@
# Water example
This example renders animated water.
It demonstrates read-only depth/stencil (abbreviated RODS), where a depth/stencil buffer is used as an attachment that is read-only. In this case it's used in the shaders to calculate reflections and depth.
## Files:
```
water
├── main.rs ------------------ Main program
├── point_gen.rs ------------- Hexagon point generation
├── README.md ---------------- This readme
├── screenshot.png ----------- Screenshot
├── terrain_shader.frag ------ Terrain fragment shader
├── terrain_shader.frag.spv -- Compiled terrain fragment shader
├── terrain_shader.vert ------ Terrain vertex shader
├── terrain_shader.vert.spv -- Compiled terrain vertex shader
├── water_shader.frag -------- Water fragment shader
├── water_shader.frag.spv ---- Compiled water fragment shader
├── water_shader.vert -------- Water vertex shader
└── water_shader.vert.spv ---- Compiled water vertex shader
```
## To run
```
cargo run --example water
```
## To recompile shaders
(requires `make` and `glslangValidator`)
```
make
```
## Screenshot
![Water example](./screenshot.png)

805
wgpu/examples/water/main.rs Normal file
View File

@@ -0,0 +1,805 @@
#[path = "../framework.rs"]
mod framework;
mod point_gen;
use cgmath::Point3;
use wgpu::vertex_attr_array;
///
/// Radius of the terrain.
///
/// Changing this value will change the size of the
/// water and terrain. Note however, that changes to
/// this value will require modification of the time
/// scale in the `render` method below.
///
const SIZE: f32 = 10.0;

///
/// Location of the camera.
/// Location of light is in terrain/water shaders.
///
const CAMERA: Point3<f32> = Point3 { x: -100.0, y: 50.0, z: 100.0 };
/// The three matrices `generate_matrices` produces each time the
/// viewport changes: the regular camera view, the camera mirrored
/// below the water plane (used for the reflection pass), and the
/// projection (already multiplied by `framework::OPENGL_TO_WGPU_MATRIX`).
struct Matrices {
    view: cgmath::Matrix4<f32>,
    flipped_view: cgmath::Matrix4<f32>,
    projection: cgmath::Matrix4<f32>,
}
/// Uniform data for the terrain shaders.
///
/// `#[repr(C)]` fixes the field layout so the raw bytes can be copied
/// into a GPU uniform buffer via `bytemuck::cast_slice`.
#[repr(C)]
#[derive(Copy, Clone, Debug, PartialEq)]
struct TerrainUniforms {
    // Flattened `projection * view` 4x4 matrix.
    view_projection: [f32; 16],
    // Plane coefficients; the terrain vertex shader computes
    // `dot(vec4(position, 1.0), clipping_plane)` and the fragment
    // shader discards fragments with a negative result.
    clipping_plane: [f32; 4],
}

// SAFETY: repr(C) struct containing only f32 arrays — no padding,
// and every bit pattern is a valid value.
unsafe impl bytemuck::Zeroable for TerrainUniforms {}
unsafe impl bytemuck::Pod for TerrainUniforms {}
/// Uniform data for the water shaders.
#[repr(C)]
#[derive(Copy, Clone, Debug, PartialEq)]
struct WaterUniforms {
    // Flattened view matrix.
    view: [f32; 16],
    // Flattened projection matrix.
    projection: [f32; 16],
    // Packed vec4: [sin(time), cos(time), terrain diameter (SIZE * 2),
    // viewport width]. `render` overwrites the first two floats every
    // frame; the initial [0.0, 1.0] is sin(0)/cos(0).
    time_size_width: [f32; 4],
    // Viewport height in pixels.
    height: f32,
}

/// CPU-side bundle of every uniform block the example uploads.
/// It is never sent to the GPU as a whole (each member is written to
/// its own buffer), so it needs no `Pod` impl itself.
struct Uniforms {
    terrain_normal: TerrainUniforms,
    terrain_flipped: TerrainUniforms,
    water: WaterUniforms,
}

// SAFETY: WaterUniforms is repr(C) and contains only f32s.
unsafe impl bytemuck::Zeroable for WaterUniforms {}
unsafe impl bytemuck::Pod for WaterUniforms {}
/// All GPU state the example keeps between frames.
struct Example {
    water_vertex_buf: wgpu::Buffer,
    water_vertex_count: usize,
    // Kept so `resize` can rebuild the water bind group against the
    // freshly created reflection/depth textures.
    water_bind_group_layout: wgpu::BindGroupLayout,
    water_bind_group: wgpu::BindGroup,
    water_uniform_buf: wgpu::Buffer,
    water_pipeline: wgpu::RenderPipeline,
    terrain_vertex_buf: wgpu::Buffer,
    terrain_vertex_count: usize,
    terrain_normal_bind_group: wgpu::BindGroup,
    ///
    /// Binds to the uniform buffer where the
    /// camera has been placed underwater.
    ///
    terrain_flipped_bind_group: wgpu::BindGroup,
    terrain_normal_uniform_buf: wgpu::Buffer,
    ///
    /// Contains uniform variables where the camera
    /// has been placed underwater.
    ///
    terrain_flipped_uniform_buf: wgpu::Buffer,
    terrain_pipeline: wgpu::RenderPipeline,
    // Offscreen colour target the mirrored terrain is rendered into;
    // sampled by the water fragment shader.
    reflect_view: wgpu::TextureView,
    // Depth attachment for all passes; also sampled by the water shader.
    depth_buffer: wgpu::TextureView,
    current_frame: usize,
    ///
    /// Used to prevent issues when rendering after
    /// minimizing the window. `None` disables rendering entirely;
    /// `Some(n)` skips frames until `current_frame` exceeds `n`.
    ///
    active: Option<usize>,
}
impl Example {
    ///
    /// Creates the view matrices, and the corrected projection matrix.
    ///
    fn generate_matrices(aspect_ratio: f32) -> Matrices {
        // Near/far planes (10.0 / 400.0) match the zNear/zFar constants
        // in the water fragment shader.
        let projection = cgmath::perspective(cgmath::Deg(45f32), aspect_ratio, 10.0, 400.0);
        let reg_view = cgmath::Matrix4::look_at(
            CAMERA,
            cgmath::Point3::new(0f32, 0.0, 0.0),
            cgmath::Vector3::unit_y(), //Note that y is up. Differs from other examples.
        );
        // Stretch the unit-spaced generated meshes to fill the scene.
        let scale = cgmath::Matrix4::from_nonuniform_scale(8.0, 1.5, 8.0);
        let reg_view = reg_view * scale;
        // Mirror the camera below the water plane (negated y) so the
        // reflection pass sees the scene from underneath.
        let flipped_view = cgmath::Matrix4::look_at(
            cgmath::Point3::new(CAMERA.x, -CAMERA.y, CAMERA.z),
            cgmath::Point3::new(0f32, 0.0, 0.0),
            cgmath::Vector3::unit_y(),
        );
        let correction = framework::OPENGL_TO_WGPU_MATRIX;
        let flipped_view = flipped_view * scale;
        Matrices {
            view: reg_view,
            flipped_view,
            projection: correction * projection,
        }
    }

    /// Builds all three uniform blocks for a `width` x `height` viewport,
    /// with the water time reset to zero (sin = 0.0, cos = 1.0).
    fn generate_uniforms(width: u32, height: u32) -> Uniforms {
        let Matrices {
            view,
            flipped_view,
            projection
        } = Self::generate_matrices(width as f32 / height as f32);
        Uniforms {
            terrain_normal: TerrainUniforms {
                view_projection: *(projection * view).as_ref(),
                // All-zero plane: nothing is clipped in the normal pass.
                clipping_plane: [0.0; 4],
            },
            terrain_flipped: TerrainUniforms {
                view_projection: *(projection * flipped_view).as_ref(),
                // Plane y = 0: clip everything below the water surface
                // when drawing the reflection.
                clipping_plane: [0., 1., 0., 0.],
            },
            water: WaterUniforms {
                view: *view.as_ref(),
                projection: *projection.as_ref(),
                time_size_width: [0.0, 1.0, SIZE * 2.0, width as f32],
                height: height as f32
            }
        }
    }

    ///
    /// Initializes Uniforms and textures.
    ///
    /// Everything returned here depends on the swap-chain size, so
    /// `resize` calls this again to rebuild the reflection texture,
    /// the depth buffer, and the water bind group that samples them.
    ///
    /// Returns `(reflection view, depth view, water bind group)`.
    fn initialize_resources(
        sc_desc: &wgpu::SwapChainDescriptor,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        water_uniforms: &wgpu::Buffer,
        terrain_normal_uniforms: &wgpu::Buffer,
        terrain_flipped_uniforms: &wgpu::Buffer,
        water_bind_group_layout: &wgpu::BindGroupLayout,
    ) -> (wgpu::TextureView, wgpu::TextureView, wgpu::BindGroup) {
        // Matrices for our projection and view.
        // flipped_view is the view from under the water.
        let Uniforms {
            terrain_normal,
            terrain_flipped,
            water,
        } = Self::generate_uniforms(sc_desc.width, sc_desc.height);
        // Put the uniforms into buffers on the GPU
        queue.write_buffer(terrain_normal_uniforms, 0, bytemuck::cast_slice(&[terrain_normal]));
        queue.write_buffer(terrain_flipped_uniforms, 0, bytemuck::cast_slice(&[terrain_flipped]));
        queue.write_buffer(water_uniforms, 0, bytemuck::cast_slice(&[water]));
        // Both textures match the swap chain's size.
        let texture_extent = wgpu::Extent3d {
            width: sc_desc.width,
            height: sc_desc.height,
            depth: 1,
        };
        // Offscreen colour target for the mirrored-terrain pass; sampled
        // later by the water fragment shader (hence SAMPLED usage).
        let reflection_texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some("Reflection Render Texture"),
            size: texture_extent,
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: sc_desc.format,
            usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST | wgpu::TextureUsage::OUTPUT_ATTACHMENT,
        });
        // Depth attachment for every pass, also sampled by the water
        // shader to compute depth-based effects.
        let draw_depth_buffer = device.create_texture(&wgpu::TextureDescriptor {
            label: Some("Depth Buffer"),
            size: texture_extent,
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: wgpu::TextureFormat::Depth32Float,
            usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST | wgpu::TextureUsage::OUTPUT_ATTACHMENT,
        });
        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("Texture Sampler"),
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Nearest,
            min_filter: wgpu::FilterMode::Linear,
            mipmap_filter: wgpu::FilterMode::Nearest,
            ..Default::default()
        });
        // Bindings correspond 1:1 to the water bind group layout
        // created in `init`.
        let water_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: water_bind_group_layout,
            bindings: &[
                wgpu::Binding {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(water_uniforms.slice(..)),
                },
                wgpu::Binding {
                    binding: 1,
                    resource: wgpu::BindingResource::TextureView(&reflection_texture.create_default_view()),
                },
                wgpu::Binding {
                    binding: 2,
                    resource: wgpu::BindingResource::TextureView(&draw_depth_buffer.create_default_view())
                },
                wgpu::Binding {
                    binding: 3,
                    resource: wgpu::BindingResource::Sampler(&sampler),
                },
            ],
            label: Some("Water Bind Group"),
        });
        (reflection_texture.create_default_view(), draw_depth_buffer.create_default_view(), water_bind_group)
    }
}
impl framework::Example for Example {
    /// One-time setup: generates the terrain and water meshes, uploads
    /// them to vertex buffers, creates the uniform buffers, bind group
    /// layouts, bind groups, shader modules and the two render pipelines.
    fn init(
        sc_desc: &wgpu::SwapChainDescriptor,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
    ) -> (Self, Option<wgpu::CommandBuffer>) {
        use std::mem;
        // Size of one water vertex
        let water_vertex_size = mem::size_of::<point_gen::WaterVertexAttributes>();
        let water_vertices = point_gen::HexWaterMesh::generate(SIZE).generate_points();
        // Size of one terrain vertex
        let terrain_vertex_size = mem::size_of::<point_gen::TerrainVertexAttributes>();
        // Noise generation
        let terrain_noise = noise::OpenSimplex::new();
        // Random colouration
        let mut terrain_random = rand::thread_rng();
        // Generate terrain. The closure determines what each hexagon will look like.
        let terrain = point_gen::HexTerrainMesh::generate(SIZE, |point| -> point_gen::TerrainVertex {
            use rand::Rng;
            use noise::NoiseFn;
            // 2D simplex noise, sampled at 1/5 scale and biased up by 0.1
            // so slightly more terrain sits above the water line.
            let noise = terrain_noise.get([point[0] as f64 / 5.0, point[1] as f64 / 5.0]) + 0.1;
            let y = noise as f32 * 8.0;
            // Multiplies a colour by some random amount.
            // Alpha (arr[3]) is left untouched.
            fn mul_arr(mut arr: [u8; 4], by: f32) -> [u8; 4] {
                arr[0] = (arr[0] as f32 * by).min(255.0) as u8;
                arr[1] = (arr[1] as f32 * by).min(255.0) as u8;
                arr[2] = (arr[2] as f32 * by).min(255.0) as u8;
                arr
            }
            // Under water
            const DARK_SAND: [u8; 4] = [235, 175, 71, 255];
            // Coast
            const SAND: [u8; 4] = [217, 191, 76, 255];
            // Normal
            const GRASS: [u8; 4] = [122, 170, 19, 255];
            // Mountain
            const SNOW: [u8; 4] = [175, 224, 237, 255];
            // Random colouration, in [0.9, 1.1).
            let random = terrain_random.gen::<f32>() * 0.2 + 0.9;
            // Choose colour by height band.
            let colour = if y <= 0.0 {
                DARK_SAND
            } else if y <= 0.8 {
                SAND
            } else if y <= 3.0 {
                GRASS
            } else {
                SNOW
            };
            point_gen::TerrainVertex {
                position: Point3 { x: point[0], y, z: point[1] },
                colour: mul_arr(colour, random),
            }
        });
        // Generate the buffer data.
        let terrain_vertices = terrain.make_buffer_data();
        // Create the buffers on the GPU to hold the data.
        let water_vertex_buf = device.create_buffer_with_data(
            bytemuck::cast_slice(&water_vertices),
            wgpu::BufferUsage::VERTEX,
        );
        let terrain_vertex_buf = device.create_buffer_with_data(
            bytemuck::cast_slice(&terrain_vertices),
            wgpu::BufferUsage::VERTEX,
        );
        // Create the bind group layout. This is what our uniforms will look like.
        let water_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("Water Bind Group Layout"),
            bindings: &[
                // Uniform variables such as projection/view.
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
                    ty: wgpu::BindingType::UniformBuffer { dynamic: false },
                    count: None,
                    ..Default::default()
                },
                // Reflection texture.
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStage::FRAGMENT,
                    ty: wgpu::BindingType::SampledTexture {
                        multisampled: false,
                        component_type: wgpu::TextureComponentType::Float,
                        dimension: wgpu::TextureViewDimension::D2,
                    },
                    count: None,
                    ..Default::default()
                },
                // Depth texture for terrain.
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStage::FRAGMENT,
                    ty: wgpu::BindingType::SampledTexture {
                        multisampled: false,
                        component_type: wgpu::TextureComponentType::Float,
                        dimension: wgpu::TextureViewDimension::D2,
                    },
                    count: None,
                    ..Default::default()
                },
                // Sampler to be able to sample the textures.
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStage::FRAGMENT,
                    ty: wgpu::BindingType::Sampler { comparison: false },
                    count: None,
                    ..Default::default()
                },
            ],
        });
        let terrain_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("Terrain Bind Group Layout"),
            bindings: &[
                // Regular uniform variables like view/projection.
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStage::VERTEX,
                    ty: wgpu::BindingType::UniformBuffer { dynamic: false },
                    count: None,
                    ..Default::default()
                },
            ],
        });
        // Create our pipeline layouts.
        let water_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            bind_group_layouts: &[&water_bind_group_layout],
        });
        let terrain_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            bind_group_layouts: &[&terrain_bind_group_layout],
        });
        // Uniform buffers; contents are written by `initialize_resources`
        // (and, for the water time, every frame by `render`).
        let water_uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Water Uniforms"),
            size: std::mem::size_of::<WaterUniforms>() as _,
            usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
            mapped_at_creation: false,
        });
        let terrain_normal_uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Normal Terrain Uniforms"),
            size: std::mem::size_of::<TerrainUniforms>() as _,
            usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
            mapped_at_creation: false,
        });
        let terrain_flipped_uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Flipped Terrain Uniforms"),
            size: std::mem::size_of::<TerrainUniforms>() as _,
            usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
            mapped_at_creation: false,
        });
        // Create bind group.
        // This puts values behind what was laid out in the bind group layout.
        let (reflect_view, depth_buffer, water_bind_group) = Self::initialize_resources(
            sc_desc,
            device,
            queue,
            &water_uniform_buf,
            &terrain_normal_uniform_buf,
            &terrain_flipped_uniform_buf,
            &water_bind_group_layout
        );
        let terrain_normal_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &terrain_bind_group_layout,
            bindings: &[
                wgpu::Binding {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(terrain_normal_uniform_buf.slice(..)),
                },
            ],
            label: Some("Terrain Normal Bind Group"),
        });
        let terrain_flipped_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &terrain_bind_group_layout,
            bindings: &[
                wgpu::Binding {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(terrain_flipped_uniform_buf.slice(..))
                }
            ],
            label: Some("Terrain Flipped Bind Group"),
        });
        // Read shaders from file (pre-compiled SPIR-V embedded at build time).
        let water_vs_bytes = include_bytes!("water_shader.vert.spv");
        let water_fs_bytes = include_bytes!("water_shader.frag.spv");
        // Upload/compile them to GPU code.
        let water_vs_module = device
            .create_shader_module(&wgpu::read_spirv(std::io::Cursor::new(&water_vs_bytes[..])).unwrap());
        let water_fs_module = device
            .create_shader_module(&wgpu::read_spirv(std::io::Cursor::new(&water_fs_bytes[..])).unwrap());
        let terrain_vs_bytes = include_bytes!("terrain_shader.vert.spv");
        let terrain_fs_bytes = include_bytes!("terrain_shader.frag.spv");
        let terrain_vs_module = device
            .create_shader_module(&wgpu::read_spirv(std::io::Cursor::new(&terrain_vs_bytes[..])).unwrap());
        let terrain_fs_module = device
            .create_shader_module(&wgpu::read_spirv(std::io::Cursor::new(&terrain_fs_bytes[..])).unwrap());
        // Create the render pipelines. These describe how the data will flow through the GPU, and what
        // constraints and modifiers it will have.
        let water_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            // The "layout" is what uniforms will be needed.
            layout: &water_pipeline_layout,
            // Vertex & Fragment shaders
            vertex_stage: wgpu::ProgrammableStageDescriptor {
                module: &water_vs_module,
                entry_point: "main",
            },
            fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
                module: &water_fs_module,
                entry_point: "main",
            }),
            // How the triangles will be rasterized. This is more important
            // for the terrain because of the beneath-the water shot.
            // This is also dependent on how the triangles are being generated.
            rasterization_state: Some(wgpu::RasterizationStateDescriptor {
                front_face: wgpu::FrontFace::Cw,
                cull_mode: wgpu::CullMode::None,
                depth_bias: 0,
                depth_bias_slope_scale: 0.0,
                depth_bias_clamp: 0.0,
            }),
            // What kind of data are we passing in?
            primitive_topology: wgpu::PrimitiveTopology::TriangleList,
            // Describes how the colour will be interpolated
            // and assigned to the output attachment.
            color_states: &[wgpu::ColorStateDescriptor {
                format: sc_desc.format,
                color_blend: wgpu::BlendDescriptor {
                    src_factor: wgpu::BlendFactor::SrcAlpha,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha_blend: wgpu::BlendDescriptor {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::One,
                    operation: wgpu::BlendOperation::Max,
                },
                write_mask: wgpu::ColorWrite::ALL,
            }],
            // Describes how us writing to the depth/stencil buffer
            // will work. Since this is water, we need to read from the
            // depth buffer both as a texture in the shader, and as an
            // input attachment to do depth-testing. We don't write, so
            // depth_write_enabled is set to false. This is called
            // RODS or read-only depth stencil.
            depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
                // We don't use stencil.
                format: wgpu::TextureFormat::Depth32Float,
                depth_write_enabled: false,
                depth_compare: wgpu::CompareFunction::Less,
                stencil_front: wgpu::StencilStateFaceDescriptor::IGNORE,
                stencil_back: wgpu::StencilStateFaceDescriptor::IGNORE,
                stencil_read_mask: 0,
                stencil_write_mask: 0,
            }),
            // Layout of our vertices. This should match the structs
            // which are uploaded to the GPU. This should also be
            // ensured by tagging on either a `#[repr(C)]` onto a
            // struct, or a `#[repr(transparent)]` if it only contains
            // one item, which is itself `repr(C)`.
            vertex_state: wgpu::VertexStateDescriptor {
                // We don't actually use indices, since it's unnecessary
                // because we duplicate all the data anyway. This is
                // necessary to achieve the low-poly effect.
                index_format: wgpu::IndexFormat::Uint16,
                vertex_buffers: &[
                    wgpu::VertexBufferDescriptor {
                        stride: water_vertex_size as wgpu::BufferAddress,
                        step_mode: wgpu::InputStepMode::Vertex,
                        // Matches point_gen::WaterVertexAttributes.
                        attributes: &vertex_attr_array![0 => Short2, 1 => Char4],
                    },
                ],
            },
            sample_count: 1,
            sample_mask: !0,
            alpha_to_coverage_enabled: false,
        });
        // Same idea as the water pipeline.
        let terrain_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            layout: &terrain_pipeline_layout,
            vertex_stage: wgpu::ProgrammableStageDescriptor {
                module: &terrain_vs_module,
                entry_point: "main",
            },
            fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
                module: &terrain_fs_module,
                entry_point: "main",
            }),
            rasterization_state: Some(wgpu::RasterizationStateDescriptor {
                front_face: wgpu::FrontFace::Ccw,
                cull_mode: wgpu::CullMode::Front,
                depth_bias: 0,
                depth_bias_slope_scale: 0.0,
                depth_bias_clamp: 0.0,
            }),
            primitive_topology: wgpu::PrimitiveTopology::TriangleList,
            color_states: &[wgpu::ColorStateDescriptor {
                format: sc_desc.format,
                color_blend: wgpu::BlendDescriptor::REPLACE,
                alpha_blend: wgpu::BlendDescriptor::REPLACE,
                write_mask: wgpu::ColorWrite::ALL,
            }],
            depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
                format: wgpu::TextureFormat::Depth32Float,
                depth_write_enabled: true,
                depth_compare: wgpu::CompareFunction::Less,
                stencil_front: wgpu::StencilStateFaceDescriptor::IGNORE,
                stencil_back: wgpu::StencilStateFaceDescriptor::IGNORE,
                stencil_read_mask: 0,
                stencil_write_mask: 0,
            }),
            vertex_state: wgpu::VertexStateDescriptor {
                index_format: wgpu::IndexFormat::Uint16,
                vertex_buffers: &[
                    wgpu::VertexBufferDescriptor {
                        stride: terrain_vertex_size as wgpu::BufferAddress,
                        step_mode: wgpu::InputStepMode::Vertex,
                        // Matches point_gen::TerrainVertexAttributes.
                        attributes: &vertex_attr_array![0 => Float3, 1 => Float3, 2 => Uchar4Norm],
                    },
                ],
            },
            sample_count: 1,
            sample_mask: !0,
            alpha_to_coverage_enabled: false,
        });
        // Done
        let this = Example {
            water_vertex_buf,
            water_vertex_count: water_vertices.len(),
            water_bind_group_layout,
            water_bind_group,
            water_uniform_buf,
            water_pipeline,
            terrain_vertex_buf,
            terrain_vertex_count: terrain_vertices.len(),
            terrain_normal_bind_group,
            terrain_flipped_bind_group,
            terrain_normal_uniform_buf,
            terrain_flipped_uniform_buf,
            terrain_pipeline,
            reflect_view,
            depth_buffer,
            current_frame: 0,
            active: Some(0),
        };
        (this, None)
    }
    /// Window-event hook required by the framework trait; this example
    /// has no interactive state, so all events are ignored.
    fn update(&mut self, _event: winit::event::WindowEvent) {
        //empty
    }
fn resize(
&mut self,
sc_desc: &wgpu::SwapChainDescriptor,
device: &wgpu::Device,
queue: &wgpu::Queue,
) {
if sc_desc.width == 0 && sc_desc.height == 0 {
// Stop rendering altogether.
self.active = None;
return;
} else {
// The next frame queued is the wrong size: (0, 0),
// so we skip a frame to avoid crashes where our
// textures are the correct (window) size, and the
// frame is still (0, 0).
self.active = Some(self.current_frame + 1);
}
// Regenerate all of the buffers and textures.
let (reflect_view, depth_buffer, water_bind_group) = Self::initialize_resources(
sc_desc,
device,
queue,
&self.water_uniform_buf,
&self.terrain_normal_uniform_buf,
&self.terrain_flipped_uniform_buf,
&self.water_bind_group_layout
);
self.water_bind_group = water_bind_group;
self.depth_buffer = depth_buffer;
self.reflect_view = reflect_view;
}
    /// Renders one frame in three passes:
    /// 1. mirrored terrain into the offscreen reflection texture,
    /// 2. regular terrain into the swap-chain frame (storing depth),
    /// 3. water into the same frame with a read-only depth attachment.
    fn render(
        &mut self,
        frame: &wgpu::SwapChainTexture,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
    ) -> wgpu::CommandBuffer {
        // Increment frame count regardless of if we draw.
        self.current_frame += 1;
        // Write the sin/cos values to the uniform buffer for the water.
        // Offset = two mat4s (view, projection); the two floats land in
        // time_size_width[0..2], matching the initial [sin(0), cos(0)].
        let (water_sin, water_cos) = ((self.current_frame as f32) / 600.0).sin_cos();
        queue.write_buffer(&self.water_uniform_buf, std::mem::size_of::<[f32; 16]>() as wgpu::BufferAddress * 2, bytemuck::cast_slice(&[water_sin, water_cos]));
        // The encoder provides a way to turn our instructions here, into
        // a command buffer the GPU can understand.
        let mut encoder =
            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: Some("Main Command Encoder") });
        // Only render valid frames. See resize method.
        if let Some(active) = self.active {
            if active >= self.current_frame {
                return encoder.finish();
            }
        } else {
            return encoder.finish();
        }
        // First pass: render the reflection.
        {
            let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                color_attachments: &[
                    wgpu::RenderPassColorAttachmentDescriptor {
                        attachment: &self.reflect_view,
                        resolve_target: None,
                        load_op: wgpu::LoadOp::Clear,
                        store_op: wgpu::StoreOp::Store,
                        // Sky colour.
                        clear_color: wgpu::Color {
                            r: 161.0 / 255.0,
                            g: 246.0 / 255.0,
                            b: 255.0 / 255.0,
                            a: 1.0,
                        },
                    },
                ],
                // We still need to use the depth buffer here
                // since the pipeline requires it. The reflection's depth
                // values are not needed afterwards, so they are discarded
                // (StoreOp::Clear) at the end of the pass.
                depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
                    attachment: &self.depth_buffer,
                    depth_load_op: wgpu::LoadOp::Clear,
                    depth_store_op: wgpu::StoreOp::Clear,
                    clear_depth: 1.0,
                    depth_read_only: false,
                    stencil_load_op: wgpu::LoadOp::Clear,
                    stencil_store_op: wgpu::StoreOp::Store,
                    clear_stencil: 0,
                    stencil_read_only: false,
                }),
            });
            rpass.set_pipeline(&self.terrain_pipeline);
            // Flipped uniforms: camera mirrored under the water plane.
            rpass.set_bind_group(0, &self.terrain_flipped_bind_group, &[]);
            rpass.set_vertex_buffer(0, self.terrain_vertex_buf.slice(..));
            rpass.draw(0..self.terrain_vertex_count as u32, 0..1);
        }
        // Terrain right side up. This time we need to use the
        // depth values, so we must use StoreOp::Store.
        {
            let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                color_attachments: &[
                    wgpu::RenderPassColorAttachmentDescriptor {
                        attachment: &frame.view,
                        resolve_target: None,
                        load_op: wgpu::LoadOp::Clear,
                        store_op: wgpu::StoreOp::Store,
                        clear_color: wgpu::Color {
                            r: 161.0 / 255.0,
                            g: 246.0 / 255.0,
                            b: 255.0 / 255.0,
                            a: 1.0,
                        },
                    },
                ],
                depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
                    attachment: &self.depth_buffer,
                    depth_load_op: wgpu::LoadOp::Clear,
                    depth_store_op: wgpu::StoreOp::Store,
                    clear_depth: 1.0,
                    depth_read_only: false,
                    stencil_load_op: wgpu::LoadOp::Clear,
                    stencil_store_op: wgpu::StoreOp::Store,
                    clear_stencil: 0,
                    stencil_read_only: false,
                }),
            });
            rpass.set_pipeline(&self.terrain_pipeline);
            rpass.set_bind_group(0, &self.terrain_normal_bind_group, &[]);
            rpass.set_vertex_buffer(0, self.terrain_vertex_buf.slice(..));
            rpass.draw(0..self.terrain_vertex_count as u32, 0..1);
        }
        // Render the water. This reads from the depth buffer, but does not write
        // to it, so it cannot be in the same render pass.
        {
            let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                color_attachments: &[
                    wgpu::RenderPassColorAttachmentDescriptor {
                        attachment: &frame.view,
                        resolve_target: None,
                        // Load: the terrain pass already drew this frame.
                        load_op: wgpu::LoadOp::Load,
                        store_op: wgpu::StoreOp::Store,
                        clear_color: wgpu::Color {
                            r: 161.0 / 255.0,
                            g: 246.0 / 255.0,
                            b: 255.0 / 255.0,
                            a: 1.0,
                        },
                    },
                ],
                // Read-only depth: the same texture is simultaneously
                // sampled by the water shader through its bind group.
                depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
                    attachment: &self.depth_buffer,
                    depth_load_op: wgpu::LoadOp::Load,
                    depth_store_op: wgpu::StoreOp::Store,
                    clear_depth: 1.0,
                    depth_read_only: true,
                    stencil_load_op: wgpu::LoadOp::Load,
                    stencil_store_op: wgpu::StoreOp::Store,
                    clear_stencil: 0,
                    stencil_read_only: false,
                }),
            });
            rpass.set_pipeline(&self.water_pipeline);
            rpass.set_bind_group(0, &self.water_bind_group, &[]);
            rpass.set_vertex_buffer(0, self.water_vertex_buf.slice(..));
            rpass.draw(0..self.water_vertex_count as u32, 0..1);
        }
        encoder.finish()
    }
}
/// Entry point: hands control to the shared example framework, which
/// creates the window/device/swap chain and drives `Example`.
fn main() {
    framework::run::<Example>("water");
}

View File

@@ -0,0 +1,283 @@
//!
//! This module covers generating points in a hexagonal fashion.
//!
use std::collections::HashMap;
use cgmath::{Vector3, Point3, InnerSpace};
// The following constants are used in calculations.
// A and B are multiplication factors for x and y.

///
/// X multiplication factor.
/// 1.0 / sqrt(2)
///
const A: f32 = std::f32::consts::FRAC_1_SQRT_2;

///
/// Y multiplication factor.
/// sqrt(3) / sqrt(2) == sqrt(1.5)
///
const B: f32 = SQRT_3 * A;

///
/// `sin(45deg)` is used to rotate the points.
///
const S45: f32 = std::f32::consts::FRAC_1_SQRT_2;

///
/// `cos(45deg)` is used to rotate the points.
/// (Equal to `S45` since sin(45deg) == cos(45deg).)
///
const C45: f32 = S45;

/// sqrt(3), used in `B` and in sizing the grid (`q_given_r`).
const SQRT_3: f32 = 1.73205080757;
/// Per-vertex data uploaded to the GPU for the terrain mesh.
/// The field order/layout must match the `Float3, Float3, Uchar4Norm`
/// vertex attributes declared for the terrain pipeline in main.rs.
#[repr(C)]
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct TerrainVertexAttributes {
    position: [f32; 3],
    normal: [f32; 3],
    colour: [u8; 4],
}

// SAFETY: repr(C) struct of f32s and u8s — tightly packed, and every
// bit pattern is a valid value.
unsafe impl bytemuck::Pod for TerrainVertexAttributes {}
unsafe impl bytemuck::Zeroable for TerrainVertexAttributes {}
/// Per-vertex data for the water mesh: a quantized lattice position
/// plus offsets toward the triangle's other two corners (see
/// `HexWaterMesh::generate_points`). Must match the `Short2, Char4`
/// vertex attributes declared for the water pipeline in main.rs.
#[repr(C)]
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct WaterVertexAttributes {
    position: [i16; 2],
    offsets: [i8; 4],
}

// SAFETY: repr(C), integer fields only, no padding (4 + 4 bytes).
unsafe impl bytemuck::Pod for WaterVertexAttributes {}
unsafe impl bytemuck::Zeroable for WaterVertexAttributes {}
///
/// Represents the center of a single hexagon.
///
#[derive(Copy, Clone, Debug)]
pub struct TerrainVertex {
    // World-space centre; `y` is the terrain height.
    pub position: Point3<f32>,
    // RGBA colour shared by all triangles of this hexagon.
    pub colour: [u8; 4],
}
///
/// Gets the surrounding hexagonal points from a point.
///
/// +---0---1
/// | / | |
/// 5---p---2
/// | | / |
/// 4---3---+
///
fn surrounding_hexagonal_points(x: isize, y: isize) -> [(isize, isize); 6] {
    // Neighbour offsets on the skewed grid, enumerated clockwise
    // starting directly above `p` (matching the diagram above).
    const DELTAS: [(isize, isize); 6] = [(0, -1), (1, -1), (1, 0), (0, 1), (-1, 1), (-1, 0)];
    let mut neighbours = [(0, 0); 6];
    for (slot, (dx, dy)) in neighbours.iter_mut().zip(DELTAS.iter()) {
        *slot = (x + dx, y + dy);
    }
    neighbours
}
/// Visits every adjacent pair of values around the ring of six
/// neighbours of `(x, y)` (wrapping from the last back to the first),
/// calling `for_each` only when both members of a pair are present in
/// `hashmap`.
fn surrounding_point_values_iter<T>(hashmap: &HashMap<(isize, isize), T>, x: isize, y: isize, mut for_each: impl FnMut((&T, &T))) {
    // Ring of the six neighbours, clockwise starting above the point
    // (same order as `surrounding_hexagonal_points`).
    let ring = [
        (x, y - 1),
        (x + 1, y - 1),
        (x + 1, y),
        (x, y + 1),
        (x - 1, y + 1),
        (x - 1, y),
    ];
    for idx in 0..ring.len() {
        let here = ring[idx];
        let next = ring[(idx + 1) % ring.len()];
        // Skip pairs where either neighbour is missing from the map.
        if let (Some(a), Some(b)) = (hashmap.get(&here), hashmap.get(&next)) {
            for_each((a, b));
        }
    }
}
///
/// Used in calculating terrain normals.
///
/// Returns the unit normal of the triangle spanned by `a`, `b` and `c`:
/// the cross product of the normalized edge vectors leaving `a`.
pub fn calculate_normal(a: Point3<f32>, b: Point3<f32>, c: Point3<f32>) -> Vector3<f32> {
    let edge_ab = (b - a).normalize();
    let edge_ac = (c - a).normalize();
    edge_ab.cross(edge_ac).normalize()
}
///
/// Given the radius, how large of a square do we need to make a unit hexagon grid?
///
/// The result is always odd, so the grid stays centred on the origin.
fn q_given_r(radius: f32) -> usize {
    // Same value as the file-level `SQRT_3` constant at f32 precision,
    // computed locally so this function is self-contained.
    let sqrt_3 = 3.0f32.sqrt();
    // Columns needed to span the diameter of the circle...
    let spanning = ((4.0 * radius) / sqrt_3 + 1.0).floor();
    // ...rounded down to the nearest odd count.
    ((spanning / 2.0).floor() * 2.0 + 1.0) as usize
}
///
/// Represents terrain, however it contains the vertices only once.
///
#[derive(Clone)]
pub struct HexTerrainMesh {
    // Hexagon centres keyed by their integer grid coordinates.
    pub vertices: HashMap<(isize, isize), TerrainVertex>,
    // Half the grid width; loop bound used by `make_buffer_data`.
    half_size: isize,
}
impl HexTerrainMesh {
    ///
    /// Generates the vertices (or the centers of the hexagons). The colour and height is determined by
    /// a function passed in by the user.
    ///
    pub fn generate(radius: f32, mut gen_vertex: impl FnMut([f32; 2]) -> TerrainVertex) -> Self {
        let width = q_given_r(radius);
        let half_width = (width / 2) as isize;
        let mut map = HashMap::new();
        // Tracks the tallest generated vertex; currently informational only.
        let mut max = std::f32::NEG_INFINITY;
        for i in -half_width..=half_width {
            let x_o = i as f32;
            for j in -half_width..=half_width {
                let y_o = j as f32;
                // Rotate the integer grid by 45 degrees and scale the axes
                // (A, B) so the points form a hexagonal lattice.
                let x = A * (x_o * C45 - y_o * S45);
                let z = B * (x_o * S45 + y_o * C45);
                // Keep only lattice points inside the requested radius.
                if x.hypot(z) < radius {
                    let vertex = gen_vertex([x, z]);
                    if vertex.position.y > max {
                        max = vertex.position.y;
                    }
                    map.insert((i, j), vertex);
                }
            }
        }
        Self {
            vertices: map,
            half_size: width as isize / 2,
        }
    }
    ///
    /// Creates the points required to render the mesh.
    ///
    /// Each centre point and pair of adjacent neighbours contributes two
    /// triangles with flat (per-triangle) normals; vertices are duplicated
    /// rather than indexed to achieve the low-poly look.
    pub fn make_buffer_data(&self) -> Vec<TerrainVertexAttributes> {
        let mut vertices = Vec::new();
        // Centroid of the triangle formed by two neighbours and the centre.
        fn middle(p1: &TerrainVertex, p2: &TerrainVertex, p: &TerrainVertex) -> Point3<f32> {
            Point3 {
                x: (p1.position.x + p2.position.x + p.position.x) / 3.0,
                y: (p1.position.y + p2.position.y + p.position.y) / 3.0,
                z: (p1.position.z + p2.position.z + p.position.z) / 3.0,
            }
        }
        // Midpoint between two hexagon centres.
        fn half(p1: &TerrainVertex, p2: &TerrainVertex) -> Point3<f32> {
            Point3 { x: (p1.position.x + p2.position.x) / 2.0, y: (p1.position.y + p2.position.y) / 2.0, z: (p1.position.z + p2.position.z) / 2.0 }
        }
        // Emits the triangles [ap, m, p] (normal n1) and [m, bp, p] (normal n2).
        let mut push_triangle = |p1: &TerrainVertex, p2: &TerrainVertex, p: &TerrainVertex, c: [u8; 4]| {
            let m = middle(p1, p2, p);
            let ap = half(p1, p);
            let bp = half(p2, p);
            let p = p.position;
            let n1 = calculate_normal(ap, m, p);
            let n2 = calculate_normal(m, bp, p);
            // BUGFIX: `std::iter::repeat` is infinite, so without `take(3)`
            // the chained `n2` iterator was never reached and all six
            // vertices received the first triangle's normal. Limit `n1`
            // to the first triangle's three vertices.
            vertices
                .extend(
                    [ap, m, p, m, bp, p]
                        .iter()
                        .zip(
                            std::iter::repeat::<[f32; 3]>(n1.into())
                                .take(3)
                                .chain(std::iter::repeat::<[f32; 3]>(n2.into()))
                        )
                        .zip(std::iter::repeat(c))
                        .map(|((pos, normal), colour)| TerrainVertexAttributes {
                            position: *pos.as_ref(),
                            normal,
                            colour,
                        })
                );
        };
        for i in -self.half_size..=self.half_size {
            for j in -self.half_size..=self.half_size {
                if let Some(p) = self.vertices.get(&(i, j)) {
                    surrounding_point_values_iter(
                        &self.vertices,
                        i,
                        j,
                        |(a, b)| push_triangle(a, b, p, p.colour)
                    );
                }
            }
        }
        vertices
    }
}
///
/// Water mesh which contains vertex data for the water mesh.
///
/// It stores the values multiplied and rounded to the
/// nearest whole number to be more efficient with space when
/// sending large meshes to the GPU.
///
pub struct HexWaterMesh {
    // Quantized i16 lattice positions keyed by grid coordinates.
    pub vertices: HashMap<(isize, isize), [i16; 2]>,
    // Half the grid width; loop bound used by `generate_points`.
    half_size: isize,
}
impl HexWaterMesh {
    /// Generates the water lattice covering a circle of `radius`,
    /// quantizing each position to whole `i16` values (see the struct
    /// docs for why).
    pub fn generate(radius: f32) -> Self {
        let width = q_given_r(radius);
        let half_width = (width / 2) as isize;
        let mut map = HashMap::new();
        for i in -half_width..=half_width {
            let x_o = i as f32;
            for j in -half_width..=half_width {
                let y_o = j as f32;
                // Same rotated/scaled hexagonal lattice as the terrain mesh.
                let x = A * (x_o * C45 - y_o * S45);
                let z = B * (x_o * S45 + y_o * C45);
                if x.hypot(z) < radius {
                    // Rescale both axes so the lattice coordinates round
                    // to whole numbers before quantizing to i16.
                    let x = (x * 2.0).round() as i16;
                    let z = ((z / B) * std::f32::consts::SQRT_2).round() as i16;
                    map.insert((i, j), [x, z]);
                }
            }
        }
        Self {
            vertices: map,
            half_size: half_width,
        }
    }
    ///
    /// Generates the points required to render the mesh.
    ///
    pub fn generate_points(&self) -> Vec<WaterVertexAttributes> {
        let mut vertices = Vec::new();
        // Offsets from one corner towards the other two corners of the
        // triangle. NOTE(review): presumably consumed by water_shader.vert
        // to reconstruct the triangle — confirm against that shader.
        fn calculate_differences(a: [i16; 2], b: [i16; 2], c: [i16; 2]) -> [i8; 4] {
            [
                (b[0] - a[0]) as i8,
                (b[1] - a[1]) as i8,
                (c[0] - a[0]) as i8,
                (c[1] - a[1]) as i8
            ]
        }
        // Emits one triangle; each corner carries the offsets to the
        // remaining two corners.
        let mut push_triangle = |a: [i16; 2], b: [i16; 2], c: [i16; 2]| {
            let bc = calculate_differences(a, b, c);
            let ca = calculate_differences(b, c, a);
            let ab = calculate_differences(c, a, b);
            vertices.extend(
                [a, b, c]
                    .iter()
                    .zip([bc, ca, ab].iter())
                    .map(|(&position, &offsets)| WaterVertexAttributes { position, offsets })
            );
        };
        for i in -self.half_size..=self.half_size {
            for j in -self.half_size..=self.half_size {
                // Only every third diagonal emits its surrounding fan,
                // presumably so each triangle is produced exactly once.
                if (i - j) % 3 == 0 {
                    if let Some(&p) = self.vertices.get(&(i, j)) {
                        surrounding_point_values_iter(
                            &self.vertices,
                            i,
                            j,
                            |(a, b)| push_triangle(*a, *b, p)
                        );
                    }
                }
            }
        }
        vertices
    }
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 196 KiB

View File

@@ -0,0 +1,21 @@
#version 450

// Terrain fragment shader: passes the vertex colour through, emulating
// a user clip plane by discarding fragments below it.

// Force depth/stencil tests to run before this shader executes.
layout(early_fragment_tests) in;

layout(location = 0) in PerVertex {
    vec4 v_Colour;
    // Comment this out if using user-clipping planes:
    float v_ClipDist;
};

layout(location = 0) out vec4 outColour;

void main() {
    // Emulated clip plane: drop fragments with a negative signed
    // distance (computed in the vertex shader).
    // Comment this out if using user-clipping planes:
    if(v_ClipDist < 0.0) {
        discard;
    }
    outColour = v_Colour;
    // Terrain is always opaque.
    outColour.a = 1.0;
}

Binary file not shown.

View File

@@ -0,0 +1,40 @@
#version 450

layout(set = 0, binding = 0) uniform Uniforms {
    mat4x4 projection_view;
    // Plane against which terrain is clipped (e.g. when rendering the
    // reflected pass); consumed via v_ClipDist in the fragment shader.
    vec4 clipping_plane;
};

// Single fixed light for the whole scene.
const vec3 light = vec3(150.0, 70.0, 0.0);
const vec3 light_colour = vec3(1.0, 250.0 / 255.0, 209.0 / 255.0);
const float ambient = 0.2;

layout(location = 0) in vec3 position;
layout(location = 1) in vec3 normal;
layout(location = 2) in vec4 colour;

layout(location = 0) out PerVertex {
    vec4 v_Colour;
    // Comment this out if using user-clipping planes:
    float v_ClipDist;
};

void main() {
    gl_Position = projection_view * vec4(position, 1.0);

    // Diffuse term, clamped so back-facing geometry keeps a minimum
    // brightness of 0.2.
    // NOTE(review): this is the light→surface direction; conventional
    // Lambert shading uses surface→light. The clamp hides the sign, but
    // confirm the intended look before "fixing".
    // https://www.desmos.com/calculator/nqgyaf8uvo
    vec3 normalized_light_direction = normalize(position - light);
    float brightness_diffuse = clamp(dot(normalized_light_direction, normal), 0.2, 1.0);

    v_Colour.rgb = max((brightness_diffuse + ambient) * light_colour * colour.rgb, 0.0);
    v_Colour.a = colour.a;

    // Signed distance from the vertex to the clipping plane.
    // Comment this out if using user-clipping planes:
    v_ClipDist = dot(vec4(position, 1.0), clipping_plane);
    // Uncomment this if using user-clipping planes:
    // gl_ClipDistance[0] = dot(vec4(position, 1.0), clipping_plane);
}

Binary file not shown.

View File

@@ -0,0 +1,48 @@
#version 450

const vec3 water_colour = vec3(0.0, 117.0 / 255.0, 242.0 / 255.0);
// Near/far planes used to linearize depth — must match the projection
// built on the CPU side; TODO confirm against the Rust setup code.
const float zNear = 10.0;
const float zFar = 400.0;

layout(set = 0, binding = 0) uniform Uniforms {
    mat4x4 _view;
    mat4x4 _projection;
    // Packed per the vertex shader: x = sin(time), y = cos(time),
    // z = water size, w = viewport width.
    vec4 time_size_width;
    float viewport_height;
};
// Reflected scene, rendered in an earlier pass.
layout(set = 0, binding = 1) uniform texture2D reflection;
// Read-only depth buffer of the terrain pass, sampled as a texture.
layout(set = 0, binding = 2) uniform texture2D terrain_depth_tex;
layout(set = 0, binding = 3) uniform sampler colour_sampler;

layout(location = 0) in PerVertex {
    vec2 f_WaterScreenPos;
    float f_Fresnel;
    vec3 f_Light;
} f_In;

layout(location = 0) out vec4 outColor;

// Converts a non-linear depth-buffer value in [0, 1] into a linear
// eye-space depth between zNear and zFar.
float to_linear_depth(float depth) {
    float z_n = 2.0 * depth - 1.0;
    float z_e = 2.0 * zNear * zFar / (zFar + zNear - z_n * (zFar - zNear));
    return z_e;
}

void main() {
    // Reflection colour looked up at the (undistorted) water screen position.
    vec3 reflection_colour = texture(sampler2D(reflection, colour_sampler), f_In.f_WaterScreenPos.xy).xyz;

    // Distance between the water surface and the terrain behind it,
    // in linear eye-space units.
    float pixel_depth = to_linear_depth(gl_FragCoord.z);
    float terrain_depth = to_linear_depth(texture(sampler2D(terrain_depth_tex, colour_sampler), gl_FragCoord.xy / vec2(time_size_width.w, viewport_height)).r);
    float dist = terrain_depth - pixel_depth;

    // Fade the water out near the shoreline (small terrain/water gap)
    // and where the Fresnel term says we look through the surface.
    float clamped = smoothstep(0.0, 1.0, dist);
    outColor.a = clamped * (1.0 - f_In.f_Fresnel);

    // Tint deeper water toward the base water colour.
    vec3 final_colour = f_In.f_Light + reflection_colour;
    vec3 depth_colour = mix(final_colour, water_colour, smoothstep(1.0, 5.0, dist) * 0.2);
    outColor.xyz = depth_colour;
}

Binary file not shown.

View File

@@ -0,0 +1,213 @@
#version 450

layout(set = 0, binding = 0) uniform Uniforms {
    mat4x4 view;
    mat4x4 projection;
    // x = sin(time), y = cos(time), z = water size, w = viewport width
    // (see apply_distortion below and the fragment shader).
    vec4 time_size_width;
    float _viewport_height;
};

// Single fixed light, matching the terrain shader's constants.
const vec3 light_point = vec3(150.0, 70.0, 0.0);
const vec3 light_colour = vec3(1.0, 250.0 / 255.0, 209.0 / 255.0);

// sqrt(3)/2 — vertical spacing of a unit hex grid.
const float Y_SCL = 0.86602540378443864676372317075294;
// Controls how strongly normals are bent toward the triangle centre.
const float CURVE_BIAS = -0.1;
const float INV_1_CURVE_BIAS = 1.0 / (1.0 + CURVE_BIAS);

// Quantized grid position and packed offsets to the triangle's other
// two corners, as produced by HexWaterMesh::generate_points.
layout(location = 0) in ivec2 position;
layout(location = 1) in ivec4 offsets;

layout(location = 0) out PerVertex {
    vec2 f_WaterScreenPos;
    float f_Fresnel;
    vec3 f_Light;
} f_In;

//
// The following code to calculate simplex 3D
// is from https://github.com/ashima/webgl-noise
//
// Simplex 3D Noise
// by Ian McEwan, Ashima Arts.
//

// Cheap permutation polynomial mod 289, used by snoise.
vec4 permute(vec4 x) {
    return mod(((x*34.0)+1.0)*x, 289.0);
}

// Fast inverse-sqrt approximation used to normalise gradients in snoise.
vec4 taylorInvSqrt(vec4 r){
    return 1.79284291400159 - 0.85373472095314 * r;
}
// 3D simplex noise (ashima/webgl-noise reference implementation),
// returning a smooth pseudo-random value in approximately [-1, 1].
// Kept verbatim from upstream; see the attribution comment above.
float snoise(vec3 v){
    const vec2 C = vec2(1.0/6.0, 1.0/3.0);
    const vec4 D = vec4(0.0, 0.5, 1.0, 2.0);

    // First corner
    vec3 i = floor(v + dot(v, C.yyy));
    vec3 x0 = v - i + dot(i, C.xxx);

    // Other corners
    vec3 g = step(x0.yzx, x0.xyz);
    vec3 l = 1.0 - g;
    vec3 i1 = min(g.xyz, l.zxy);
    vec3 i2 = max(g.xyz, l.zxy);

    // x0 = x0 - 0.0 + 0.0 * C.xxx;
    // x1 = x0 - i1 + 1.0 * C.xxx;
    // x2 = x0 - i2 + 2.0 * C.xxx;
    // x3 = x0 - 1.0 + 3.0 * C.xxx;
    vec3 x1 = x0 - i1 + C.xxx;
    vec3 x2 = x0 - i2 + C.yyy;// 2.0*C.x = 1/3 = C.y
    vec3 x3 = x0 - D.yyy;// -1.0+3.0*C.x = -0.5 = -D.y

    // Permutations
    i = mod(i, 289.0);
    vec4 p = permute(permute(permute(
        i.z + vec4(0.0, i1.z, i2.z, 1.0))
        + i.y + vec4(0.0, i1.y, i2.y, 1.0))
        + i.x + vec4(0.0, i1.x, i2.x, 1.0));

    // Gradients: 7x7 points over a square, mapped onto an octahedron.
    // The ring size 17*17 = 289 is close to a multiple of 49 (49*6 = 294)
    float n_ = 0.142857142857;// 1.0/7.0
    vec3 ns = n_ * D.wyz - D.xzx;
    vec4 j = p - 49.0 * floor(p * ns.z * ns.z);// mod(p,7*7)
    vec4 x_ = floor(j * ns.z);
    vec4 y_ = floor(j - 7.0 * x_);// mod(j,N)
    vec4 x = x_ *ns.x + ns.yyyy;
    vec4 y = y_ *ns.x + ns.yyyy;
    vec4 h = 1.0 - abs(x) - abs(y);
    vec4 b0 = vec4(x.xy, y.xy);
    vec4 b1 = vec4(x.zw, y.zw);
    //vec4 s0 = vec4(lessThan(b0,0.0))*2.0 - 1.0;
    //vec4 s1 = vec4(lessThan(b1,0.0))*2.0 - 1.0;
    vec4 s0 = floor(b0)*2.0 + 1.0;
    vec4 s1 = floor(b1)*2.0 + 1.0;
    vec4 sh = -step(h, vec4(0.0));
    vec4 a0 = b0.xzyw + s0.xzyw*sh.xxyy;
    vec4 a1 = b1.xzyw + s1.xzyw*sh.zzww;
    vec3 p0 = vec3(a0.xy, h.x);
    vec3 p1 = vec3(a0.zw, h.y);
    vec3 p2 = vec3(a1.xy, h.z);
    vec3 p3 = vec3(a1.zw, h.w);

    //Normalise gradients
    vec4 norm = taylorInvSqrt(vec4(dot(p0, p0), dot(p1, p1), dot(p2, p2), dot(p3, p3)));
    p0 *= norm.x;
    p1 *= norm.y;
    p2 *= norm.z;
    p3 *= norm.w;

    // Mix final noise value
    vec4 m = max(0.6 - vec4(dot(x0, x0), dot(x1, x1), dot(x2, x2), dot(x3, x3)), 0.0);
    m = m * m;
    return 9.0 * dot(m*m, vec4(dot(p0, x0), dot(p1, x1),
        dot(p2, x2), dot(p3, x3)));
}
// End of 3D simplex code.
// End of 3D simplex code.
// Displaces a water vertex by sampling simplex noise at three
// time-rotated copies of its position (one per output axis).
vec3 apply_distortion(vec3 pos) {
    vec3 perlin_pos = pos;

    //Do noise transformation to permit for smooth,
    //continuous movement.
    // NOTE(review): these locals shadow the GLSL built-in sin()/cos()
    // functions in this scope — legal, but easy to misread.
    float sin = time_size_width.x;
    float cos = time_size_width.y;
    float size = time_size_width.z;

    // Rotate 90 Z
    perlin_pos.xy = perlin_pos.yx;
    perlin_pos.x = -perlin_pos.x;

    // Move Left Size / 2
    perlin_pos.x -= size;

    float xcos = perlin_pos.x * cos;
    float xsin = perlin_pos.x * sin;
    float ycos = perlin_pos.y * cos;
    float ysin = perlin_pos.y * sin;
    float zcos = perlin_pos.z * cos;
    float zsin = perlin_pos.z * sin;

    // Rotate Time Y
    // NOTE(review): a true Y rotation's z component would be
    // `-xsin + zcos`, not `-xsin + xcos` — possible typo, but the
    // visuals were tuned to this exact output; confirm before changing
    // (the shipped .spv would also need recompiling).
    vec3 perlin_pos_y = vec3(xcos + zsin, perlin_pos.y, -xsin + xcos);

    // Rotate Time Z
    // NOTE(review): a true Z rotation keeps z, i.e. `perlin_pos.z`
    // rather than `perlin_pos.x` — same caveat as above.
    vec3 perlin_pos_z = vec3(xcos - ysin, xsin + ycos, perlin_pos.x);

    // Rotate 90 Y
    // NOTE(review): x/y/z products above were computed from the
    // pre-rotation position, so perlin_pos_x below mixes coordinate
    // frames; again presumed intentional for the final look.
    perlin_pos.xz = perlin_pos.zx;
    perlin_pos.x = -perlin_pos.x;

    // Rotate Time X
    vec3 perlin_pos_x = vec3(perlin_pos.x, ycos - zsin, ysin + zcos);

    // Sample at different places for x/y/z to get random-looking water.
    // Y displacement is much stronger than the lateral x/z jitter.
    return vec3(pos.x + snoise(perlin_pos_x + 2.0) * 0.4, pos.y + snoise(perlin_pos_y - 2.0) * 1.8, pos.z + snoise(perlin_pos_z) * 0.4);
}
// Multiply the input by the scale values.
vec3 make_position(vec2 original) {
vec3 interpreted = vec3(original.x * 0.5, 0.0, original.y * Y_SCL);
return apply_distortion(interpreted);
}
// Create the normal, and apply the curve. Change the Curve Bias above.
vec3 make_normal(vec3 a, vec3 b, vec3 c) {
vec3 norm = normalize(cross(b - c, a - c));
vec3 center = (a + b + c) / 3.0;
return (normalize(a - center) * CURVE_BIAS + norm) * INV_1_CURVE_BIAS;
}
// Calculate the fresnel effect.
float calc_fresnel(vec3 view, vec3 normal) {
float refractive = abs(dot(view, normal));
refractive = pow(refractive, 1.33333333333);
return refractive;
}
// Calculate the specular lighting.
float calc_specular(vec3 eye, vec3 normal, vec3 light) {
vec3 light_reflected = reflect(light, normal);
float specular = max(dot(eye, light_reflected), 0.0);
specular = pow(specular, 10.0);
return specular;
}
void main() {
    // Integer grid position, implicitly converted to float.
    vec2 p_pos = position;

    // Rebuild all three corners of this vertex's triangle from the
    // packed per-vertex offsets, each displaced by the noise field.
    vec3 b_pos = make_position(p_pos + offsets.xy);
    vec3 c_pos = make_position(p_pos + offsets.zw);
    vec4 a_pos = vec4(make_position(p_pos), 1.0);

    // Undistorted position — used to find where this vertex lands in
    // the reflection texture.
    vec4 original_pos = vec4(p_pos.x * 0.5, 0.0, p_pos.y * Y_SCL, 1.0);

    // View-space position of the (distorted) vertex.
    vec4 water_pos = a_pos;
    mat4x4 vm = view;
    vec4 transformed_pos = vm * water_pos;
    water_pos.xyz = transformed_pos.xyz / transformed_pos.w;

    // Flat-shaded normal from the three view-space corners, curved by
    // make_normal's bias.
    vec3 normal = make_normal((vm * a_pos).xyz, (vm * vec4(b_pos, 1.0)).xyz, (vm * vec4(c_pos, 1.0)).xyz);
    vec3 eye = normalize(-water_pos.xyz);

    // Specular light and Fresnel term, both computed in view space.
    vec4 transformed_light = vm * vec4(light_point, 1.0);
    f_In.f_Light = light_colour * calc_specular(eye, normal, normalize(water_pos.xyz - (transformed_light.xyz / transformed_light.w)));
    f_In.f_Fresnel = calc_fresnel(eye, normal);

    vec4 projected_pos = projection * transformed_pos;
    gl_Position = projected_pos;

    // Screen-space UV of the undistorted grid position, mapped from
    // clip space [-1, 1] into texture space [0, 1].
    vec4 gridpos = projection * vm * original_pos;
    f_In.f_WaterScreenPos.xy = (0.5 * gridpos.xy / gridpos.w) + 0.5;
}

Binary file not shown.