2022-08-26 14:24:33 -07:00
|
|
|
/// Per-frame parameters supplied by the host application.
struct Uniforms {
    // Viewport size in pixels (width, height); used to normalize the
    // fragment's pixel coordinates in fs_main/rectangle_to_square.
    dimensions: vec2<f32>,
    // Scale factor applied to normalized screen coordinates before the
    // spherical projection (see camera_project). Given the projection's
    // angle-based mapping, this is presumably an angle in radians from the
    // screen center to the window edge — TODO confirm units with host code.
    field_of_view: f32,
}
|
|
|
|
|
|
|
|
// Uniform buffer binding for the per-frame parameters above.
// NOTE(review): group 1 here vs. group 0 for the dither texture below —
// confirm this matches the host-side bind group layout.
@group(1)
@binding(0)
var<uniform> uniforms: Uniforms;
|
|
|
|
|
2023-07-23 20:19:25 -07:00
|
|
|
// Pi, written out well beyond f32 precision; as an f32 it rounds to 3.14159274.
const PI: f32 = 3.14159265358979323846264338327950288; // 3.14159274
|
2022-08-26 14:24:33 -07:00
|
|
|
|
|
|
|
/// A ray in world space, used by the camera to describe where each
/// fragment receives light from (see camera_project).
struct Ray {
    pos: vec3<f32>, // POSition (aka the origin)
    dir: vec3<f32>, // DIRection (normalized)
}
|
|
|
|
|
2023-07-23 20:19:25 -07:00
|
|
|
/// Map a rectangle with the provided dimensions onto a square from (-1,-1) to (1,1).
/// This is a linear scaling transformation; whatever part of the square falls
/// outside the rectangle's aspect ratio is cropped away.
///
/// The coordinates handed to the shader are pixel coordinates, but we want
/// identical behavior regardless of the window's shape or size — hence this
/// normalization step.
fn rectangle_to_square(rect: vec2<f32>, dims: vec2<f32>) -> vec2<f32> {
    // Linearly remap each axis from [0, dims] to [-1, 1].
    let sq = rect / dims * 2.0 - 1.0;

    // Shrink the window's longer axis's counterpart so coordinates stay square.
    // E.g. a 16:9 window keeps x in [-1, 1] while y only spans
    // [-9/16, 9/16]; the image content outside that range is cropped.
    if (dims.x > dims.y) {
        return vec2<f32>(sq.x, sq.y * dims.y / dims.x);
    }
    return vec2<f32>(sq.x * dims.x / dims.y, sq.y);
}
|
|
|
|
|
2023-07-23 20:19:25 -07:00
|
|
|
/// Map from a square grid to the surface of a (double-covered) sphere.
/// We use this as a (curvilinear) perspective projection for the camera.
fn project(grid: vec2<f32>) -> vec3<f32> {
    // The real plane is the product of two lines, R x R, and the torus is the
    // product of two circles, S^1 x S^1; mapping each axis modulo tau takes
    // the plane onto the torus.
    //
    // Setting the torus's major radius to 0 collapses it into a double-covered
    // sphere. The point set matches the ordinary sphere, but the coordinate
    // system differs: rotate left-right and the view behind you is backwards;
    // rotate up-down and it is upside-down. This makes the projection
    // continuous, and a grid translation corresponds to a camera rotation.
    // (Compare a mirror, which is z-inverted, so text appears backwards.)

    // Parametric torus with R = 0 and r = 1; cos(grid.x) is shared by the
    // first two components, so compute it once.
    let cos_x = cos(grid.x);
    return vec3<f32>(
        cos_x * cos(grid.y),
        cos_x * sin(grid.y),
        sin(grid.x),
    );
}
|
|
|
|
|
|
|
|
/// After converting pixel coordinates to screen coordinates, we still have a
/// problem: screen coordinates are 2d, but our world is 3d! The camera assigns
/// each screen coordinate a ray in 3d space — the position and direction from
/// which that fragment receives light.
fn camera_project(square: vec2<f32>) -> Ray {
    // The square coordinates run from -1 to 1 at the window edges; scaling by
    // the field of view makes the window edges correspond to the FOV angle
    // instead.
    let angles = square * uniforms.field_of_view;
    // Camera sits at the origin, looking along the projected direction.
    return Ray(vec3<f32>(0.), project(angles));
}
|
|
|
|
|
|
|
|
// Dither pattern texture sampled (via textureLoad) by dither() below.
// Presumably a Bayer/ordered-dither matrix tiled across the screen —
// TODO confirm contents and size with the host code that creates it.
@group(0)
@binding(0)
var dither_texture: texture_2d<f32>;
|
|
|
|
|
|
|
|
/// Apply ordered dithering, which reduces color banding and produces the appearance
/// of more colors when in a limited color space (e.g. dark colors with a typical
/// 8-bit sRGB monitor).
///
/// `pixel` is the fragment's integer pixel coordinate; `color` is the linear
/// color to be dithered. Returns the color nudged by the dither bias.
// FIXME: document, don't hardcode bit depth
fn dither(pixel: vec2<u32>, color: vec4<f32>) -> vec4<f32> {
    // Tile the dither texture across the screen by wrapping the pixel
    // coordinates at the texture's actual dimensions. (Previously this
    // hardcoded `% 255`, which skipped the last row/column of a 256-wide
    // texture and could produce bars at the wrap seams.)
    let size = textureDimensions(dither_texture);
    let texel = vec2<i32>(pixel % size);
    // Center the stored pattern around zero so the bias is symmetric.
    let bias = textureLoad(dither_texture, texel, 0) - 0.5;
    // Scale the bias to one 8-bit quantization step.
    // FIXME: hack to avoid srgb issues; bit depth (256) is still hardcoded.
    return color + (bias / 256.);
}
|
|
|
|
|
|
|
|
////
//// AUTHOR: Sam Hocevar (http://lolengine.net/blog/2013/07/27/rgb-to-hsv-in-glsl)
////
/// Convert an RGB color to HSV (hue, saturation, value), all in [0, 1].
fn rgb2hsv(c: vec3<f32>) -> vec3<f32> {
    // Hue offsets for the branchless channel shuffle below.
    let K = vec4<f32>(0.0, -1.0 / 3.0, 2.0 / 3.0, -1.0);
    // Order the channels so the maximum lands in the first slot, carrying the
    // matching hue offset along. (select(f, t, cond) is equivalent to the
    // classic mix(f, t, step(...)) formulation, including the >= edge case.)
    let p = select(vec4<f32>(c.bg, K.wz), vec4<f32>(c.gb, K.xy), c.g >= c.b);
    let q = select(vec4<f32>(p.xyw, c.r), vec4<f32>(c.r, p.yzx), c.r >= p.x);

    // Chroma: distance between the largest and smallest channel.
    let d = q.x - min(q.w, q.y);
    // Tiny epsilon guards the divisions against chroma/value of zero.
    let e = 1.0e-10;
    return vec3<f32>(abs(q.z + (q.w - q.y) / (6.0 * d + e)), d / (q.x + e), q.x);
}
|
|
|
|
|
|
|
|
/// Convert an HSV color (hue, saturation, value, all in [0, 1]) back to RGB.
/// Inverse of rgb2hsv above; same branchless formulation (Sam Hocevar).
fn hsv2rgb(c: vec3<f32>) -> vec3<f32> {
    let K = vec4<f32>(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0);
    // Build the three phase-shifted hue ramps in one shot.
    let t = fract(c.xxx + K.xyz) * 6.0 - K.www;
    let ramp = clamp(abs(t) - K.xxx, vec3<f32>(0.0), vec3<f32>(1.0));
    // Saturation blends between white and the pure hue; value scales the result.
    return c.z * mix(K.xxx, ramp, c.y);
}
|
|
|
|
|
|
|
|
/// Given a color which clips outside the color space (some channel is >1.0),
/// reduce the brightness (without affecting hue or saturation) until it no
/// longer clips. (The default behavior without doing this is plain channel
/// clipping, which dramatically affects the color's saturation, often turning
/// colors into 100% white pixels.)
fn clamp_value(_color: vec3<f32>) -> vec3<f32> {
    // TODO: Adjust value directly, without going through HSV conversion.
    var hsv = rgb2hsv(_color.rgb);
    // Cap value (brightness) at 1, preserving hue and saturation.
    hsv.z = min(hsv.z, 1.);
    return hsv2rgb(hsv);
}
|
|
|
|
|
|
|
|
@fragment
fn fs_main(@builtin(position) position: vec4<f32>) -> @location(0) vec4<f32> {
    // Turn the pixel position into a camera ray through that pixel.
    let ray = camera_project(rectangle_to_square(position.xy, uniforms.dimensions));
    // Debug shading: visualize the ray direction by remapping it from
    // [-1, 1] into the [0, 1] color cube.
    let raw_color = ray.dir * 0.5 + 0.5;

    // TODO: Separate postprocessing pass.

    // This renderer can emit colors brighter than 1.0 (e.g. with very bright
    // or many light sources). Displayed directly, those colors desaturate and
    // clamp to whatever output the display supports, showing up as flashes of
    // over-bright white pixels where there should be color.
    //
    // One mitigation is more samples per pixel: averaged with the (more
    // common) black no-hit samples, per-pixel brightness usually stays below
    // 1.0. Another is color correction — rather than clamping RGB channels
    // and losing saturation, scale brightness down until the full saturation
    // of the color is visible (or at least part of it). We do the latter:
    let graded = clamp_value(raw_color);

    // Dithering after sRGB conversion is slightly worse because the bayer
    // matrix is linear while sRGB is not; but dithering *before* conversion
    // means adjusted colors aren't *quite* closest to the nearest_color they
    // should snap to, which can create nasty artifacts.
    //
    // FIXME: This shader uses linear color space.
    return dither(vec2<u32>(position.xy), vec4<f32>(graded, 1.0));
}
|