You cannot select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
496 lines
17 KiB
GLSL
496 lines
17 KiB
GLSL
// See the descriptions for these in `project`. They're only relevant if you zoom out.
//#define TILE_PERSPECTIVE
//#define CLAMP_PERSPECTIVE

// "FOV", poorly-defined. affects *zoom*.
#define FOV (1.5)

// Number of full light paths traced per pixel each frame; higher is less
// noisy but proportionally more expensive.
#define SAMPLES 64

// The maximum number of times light can reflect or scatter before it is extinguished.
#define PATH_SEGMENTS 6

// The maximum number of steps a ray can take during marching before giving up
// and colliding with nothing. This prevents scenes from taking infinite time to render.
#define MAX_STEPS 200

// The maximum distance a ray can travel before we give up and just say it collides
// with nothing. This helps prevent the background from appearing warped by the foreground
// due to rays which march close to a foreground object run out of steps before
// reaching their destination when slightly farther rays do reach their target.
#define MAX_DIST 20.

// The minimum distance between two points before they are considered the same point.
// Setting a minimum distance prevents graphical glitches when ray marching parallel
// to a surface, where the ray does not intersect an object, but comes close enough
// that the march becomes so slow that it fails to reach its actual destination.
#define MIN_DIST (0.001953125/256.)

// The distance between samples when estimating a surface's normal.
#define NORMAL_DELTA (MIN_DIST/2.)

// How far away from the source to start marching a scatter ray, to prevent accidental collision.
#define SCATTER_MAGIC (MIN_DIST*16.)

// Only march this much of MIN_DIST at a time to account for imprecision in the distance
// calculations. Chosen by experimentation. If you have to set this low, that often means
// that there's a bug somewhere (e.g. you forgot to call `normalize`).
#define IMPRECISION_FACTOR 0.95
|
/*
Helpful resources:
* Inigo Quilez (website, youtube, shadertoy)
* Importance Sampling Techniques for Path Tracing in Participating Media (Christopher Kulla and Marcos Fajardo)
* Wikipedia articles:
  * Rendering equation
  * Bidirectional reflectance distribution function
  * Bidirectional scattering distribution function
*/

// Positive infinity; used as the "no hit" / "fully extinguished" sentinel.
#define INF (1./0.)
// Quiet NaN; used to mark pixels the projection could not render.
#define NAN sqrt(-1.)
// Degenerate ray used when a path terminates with no outgoing ray.
#define NULL_RAY ray(vec3(0.), vec3(0.))
|
// stolen from iq: https://www.shadertoy.com/view/4sfGzS
//------------------------------------------------------------------
// oldschool rand() from Visual Studio
//------------------------------------------------------------------

// Global PRNG state; seeded per-pixel/per-frame in `initRandoms`.
int seed = 1;

// Reset the PRNG state.
void srand(int s ) { seed = s; }

// Linear congruential generator; returns a pseudo-random int in [0, 32767].
int irand(void) { seed = seed*0x343fd+0x269ec3; return (seed>>16)&32767; }

// Pseudo-random float in [0, 1].
float rand(void) { return float(irand())/32767.0; }
|
//------------------------------------------------------------------
// hash to initialize the random sequence (copied from Hugo Elias)
//------------------------------------------------------------------

// Integer hash used to decorrelate the per-pixel random seeds.
// Deliberately relies on 32-bit wraparound integer arithmetic.
int hash( int n )
{
    int x = (n << 13) ^ n;
    return x * (x * x * 15731 + 789221) + 1376312589;
}
|
|
|
// Seed the PRNG uniquely for each pixel (fragCoord) and each frame (iFrame)
// so the per-pixel random sequences are decorrelated in space and time.
void initRandoms(vec2 fragCoord) {
    ivec2 q = ivec2(fragCoord);
    srand( hash(q.x+hash(q.y+hash(iFrame))));
}
|
|
//------------------------------------------------------------------
// end stolen from iq
//------------------------------------------------------------------

/// We use a global variable `time` instead of `iTime` so that we can apply
/// temporal antialiasing/motion blur by randomizing relative to `iTime`.
/// Assigned once per sample in `mainImage`; read by `dist` to animate the scene.
float time;
|
|
|
/// Randomly select a direction on a hemisphere facing the direction of the normal,
/// with a bias toward directions close to the normal.
/// Specifically, the bias is by `dot(norm, dir)`, making this a cosine-weighted
/// distribution, which is useful because `dot(norm, dir)` is a term in the
/// rendering equation.
// by fizzer via IQ: http://www.amietia.com/lambertnotangent.html
vec3 cosine_direction(vec3 norm) {
    float u = rand();
    float v = rand();

    // Pick a uniformly distributed point on the unit sphere (azimuth `a`,
    // height `u`), then add the normal and renormalize; the resulting
    // direction is cosine-weighted about `norm`.
    float a = 6.2831853 * v; // 2*pi*v
    u = 2.0*u - 1.0;
    return normalize(norm + vec3(sqrt(1.0 - u*u)*vec2(cos(a), sin(a)), u));
}
// end by fizzer
|
|
|
/// A ray: an origin and a direction.
struct ray {
    /// Origin point of the ray.
    vec3 pos;
    /// Direction of travel; callers appear to keep this normalized
    /// (see `cosine_direction` / `camera`) — NOTE(review): confirm.
    vec3 dir;
};
|
/// The result of transporting a ray through one medium (see `transmit`).
struct transmission {
    /// The new ray position and direction after colliding with a surface or scattering.
    ray outgoing;
    /// The reciprocal proportion of all light extinguished along the path.
    /// Compared against a uniform random number in `light`: larger values make
    /// path termination more likely (INF always terminates, 0. never does).
    float extinction;
};
|
|
|
// Cheat and calculate distance-related stuff with globals
// to reduce the amount of argument-passing we have to do.
vec3 _p;           // current query position for the dist_* helpers
int _inner_medium; // medium we are currently "most" inside (see `ud`)
float _inner;      // signed distance for that medium (<= 0 once set; INF when in air)
float _outer;      // distance to the nearest medium we are *not* inside
|
|
|
/// A helper function to update distance-related calculations.
/// `inner_medium` is the medium we are currently "most" inside. `inner` is the
/// distance to the edge of the medium we are currently "most" inside.
/// `outer` is the nearest medium we are *not* inside.
/// `nm` and `nd` are the medium and distance to the object we're currently
/// comparing to, respectively.
///
/// mnemomic: "Update Distances"
void ud(int nm, float nd) {
    if (nd <= 0. && nd < _inner) {
        // Inside this medium, and deeper inside it than any seen so far.
        _inner = nd;
        _inner_medium = nm;
    } else if (nd > 0. && nd < _outer) {
        // Outside this medium, and it is the closest such boundary so far.
        _outer = nd;
    }
}
|
|
|
// Unsigned distance from the query point to a horizontal plane at height `y`.
// (Equivalent to scalar `distance(_p.y, y)`.)
float dist_plane(float y) {
    return abs(_p.y - y);
}
|
|
|
// a plane with infinite depth
// Signed distance to a floor whose surface is at height `y`:
// positive above the surface, negative inside (below) it.
float dist_floor(float y) {
    return _p.y - y;
}
|
|
|
// Signed distance from the query point to a sphere centered at `pos`
// with radius `r` (negative inside the sphere).
float dist_sphere(vec3 pos, float r) {
    return length(_p - pos) - r;
}
|
|
|
// CSG subtraction: the (approximate) signed distance to shape `a`
// with shape `b` carved out of it.
float sub(float a, float b) {
    return max(a, -b);
}
|
|
|
/// Given a position, return the distance to the nearest change of medium,
/// and whatever medium we happen to currently be in.
/// The scene itself (floor, animated hollowed sphere, two light spheres)
/// is defined here via the `ud` accumulator.
vec2 dist(out int medium, vec3 pos) {
    _p = pos;
    _inner_medium = 0; // medium 0 = air, unless `ud` finds us inside something
    _inner = INF;
    _outer = INF;

    // the floor
    ud(1, dist_floor(-1.));

    // the sphere (with a smaller sphere orbiting through it, carved out)
    vec3 sphere_pos = vec3(0., -0.15, 8.);
    vec3 sphere_sub_pos = sphere_pos + .7*vec3(sin(time), 0., cos(time));
    ud(2, sub(dist_sphere(sphere_pos, 1.),
              dist_sphere(sphere_sub_pos, 0.5)));

    // the light sources
    ud(3, dist_sphere(vec3(1.5, 1.2, 7.), 0.7));
    ud(3, dist_sphere(vec3(-1.2, 0.5, 5.0), 0.5));

    medium = _inner_medium;
    // x: distance to the nearest boundary we are outside of (used by `march`);
    // y: distance to the nearest change of medium in any direction (used by `normal`).
    return vec2(_outer, min(_outer, _inner));
}
|
|
|
// Estimate the angle from the nearest surface to a point.
// Uses a one-sided finite difference of the distance field along each axis.
vec3 normal(vec3 pos) {
    int m; // discarded; only the distances matter here
    vec2 delta = vec2(NORMAL_DELTA, 0.);
    // Gradient estimate: (d(p) - d(p - e_i*delta)) / delta for each axis i,
    // where e_i is selected by swizzling (delta.xyy etc.).
    vec3 dq = (dist(m, pos).y - vec3(
        dist(m, pos - delta.xyy).y,
        dist(m, pos - delta.yxy).y,
        dist(m, pos - delta.yyx).y
    ))/delta.x;

    return normalize(dq);
}
|
|
|
/// Approximate the distance to the nearest object along a ray
/// using our signed distance function (`dist`).
/// Returns INF if nothing is hit within MAX_STEPS steps or MAX_DIST distance.
float march(ray r) {
    float total_dist = 0.;
    // Seed the first step with SCATTER_MAGIC so the ray immediately steps
    // off the surface it originated on instead of re-colliding with it.
    float delta = SCATTER_MAGIC;
    for (int steps = 0; steps < MAX_STEPS && total_dist < MAX_DIST && delta > MIN_DIST; steps++) {
        // Advance slightly less than the safe distance to tolerate
        // imprecision in the distance estimates (see IMPRECISION_FACTOR).
        total_dist += delta * IMPRECISION_FACTOR;
        vec3 pos = r.pos + r.dir * total_dist;
        int m; // medium is not needed while marching
        delta = dist(m, pos).x;
    }
    // A final delta within MIN_DIST means we converged onto a surface.
    return delta <= MIN_DIST ? total_dist : INF;
}
|
|
|
/// March the ray and return the point it hits
/// (components are INF/NaN if it hits nothing).
vec3 intersect(ray r) {
    float hit_dist = march(r);
    return r.pos + hit_dist * r.dir;
}
|
|
|
///
/// Determine how a ray interacts and/or is transmitted through a medium
/// until it enters a new medium, reflects off a surface, or scatters.
/// This function returns the outgoing ray in the new medium,
/// the proportion of light extinguished, and the cumulative light emitted by the medium.
///

//
// It is generally preferable to use a weighted distribution compared to uniform extinction
// so that the average ray returns more meaningful information. It is also preferable
// to use uniform extinction to color-biased extinction so that we can probabilistically
// choose to extinguish a ray early to save computation time.
//
// Note that this function, in conjunction with `emit`, is designed to work only in one
// direction (from the camera, to the light source). This is because some transformations
// are dependent on the color of the light involved (e.g. the BRDF), but we don't
// *know* the color of the light involved until the ray has reached a light source.
// Thus we first try to statistically choose a path based on the best weighted distribution
// we can come up *without* knowing the specific wavelength of light, and then
// retroactively fix it up and compute the *specific* color of light using the `emit` function.
// This split would still *work* travelling from light to camera, but would not be
// as effective as if they were simply combined in the first place.
//
// The combination of surface and volumetric lighting into `transmit` and the `transmit`/`emit`
// dichotomy is original to the best of my knowledge, but that's more likely
// because I'm new to the field than that no-one's come up with those things before.
//
// With regards to the rendering equation, this function should incorporate the following
// (and more, depending on the medium; this list is not exhaustive).
//
// If the medium is a surface:
// * the cosine distribution
// * the BSSDF, to the extent that it is independent of wavelength
//
// If the medium is not a surface (e.g. a gas):
// * the transmittance of the medium
// * the extinction coefficient, to the extent that it is independent of wavelength
// * the scattering coefficient
// * the phase function
//
transmission transmit(ray r) {
    vec3 norm = normal(r.pos);

    // save a position *outside* the medium in case we want to reflect
    vec3 np = r.pos + SCATTER_MAGIC*norm;
    // put ourselves *inside* the medium so we can calculate the material
    r.pos -= SCATTER_MAGIC*norm;
    int medium;
    dist(medium, r.pos);

    switch (medium) {

    case 0: // air material
        // pass through doing nothing
        vec3 p = intersect(ray(np, r.dir));
        if (any(isinf(p))) {
            // we're sailing through the void!
            return transmission(ray(p, r.dir), INF);
        }
        return transmission(ray(p, r.dir), 0.);

    case 1: // floor material
        // 25% chance of a perfect mirror reflection...
        if (rand() < 0.25) {
            vec3 refl = 2.*dot(-r.dir, norm)*norm + r.dir;
            return transmission(ray(np, refl), 0.);
        }

        // ...otherwise diffuse, shared with the sphere below.
        // fall-through
    case 2: // sphere material
        // 10% chance of a glossy bounce: a cosine-weighted lobe around the
        // mirror direction, pulled toward the normal.
        if (rand() < 0.1) {
            vec3 refl = 2.*dot(-r.dir, norm)*norm + r.dir;
            // NOTE(review): `cosine_direction(refl) + norm` is not renormalized
            // here — confirm this un-normalized direction is intentional.
            return transmission(ray(np, cosine_direction(refl) + norm), 0.);
        }
        // Diffuse (Lambertian) scatter.
        return transmission(ray(np, cosine_direction(norm)), 0.);

    case 3: // light material
        // don't bother reflecting off the lights.
        return transmission(NULL_RAY, INF);
        //return transmission(ray(np, cosine_direction(norm)), 0.);
    }

    // unknown medium: extinguish the ray
    return transmission(ray(vec3(0.), vec3(0.)), 1.);
}
|
|
|
/// Determine how much light to emit on the outgoing ray based on the incoming light.
/// This is affected primarily by the emittance and color-biased extinction
/// (e.g. the color of the object).
//
// With regards to the rendering equation, this function should incorporate the following:
// * the emittance of the surface/medium
// * the BSSDF to the extent that it is dependent on wavelength
// * the extinction coefficient to the extent that it is dependent on wavelength
//
// This function does not have to incorporate anything which is not color-related,
// and does not have to be probabilistic.
//
// Generally we want to move as much as possible from this function to `transmit`
// so that the information can be used to inform our path-making choices better.
vec3 emit(ray i, ray o, vec3 color) {
    int medium;
    // Step slightly inside the surface `i` landed on so `dist` reports the
    // medium this path segment terminated in (mirrors `transmit`).
    dist(medium, i.pos - SCATTER_MAGIC*normal(i.pos));
    switch (medium) {

    case 0: // air material
        // Nothing was hit: contribute no light.
        if (any(isinf(o.pos))) return vec3(0.);
        // Faint distance-dependent glow (cheap atmospheric scattering).
        color += sqrt(distance(i.pos, o.pos)) * 0.0005;
        return color;

    case 1: // floor material
        // Blue-tinted absorption plus a tiny blue emission.
        color.r *= 0.3;
        color.g *= 0.2;
        color.b *= 0.9;
        color += vec3(0., 0., 0.01);
        return color;

    case 2: // sphere material
        // Red-tinted absorption plus a tiny red emission.
        color.gb *= 0.3;
        color += vec3(0.004, 0., 0.);
        return color;

    case 3: // light material
        // Bright white emission, strongest when hit head-on.
        color += vec3(5.) * dot(-i.dir, normal(i.pos));
        return color;

    }
    // unknown material
    return vec3(1., 0., 1.);
}
|
|
|
/// Trace a full light path starting from camera ray `o` and return the
/// color carried back along it.
///
/// Phase 1 walks forward from the camera, recording the incoming ray at each
/// path vertex until the path is extinguished or runs out of segments.
/// Phase 2 walks the recorded path backward, letting each medium tint and
/// add its emission to the accumulated color (see `transmit`/`emit`).
vec3 light(ray o) {
    ray path[uint(PATH_SEGMENTS + 1)];
    int seg = 0;

    for (; seg < PATH_SEGMENTS + 1; seg++) {
        path[seg] = o;
        transmission tr = transmit(o);
        o = tr.outgoing;

        // We could also simply divide the color by the extinction,
        // but on average simply extinguishing the ray is more efficient,
        // and extinguishing the ray is far more computationally efficient.
        if (tr.extinction > rand()) break;
    }

    // BUG FIX: if the loop above ran to completion without a break,
    // `seg` is PATH_SEGMENTS + 1 — one past the last valid index of `path` —
    // and the loop below would read out of bounds. Clamp to the last
    // vertex that was actually stored.
    seg = min(seg, PATH_SEGMENTS);

    vec3 color = vec3(0.);
    for (; seg >= 0; seg--) {
        ray i = path[seg];
        color = emit(i, o, color);
        o = i;
    }

    return color;
}
|
|
|
/// Project a coordinate on the unit circle onto the unit hemisphere.
/// This is used for curvilinear perspective.
/// Outside the projection's domain the z component is NaN
/// (unless CLAMP_PERSPECTIVE fills those pixels in).
vec3 project(vec2 coord) {
    // The sign of the direction we're facing. 1 is forward, -1 is backward.
    float dir = 1.;
#ifdef TILE_PERSPECTIVE
    // This projection only supports coordinates within the unit circle
    // and only projects into the unit hemisphere. Ideally we'd want
    // some sort of extension which takes points outside the unit circle
    // and projects them somewhere behind you (with the point at infinity
    // being directly behind you), but I haven't come up with any reasonable
    // extension of this perspective system which behaves in that manner.
    //
    // What we can do instead is *tile* the projection so that adjacent projections
    // are a mirrored projection of the unit hemisphere *behind* you.
    // This is a logical extension because the projection becomes continuous
    // along the x and y axis (you're just looking around in perfect circles),
    // and it allows you to view the entire space. The main problem to this approach
    // is that all of the space between the tiled circles is still undefined,
    // but this is still the best solution which I'm aware of.
    coord -= 1.;
    coord = mod(coord + 2., 4.) - 1.;
    if (coord.x > 1.) {
        coord.x = 1. - (coord.x - 1.);
        dir = -dir;
    }
    if (coord.y > 1.) {
        coord.y = 1. - (coord.y - 1.);
        dir = -dir;
    }
#endif
    // Height of the hemisphere above this coordinate; NaN outside the unit circle.
    float z = dir*sqrt(1. - coord.x*coord.x - coord.y*coord.y);
#ifdef CLAMP_PERSPECTIVE
    // We can "define" the remaining undefined region of the screen
    // by clamping it to the nearest unit circle. This is sometimes
    // better than nothing, though it can also be a lot worse because
    // we still have to actually *render* all of those pixels.
    if (isnan(z)) {
        coord = normalize(coord);
        z = 0.;
    }
#endif
    return vec3(coord, z);
}
|
|
|
/// Return the camera's position, and rotate `e` (the projected view vector)
/// into the camera's orientation, renormalizing it.
vec3 camera(inout vec3 e) {
    // camera direction (forward vector)
    vec3 d = vec3(0., 0., 1.);

    // point projection relative to direction
    // this really needs to be simplified,
    // but I don't know the math to understand how to do it.
    vec3 up = vec3(0., 1., 0.); // NOTE(review): conceptually the world up; unused below
    //vec3 x = cross(up, d);
    vec3 x = vec3(d.z, 0., -d.x);               // cross(up, d) expanded for this fixed up
    //vec3 y = cross(d, x);
    vec3 y = vec3(-d.y*d.x, d.z*d.z+d.x*d.x, -d.y*d.z); // cross(d, x) expanded

    // mat3 arguments are column-major, so the *rows* of `rot` are x, y, d;
    // `rot * e` therefore gives (dot(x,e), dot(y,e), dot(d,e)):
    // `e` expressed in the camera basis.
    mat3 rot = mat3(
        x.x, y.x, d.x,
        x.y, y.y, d.y,
        x.z, y.z, d.z
    );

    e = normalize(rot * e);

    // camera position
    return vec3(0., 0., 0.);
}
|
|
|
/// Map pixel coordinate (from 0,0 to resolution) onto the coordinate system
/// *we* use to represent the screen (from (-1, -1) to (1, 1) with a square
/// aspect ratio).
vec2 screen2square(vec2 screen) {
    // Normalize the rectangular screen into [-1, 1] on both axes.
    vec2 square = ((screen / iResolution.xy) - 0.5) * 2.;
    // Then shrink the shorter physical axis so both axes share one scale.
    if (iResolution.x > iResolution.y) {
        square.y = square.y * iResolution.y / iResolution.x;
    } else {
        square.x = square.x * iResolution.x / iResolution.y;
    }
    return square;
}
|
|
|
/// Render one sample for one pixel: project the pixel into a view direction
/// and trace a full light path. Returns vec4(NAN) for unrenderable pixels.
vec4 colorPixel(vec2 fragCoord) {
    vec2 uv = screen2square(fragCoord) * 1.;
    uv /= FOV; // FOV acts as a zoom factor on the projection (see `project`)
    vec3 eye = project(uv);
    if (any(isnan(eye))) {
        // The projection is undefined at this pixel coordinate (see `project`);
        // don't bother rendering it, and return 100% transparent black to indicate that
        // we didn't render it.
        // NOTE(review): actually returns vec4(NAN), which `mainImage`
        // detects with isnan() and skips — the comment above is figurative.
        return vec4(NAN);
    }

    vec3 pos = camera(eye);
    return vec4(light(ray(pos, eye)), 1.);
}
|
|
|
/// Convert from linear RGB to sRGB color space.
vec3 linear2srgb(vec3 linear_rgb) {
    // I believe the first version is technically more accurate,
    // but the difference is usually negligible in practice.
#if 1
    // copied from somewhere on the internet
    // Piecewise sRGB transfer function: linear segment near black,
    // 1/2.4-power curve elsewhere.
    bvec3 cutoff = lessThan(linear_rgb, vec3(0.0031308));
    vec3 higher = vec3(1.055)*pow(linear_rgb, vec3(1.0/2.4)) - vec3(0.055);
    vec3 lower = linear_rgb * vec3(12.92);

    // mix with a bvec selector picks `lower` wherever `cutoff` is true.
    return mix(higher, lower, cutoff);
    // end copied from somewhere on the internet
#else
    // Cheap approximation: plain gamma 2.2.
    return pow(linear_rgb, vec3(1./2.2));
#endif
}
|
|
|
/// Shadertoy entry point: average SAMPLES jittered path-traced samples for
/// this pixel, clamp to the displayable range, and convert to sRGB.
void mainImage(out vec4 fragColor, vec2 fragCoord) {
    initRandoms(fragCoord);

    // Sample a bunch of times around the pixel and return the average.
    vec4 color = vec4(0.);
    for (int i = 0; i < SAMPLES; i++) {
        // Jitter within the pixel footprint for spatial antialiasing.
        vec2 uv = fragCoord + vec2(rand(), rand()) - 0.5;
        // Add noise to time for temporal antialiasing.
        time = iTime + rand() * iTimeDelta;
        vec4 samp = colorPixel(uv);
        // We failed to render a sample at this location.
        // NOTE(review): skipped samples still count toward the divisor below,
        // darkening partially-rendered pixels — confirm this is intended.
        if (any(isnan(samp))) continue;
        color += samp;
    }
    color /= float(SAMPLES);

    // BUG FIX: this previously read `clamp(vec4(0.), color, vec4(1.))`,
    // passing `color` as minVal. GLSL leaves clamp() undefined whenever
    // minVal > maxVal, which happens as soon as any channel exceeds 1
    // (easy with the vec3(5.) light emission). Clamp `color` itself.
    color = clamp(color, vec4(0.), vec4(1.));
    fragColor = vec4(linear2srgb(color.rgb), color.a);
}