diff --git a/path_march 2021-12-14.frag b/path_march 2021-12-14.frag
new file mode 100644
index 0000000..8b61ed0
--- /dev/null
+++ b/path_march 2021-12-14.frag
@@ -0,0 +1,427 @@
+// See the descriptions for these in `project`. They're only relevant if you zoom out.
+//#define TILE_PERSPECTIVE
+//#define CLAMP_PERSPECTIVE
+// "FOV", poorly defined; affects *zoom*.
+#define FOV (1.5)
+#define SAMPLES 1
+#define LIGHT_SAMPLES 7
+
+// The maximum number of steps a ray can take during marching before giving up
+// and colliding with nothing. This prevents scenes from taking infinite time to render.
+#define MAX_STEPS 300
+// The maximum distance a ray can travel before we give up and just say it collides
+// with nothing. This helps prevent the background from appearing warped by the foreground,
+// which happens when rays that march close to a foreground object run out of steps before
+// reaching their destination while slightly farther rays do reach their target.
+#define MAX_DIST 50.
+// The minimum distance between two points before they are considered the same point.
+// Setting a minimum distance prevents graphical glitches when ray marching parallel
+// to a surface, where the ray does not intersect an object, but comes close enough
+// that the march becomes so slow that it fails to reach its actual destination.
+#define MIN_DIST (0.001953125/256.)
+// The distance between samples when estimating a surface's normal.
+#define NORMAL_DELTA (MIN_DIST/2.)
+// How far away from the source point to start marching a scatter ray, to prevent it
+// from accidentally colliding with the surface it just left.
+#define SCATTER_MAGIC (MIN_DIST*16.)
+// Only march this fraction of the estimated distance at a time, to account for
+// imprecision in the distance calculations. Chosen by experimentation.
+#define IMPRECISION_FACTOR 1.
+
+#ifndef SAMPLES
+#define SAMPLES 1
+#endif
+
+#ifndef LIGHT_SAMPLES
+#define LIGHT_SAMPLES 1
+#endif
+
+#define NAN sqrt(-1.)
+#define INF (1./0.)
+
+// stolen from iq: https://www.shadertoy.com/view/4sfGzS
+//------------------------------------------------------------------
+// oldschool rand() from Visual Studio
+//------------------------------------------------------------------
+#if 1
+int seed = 1;
+void srand(int s ) { seed = s; }
+int rand(void) { seed = seed*0x343fd+0x269ec3; return (seed>>16)&32767; }
+float frand(void) { return float(rand())/32767.0; }
+#else
+// this isn't copied from iq. it's unclear which version is faster.
+float seed = 0.;
+void srand(int s) { seed = mod(float(s), 256. * 256.)/256.; }
+float frand(void) {
+    float x, y;
+    x = modf(seed, y);
+    seed += 1./256.;
+    x *= 256.;
+    return texelFetch(iChannel0, ivec2(int(x), int(y)), 0).r;
+}
+#endif
+//------------------------------------------------------------------
+// hash to initialize the random sequence (copied from Hugo Elias)
+//------------------------------------------------------------------
+int hash( int n )
+{
+    n = (n << 13) ^ n;
+    return n * (n * n * 15731 + 789221) + 1376312589;
+}
+
+void initRandoms(vec2 fragCoord) {
+    ivec2 q = ivec2(fragCoord);
+    srand( hash(q.x+hash(q.y+hash(iFrame))));
+}
+//------------------------------------------------------------------
+// end stolen from iq
+
+float time;
+
+/// The amount of light a surface reflects toward the viewer depends on the angle
+/// between the incoming light and the surface normal. The naive approach is to
+/// sample directions uniformly and weight each sample by `dot(norm, dir)`, but by
+/// preferentially sampling directions according to that weight, we achieve the same
+/// effect with more information per sample on average.
+// by fizzer via IQ: http://www.amietia.com/lambertnotangent.html
+vec3 cosine_direction(vec3 norm) {
+    float u = frand();
+    float v = frand();
+
+    float a = 6.2831853 * v;
+    u = 2.0*u - 1.0;
+    return normalize(norm + vec3(sqrt(1.0 - u*u)*vec2(cos(a), sin(a)), u));
+}
+// end by fizzer
+
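+// For contrast, a sketch of the naive alternative described above: sample the hemisphere
+// uniformly and apply the cosine weight explicitly. `uniform_direction` is a hypothetical
+// helper that nothing else in this shader calls; with it, each bounce would have to scale
+// its reflected contribution by 2.0 * dot(norm, dir) (the ratio of the two sampling
+// densities) to end up with the same answer as `cosine_direction` gives for free.
+vec3 uniform_direction(vec3 norm) {
+    float u = 2.0*frand() - 1.0;        // cos(theta), uniform in [-1, 1]
+    float a = 6.2831853 * frand();      // phi, uniform in [0, 2*pi)
+    vec3 d = vec3(sqrt(1.0 - u*u)*vec2(cos(a), sin(a)), u);
+    return dot(d, norm) < 0.0 ? -d : d; // flip into the hemisphere around `norm`
+}
+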
+float dist_light(vec3 pos) {
+    // vec3(1.5, 1.5, 4.)
+    return distance(pos, vec3(1.5, 1.2, 7.)) - 0.7;
+}
+
+float dist_floor(vec3 pos) {
+    return pos.y + 1.0;
+}
+
+// The distance from a point to the nearest object in the scene.
+float dist(vec3 pos) {
+    vec3 sphere_pos = vec3(0., -0.2, 8.);
+    vec3 neg_pos = sphere_pos + .7*vec3(sin(time), 0., cos(time));
+    float sphere = distance(pos, sphere_pos) - 1.;
+    float sphere2 = distance(pos, neg_pos) - 0.5;
+    float plane = dist_floor(pos);
+    return min(dist_light(pos), min(max(sphere, -sphere2), plane));
+}
+
+/// Approximate the distance to the nearest object along a ray
+/// using our signed distance function (`dist`).
+float march1(vec3 origin, vec3 direction, float magic) {
+    float total_dist = 0.;
+    float delta = magic >= MIN_DIST ? magic / IMPRECISION_FACTOR : dist(origin);
+    for (int steps = 0; steps < MAX_STEPS && total_dist < MAX_DIST && delta >= MIN_DIST; steps++) {
+        total_dist += delta * IMPRECISION_FACTOR;
+        vec3 pos = origin + direction * total_dist;
+        delta = dist(pos);
+    }
+    return delta < MIN_DIST ? total_dist : INF;
+}
+
+vec3 intersect1(vec3 origin, vec3 direction, float magic) {
+    return origin + direction*march1(origin, direction, magic);
+}
+
+// intersect2/march2 step the position directly and re-derive the travelled distance,
+// whereas march1/intersect1 accumulate the distance and re-derive the position; the two
+// accumulate floating-point error differently (see the debug branches in `light`).
+vec3 intersect2(vec3 origin, vec3 direction, float magic) {
+    float total_dist = 0.;
+    float delta = magic >= MIN_DIST ? magic / IMPRECISION_FACTOR : dist(origin);
+    vec3 pos = origin;
+    for (int steps = 0; steps < MAX_STEPS && total_dist < MAX_DIST && delta >= MIN_DIST; steps++) {
+        pos += direction * delta * IMPRECISION_FACTOR;
+        delta = dist(pos);
+        total_dist = distance(origin, pos);
+    }
+    return delta < MIN_DIST ? pos : vec3(INF);
+}
+
+float march2(vec3 origin, vec3 direction, float magic) {
+    return distance(origin, intersect2(origin, direction, magic));
+}
+
+// Which of the two marching implementations to use.
+#define MARCH_ALG 1
+
+float march(vec3 origin, vec3 direction, float magic) {
+#if MARCH_ALG
+    return march1(origin, direction, magic);
+#else
+    return march2(origin, direction, magic);
+#endif
+}
+
+/// Intersect with an object in the scene by ray marching.
+vec3 intersect(vec3 origin, vec3 direction, float magic) {
+#if MARCH_ALG
+    return intersect1(origin, direction, magic);
+#else
+    return intersect2(origin, direction, magic);
+#endif
+}
+
+float march(vec3 origin, vec3 direction) {
+    return march(origin, direction, 0.);
+}
+
+vec3 intersect(vec3 pos, vec3 dir) {
+    return intersect(pos, dir, 0.);
+}
+
+// Estimate the normal of the nearest surface to a point.
+vec3 normal(vec3 pos) {
+    vec2 delta = vec2(NORMAL_DELTA, 0.);
+    vec3 dq = (dist(pos) - vec3(
+        dist(pos - delta.xyy),
+        dist(pos - delta.yxy),
+        dist(pos - delta.yyx)
+    )); // alternate version: divide by delta.x, but skip the normalize
+
+    return normalize(dq);
+}
+
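+// An alternative normal estimator, sketched for comparison and not used anywhere in this
+// shader: the widely used "tetrahedron" trick also costs four evaluations of `dist`, but
+// takes them at symmetric offsets instead of one-sided differences from the center point.
+vec3 normal_tet(vec3 pos) {
+    const vec2 k = vec2(1., -1.);
+    return normalize(k.xyy*dist(pos + k.xyy*NORMAL_DELTA) +
+                     k.yyx*dist(pos + k.yyx*NORMAL_DELTA) +
+                     k.yxy*dist(pos + k.yxy*NORMAL_DELTA) +
+                     k.xxx*dist(pos + k.xxx*NORMAL_DELTA));
+}
+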
+struct LightSample {
+    // Position of the point of interest
+    vec3 position;
+    // Direction of the incoming light
+    vec3 incoming;
+    // Direction of the outgoing light
+    vec3 outgoing;
+} light_samples[LIGHT_SAMPLES];
+
+/// Choose which direction to cast the next ray, depending on the
+/// surface normal, material, and angle of incoming light.
+///
+/// This distribution *must be weighted by importance*! In particular,
+/// by the angle between the incoming and outgoing rays, and by the BRDF;
+/// if the BRDF would evenly reject all wavelengths, it should instead probabilistically
+/// choose to *not* scatter by emitting a NaN direction so we can stop unnecessarily
+/// bouncing our paths around.
+vec3 scatter(vec3 pos) {
+    return cosine_direction(normal(pos));
+}
+
+/// Choose how much light to reflect based on the bidirectional reflectance distribution function,
+/// then add light according to the material's emittance. These are fundamentally separate concepts,
+/// but they're combined into this function so we only have to sample the material once instead of twice.
+void brdf_emit(inout vec3 color, in int sample_i) {
+    LightSample samp = light_samples[sample_i];
+    if (dist_light(samp.position) <= MIN_DIST) {
+        color += vec3(0.95);
+    } else if (dist_floor(samp.position) <= MIN_DIST) {
+        color.rg *= 0.6;
+    } else {
+        color.gb *= 0.3;
+    }
+}
+
+vec4 light(vec3 pos, vec3 dir) {
+#if 1
+    int sample_i = 0;
+
+    for (; sample_i < LIGHT_SAMPLES; sample_i++) {
+        pos = intersect(pos, dir, SCATTER_MAGIC);
+        // We're stuck in the void forever!
+        if (any(isinf(pos))) break;
+        vec3 neg_incoming = scatter(pos);
+        vec3 outgoing = -dir;
+        // The surface is darkening itself by simply choosing not to sample.
+        // This is going to be worse on a per-sample basis for most materials,
+        // but it averages out, and massively saves performance by letting us quit
+        // recursing.
+        if (any(isnan(neg_incoming))) break;
+        light_samples[sample_i] = LightSample(pos, -neg_incoming, outgoing);
+        dir = neg_incoming;
+    }
+
+    if (sample_i == 0) return vec4(0.);
+
+    vec3 color = vec3(0.);
+    // Apply emission and attenuation back-to-front, starting from the last sample
+    // that was actually recorded (sample_i is one past it after the loop above).
+    for (sample_i--; sample_i >= 0; sample_i--) {
+        brdf_emit(color, sample_i);
+    }
+
+    return vec4(color, 1.);
+
+#elif 0
+    // debug intersection location
+    pos = intersect(pos, dir);
+    if (any(isinf(pos))) return vec4(0.);
+    return vec4(pos, 1.);
+#elif 0
+    // debug ray marching
+    float dist = march(pos, dir);
+    return (vec4(vec3(dist), 1.) - 0.) / 10.;
+#elif 0
+    // debug ray marching algorithms
+    float dist = distance(march1(pos, dir, 0.), march2(pos, dir, 0.));
+    return vec4(vec3(dist), 1.);
+#elif 0
+    // debug normals
+    pos = intersect(pos, dir);
+    vec3 norm = normal(pos);
+    return vec4(abs(norm), 1.);
+#elif 0
+    // debug scattering
+    pos = intersect(pos, dir);
+    return vec4(abs(scatter(pos)), 1.);
+#else
+    // debug scatter magic
+    pos = intersect(pos, dir);
+    vec3 o = pos;
+    vec3 scat = scatter(pos);
+    pos = intersect1(o, scat, SCATTER_MAGIC)/10.;
+    pos = vec3(distance(pos, intersect2(o, scat, SCATTER_MAGIC)/10.));
+    //pos = vec3(march(pos, scat, SCATTER_MAGIC))/10.;
+    return vec4(pos, 1.);
+#endif
+}
+
+/// Project a coordinate on the unit circle onto the unit hemisphere.
+/// This is used for curvilinear perspective.
+vec3 project(vec2 coord) {
+    // The sign of the direction we're facing. 1 is forward, -1 is backward.
+    float dir = 1.;
+    #ifdef TILE_PERSPECTIVE
+    // This projection only supports coordinates within the unit circle
+    // and only projects into the unit hemisphere. Ideally we'd want
+    // some sort of extension which takes points outside the unit circle
+    // and projects them somewhere behind you (with the point at infinity
+    // being directly behind you), but I haven't come up with any reasonable
+    // extension of this perspective system which behaves in that manner.
+    //
+    // What we can do instead is *tile* the projection so that adjacent projections
+    // are a mirrored projection of the unit hemisphere *behind* you.
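+    // For example (worked through the code below): the screen point (2, 0), one unit
+    // past the right edge of the unit circle, wraps to coord = (0, 0) with dir = -1,
+    // i.e. it ends up looking straight backwards.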
+    // This is a logical extension because the projection becomes continuous
+    // along the x and y axes (you're just looking around in perfect circles),
+    // and it allows you to view the entire space. The main problem with this approach
+    // is that all of the space between the tiled circles is still undefined,
+    // but this is still the best solution I'm aware of.
+    coord -= 1.;
+    coord = mod(coord + 2., 4.) - 1.;
+    if (coord.x > 1.) {
+        coord.x = 1. - (coord.x - 1.);
+        dir = -dir;
+    }
+    if (coord.y > 1.) {
+        coord.y = 1. - (coord.y - 1.);
+        dir = -dir;
+    }
+    #endif
+    float z = dir*sqrt(1. - coord.x*coord.x - coord.y*coord.y);
+    #ifdef CLAMP_PERSPECTIVE
+    // We can "define" the remaining undefined region of the screen
+    // by clamping it to the nearest unit circle. This is sometimes
+    // better than nothing, though it can also be a lot worse because
+    // we still have to actually *render* all of those pixels.
+    if (isnan(z)) {
+        coord = normalize(coord);
+        z = 0.;
+    }
+    #endif
+    return vec3(coord, z);
+}
+
+/// Return the camera's position and direction.
+vec3 camera(inout vec3 e) {
+    // camera direction (forward vector)
+    vec3 d = vec3(0., 0., 1.);
+
+    // point projection relative to direction
+    // this really needs to be simplified,
+    // but I don't know the math well enough to do it.
+    vec3 up = vec3(0., 1., 0.);
+    //vec3 x = cross(up, d);
+    vec3 x = vec3(d.z, 0., -d.x);
+    //vec3 y = cross(d, x);
+    vec3 y = vec3(-d.y*d.x, d.z*d.z+d.x*d.x, -d.y*d.z);
+
+    mat3 rot = mat3(
+        x.x, y.x, d.x,
+        x.y, y.y, d.y,
+        x.z, y.z, d.z
+    );
+
+    e = normalize(rot * e);
+
+    // camera position
+    return vec3(0., 0., 0.);
+
+    /*e = vec3(
+        e.x*d.z - e.y*d.y*d.x + d.x,
+        e.y*(d.z*d.z+d.x*d.x) + d.y,
+        -e.x*d.x - e.y*d.y*d.z + d.z
+    );
+    e = normalize(e);*/
+
+}
+
+/// Map a pixel coordinate (from (0, 0) to the resolution) onto the coordinate system
+/// *we* use to represent the screen (from (-1, -1) to (1, 1) with a square
+/// aspect ratio).
+vec2 screen2square(vec2 screen) {
+    // Map the rectangular screen into square coordinate space.
+    vec2 square = ((screen / iResolution.xy) - 0.5) * 2.;
+    // Adjust for aspect ratio to get square coordinates.
+    if (iResolution.x > iResolution.y) {
+        return vec2(square.x, square.y * iResolution.y / iResolution.x);
+    } else {
+        return vec2(square.x * iResolution.x / iResolution.y, square.y);
+    }
+}
+
+vec4 colorPixel(vec2 fragCoord) {
+    vec2 uv = screen2square(fragCoord) * 1.;
+    uv /= FOV;
+    vec3 eye = project(uv);
+    if (any(isnan(eye))) {
+        // The projection is undefined at this pixel coordinate (see `project`);
+        // don't bother rendering it, and return NaN so the caller knows we
+        // didn't render it.
+        return vec4(NAN);
+    }
+
+    vec3 pos = camera(eye);
+    return light(pos, eye);
+}
+
+/// Convert from linear RGB to sRGB color space.
+vec3 linear2srgb(vec3 linear_rgb) {
+    // I believe the first version is technically more accurate,
+    // but the difference is usually negligible in practice.
+    #if 1
+    // copied from somewhere on the internet
+    bvec3 cutoff = lessThan(linear_rgb, vec3(0.0031308));
+    vec3 higher = vec3(1.055)*pow(linear_rgb, vec3(1.0/2.4)) - vec3(0.055);
+    vec3 lower = linear_rgb * vec3(12.92);
+
+    return mix(higher, lower, cutoff);
+    // end copied from somewhere on the internet
+    #else
+    return pow(linear_rgb, vec3(1./2.2));
+    #endif
+}
+
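+// The reverse conversion, sketched here only for completeness; nothing in this shader
+// calls it, but it's the standard sRGB-to-linear decode if sRGB-encoded input (such as
+// a texture) ever needs to be brought into linear space before use.
+vec3 srgb2linear(vec3 srgb) {
+    bvec3 cutoff = lessThan(srgb, vec3(0.04045));
+    vec3 higher = pow((srgb + vec3(0.055)) / vec3(1.055), vec3(2.4));
+    vec3 lower = srgb / vec3(12.92);
+
+    return mix(higher, lower, cutoff);
+}
+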
+void mainImage(out vec4 fragColor, vec2 fragCoord) {
+    initRandoms(fragCoord);
+
+    // Sample a bunch of times around the pixel and return the average.
+    vec4 color = vec4(0.);
+    for (int i = 0; i < SAMPLES; i++) {
+        vec2 uv = fragCoord + vec2(frand(), frand()) - 0.5;
+        // Add noise to time for temporal antialiasing.
+        time = iTime + frand() * iTimeDelta;
+        vec4 samp = colorPixel(uv);
+        // We failed to render a sample at this location.
+        if (any(isnan(samp))) continue;
+        color += samp;
+    }
+    color /= float(SAMPLES);
+
+    // Clamp to the displayable range before converting to sRGB.
+    color = clamp(color, vec4(0.), vec4(1.));
+    fragColor = vec4(linear2srgb(color.rgb), color.a);
+}
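+
+// For reference, a sketch of the same path accumulation written "forward", carrying an
+// explicit throughput, instead of recording samples and replaying them back-to-front
+// the way `light` does. `light_forward` is a hypothetical name, is not called anywhere,
+// hard-codes the same materials as `brdf_emit` (the 0.95 emitter, the 0.6 floor, the
+// 0.3 sphere), and returns only RGB; it exists to illustrate that the backward loop in
+// `light` computes the same sum.
+vec3 light_forward(vec3 pos, vec3 dir) {
+    vec3 radiance = vec3(0.);
+    vec3 throughput = vec3(1.);
+    for (int i = 0; i < LIGHT_SAMPLES; i++) {
+        pos = intersect(pos, dir, SCATTER_MAGIC);
+        if (any(isinf(pos))) break;
+        vec3 next = scatter(pos);
+        // `light` drops a bounce entirely when `scatter` declines to sample,
+        // so do the same here before accumulating anything for this hit.
+        if (any(isnan(next))) break;
+        if (dist_light(pos) <= MIN_DIST) {
+            // Emission is seen through everything the path has reflected off so far.
+            radiance += throughput * vec3(0.95);
+        } else if (dist_floor(pos) <= MIN_DIST) {
+            throughput *= vec3(0.6, 0.6, 1.0);
+        } else {
+            throughput *= vec3(1.0, 0.3, 0.3);
+        }
+        dir = next;
+    }
+    return radiance;
+}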