Rewrite #4, preparing for volumetric marching 2.0.

Added a ton of comments & docs, refactored, and *hopefully* now support
medium/medium transitions and nested media well enough to work.
Haven't tested that part yet, though.
master
James T. Martin 2021-12-17 13:05:49 -08:00
parent 63dac47c4d
commit 6906779065
Signed by: james
GPG Key ID: 4B7F3DA9351E577C
1 changed file with 845 additions and 0 deletions

path_march 2021-12-16.frag Normal file

@@ -0,0 +1,845 @@
//////// ================================
//////// Settings
//////// ================================
// See the descriptions for these in `project`. They're only relevant if you zoom out.
//#define TILE_PERSPECTIVE
//#define CLAMP_PERSPECTIVE
// Slightly randomize each sample's time for motion blur (see `mainImage`).
//#define MOTION_BLUR
// "FOV", loosely defined; affects *zoom*.
#define FOV (1.5)
#define SAMPLES 3
// The maximum number of times light can reflect or scatter before it is extinguished.
#define PATH_SEGMENTS 14
// The maximum number of steps a ray can take during marching before giving up
// and colliding with nothing. This prevents scenes from taking infinite time to render.
#define MAX_STEPS 200
// The maximum distance a ray can travel before we give up and just say it collides
// with nothing. This helps prevent the background from appearing warped by the
// foreground, which happens when rays that pass close to a foreground object
// run out of steps before reaching their destination while slightly farther
// rays do reach their target.
#define MAX_DIST 20.
// The minimum distance between two points before they are considered the same point.
// Setting a minimum distance prevents graphical glitches when ray marching parallel
// to a surface, where the ray does not intersect an object, but comes close enough
// that the march becomes so slow that it fails to reach its actual destination.
#define MIN_DIST (0.001953125/128.)
// The distance between samples when estimating a surface's normal.
#define NORMAL_DELTA (MIN_DIST/4.)
// Only march this fraction of each step's distance to account for imprecision
// in the distance calculations. Chosen by experimentation. If you have to set
// this low, that often means there's a bug somewhere (e.g. you forgot to call `normalize`).
#define IMPRECISION_FACTOR 0.9
//////// ================================
//////// DOCS: Declarations & documentation
//////// ================================
/*
Helpful resources:
* Inigo Quilez (website, youtube, shadertoy)
* Importance Sampling Techniques for Path Tracing in Participating Media (Christopher Kulla and Marcos Fajardo)
* Wikipedia articles:
* Rendering equation
* Bidirectional reflectance distribution function
* Bidirectional scattering distribution function
*/
//////// --------------------------------
//////// PIXEL: Pixels, color, and projections
//////// --------------------------------
/// Assign each pixel a color in the sRGB color space.
void mainImage(out vec4 color, vec2 pixel);
/// Convert a color from linear RGB to the sRGB color space.
vec3 linear2srgb(vec3 color);
/// Take the coordinate of a pixel on the screen and return a color
/// in the linear RGBA color space.
vec4 color_pixel(vec2 pixel);
/// Convert a pixel coordinate, which is dependent on resolution and aspect ratio,
/// to our internal coordinate system, which is not.
///
/// (Pixel coordinates run from (0, 0) to the window resolution, iResolution;
/// the internal coordinate system goes from (-1, -1) to (1, 1) with a square aspect ratio.)
vec2 screen2square(vec2 screen);
struct ray {
vec3 pos; // POSition (aka the origin)
vec3 dir; // DIRection
};
/// After converting pixel coordinates to screen coordinates, we still have a problem:
/// screen coordinates are 2d, but our world is 3d! The camera assigns each screen
/// coordinate to a ray in 3d space, indicating the position and angle which
/// we will be receiving light from.
ray camera_project(vec2 coord);
/// Project a coordinate on the unit circle onto the unit hemisphere.
/// This is used for curvilinear perspective.
vec3 project(vec2 coord);
/// The virtual camera has a position and orientation in 3d space.
/// This takes a ray positioned relative to `(0, 0, 0)` and facing `(0, 0, 1)`
/// and moves and orients it relative to the camera.
ray camera(ray r);
//////// --------------------------------
//////// LIGHT: Light simulation (path marching)
//////// --------------------------------
/// Perform (backwards) path marching. Simulate a single photon as it travels
/// through the world, bouncing off surfaces and scattering through media,
/// and compute its end color when it reaches the camera.
///
/// Note that this is a *backwards* path march, so it works the *opposite*
/// of real life. Our "photon" actually travels from the camera to the
/// light source (starting from the ray given by `camera_project`);
/// however, visually, this gets similar results to doing it forward, only
/// you waste less time computing photons which never reach the camera.
vec3 light(ray r);
/// Determine how a ray interacts with and/or is transmitted through a medium
/// until it enters a new medium, reflects off a surface, or scatters.
/// This function returns the outgoing ray in the new medium and the proportion
/// of light absorbed along the way.
struct transmission {
/// The new ray position and direction after colliding with a surface or scattering.
ray outgoing;
/// The proportion of light extinguished along the path; `light` uses this
/// as the probability of terminating the path early.
};
transmission transmit(ray r);
/// Determine how much light to emit on the outgoing ray based on the incoming light.
vec3 emit(ray incoming, ray outgoing, vec3 color);
//////// --------------------------------
//////// WORLD: World simulation (signed distance functions, or SDFs)
//////// --------------------------------
/// The current time. We use a global instead of a parameter for convenience.
/// We use this instead of iTime so that we can apply temporal anti-aliasing
/// (see the explanation in `mainImage`).
float time;
/// Compute the location at which this ray will next encounter a change of medium
/// (e.g. if it's in air, where it will hit a surface; if it's in a cloud,
/// where it will escape that cloud; and so on).
/// This will always return a position along the ray.
vec3 intersect(ray r);
/// Compute the distance along the ray at which it will first encounter a change of medium.
/// This is equivalent to `distance(intersect(r), r.pos)`.
float march(ray r);
/// Given a point on or near the surface of an object or medium, compute (or estimate)
/// the normal (i.e. the vector perpendicular to the surface).
vec3 normal(vec3 pos, int medium);
/// Take a point near the edge of a medium and nudge towards the surface
/// until it is outside of the medium.
vec3 nudge(vec3 pos, int medium);
/// The distance to the nearest change of medium (e.g. the surface of an object).
/// (This is used primarily by ray marching in the `march` function.)
float dist(vec3 pos);
/// The signed distance to the nearest surface of a given medium.
/// This is positive if you are outside the surface, or negative if you are inside it.
/// (This is used primarily when computing normals in the `normal` function.)
float mdist(vec3 pos, int medium);
/// The medium which encompasses a point.
/// In the case of multiple overlapping media (e.g. one object inside another object),
/// the innermost medium will be returned (i.e. the one it is *least* far from the surface of).
int medium(vec3 pos);
//////// --------------------------------
//////// UTIL: Utility functions (e.g. random number generation)
//////// --------------------------------
// Convenience definitions
#define INF (1./0.)
// NOTE: I used to use `sqrt(-1)`, but apparently that doesn't evaluate to NaN????
// This makes me wonder if NaN isn't portable due to constant folding or something.
#define NAN (0./0.)
#define NULL_RAY ray(vec3(0.), vec3(0.))
/// Forcibly sets the pixel color from within any function; used for debugging.
vec3 _bug = vec3(NAN);
/// Return a random number between 0 and 1 (with uniform distribution).
float rand();
/// Use the fragment coordinate and current frame to seed the random number generator.
void seed_randoms(vec2 fragCoord);
/// Randomly select a direction on a hemisphere facing the direction of the normal,
/// with a bias toward directions close to the normal.
/// Specifically, the bias is by `dot(norm, dir)`, making this a cosine-weighted
/// distribution, which is useful because `dot(norm, dir)` is a term in the
/// rendering equation.
vec3 cosine_direction(vec3 norm);
//////// ================================
//////// IMPL: Implementation
//////// ================================
//////// --------------------------------
//////// PIXEL: Pixels, color, and projections
//////// --------------------------------
void mainImage(out vec4 fragColor, vec2 fragCoord) {
seed_randoms(fragCoord);
// Supersampling. Sample the color within each pixel multiple times and take the average.
// In raster-based rendering, this is used mostly to prevent jagged edges;
// with path tracing, antialiasing is even *more* important to prevent graininess.
vec4 color = vec4(0.);
int successful_samples = 0;
for (int i = 0; i < SAMPLES; i++) {
// There are two ways to choose where to sample the color during multisampling:
// either in a fixed pattern of offsets, or by applying a *random* offset.
// Random offsets are more common in path tracing.
vec2 pixel = fragCoord + vec2(rand(), rand()) - 0.5;
// Apply temporal antialiasing (motion blur) by slightly randomizing the time.
// We distribute our samples across the time we estimate the frame will take
// to render, which in this case is simply the time the *last* frame took
// to render. This causes jerkiness if the time it takes to render a frame
// changes suddenly (stutters).
//
// TODO: a more sophisticated frame time estimate
#ifdef MOTION_BLUR
time = iTime + rand() * iTimeDelta;
#else
time = iTime;
#endif
vec4 samp = color_pixel(pixel);
// We failed to render a sample at this location for some reason,
// e.g. the projection for this pixel is undefined. NaN and infinity
// are infectious, and we must block them or our entire pixel will be corrupted.
if (any(isnan(samp)) || any(isinf(samp))) continue;
successful_samples++; // Only count successful samples towards the average.
// NOTE: It's technically possible for the sample to be *negative*.
// If the sample is negative, it's probably a bug, though hypothetically
// maybe you could have negative light as some sort of special effect or something
// (like a material that sucks the light out of the room. that could be cool.
// or creepy.)
color += samp;
}
color /= max(float(successful_samples), 1.);
if (!any(isnan(_bug))) { color = vec4(_bug, 1.); }
// Note that it is possible for this renderer to emit colors brighter than 1.0,
// for example if you use very bright or many light sources. These colors will be
// displayed incorrectly, appearing desaturated and having their brightness
// clamped to whatever color output is supported.
//
// This is particularly common if you have very bright lights in a scene,
// which is sometimes necessary for objects to be clearly visible. The result
// is flashes of over-bright white pixels where you should see color. One way
// to mitigate this is by increasing the number of samples per
// pixel; the average brightness per pixel is generally less than 1.0 when averaged
// out with the (more common) black pixels when no light source is encountered.
//
// Another mitigation approach would be to do color correction, where instead of
// trying to preserve the brightness by clamping the RGB values and losing saturation,
// you try to preserve the saturation by scaling down the brightness until the
// full saturation of the colors is visible (or at least part of it).
//
// TODO: Implement that more sophisticated color correction (it'd be really helpful
// when using only one sample per pixel).
//color = clamp(vec4(0.), color, vec4(1.));
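// A minimal sketch of that correction (untested; treating 1. as the target
// peak brightness is an assumption): instead of clamping each channel
// independently, scale the whole color down by its brightest channel so the
// hue survives.
//float peak = max(color.r, max(color.g, color.b));
//if (peak > 1.) color.rgb /= peak;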
// This shader operates in the linear RGB color space,
// but fragColor is expected to be in sRGB, so we convert.
fragColor = vec4(linear2srgb(color.rgb), color.a);
}
// NOTE: linear2srgb is in the vendored code section at the bottom of the file
vec2 screen2square(vec2 screen) {
// Map rectangular screen into square coordinate space.
vec2 square = ((screen / iResolution.xy) - 0.5) * 2.;
// Adjust for aspect ratio to get square coordinates.
if (iResolution.x > iResolution.y) {
return vec2(square.x, square.y * iResolution.y / iResolution.x);
} else {
return vec2(square.x * iResolution.x / iResolution.y, square.y);
}
}
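// Worked example: on a hypothetical 1920x1080 window, pixel (1920, 1080)
// maps to (1, 1) before the aspect correction; since the window is wider
// than it is tall, y is then scaled by 1080/1920 to give (1, 0.5625), so one
// coordinate unit spans the same number of pixels on both axes.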
vec4 color_pixel(vec2 pixel) {
vec2 coord = screen2square(pixel);
// Apply zoom.
//
// TODO: Add an option for dynamic zoom based on aspect ratio,
// so that the portion of the screen shown always exactly matches
// the maximum area defined under curvilinear projection.
coord /= FOV;
ray r = camera_project(coord);
if (any(isnan(r.dir))) {
// The projection is undefined at this pixel coordinate (see `project`);
// don't bother rendering it, and return 100% transparent black to indicate that
// we didn't render it.
return vec4(NAN);
}
// We could hypothetically do something with alpha later,
// but for now, always return 100% opacity.
return vec4(light(r), 1.);
}
ray camera_project(vec2 coord) {
return camera(ray(vec3(0.), project(coord)));
}
// TODO: add support for the usual, non-curvilinear perspective projection
// (and possibly other projections, just for fun?)
vec3 project(vec2 coord) {
// The sign of the direction we're facing. 1 is forward, -1 is backward.
float dir = 1.;
#ifdef TILE_PERSPECTIVE
// This projection only supports coordinates within the unit circle
// and only projects into the unit hemisphere. Ideally we'd want
// some sort of extension which takes points outside the unit circle
// and projects them somewhere behind you (with the point at infinity
// being directly behind you), but I haven't come up with any reasonable
// extension of this perspective system which behaves in that manner.
//
// What we can do instead is *tile* the projection so that adjacent projections
// are a mirrored projection of the unit hemisphere *behind* you.
// This is a logical extension because the projection becomes continuous
// along the x and y axis (you're just looking around in perfect circles),
// and it allows you to view the entire space. The main problem with this approach
// is that all of the space between the tiled circles is still undefined,
// but this is still the best solution I'm aware of.
coord -= 1.;
coord = mod(coord + 2., 4.) - 1.;
if (coord.x > 1.) {
coord.x = 1. - (coord.x - 1.);
dir = -dir;
}
if (coord.y > 1.) {
coord.y = 1. - (coord.y - 1.);
dir = -dir;
}
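// Worked example: coord = (1.5, 0.) shifts to (0.5, -1.), wraps back to
// (1.5, 0.), and then mirrors to (0.5, 0.) with dir = -1; that is, the same
// point on the unit hemisphere *behind* you, as described above.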
#endif
float z = dir*sqrt(1. - coord.x*coord.x - coord.y*coord.y);
#ifdef CLAMP_PERSPECTIVE
// We can "define" the remaining undefined region of the screen
// by clamping it to the nearest unit circle. This is sometimes
// better than nothing, though it can also be a lot worse because
// we still have to actually *render* all of those pixels.
// TODO: Add an option to allow stretching into a square instead of clamping?
// I imagine things could get pretty badly warped, but maybe it could be useful?
if (isnan(z)) {
coord = normalize(coord);
z = 0.;
}
#endif
return normalize(vec3(coord, z));
}
ray camera(ray r) {
// camera position
vec3 pos = vec3(0.);
// camera direction (faces forward, not up)
vec3 d = vec3(0., 0., 1.);
// point projection relative to direction
// this really ought to be simplified,
// but I don't know the math well enough to do it.
vec3 up = vec3(0., 1., 0.);
//vec3 x = cross(up, d);
vec3 x = vec3(d.z, 0., -d.x);
//vec3 y = cross(d, x);
vec3 y = vec3(-d.y*d.x, d.z*d.z+d.x*d.x, -d.y*d.z);
mat3 rot = mat3(
x.x, y.x, d.x,
x.y, y.y, d.y,
x.z, y.z, d.z
);
return ray(r.pos + pos, normalize(rot * r.dir));
}
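// For reference, the hand-expanded vectors above are what the general
// construction produces for up = (0., 1., 0.); a sketch (assuming `d` is
// normalized and not parallel to `up`):
//vec3 x = normalize(cross(up, d));
//vec3 y = cross(d, x);
//mat3 rot = mat3(x, y, d);
// Note that mat3(x, y, d) takes its arguments as *columns*, so it maps
// (0., 0., 1.) directly onto d, whereas the matrix built above lays x/y/d
// out as rows (its transpose). For the default d = (0., 0., 1.) the two
// agree; for other directions they would differ.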
//////// --------------------------------
//////// LIGHT: Light simulation (path marching)
//////// --------------------------------
vec3 light(ray o) {
// We handle this in two steps:
//
// 1. Trace the path a photon takes through the scene
// 2. Walk back through the path, computing what color the photon has
// by the time it reaches the camera.
//
// We do it this way because the color each material emits may be dependent
// on the specific incoming color (think of how a prism splits light),
// but because we trace each path from the camera instead of from the light source,
// we can't know what the incoming color *is* until we've *already* traced
// the entire path.
ray path[uint(PATH_SEGMENTS + 1)];
int seg = 0;
///
/// Step 1: Trace path
///
for (; seg < PATH_SEGMENTS + 1; seg++) {
path[seg] = o;
transmission tr = transmit(o);
o = tr.outgoing;
// This is an optimization. We can handle a material absorbing color in two ways:
//
// 1. multiply the incoming color by the absorption
// 2. simply randomly extinguish rays based on the absorption
//
// On average, these have the same effect (of removing some % of light),
// but extinguishing rays is much more computationally efficient.
// It *does* however slightly decrease the quality of *individual* rays,
// if you're using a small number of samples per pixel.
if (tr.extinction > rand()) break;
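// A sketch of the non-probabilistic alternative (option 1 above), assuming a
// hypothetical per-path `throughput` accumulator (and extinction kept within
// [0, 1]) that step 2 would weight the final color by:
//throughput *= 1. - tr.extinction;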
}
// If the loop above ran to completion without breaking, `seg` is now one past
// the end of `path`; clamp it so step 2 starts at the last stored segment.
seg = min(seg, PATH_SEGMENTS);
///
/// DEBUG:
///
ray debug = path[int(mod(iTime, float(PATH_SEGMENTS)))];
// Show the distances to objects on the first bounce (resp. every bounce)
//return vec3(march(path[1]));
//return vec3(march(debug));
// Show the positions of the ray at every path segment
//return (debug.pos + 2.) / 4.;
// Show the directions of the ray at every path segment
//return abs(debug.dir);
// Show the mediums of every path segment
//return vec3(float(medium(debug.pos))/3.);
// Show the normals of every path segment
//return abs(normal(debug.pos, medium(debug.pos)));
// Show the total number of path segments a ray took
//return vec3(float(seg))/float((PATH_SEGMENTS + 1));
///
/// Step 2: Accumulate color
///
vec3 color = vec3(0.);
for (; seg >= 0; seg--) {
ray i = path[seg];
color = emit(i, o, color);
o = i;
}
return color;
}
//
// It is generally preferable to use a weighted distribution over uniform extinction
// so that the average ray returns more meaningful information. It is likewise preferable
// to use uniform extinction over color-biased extinction so that we can probabilistically
// choose to extinguish a ray early to save computation time.
//
// Note that this function, in conjunction with `emit`, is designed to work only in one
// direction (from the camera, to the light source). This is because some transformations
// are dependent on the color of the light involved (e.g. the BRDF), but we don't
// *know* the color of the light involved until the ray has reached a light source.
// Thus we first try to statistically choose a path based on the best weighted distribution
// we can come up *without* knowing the specific wavelength of light, and then
// retroactively fix it up and compute the *specific* color of light using the `emit` function.
// This split would still *work* travelling from light to camera, but would not be
// as effective as if they were simply combined in the first place.
//
// The combination of surface and volumetric lighting into `transmit` and the `transmit`/`emit`
// dichotomy is original to the best of my knowledge, but that's more likely
// because I'm new to the field than that no-one's come up with those things before.
//
// With regards to the rendering equation, this function should incorporate the following
// (and more, depending on the medium; this list is not exhaustive).
//
// If the medium is a surface:
// * the cosine distribution
// * the BSDF, to the extent that it is independent of wavelength
//
// If the medium is not a surface (e.g. a gas):
// * the transmittance of the medium
// * the extinction coefficient, to the extent that it is independent of wavelength
// * the scattering coefficient
// * the phase function
//
transmission transmit(ray r) {
int m = medium(r.pos);
vec3 norm = normal(r.pos, m);
bool no_norm = any(isnan(norm));
// coordinate *outside* the medium (if the normal is undefined,
// we can't meaningfully nudge, so stay put)
vec3 np = no_norm ? r.pos : nudge(r.pos, m);
if (no_norm) norm = vec3(0.);
switch (m) {
case 0: // air material
// pass through doing nothing
vec3 p = intersect(r);
if (any(isinf(p))) {
// we're sailing through the void!
return transmission(ray(p, r.dir), INF);
}
return transmission(ray(p, r.dir), 0.);
case 1: // floor material
if (rand() < 0.25) {
vec3 refl = 2.*dot(-r.dir, norm)*norm + r.dir;
return transmission(ray(np, refl), 0.);
}
// fall-through
case 2: // sphere material
if (rand() < 0.2) {
vec3 refl = 2.*dot(-r.dir, norm)*norm + r.dir;
return transmission(ray(np, normalize(cosine_direction(refl) + norm)), 0.);
}
return transmission(ray(np, cosine_direction(norm)), 0.);
case 3: // light material
// don't bother reflecting off the lights.
return transmission(NULL_RAY, INF);
//return transmission(ray(np, cosine_direction(norm)), 0.);
}
// unknown medium: extinguish the ray
return transmission(NULL_RAY, 1.);
}
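// A hedged sketch of what a participating-medium case inside the switch above
// might look like, following the scattering-coefficient and phase-function
// notes in the docs. Everything here is an assumption (the medium index 4,
// the coefficient 0.5, isotropic scattering); nothing in the scene uses it:
//case 4: // fog material
//{
//float scatter_coeff = 0.5;
//// Sample a free-flight distance with pdf scatter_coeff*exp(-scatter_coeff*t).
//float t = -log(1. - rand()) / scatter_coeff;
//if (t < march(r)) {
//// Scatter before reaching the boundary: isotropic phase function,
//// i.e. a uniformly random direction on the sphere.
//float u = 2.*rand() - 1.;
//float a = 6.2831853 * rand();
//vec3 dir = vec3(sqrt(1. - u*u)*vec2(cos(a), sin(a)), u);
//return transmission(ray(r.pos + r.dir*t, dir), 0.);
//}
//// Otherwise pass through to the medium boundary, like air.
//return transmission(ray(intersect(r), r.dir), 0.);
//}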
// This is affected primarily by the emittance and color-biased extinction
// (e.g. the color of the object).
//
// With regards to the rendering equation, this function should incorporate the following:
// * the emittance of the surface/medium
// * the BSDF to the extent that it is dependent on wavelength
// * the extinction coefficient to the extent that it is dependent on wavelength
//
// This function does not have to incorporate anything which is not color-related,
// and does not have to be probabilistic.
//
// Generally we want to move as much as possible from this function to `transmit`
// so that the information can be used to inform our path-making choices better.
vec3 emit(ray i, ray o, vec3 color) {
int m = medium(i.pos);
switch (m) {
case 0: // air material
if (any(isinf(o.pos))) return vec3(0.);
color += sqrt(distance(i.pos, o.pos)) * 0.0005;
return color;
case 1: // floor material
color.r *= 0.3;
color.g *= 0.2;
color.b *= 0.9;
color += vec3(0., 0., 0.01);
return color;
case 2: // sphere material
color.gb *= 0.3;
color += vec3(0.004, 0., 0.);
return color;
case 3: // light material
color += vec3(5.) * dot(-i.dir, normal(i.pos, m));
return color;
}
// unknown material
return vec3(1., 0., 1.);
}
//////// --------------------------------
//////// WORLD: World simulation (signed distance functions, or SDFs)
//////// --------------------------------
vec3 intersect(ray r) {
return r.pos + r.dir*march(r);
}
float march(ray r) {
int m = medium(r.pos);
float total_dist = 0.;
float delta = INF;
vec3 pos = r.pos;
int steps = 0;
for (; steps < MAX_STEPS && total_dist < MAX_DIST && delta > MIN_DIST; steps++) {
delta = dist(pos);
total_dist += delta * IMPRECISION_FACTOR;
pos = r.pos + r.dir * total_dist;
// If the distance is less than the minimum distance, fudge our position
// until we've actually *entered* the medium, or we pass by the surface
// without entering it. MIN_DIST could have a substantial impact on performance
// when travelling parallel to a surface (small MIN_DISTs would take
// an eternity to get by).
for (; delta <= MIN_DIST && total_dist < MAX_DIST && steps < MAX_STEPS && medium(pos) == m; steps++) {
total_dist += MIN_DIST;
pos = r.pos + r.dir * total_dist;
delta = dist(pos);
// NOTE: If you're parallel to a surface, you could waste a *lot* of time
// making tiny steps without ever making contact or passing by. It might
// be worth adding a FUDGE_DIST to `intersect` so that if you're less
// than that distance away, it simply computes the normal of the surface
// and fudges the position *into* the surface. That's actually how MIN_DIST
// works for normal ray tracers and path tracers, but because we support
// transparent media, we need to be *inside* the new medium at a
// medium/medium boundary so we can check what medium we're actually *in*.
// We could work around this by tracking which medium we're in and then
// writing functions around "we're in the nearest medium *except* the last
// medium we were in" and "march until we exit the medium we were last in,
// then continue through the medium we're actually in until we encounter a
// different medium, re-encounter the same medium, or travel a certain
// distance" (the distance cap again protects against parallel surfaces,
// where you never actually *enter* the medium you thought you were in and
// keep travelling through the old one). Hopefully, we won't need any of
// these workarounds, but I'm making note of it for later.
}
}
return steps < MAX_STEPS && total_dist < MAX_DIST ? total_dist : INF;
}
vec3 normal(vec3 pos, int medium) {
// Although it is often possible to compute a normal analytically
// (and doing so is often more precise and more efficient),
// we just compute an estimate because it's good enough and saves us a lot of math.
vec2 delta = vec2(-NORMAL_DELTA, 0.);
vec3 dq = (mdist(pos, medium) - vec3(
mdist(pos - delta.xyy, medium),
mdist(pos - delta.yxy, medium),
mdist(pos - delta.yyx, medium)
))/delta.x;
if (any(isnan(dq))) return vec3(0.);
// NOTE: You can generally get away with removing either this division or `normalize`
// (but not both) without significantly impacting accuracy.
return normalize(dq);
}
vec3 nudge(vec3 pos, int m) {
vec3 norm = normal(pos, m);
for (int steps = 0; steps < MAX_STEPS && medium(pos) == m; steps++) {
pos += MIN_DIST*norm;
}
return pos;
}
////
//// Combinators for creating signed distance functions and modelling objects.
////
float dist_plane(vec3 p, float y) {
return distance(p.y, y);
}
// a plane with infinite depth
float dist_floor(vec3 p, float y) {
return p.y - y;
}
float dist_sphere(vec3 p, vec3 pos, float r) {
return distance(p, pos) - r;
}
/// Model difference/subtraction (objects in `a` but not in `b`).
float sub(float a, float b) {
return max(a, -b);
}
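/// Model union (objects in `a` or `b`) and intersection (objects in both).
/// These two are a sketch included for completeness, in the same style as
/// `sub` above; nothing in the current scene uses them yet.
float sdf_union(float a, float b) {
return min(a, b);
}
float sdf_intersection(float a, float b) {
return max(a, b);
}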
////
//// Cheat and calculate distance-related stuff with globals
//// to reduce the amount of argument-passing we have to do.
////
/// For computing `dist`
float _dist; // The unsigned distance to the nearest surface.
/// For computing `mdist`
int _focus; // The medium we're focusing on for mdist.
float _mdist; // The nearest signed distance to the medium's surface.
/// For computing `medium`
int _medium; // The innermost medium encountered so far.
float _sdist; // The signed distance to the nearest surface of an object we are inside.
/// Cache the position to avoid re-computing distances between calls
/// (e.g. if you call `dist` and then `medium`).
float _time;
vec3 _pos;
/// A helper function to update the globals.
///
/// mnemonic: "Update Distances"
void ud(int m, float dist) {
float adist = abs(dist);
// `dist`
if (adist < _dist) _dist = adist;
// `mdist`
if (m == _focus && adist < abs(_mdist)) _mdist = dist;
// `medium`
if (dist <= 0. && dist > _sdist) {
_sdist = dist;
_medium = m;
}
}
////
//// Compute distances
////
void update_scene(vec3 pos, int medium);
void scene(vec3 pos);
float dist(vec3 pos) {
update_scene(pos, -1);
return _dist;
}
float mdist(vec3 pos, int focus) {
update_scene(pos, focus);
return _mdist;
}
int medium(vec3 pos) {
update_scene(pos, -1);
return _medium;
}
/// Initialize global variables and then compute distances.
void update_scene(vec3 pos, int focus) {
bool peq = pos.x == _pos.x && pos.y == _pos.y && pos.z == _pos.z;
if (peq && time == _time && (focus == -1 || focus == _focus)) return;
_pos = pos;
_time = time;
// We are infinitely distant to nothing.
_dist = INF;
// We are infinitely distant to the nearest nothing.
_focus = focus;
_mdist = INF;
// We are infinitely interior to the surface of the vacuum/air.
_medium = 0;
_sdist = -INF;
scene(pos);
// If we didn't focus on any particular object for `mdist`,
// then just pretend we were focusing on whatever we happen to be inside
// so we can cache it for later.
if (focus == -1) {
_focus = _medium;
_mdist = _sdist;
}
}
////
//// SCENE: Objects in the scene
////
void scene(vec3 p) {
// the floor
ud(1, dist_floor(p, -1.));
// the sphere
vec3 sphere_pos = vec3(0., -0.15, 8.);
vec3 sphere_sub_pos = sphere_pos + .7*vec3(sin(time), 0., cos(time));
ud(2, sub(dist_sphere(p, sphere_pos, 1.),
dist_sphere(p, sphere_sub_pos, 0.5)));
// the light sources
ud(3, dist_sphere(p, vec3(1.5, 1.2, 7.), 0.7));
ud(3, dist_sphere(p, vec3(-1.2, 0.5, 5.0), 0.5));
}
//////// ================================
//////// VENDOR: Vendored code
//////// ================================
////
//// AUTHOR: unknown
////
vec3 linear2srgb(vec3 linear_rgb) {
// I believe the first version is technically more accurate,
// but the difference is usually negligible in practice.
#if 1
// copied from somewhere on the internet
bvec3 cutoff = lessThan(linear_rgb, vec3(0.0031308));
vec3 higher = vec3(1.055)*pow(linear_rgb, vec3(1.0/2.4)) - vec3(0.055);
vec3 lower = linear_rgb * vec3(12.92);
return mix(higher, lower, cutoff);
// end copied from somewhere on the internet
#else
return pow(linear_rgb, vec3(1./2.2));
#endif
}
////
//// AUTHOR: iq (https://www.shadertoy.com/view/4sfGzS)
////
// oldschool rand() from Visual Studio
int seed = 1;
int irand(void) { seed = seed*0x343fd+0x269ec3; return (seed>>16)&32767; }
float rand(void) { return float(irand())/32767.0; }
// hash to initialize the random sequence (copied from Hugo Elias)
int hash( int n )
{
n = (n << 13) ^ n;
return n * (n * n * 15731 + 789221) + 1376312589;
}
void seed_randoms(vec2 fragCoord) {
ivec2 q = ivec2(fragCoord);
seed = hash(q.x+hash(q.y+hash(iFrame)));
}
////
//// AUTHOR: fizzer, via iq: http://www.amietia.com/lambertnotangent.html
////
vec3 cosine_direction(vec3 norm) {
float u = rand();
float v = rand();
float a = 6.2831853 * v;
u = 2.0*u - 1.0;
return normalize(norm + vec3(sqrt(1.0 - u*u)*vec2(cos(a), sin(a)), u));
}