//////// ================================
//////// SETTINGS: Settings
//////// ================================
//
// If you don't feel like tweaking settings, try one of these presets:
//
// 0: Maximize FPS at the cost of image quality. (DEFAULT)
// 1: A balance between image quality and FPS.
// 2: Just render a pretty static image.
// 3: Show off the weird tiling render feature.
//
// The user settings below will override whatever preset you use.
//
#define PRESET 0

//////// --------------------------------
//////// User settings
//////// --------------------------------
//////// Tweak these according to your preferences and the power of your graphics card.
//////// Comment out a setting to restore it to its default value.

////
//// Sample settings
////

// The number of color samples taken per pixel. Increasing this has a dramatic effect on
// image quality, reducing graininess and preventing overly-bright pixels ("fireflies").
// However, the GPU power needed to render a frame scales linearly with
// the number of samples.
//#define SAMPLES 1

// The maximum number of times light can reflect or scatter before it is extinguished.
//#define PATH_SEGMENTS 14

// If a pixel color is too bright to fit in sRGB, there are two ways to handle it:
//
// 1. Clamp the pixel within the limits of sRGB, resulting in (near-)maximum
//    brightness at the cost of the color's saturation. (If it's too bright, it'll
//    become entirely white.)
// 2. Reduce the brightness of the color until it fits within sRGB, preserving
//    the color's saturation, but losing even *more* brightness.
//
// Correcting for saturation generally looks better, but isn't usually necessary
// with more than five or so samples (because the bright pixels will average out
// with the dark pixels and fall back within sRGB), so this is *on* by default
// with 1-2 samples and *off* by default with 5+ samples.
// (For a concrete sketch of the two strategies, see the comment block at the end
// of the perspective settings below.)
//#define SATURATION_CORRECTION 1

////
//// Perspective settings
////

// This shader natively uses a square (circular?) aspect ratio. With ASPECT_RATIO_CROP
// enabled, if you use a wide aspect ratio, the frame will have its height
// cropped so that the image can take up the full width of the screen.
//#define ASPECT_RATIO_CROP 1

// This setting affects how far you zoom in on the scene.
// Greater values = more zoom. Fractional values zoom out. Negative values mirror the scene.
//#define FOV 1.5

// Camera position and angle. (Feel free to reference `time` here.)
//#define CAMERA_POS vec3(0.)
// (Don't worry, we call `normalize` for you.)
//#define CAMERA_DIR vec3(0., 0., 1.)

///
/// TILE_PERSPECTIVE and CLAMP_PERSPECTIVE are only relevant if you zoom out
/// (e.g. an FOV < ~1.15). For more information on how and why these settings
/// behave the way they do, see their extended descriptions in the `project` function.
///

// Points on the screen >1 or <-1 show the portion of the scene *behind* you,
// mirrored so that the edges of adjacent tiles line up (e.g. tiles above and
// below are mirrored vertically; tiles to the left and right, horizontally).
// This tiling is infinite. You might want to combine this with an IMAGE_OFFSET of
// (-1, 0) so that you can see two whole hemispheres instead of one whole hemisphere
// and two halves on opposite sides.
//#define TILE_PERSPECTIVE 0

// Points on the screen outside of the unit circle (within a tile) are clamped
// to the nearest point on the unit circle. This doesn't look very good, but
// might be preferable to just rendering black?
//#define CLAMP_PERSPECTIVE 0
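// For intuition, here is a minimal sketch of the two over-bright strategies described
// under SATURATION_CORRECTION above (hypothetical helpers, not used by this shader):
//
//     vec3 fit_clamp(vec3 c) { return min(c, vec3(1.)); } // 1: clamp; may wash out to white
//     vec3 fit_scale(vec3 c) {                            // 2: rescale the whole color
//         float m = max(c.r, max(c.g, c.b));
//         return m > 1. ? c / m : c;                      // brightest channel becomes 1
//     }
//
// e.g. vec3(2., 1., 0.) clamps to (1., 1., 1.) (pure white) but rescales to
// (1., .5, 0.) (a dimmer, still-saturated orange).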
// Slide the image around on the screen. Each tile is `2x2`, centered on the
// origin, so an offset of e.g. (2,0) with TILE_PERSPECTIVE enabled
// will show you the portion of the scene *behind* you.
//#define IMAGE_OFFSET vec2(0., 0.)

////
//// Simulation settings
////

// The maximum number of steps a ray can take during marching before giving up
// and colliding with nothing. This prevents scenes from taking infinite time to render.
//#define MAX_STEPS 200

// The maximum distance a ray can travel before we give up and just say it collides
// with nothing. This helps prevent the background from appearing warped by the
// foreground, which happens when rays that march close to a foreground object run out
// of steps before reaching their destination while slightly farther rays do reach theirs.
//#define MAX_DIST 20.

// Average the color across frames by storing them in the buffer.
// This is like supersampling, but across frames instead of within a pixel,
// which lets you render with thousands of samples without crashing.
// It's strongly advised that you enable FREEZE_TIME when this is enabled!
// This uses iFrame, so if you want to enable it, make sure you hit the
// "reset time" button or things will get screwed up.
//#define AVERAGE_FRAMES 1

// Set a time in seconds. The simulation will be frozen at this point in time every frame.
// Comment this out to allow time to pass normally.
//#define FREEZE_TIME 2.75

// Loop time over an interval of this duration, beginning at FREEZE_TIME,
// or at 0 if FREEZE_TIME is not set.
//#define LOOP_TIME 0.

// Set the maximum duration of temporal antialiasing (i.e. how much time
// motion blur smears across). Note that this is a *maximum*, and motion
// blur will never last longer than the duration of a frame. That said, when rendering
// a still image with FREEZE_TIME you probably want this set to 0., and if you're
// stuttering a lot, the large variance in frame times can make objects in the image
// appear to jerk back and forth, so this probably shouldn't be any higher
// than the reciprocal of your average framerate. Comment this out to
// remove any cap on the amount of motion blur.
//#define MAX_TAA_DIFF (1./30.)

//////// --------------------------------
//////// Internal settings
//////// --------------------------------
//////// If you're just viewing the shader, you shouldn't usually need to tweak these.

// The minimum distance between two points before they are considered the same point.
// Lower values increase the sharpness of the image at the cost of performance,
// and cause rounding errors for objects very far from the origin.
//
// Ray marching halves the distance to the surface of an object each iteration, but the
// end goal of ray marching is to pass slightly *inside* the object. Setting a minimum
// distance prevents Zeno's paradox. It also serves as an optimization,
// because the number of steps increases logarithmically as you decrease the minimum distance.
//
// Chosen to be 2^(-9), or about 2mm, because that's the largest you can set it before
// the quality of the image is significantly affected. You can set it as low as about
// 2^(-19) before things begin to break. It's good to experiment with both high and low
// values to help find bugs in the numerical precision of the light simulation:
// if you have precision bugs, the simulation ends up getting affected pretty dramatically
// by changes to MIN_DIST, whereas a numerically stable simulation is not affected much at all.
//
// I expect that a minimum distance of 2^(-9) would work until about 10km from the origin
// with 32-bit floating point before starting to break down, but I have not tested it.
//#define MIN_DIST (0.001953125)
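// (A back-of-the-envelope check on that 10km guess, assuming 1 unit = 1 meter as the
// "2^(-9) is about 2mm" figure above implies: a 32-bit float carries a 24-bit
// significand, so near a coordinate of 10^4 the gap between adjacent representable
// values is roughly 10^4 * 2^(-23), or about 1.2mm — just under MIN_DIST, which is
// exactly where you'd expect the marching to start misbehaving.)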
// The distance between samples when estimating a surface's normal. Smaller values result
// in more precise calculations, but are more sensitive to numerical imprecision.
// This should probably be less than MIN_DIST.
//#define NORMAL_DELTA (MIN_DIST/4.)

// Only march this much of MIN_DIST at a time to account for imprecision in the distance
// calculations. Chosen by experimentation. If you have to set this low, that often means
// that there's a bug somewhere (e.g. you forgot to call `normalize`).
//
// Right now, the simulation is numerically stable and I don't have to use it at all!
// But I often find that it's necessary to set this to around ~0.92 when debugging
// numerical issues.
//#define IMPRECISION_FACTOR 1.

//////// --------------------------------
//////// Default settings & presets
//////// --------------------------------
//////// So you can restore a setting to its default value by commenting it out.

#ifndef PRESET
#define PRESET 0
#endif

//// PRESET 1
#if PRESET == 1

#ifndef SAMPLES
#define SAMPLES 6
#endif
#ifndef PATH_SEGMENTS
#define PATH_SEGMENTS 16
#endif
#ifndef MAX_TAA_DIFF
#define MAX_TAA_DIFF (1./30.)
#endif

//// PRESET 2
#elif PRESET == 2

#ifndef PATH_SEGMENTS
#define PATH_SEGMENTS 16
#endif
#ifndef MAX_STEPS
#define MAX_STEPS 300
#endif
#ifndef MAX_DIST
#define MAX_DIST 100.
#endif
#ifndef MIN_DIST
#define MIN_DIST (0.001953125/256.)
#endif
#ifndef MAX_TAA_DIFF
#define MAX_TAA_DIFF 0.
#endif
#ifndef AVERAGE_FRAMES
#define AVERAGE_FRAMES 1
#endif
#ifndef FREEZE_TIME
#define FREEZE_TIME 2.75
#endif
#ifndef SATURATION_CORRECTION
#define SATURATION_CORRECTION 0
#endif

//// PRESET 3
#elif PRESET == 3

#ifndef FOV
#define FOV 0.5
#endif
#ifndef TILE_PERSPECTIVE
#define TILE_PERSPECTIVE 1
#endif
#ifndef CLAMP_PERSPECTIVE
#define CLAMP_PERSPECTIVE 1
#endif
#ifndef IMAGE_OFFSET
#define IMAGE_OFFSET vec2(0., 0.)
#endif

#endif

//// PRESET 0 (default values)

#ifndef SAMPLES
#define SAMPLES 1
#endif
#ifndef PATH_SEGMENTS
#define PATH_SEGMENTS 10
#endif
#ifndef SATURATION_CORRECTION
#if SAMPLES > 5
#define SATURATION_CORRECTION 0
#else
#define SATURATION_CORRECTION 1
#endif
#endif
#ifndef FOV
#define FOV 1.5
#endif
#ifndef CAMERA_POS
#define CAMERA_POS vec3(0.)
#endif
#ifndef CAMERA_DIR
#define CAMERA_DIR vec3(0., 0., 1.)
#endif
#ifndef ASPECT_RATIO_CROP
#define ASPECT_RATIO_CROP 1
#endif
#ifndef TILE_PERSPECTIVE
#define TILE_PERSPECTIVE 0
#endif
#ifndef CLAMP_PERSPECTIVE
#define CLAMP_PERSPECTIVE 0
#endif
#ifndef IMAGE_OFFSET
#define IMAGE_OFFSET vec2(0., 0.)
#endif
#ifndef MAX_STEPS
#define MAX_STEPS 200
#endif
#ifndef MAX_DIST
#define MAX_DIST 20.
#endif
#ifndef AVERAGE_FRAMES
#define AVERAGE_FRAMES 0
#endif
// FREEZE_TIME, LOOP_TIME, and MAX_TAA_DIFF are *undefined* by default.
#ifndef MIN_DIST
#define MIN_DIST (0.001953125/8.)
#endif
#ifndef NORMAL_DELTA
#define NORMAL_DELTA (MIN_DIST/4.)
#endif
#ifndef IMPRECISION_FACTOR
#define IMPRECISION_FACTOR 1.
#endif
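// Example of how the preset machinery above resolves: user settings always beat presets,
// so to render PRESET 2's high-quality still but with a wider view, you could put
//
//     #define PRESET 2
//     #define FOV 1.
//
// at the top; FOV is then already defined when the preset is expanded, and every other
// setting falls through the #ifndef chain to PRESET 2's values (and then to PRESET 0's).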
//////// ================================
//////// DOCS: Declarations & documentation
//////// ================================

/*
Helpful resources:
* Inigo Quilez (website, youtube, shadertoy)
* Importance Sampling Techniques for Path Tracing in Participating Media
  (Christopher Kulla and Marcos Fajardo)
* Wikipedia articles:
  * Rendering equation
  * Bidirectional reflectance distribution function
  * Bidirectional scattering distribution function
*/

//////// --------------------------------
//////// PIXEL: Pixels, color, and projections
//////// --------------------------------

/// Assign each pixel a color in the sRGB color space.
void mainImage(out vec4 color, vec2 pixel);

/// Take the coordinate of a pixel on the screen and return a color
/// in the linear RGBA color space.
vec4 color_pixel(vec2 pixel);

/// Convert a pixel coordinate, which is dependent on resolution and aspect ratio,
/// to our internal coordinate system, which is not.
///
/// (Pixel coordinates run from (0, 0) to the window resolution, iResolution;
/// the internal coordinate system runs from (-1, -1) to (1, 1) with a square aspect ratio.)
vec2 pixel2square(vec2 pixel);

struct ray {
    vec3 pos; // POSition (aka the origin)
    vec3 dir; // DIRection
};

/// After converting pixel coordinates to screen coordinates, we still have a problem:
/// screen coordinates are 2D, but our world is 3D! The camera assigns each screen
/// coordinate to a ray in 3D space, indicating the position and angle from which
/// we will be receiving light.
ray camera_project(vec2 coord);

/// Project a coordinate on the unit circle onto the unit hemisphere.
/// This is used for curvilinear perspective.
vec3 project(vec2 coord);

/// The virtual camera has a position and orientation in 3D space.
/// This takes a ray positioned relative to `(0, 0, 0)` and facing `(0, 0, 1)`
/// and moves and orients it relative to the camera.
ray camera(ray r);

//////// --------------------------------
//////// LIGHT: Light simulation (path marching)
//////// --------------------------------

/// Perform (backwards) path marching. Simulate a single photon as it travels
/// through the world, bouncing off surfaces and scattering through media,
/// and compute its end color when it reaches the camera.
///
/// Note that this is a *backwards* path march, so it works the *opposite*
/// of real life. Our "photon" actually travels from the camera to the
/// light source (the ray being the one you get from the `camera_project` function);
/// however, visually, this gets similar results to doing it forwards, only
/// you waste less time computing photons which never reach the camera.
vec3 light(ray r);

/// Determine how a ray interacts with and/or is transmitted through a medium
/// until it enters a new medium, reflects off a surface, or scatters.
/// This function returns the outgoing ray in the new medium and the proportion
/// of light absorbed along the way.
struct transmission {
    /// The new ray position and direction after colliding with a surface or scattering.
    ray outgoing;
    /// The proportion of all light extinguished along the path.
    float extinction;
};
transmission transmit(ray r);

/// Determine how much light to emit along the outgoing ray based on the incoming light.
vec3 emit(ray incoming, ray outgoing, vec3 color);
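// To get a feel for the `transmission` contract: a hypothetical perfect mirror would
// return
//
//     transmission(ray(surface_pos, reflect(r.dir, norm)), 0.) // bounce; extinguish nothing
//
// (where `surface_pos` and `norm` are stand-ins for the collision point and surface
// normal), while a medium that swallows rays outright — as the light material in
// `transmit` below does — returns an extinction of INF, which always ends the path.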
//////// --------------------------------
//////// WORLD: World simulation (signed distance functions, or SDFs)
//////// --------------------------------

/// The current time. We use a global instead of a parameter for convenience.
/// We use this instead of iTime so that we can apply temporal anti-aliasing
/// (see the explanation in `mainImage`).
float time;

/// Compute the location at which this ray will next encounter a change of medium
/// (e.g. if it's in the air, where it will encounter a surface; if it's in a cloud,
/// where it'll escape that cloud; etc.).
/// This will always return a position along the ray.
vec3 intersect(ray r);

/// Compute the distance along the ray at which it will first encounter a change of medium.
/// This is equivalent to `distance(intersect(r), r.pos)`.
float march(ray r);

/// Given a point on or near the surface of an object or medium, compute (or estimate)
/// the normal (i.e. the vector perpendicular to the surface).
vec3 normal(vec3 pos, int medium);

/// Take a point near the edge of a medium and nudge it along the normal
/// until it is outside of the medium.
vec3 nudge(vec3 pos, int medium);

/// The distance to the nearest change of medium (e.g. the surface of an object).
/// (This is used primarily by ray marching in the `march` function.)
float dist(vec3 pos);

/// The signed distance to the nearest surface of a given medium.
/// This is positive if you are outside the surface, or negative if you are inside it.
/// (This is used primarily when computing normals in the `normal` function.)
float mdist(vec3 pos, int medium);

/// The medium which encompasses a point.
/// In the case of multiple overlapping media (e.g. one object inside another object),
/// the innermost medium is returned (i.e. the one whose surface the point is
/// *least* far inside of).
int medium(vec3 pos);

//////// --------------------------------
//////// UTIL: Utility functions (e.g. random number generation)
//////// --------------------------------

// Convenience definitions
#define INF (1./0.)
// NOTE: I used to use `sqrt(-1)`, but apparently that doesn't evaluate to NaN????
// This makes me wonder if NaN isn't portable due to constant folding or something.
#define NAN (0./0.)
#define NULL_RAY ray(vec3(0.), vec3(0.))

/// Used to forcibly set the pixel color from functions. Used for debugging.
vec3 _bug = vec3(NAN);

/// Return a random number between 0 and 1 (with uniform distribution).
float rand();
/// Use the fragment coordinate and current frame to seed the random number generator.
void seed_randoms(vec2 fragCoord);

/// Randomly select a direction on a hemisphere facing the direction of the normal,
/// with a bias toward directions close to the normal.
/// Specifically, the bias is by `dot(norm, dir)`, making this a cosine-weighted
/// distribution, which is useful because `dot(norm, dir)` is a term in the
/// rendering equation.
vec3 cosine_direction(vec3 norm);

// Convert between RGB and HSV. Used for SATURATION_CORRECTION.
vec3 rgb2hsv(vec3 c);
vec3 hsv2rgb(vec3 c);

//////// ================================
//////// IMPL: Implementation
//////// ================================

//////// --------------------------------
//////// PIXEL: Pixels, color, and projections
//////// --------------------------------

void mainImage(out vec4 fragColor, vec2 fragCoord) {
    seed_randoms(fragCoord);

    // Supersampling. Sample the color within each pixel multiple times and take the average.
    // In raster-based rendering, this is used mostly to prevent jagged edges;
    // with path tracing, antialiasing is even *more* important, to prevent graininess.
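    // (Averaging N independent samples cuts the noise's standard deviation by a factor
    // of sqrt(N) — the standard Monte Carlo convergence rate — so doubling SAMPLES costs
    // twice the GPU time but only buys about a 1.4x reduction in grain.)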
    vec4 color = vec4(0.);
    int successful_samples = 0;
    for (int i = 0; i < SAMPLES; i++) {
        // There are two ways to choose where to sample the color during multisampling:
        // either in a fixed pattern of offsets, or by applying a *random* offset.
        // Random offsets are more common in path tracing.
        vec2 pixel = fragCoord + vec2(rand(), rand()) - 0.5;

#ifdef FREEZE_TIME
        time = FREEZE_TIME;
#else
        time = 0.;
#endif
#ifdef LOOP_TIME
        time += mod(iTime, LOOP_TIME);
#else
#ifndef FREEZE_TIME
        time = iTime;
#endif
#endif

        // Apply temporal antialiasing (motion blur) by slightly randomizing the time.
        // We distribute our samples across the time we estimate the frame will take
        // to render, which in this case is simply the time the *last* frame took
        // to render. This causes jerkiness if the time it takes to render a frame
        // changes suddenly (stutters).
        //
        // TODO: a more sophisticated frame time estimate
        float max_taa_diff = INF;
#ifdef MAX_TAA_DIFF
        max_taa_diff = MAX_TAA_DIFF;
#endif
        time += rand() * min(iTimeDelta, max_taa_diff);

        vec4 samp = color_pixel(pixel);
        // We failed to render a sample at this location for some reason,
        // e.g. the projection for this pixel is undefined. NaN and infinity
        // are infectious, and we must block them or our entire pixel will be corrupted.
        if (any(isnan(samp)) || any(isinf(samp))) continue;
        successful_samples++; // Only count successful samples towards the average.

        // NOTE: It's technically possible for the sample to be *negative*.
        // If the sample is negative, it's probably a bug, though hypothetically
        // maybe you could have negative light as some sort of special effect or something
        // (like a material that sucks the light out of the room. that could be cool.
        // or creepy.)
        color += samp;
    }
    color /= max(float(successful_samples), 1.);

    if (!any(isnan(_bug))) {
        color = vec4(_bug, 1.);
    }

    // NOTE: It is possible for this renderer to emit colors brighter than 1.0,
    // for example if you use very bright or many light sources. These colors will be
    // displayed incorrectly, appearing desaturated and having their brightness
    // clamped to whatever color output is supported.
    //
    // This is common in particular if you have very bright lights in a scene,
    // which is sometimes necessary for objects to be clearly visible. The result
    // is that you see flashes of over-bright white pixels where you should
    // see color. One way to mitigate this is to increase the number of samples per
    // pixel; the average brightness per pixel is generally less than 1.0 once averaged
    // out with the (more common) black pixels produced when no light source is encountered.
    //
    // Another mitigation approach is color correction: instead of
    // trying to preserve the brightness by clamping the RGB values and losing saturation,
    // you try to preserve the saturation by scaling down the brightness until the
    // full saturation of the colors is visible (or at least part of it).
#if SATURATION_CORRECTION
    // TODO: I'm sure there's a way to do this directly without having to
    // convert between color spaces twice. This was just more convenient in the moment.
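    // (Untested sketch of the direct route, assuming ideal HSV: V is just the max RGB
    // channel, and hue/saturation are invariant under positive scaling, so clamping V
    // to 1 is equivalent to
    //     color.rgb /= max(1., max(color.r, max(color.g, color.b)));
    // though the smoothed hsv2rgb used below means the two won't match bit-for-bit.)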
    color.xyz = rgb2hsv(color.rgb);
    color.z = min(color.z, 1.); // clamp value to 1
    color.rgb = hsv2rgb(color.xyz);
#else
    //color = clamp(color, vec4(0.), vec4(1.));
#endif

#if AVERAGE_FRAMES
    vec2 uv = fragCoord/iResolution.xy;
    vec4 rest = texture(iChannel0, uv).rgba;
    color = (rest*float(iFrame) + color) / float(iFrame + 1);

    // Don't output NaN or inf or it'll corrupt the buffer and leave you with
    // black pixels that never go away, because they break the average!
    if (any(isnan(color)) || any(isinf(color))) {
        fragColor = rest;
        return;
    }
#endif

    // This shader operates in the linear RGB color space,
    // but fragColor is expected to be in sRGB, so we convert.
    // NOTE: We do this in the main image now. (It's basically the *only* thing we do
    // in the main image.)
    fragColor = color;
}
// NOTE: linear2srgb is in the vendored code section at the bottom of the file

vec4 color_pixel(vec2 pixel) {
    vec2 coord = pixel2square(pixel) * 1.;

    // Apply zoom.
    //
    // TODO: Add an option for dynamic zoom based on aspect ratio,
    // so that the portion of the screen shown always exactly matches
    // the maximum area defined under curvilinear projection.
    coord /= FOV;

#ifdef IMAGE_OFFSET
    coord += IMAGE_OFFSET;
#endif

    ray r = camera_project(coord);
    if (any(isnan(r.dir))) {
        // The projection is undefined at this pixel coordinate (see `project`);
        // don't bother rendering it, and return NaN to signal that we didn't
        // render it (mainImage discards NaN samples).
        return vec4(NAN);
    }
    // We could hypothetically do something with alpha later,
    // but for now, always return 100% opacity.
    return vec4(light(r), 1.);
}

vec2 pixel2square(vec2 screen) {
    // Map the rectangular screen into square coordinate space.
    vec2 square = ((screen / iResolution.xy) - 0.5) * 2.;
    // Adjust for aspect ratio to get square coordinates.
#if ASPECT_RATIO_CROP
    if (iResolution.x > iResolution.y) {
#else
    if (iResolution.x < iResolution.y) {
#endif
        return vec2(square.x, square.y * iResolution.y / iResolution.x);
    } else {
        return vec2(square.x * iResolution.x / iResolution.y, square.y);
    }
}

ray camera_project(vec2 coord) {
    return camera(ray(vec3(0.), project(coord)));
}

// TODO: add support for the usual, non-curvilinear perspective projection
// (and possibly other projections, just for fun?)
vec3 project(vec2 coord) {
    // The sign of the direction we're facing. 1 is forward, -1 is backward.
    float dir = 1.;

#if TILE_PERSPECTIVE
    // This projection only supports coordinates within the unit circle
    // and only projects into the unit hemisphere. Ideally we'd want
    // some sort of extension which takes points outside the unit circle
    // and projects them somewhere behind you (with the point at infinity
    // being directly behind you), but I haven't come up with any reasonable
    // extension of this perspective system which behaves in that manner.
    //
    // What we can do instead is *tile* the projection so that adjacent tiles
    // are a mirrored projection of the unit hemisphere *behind* you.
    // This is a logical extension because the projection becomes continuous
    // along the x and y axes (you're just looking around in perfect circles),
    // and it allows you to view the entire space. The main problem with this approach
    // is that all of the space between the tiled circles is still undefined,
    // but this is still the best solution I'm aware of.
    coord -= 1.;
    coord = mod(coord + 2., 4.) - 1.;
    if (coord.x > 1.) {
        coord.x = 1. - (coord.x - 1.);
        dir = -dir;
    }
    if (coord.y > 1.) {
        coord.y = 1. - (coord.y - 1.);
        dir = -dir;
    }
#endif

    float z = dir*sqrt(1. - coord.x*coord.x - coord.y*coord.y);
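    // (For coordinates outside the unit circle, the argument of `sqrt` above is
    // negative, so on most hardware `z` comes out NaN; unless CLAMP_PERSPECTIVE
    // rescues it below, that NaN propagates out of `project`, and `color_pixel`
    // then skips the pixel.)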
#if CLAMP_PERSPECTIVE
    // We can "define" the remaining undefined region of the screen
    // by clamping it to the nearest point on the unit circle. This is sometimes
    // better than nothing, though it can also be a lot worse, because
    // we still have to actually *render* all of those pixels.
    // TODO: Add an option to allow stretching into a square instead of clamping?
    // I imagine things could get pretty badly warped, but maybe it could be useful?
    if (isnan(z)) {
        coord = normalize(coord);
        z = 0.;
    }
#endif

    return normalize(vec3(coord, z));
}

ray camera(ray r) {
    // camera position
    vec3 pos = CAMERA_POS;
    // camera direction (faces forward, not up)
    vec3 d = normalize(CAMERA_DIR);

    // point the projection relative to the direction
    // TODO: this really ought to be simplified,
    // but I don't know the math well enough to do it.
    vec3 up = vec3(0., 1., 0.);
    //vec3 x = cross(up, d);
    vec3 x = vec3(d.z, 0., -d.x);
    //vec3 y = cross(d, x);
    vec3 y = vec3(-d.y*d.x, d.z*d.z+d.x*d.x, -d.y*d.z);
    // NOTE: mat3 constructors are column-major. Building `rot` with columns (x, y, d)
    // maps the canonical forward direction (0, 0, 1) onto the camera direction `d`;
    // listing the components row-wise would silently build the transpose (the
    // world-to-camera rotation) instead.
    mat3 rot = mat3(x, y, d);
    return ray(r.pos + pos, normalize(rot * r.dir));
}

//////// --------------------------------
//////// LIGHT: Light simulation (path marching)
//////// --------------------------------

vec3 light(ray o) {
    // We handle this in two steps:
    //
    // 1. Trace the path a photon takes through the scene.
    // 2. Walk back through the path, computing what color the photon has
    //    by the time it reaches the camera.
    //
    // We do it this way because the color each material emits may be dependent
    // on the specific incoming color (think of how a prism splits light),
    // but because we trace each path from the camera instead of from the light source,
    // we can't know what the incoming color *is* until we've *already* traced
    // the entire path.
    ray path[uint(PATH_SEGMENTS + 1)];
    int seg = 0;

    ///
    /// Step 1: Trace path
    ///
    for (; seg < PATH_SEGMENTS + 1; seg++) {
        path[seg] = o;
        transmission tr = transmit(o);
        o = tr.outgoing;
        // This is an optimization. We can handle a material absorbing light in two ways:
        //
        // 1. multiply the incoming color by the absorption
        // 2. simply randomly extinguish rays based on the absorption
        //
        // On average, these have the same effect (of removing some % of light),
        // but extinguishing rays is much more computationally efficient.
        // It *does*, however, slightly decrease the quality of *individual* rays,
        // which matters if you're using a small number of samples per pixel.
        if (tr.extinction > rand()) break;
    }
    // If the loop above ran to completion, `seg` is now one past the last stored
    // segment (an out-of-bounds index), so pull it back inside the array.
    seg = min(seg, PATH_SEGMENTS);

    ///
    /// DEBUG:
    ///
    ray debug = path[int(mod(iTime, float(PATH_SEGMENTS)))];
    // Show the distances to objects on the first bounce (resp. every bounce)
    //return vec3(march(path[1]));
    //return vec3(march(debug));
    // Show the positions of the ray at every path segment
    //return (debug.pos + 2.) / 4.;
    // Show the directions of the ray at every path segment
    //return abs(debug.dir);
    // Show the mediums of every path segment
    //return vec3(float(medium(debug.pos))/3.);
    // Show the normals of every path segment
    //return abs(normal(debug.pos, medium(debug.pos)));
    // Show the total number of path segments a ray took
    //return vec3(float(seg))/float(PATH_SEGMENTS + 1);

    ///
    /// Step 2: Accumulate color
    ///
    vec3 color = vec3(0.);
    for (; seg >= 0; seg--) {
        ray i = path[seg];
        color = emit(i, o, color);
        o = i;
    }
    return color;
}
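// (For intuition on the extinction optimization above: a material that absorbs 75% of
// incoming light can either scale the carried color by 0.25 on every bounce, or
// terminate the ray outright with probability 0.75. The expected contribution is
// identical — 0.25 * color — but terminated rays cost nothing to keep tracing.
// This is the classic "Russian roulette" estimator from Monte Carlo path tracing.)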
// It is generally preferable to use a weighted distribution rather than uniform
// extinction, so that the average ray returns more meaningful information; and it is
// preferable to use uniform extinction rather than color-biased extinction, so that
// we can probabilistically choose to extinguish a ray early to save computation time.
//
// Note that this function, in conjunction with `emit`, is designed to work in only one
// direction (from the camera to the light source). This is because some transformations
// are dependent on the color of the light involved (e.g. the BRDF), but we don't
// *know* the color of the light involved until the ray has reached a light source.
// Thus we first try to statistically choose a path based on the best weighted distribution
// we can come up with *without* knowing the specific wavelength of light, and then
// retroactively fix it up and compute the *specific* color of light using the `emit` function.
// This split would still *work* travelling from light to camera, but would not be
// as effective as if the two were simply combined in the first place.
//
// The combination of surface and volumetric lighting into `transmit`, and the
// `transmit`/`emit` dichotomy, are original to the best of my knowledge, but that's more
// likely because I'm new to the field than because no-one's come up with these things before.
//
// With regards to the rendering equation, this function should incorporate the following
// (and more, depending on the medium; this list is not exhaustive).
//
// If the medium is a surface:
// * the cosine distribution
// * the BSSDF, to the extent that it is independent of wavelength
//
// If the medium is not a surface (e.g. a gas):
// * the transmittance of the medium
// * the extinction coefficient, to the extent that it is independent of wavelength
// * the scattering coefficient
// * the phase function
transmission transmit(ray r) {
    int m = medium(r.pos);
    vec3 norm = normal(r.pos, m);
    // A coordinate just *outside* the medium. (Compute this *before* zeroing a NaN
    // normal below, since `nudge` can't make progress along a zero normal.)
    vec3 np = any(isnan(norm)) ? r.pos : nudge(r.pos, m);
    if (any(isnan(norm))) norm = vec3(0.);

    switch (m) {
    case 0: // air material
        // pass through doing nothing
        vec3 p = intersect(ray(r.pos, r.dir));
        if (any(isinf(p))) {
            // we're sailing through the void!
            return transmission(ray(p, r.dir), INF);
        }
        return transmission(ray(p, r.dir), 0.);
    case 1: // floor material
        if (rand() < 0.25) {
            vec3 refl = 2.*dot(-r.dir, norm)*norm + r.dir;
            return transmission(ray(np, refl), 0.);
        }
        // fall-through
    case 2: // sphere material
        if (rand() < 0.25) {
            vec3 refl = 2.*dot(-r.dir, norm)*norm + r.dir;
            return transmission(ray(np, normalize(cosine_direction(refl) + norm)), 0.);
        }
        return transmission(ray(np, cosine_direction(norm)), 0.);
    case 3: // light material
        // don't bother reflecting off the lights.
        return transmission(NULL_RAY, INF);
        //return transmission(ray(np, cosine_direction(norm)), 0.);
    }
    return transmission(ray(vec3(0.), vec3(0.)), 1.);
}
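// (Why cosine-weighted sampling: the rendering equation weights incoming light by
// dot(norm, dir), and `cosine_direction` draws directions with probability density
// proportional to that same cosine, so the weight and the sampling density cancel —
// which is why `emit` below never multiplies dot(norm, dir) back in for diffuse bounces.)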
// This is affected primarily by the emittance and the color-biased extinction
// (e.g. the color of the object).
//
// With regards to the rendering equation, this function should incorporate the following:
// * the emittance of the surface/medium
// * the BSSDF, to the extent that it is dependent on wavelength
// * the extinction coefficient, to the extent that it is dependent on wavelength
//
// This function does not have to incorporate anything which is not color-related,
// and does not have to be probabilistic.
//
// Generally we want to move as much as possible from this function into `transmit`,
// so that the information can be used to better inform our path-making choices.
vec3 emit(ray i, ray o, vec3 color) {
    int m = medium(i.pos);
    switch (m) {
    case 0: // air material
        if (any(isinf(o.pos))) return vec3(0.);
        color += sqrt(distance(i.pos, o.pos)) * 0.0005;
        return color;
    case 1: // floor material
        color.r *= 0.3;
        color.g *= 0.2;
        color.b *= 0.9;
        //color += vec3(0., 0., 0.01);
        return color;
    case 2: // sphere material
        color.gb *= 0.3;
        //color += vec3(0.004, 0., 0.);
        return color;
    case 3: // light material
        color += vec3(5.) * dot(-i.dir, normal(i.pos, m));
        return color;
    }
    // unknown material
    return vec3(1., 0., 1.);
}

//////// --------------------------------
//////// WORLD: World simulation (signed distance functions, or SDFs)
//////// --------------------------------

vec3 intersect(ray r) {
    return r.pos + r.dir*march(r);
}

float march(ray r) {
    int m = medium(r.pos);
    float total_dist = 0.;
    float delta = INF;
    vec3 pos = r.pos;
    int steps = 0;
    for (; steps < MAX_STEPS && total_dist < MAX_DIST && delta > MIN_DIST; steps++) {
        delta = dist(pos);
        total_dist += delta * IMPRECISION_FACTOR;
        pos = r.pos + r.dir * total_dist;

        // If the distance is less than the minimum distance, fudge our position forward
        // until we've actually *entered* the medium, or we pass by the surface
        // without entering it. MIN_DIST can have a substantial impact on performance
        // when travelling parallel to a surface (small MIN_DISTs would take
        // an eternity to get by).
        for (; delta <= MIN_DIST && total_dist < MAX_DIST && steps < MAX_STEPS && medium(pos) == m; steps++) {
            total_dist += MIN_DIST;
            pos = r.pos + r.dir * total_dist;
            delta = dist(pos);
            // NOTE: If you're parallel to a surface, you could waste a *lot* of time
            // making tiny steps without ever making contact or passing by. It might
            // be worth adding a FUDGE_DIST to `intersect` so that if you're less
            // than that distance away, it simply computes the normal of the surface
            // and fudges the position *into* the surface. That's actually how MIN_DIST
            // works for normal ray tracers and path tracers, but the fact that we
            // support transparent media means that we need to be *inside* the new
            // medium at a medium/medium boundary so we can check what medium we're
            // actually *in*. We could work around this by tracking what medium we're
            // in and then writing functions around "we're in the nearest medium
            // *except* the last medium we were in" and "march until we exit the
            // medium we were last in, then continue through the medium we're actually in
            // until we encounter a different medium, re-encounter the same medium,
            // or travel a certain distance (to prevent bugs due to parallel surfaces,
            // again, in case you never actually *enter* the medium you thought you were
            // in and continue travelling a long distance through the old medium)".
            // Hopefully we won't need any of these workarounds, but I'm making note
            // of it for later.
        }
    }
    return steps < MAX_STEPS && total_dist < MAX_DIST ? total_dist : INF;
}

vec3 normal(vec3 pos, int medium) {
    // Although it is often possible to compute a normal analytically
    // (and doing so is often more precise and more efficient),
    // we just compute an estimate, because it's good enough and saves us a lot of math.
    vec2 delta = vec2(-NORMAL_DELTA, 0.);
    vec3 dq = (mdist(pos, medium) - vec3(
        mdist(pos - delta.xyy, medium),
        mdist(pos - delta.yxy, medium),
        mdist(pos - delta.yyx, medium)
    ))/delta.x;
    if (any(isnan(dq))) return vec3(0.);
    // NOTE: You can generally get away with removing either this division or `normalize`
    // (but not both) without significantly impacting accuracy.
    return normalize(dq);
}
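// (Each component of `dq` above works out to the forward difference
// (mdist(pos + NORMAL_DELTA*e_i) - mdist(pos)) / NORMAL_DELTA, i.e. a finite-difference
// estimate of the SDF's gradient. A true signed distance function has a gradient of
// length 1 everywhere, which is why the NOTE above holds: after the division the vector
// is already nearly unit length, so either the division or the `normalize` is redundant —
// but dropping both would leave the result scaled by 1/NORMAL_DELTA.)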
vec3 nudge(vec3 pos, int m) {
    vec3 norm = normal(pos, m);
    for (int steps = 0; steps < MAX_STEPS && medium(pos) == m; steps++) {
        pos += MIN_DIST*norm;
    }
    return pos;
}

////
//// Combinators for creating signed distance functions and modelling objects.
////

float dist_plane(vec3 p, float y) {
    return distance(p.y, y);
}
// a plane with infinite depth
float dist_floor(vec3 p, float y) {
    return p.y - y;
}
float dist_sphere(vec3 p, vec3 pos, float r) {
    return distance(p, pos) - r;
}
/// Model difference/subtraction (points in `a` but not in `b`).
float sub(float a, float b) {
    return max(a, -b);
}

////
//// Cheat and calculate distance-related stuff with globals
//// to reduce the amount of argument-passing we have to do.
////

/// For computing `dist`
float _dist;   // The unsigned distance to the nearest surface.
/// For computing `mdist`
int _focus;    // The medium we're focusing on for mdist.
float _mdist;  // The nearest signed distance to the medium's surface.
/// For computing `medium`
int _medium;   // The innermost medium encountered so far.
float _sdist;  // The signed distance to the nearest surface of an object we are inside.
/// Cache the position to avoid re-computing distances between calls
/// (e.g. if you call `dist` and then `medium`, or something).
float _time;
vec3 _pos;

/// A helper function to update the globals.
///
/// mnemonic: "Update Distances"
void ud(int m, float dist) {
    float adist = abs(dist);
    // `dist`
    if (adist < _dist) _dist = adist;
    // `mdist`
    if (m == _focus && adist < abs(_mdist)) _mdist = dist;
    // `medium`
    if (dist <= 0. && dist > _sdist) {
        _sdist = dist;
        _medium = m;
    }
}

////
//// Compute distances
////

void update_scene(vec3 pos, int focus);
void scene(vec3 pos);

float dist(vec3 pos) {
    update_scene(pos, -1);
    return _dist;
}
float mdist(vec3 pos, int focus) {
    update_scene(pos, focus);
    return _mdist;
}
int medium(vec3 pos) {
    update_scene(pos, -1);
    return _medium;
}

/// Initialize global variables and then compute distances.
void update_scene(vec3 pos, int focus) {
    bool peq = pos.x == _pos.x && pos.y == _pos.y && pos.z == _pos.z;
    if (peq && time == _time && (focus == -1 || focus == _focus)) return;
    _pos = pos;
    _time = time;
    // We are infinitely distant from nothing.
    _dist = INF;
    // We are infinitely distant from the nearest nothing.
    _focus = focus;
    _mdist = INF;
    // We are infinitely interior to the surface of the vacuum/air.
    _medium = 0;
    _sdist = -INF;
    scene(pos);
    // If we didn't focus on any particular medium for `mdist`,
    // then just pretend we were focusing on whatever we happen to be inside,
    // so we can cache it for later.
    if (focus == -1) {
        _focus = _medium;
        _mdist = _sdist;
    }
}
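// A note on extending the scene below: each object is registered by a
// `ud(medium, distance)` call. As a hypothetical example, adding
//
//     ud(4, dist_sphere(p, vec3(0., 2., 8.), .25)); // a small hovering sphere, medium 4
//
// to `scene` would add a fourth object, provided `transmit` and `emit` are also
// extended to say what material medium 4 is made of.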
////
//// SCENE: Objects in the scene
////

void scene(vec3 p) {
    // the floor
    ud(1, dist_floor(p, -1.));

    // the sphere
    vec3 sphere_pos = vec3(0., -0.15, 8.);
    vec3 sphere_sub_pos = sphere_pos + .7*vec3(sin(time), 0., cos(time));
    ud(2, sub(dist_sphere(p, sphere_pos, 1.), dist_sphere(p, sphere_sub_pos, 0.5)));

    // the light sources
    ud(3, dist_sphere(p, vec3(1.5, 1.2, 7.), 0.7));
    ud(3, dist_sphere(p, vec3(-1.2, 0.5, 5.0), 0.5));
}

//////// ================================
//////// VENDOR: Vendored code
//////// ================================

////
//// AUTHOR: iq
////

// Randoms (https://www.shadertoy.com/view/4sfGzS)
// oldschool rand() from Visual Studio
int seed = 1;
int irand(void) { seed = seed*0x343fd+0x269ec3; return (seed>>16)&32767; }
float rand(void) { return float(irand())/32767.0; }
// hash to initialize the random sequence (copied from Hugo Elias)
int hash( int n ) { n = (n << 13) ^ n; return n * (n * n * 15731 + 789221) + 1376312589; }
void seed_randoms(vec2 fragCoord) {
    ivec2 q = ivec2(fragCoord);
    seed = hash(q.x + hash(q.y + hash(iFrame)));
}

// HSV (https://www.shadertoy.com/view/MsS3Wc), via nmz
vec3 hsv2rgb( in vec3 c ) {
    vec3 rgb = clamp( abs(mod(c.x*6.0+vec3(0.0,4.0,2.0),6.0)-3.0)-1.0, 0.0, 1.0 );
    rgb = rgb*rgb*(3.0-2.0*rgb); // cubic smoothing
    return c.z * mix( vec3(1.0), rgb, c.y);
}

////
//// AUTHOR: fizzer, via iq: http://www.amietia.com/lambertnotangent.html
////
vec3 cosine_direction(vec3 norm) {
    float u = rand();
    float v = rand();
    float a = 6.2831853 * v;
    u = 2.0*u - 1.0;
    return normalize(norm + vec3(sqrt(1.0 - u*u)*vec2(cos(a), sin(a)), u));
}

////
//// AUTHOR: Sam Hocevar, via nmz (http://lolengine.net/blog/2013/07/27/rgb-to-hsv-in-glsl)
////
vec3 rgb2hsv(vec3 c) {
    vec4 K = vec4(0.0, -1.0 / 3.0, 2.0 / 3.0, -1.0);
    vec4 p = mix(vec4(c.bg, K.wz), vec4(c.gb, K.xy), step(c.b, c.g));
    vec4 q = mix(vec4(p.xyw, c.r), vec4(c.r, p.yzx), step(p.x, c.r));

    float d = q.x - min(q.w, q.y);
    float e = 1.0e-10;
    return vec3(abs(q.z + (q.w - q.y) / (6.0 * d + e)), d / (q.x + e), q.x);
}