//////// ================================
//////// SETTINGS
//////// ================================
//////// Check the Common tab for settings.

//////// ================================
//////// DOCS: Declarations & documentation
//////// ================================

/* Helpful resources:
 *   * Inigo Quilez (website, youtube, shadertoy)
 *   * Importance Sampling Techniques for Path Tracing in Participating Media
 *     (Christopher Kulla and Marcos Fajardo)
 *   * Wikipedia articles:
 *     * Rendering equation
 *     * Bidirectional reflectance distribution function
 *     * Bidirectional scattering distribution function
 */

//////// --------------------------------
//////// PIXEL: Pixels, color, and projections
//////// --------------------------------

/// Assign each pixel a color in the sRGB color space.
void mainImage(out vec4 color, vec2 pixel);

/// Take the coordinate of a pixel on the screen and return a color
/// in the linear RGBA color space.
vec4 color_pixel(vec2 pixel);

/// Convert a pixel coordinate, which is dependent on resolution and aspect ratio,
/// to our internal coordinate system, which is not.
///
/// (Pixel coordinates go from (0, 0) to the window resolution, iResolution;
/// the internal coordinate system goes from (-1, -1) to (1, 1) with a square
/// aspect ratio.)
vec2 pixel2square(vec2 pixel);

struct ray {
    vec3 pos; // POSition (aka the origin)
    vec3 dir; // DIRection
};

/// After converting pixel coordinates to screen coordinates, we still have
/// a problem: screen coordinates are 2d, but our world is 3d! The camera
/// assigns each screen coordinate to a ray in 3d space, indicating the
/// position and angle from which we will be receiving light.
ray camera_project(vec2 coord);

/// Project a coordinate on the unit circle onto the unit hemisphere.
/// This is used for curvilinear perspective.
vec3 project(vec2 coord);

/// The virtual camera has a position and orientation in 3d space.
/// This takes a ray positioned relative to `(0, 0, 0)` and facing `(0, 0, 1)`
/// and moves and orients it relative to the camera.
ray camera(ray r);

//////// --------------------------------
//////// LIGHT: Light simulation (path marching)
//////// --------------------------------

/// Perform (backwards) path marching. Simulate a single photon as it travels
/// through the world, bouncing off surfaces and scattering through media,
/// and compute its end color when it reaches the camera.
///
/// Note that this is a *backwards* path march, so it works the *opposite*
/// of real life. Our "photon" actually travels from the camera to the
/// light source (the ray being the one you get from the `camera_project`
/// function); however, visually, this gets similar results to doing it
/// forward, only you waste less time computing photons which never reach
/// the camera.
vec3 light(ray r);

/// Determine how a ray interacts with and/or is transmitted through a medium
/// until it enters a new medium, reflects off a surface, or scatters.
/// This function returns the outgoing ray in the new medium and the
/// probability that the light was extinguished along the way.
struct transmission {
    /// The new ray position and direction after colliding with a surface or scattering.
    ray outgoing;
    /// The probability that all light was extinguished along the path.
    /// Values >= 1 always terminate the path.
    float extinction;
};
transmission transmit(ray r);
/// Determine how much light to emit on the outgoing ray based on the incoming light.
vec3 emit(ray incoming, ray outgoing, vec3 color);

//////// --------------------------------
//////// WORLD: World simulation (signed distance functions, or SDFs)
//////// --------------------------------

/// The current time. We use a global instead of a parameter for convenience.
/// We use this instead of iTime so that we can apply temporal anti-aliasing
/// (see the explanation in `mainImage`).
float time;

/// Compute the location at which this ray will next encounter a change of
/// medium (e.g. if it's in the air, where it will encounter a surface; if
/// it's in a cloud, where it'll escape that cloud; etc.).
/// This will always return a position along the ray.
vec3 intersect(ray r);

/// Compute the distance along the ray at which it will first encounter a change of medium.
/// This is equivalent to `distance(intersect(r), r.pos)`.
float march(ray r);

/// Given a point on or near the surface of an object or medium, compute (or estimate)
/// the normal (i.e. the vector perpendicular to the surface).
vec3 normal(vec3 pos, int medium);

/// Take a point near the edge of a medium and nudge it along the surface
/// normal until it is outside of the medium.
vec3 nudge(vec3 pos, int medium);

/// The distance to the nearest change of medium (e.g. the surface of an object).
/// (This is used primarily by ray marching in the `march` function.)
float dist(vec3 pos);

/// The signed distance to the nearest surface of a given medium.
/// This is positive if you are outside the surface, or negative if you are inside it.
/// (This is used primarily when computing normals in the `normal` function.)
float mdist(vec3 pos, int medium);

/// The medium which encompasses a point.
/// In the case of multiple overlapping media (e.g. one object inside another
/// object), the innermost medium will be returned (i.e. the one whose surface
/// the point is *least* far from).
int medium(vec3 pos);

//////// --------------------------------
//////// UTIL: Utility functions (e.g. random number generation)
//////// --------------------------------

#define NULL_RAY ray(vec3(0.), vec3(0.))

/// Used to forcibly set the pixel color from functions. Used for debugging.
vec3 _bug = vec3(NAN);

/// Randomly select a direction on a hemisphere facing the direction of the normal,
/// with a bias toward directions close to the normal.
/// Specifically, the bias is by `dot(norm, dir)`, making this a cosine-weighted
/// distribution, which is useful because `dot(norm, dir)` is a term in the
/// rendering equation.
vec3 cosine_direction(vec3 norm);

/// Create a translation + rotation matrix from a position and direction.
mat4 translate(vec3 pos, vec3 forward, vec3 up);
mat4 translate(vec3 pos, vec3 forward);
mat4 translate(vec3 pos);

/// Rotate a direction vector using a matrix.
vec3 apply_rotate(mat4 r, vec3 d);

/// Get the direction of a matrix.
/// (Get the rotated direction of (0, 0, 1).)
vec3 apply_rotate(mat4 r);

/// Translate a vector position by a matrix.
vec3 apply_translate(mat4 r, vec3 p);

/// Get the position offset of a matrix.
/// (Get the translated position of (0, 0, 0).)
vec3 apply_translate(mat4 r);
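/// `rand()` and `seed_randoms()` are used throughout this file but live in
/// the Common tab. For illustration only, here is a minimal sketch of the
/// *kind* of hash-based generator that typically backs them (hypothetical;
/// not the actual Common-tab implementation):
uint _example_rand_state = 0u;
float example_rand() {
    // PCG-style integer hash: advance the state, scramble it,
    // and map the result to a uniform float in [0, 1).
    _example_rand_state = _example_rand_state * 747796405u + 2891336453u;
    uint word = ((_example_rand_state >> ((_example_rand_state >> 28u) + 4u))
                 ^ _example_rand_state) * 277803737u;
    word = (word >> 22u) ^ word;
    return float(word) / 4294967296.;
}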
//////// ================================
//////// IMPL: Implementation
//////// ================================

//////// --------------------------------
//////// PIXEL: Pixels, color, and projections
//////// --------------------------------

void mainImage(out vec4 fragColor, vec2 fragCoord) {
    seed_randoms(vec3(fragCoord, float(iFrame)));

    // Supersampling. Sample the color within each pixel multiple times and
    // take the average. In raster-based rendering, this is used mostly to
    // prevent jagged edges; with path tracing, antialiasing is even *more*
    // important, to prevent graininess.
    vec4 color = vec4(0.);
    int successful_samples = 0;
    for (int i = 0; i < SAMPLES; i++) {
        // There are two ways to choose where to sample the color during
        // multisampling: either in a fixed pattern of offsets, or by applying
        // a *random* offset. Random offsets are more common in path tracing.
        vec2 pixel = fragCoord + vec2(rand(), rand()) - 0.5;

#ifdef FREEZE_TIME
        time = FREEZE_TIME;
#else
        time = 0.;
#endif
#ifdef LOOP_TIME
        time += mod(iTime, LOOP_TIME);
#else
#ifndef FREEZE_TIME
        time = iTime;
#endif
#endif

        // Apply temporal antialiasing (motion blur) by slightly randomizing
        // the time. We distribute our samples across the time we estimate the
        // frame will take to render, which in this case is simply the time
        // the *last* frame took to render. This causes jerkiness if the time
        // it takes to render a frame changes suddenly (stutters).
        //
        // TODO: a more sophisticated frame time estimate
        float max_taa_diff = INF;
#ifdef MAX_TAA_DIFF
        max_taa_diff = MAX_TAA_DIFF;
#endif
        time += rand() * min(iTimeDelta, max_taa_diff);

        vec4 samp = color_pixel(pixel);

        // We failed to render a sample at this location for some reason,
        // e.g. the projection for this pixel is undefined. NaN and infinity
        // are infectious, and we must block them or our entire pixel will be
        // corrupted.
        if (any(isnan(samp)) || any(isinf(samp))) continue;

        // Only count successful samples towards the average.
        successful_samples++;

        // NOTE: It's technically possible for the sample to be *negative*.
        // If the sample is negative, it's probably a bug, though hypothetically
        // maybe you could have negative light as some sort of special effect
        // (like a material that sucks the light out of the room. that could be
        // cool. or creepy.)
        color += samp;
    }
    color /= max(float(successful_samples), 1.);

    if (!any(isnan(_bug))) {
        color = vec4(_bug, 1.);
    }

#if AVERAGE_FRAMES
    vec2 uv = fragCoord/iResolution.xy;
    vec4 rest = texture(iChannel0, uv).rgba;
    color = (rest*float(iFrame) + color) / float(iFrame + 1);
    // Don't output NaN or inf or it'll corrupt the buffer and leave you with
    // black pixels that never go away, because they break the average!
    if (any(isnan(color)) || any(isinf(color))) {
        fragColor = rest;
        return;
    }
#endif

    // This shader operates in the linear RGB color space, but the final
    // output is expected to be in sRGB, so a conversion is needed.
    // NOTE: We now do that conversion in the final Image pass. (It's
    // basically the *only* thing we do there.)
    fragColor = color;
}

// NOTE: linear2srgb is in the vendored code section at the bottom of the file.
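// For reference, a linear-to-sRGB conversion is typically the piecewise
// sRGB transfer curve. A minimal sketch of the idea (illustrative only;
// the vendored linear2srgb is what actually gets used):
vec3 example_linear2srgb(vec3 c) {
    // Linear segment near black, a 1/2.4 power curve above the cutoff.
    vec3 lo = c * 12.92;
    vec3 hi = 1.055 * pow(c, vec3(1. / 2.4)) - 0.055;
    return mix(hi, lo, vec3(lessThanEqual(c, vec3(0.0031308))));
}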
vec4 color_pixel(vec2 pixel) {
    vec2 coord = pixel2square(pixel);

    // Apply zoom.
    //
    // TODO: Add an option for dynamic zoom based on aspect ratio,
    // so that the portion of the screen shown always exactly matches
    // the maximum area defined under curvilinear projection.
    coord /= FOV;

#ifdef IMAGE_OFFSET
    coord += IMAGE_OFFSET;
#endif

    ray r = camera_project(coord);
    if (any(isnan(r.dir))) {
        // The projection is undefined at this pixel coordinate (see `project`);
        // don't bother rendering it, and return NaN so `mainImage` discards
        // the sample.
        return vec4(NAN);
    }

    // We could hypothetically do something with alpha later,
    // but for now, always return 100% opacity.
    return vec4(light(r), 1.);
}

vec2 pixel2square(vec2 screen) {
    // Map the rectangular screen into a square coordinate space.
    vec2 square = ((screen / iResolution.xy) - 0.5) * 2.;
    // Adjust for the aspect ratio to get square coordinates.
#if ASPECT_RATIO_CROP
    if (iResolution.x > iResolution.y) {
#else
    if (iResolution.x < iResolution.y) {
#endif
        return vec2(square.x, square.y * iResolution.y / iResolution.x);
    } else {
        return vec2(square.x * iResolution.x / iResolution.y, square.y);
    }
}

ray camera_project(vec2 coord) {
    return camera(ray(vec3(0.), project(coord)));
}

// TODO: add support for the usual, non-curvilinear perspective projection
// (and possibly other projections, just for fun?)
vec3 project(vec2 coord) {
    // The sign of the direction we're facing. 1 is forward, -1 is backward.
    float dir = 1.;

#if TILE_PERSPECTIVE
    // This projection only supports coordinates within the unit circle
    // and only projects into the unit hemisphere. Ideally we'd want
    // some sort of extension which takes points outside the unit circle
    // and projects them somewhere behind you (with the point at infinity
    // being directly behind you), but I haven't come up with any reasonable
    // extension of this perspective system which behaves in that manner.
    //
    // What we can do instead is *tile* the projection so that adjacent tiles
    // are a mirrored projection of the unit hemisphere *behind* you.
    // This is a logical extension because the projection becomes continuous
    // along the x and y axes (you're just looking around in perfect circles),
    // and it allows you to view the entire space. The main problem with this
    // approach is that all of the space between the tiled circles is still
    // undefined, but it's the best solution I'm aware of.
    coord -= 1.;
    coord = mod(coord + 2., 4.) - 1.;
    if (coord.x > 1.) {
        coord.x = 1. - (coord.x - 1.);
        dir = -dir;
    }
    if (coord.y > 1.) {
        coord.y = 1. - (coord.y - 1.);
        dir = -dir;
    }
#endif

    float z = dir*sqrt(1. - coord.x*coord.x - coord.y*coord.y);

#if CLAMP_PERSPECTIVE
    // We can "define" the remaining undefined region of the screen
    // by clamping it to the nearest point on the unit circle. This is
    // sometimes better than nothing, though it can also be a lot worse,
    // because we still have to actually *render* all of those pixels.
    // TODO: Add an option to allow stretching into a square instead of
    // clamping? I imagine things could get pretty badly warped, but maybe
    // it could be useful?
    if (isnan(z)) {
        coord = normalize(coord);
        z = 0.;
    }
#endif

    return normalize(vec3(coord, z));
}

ray camera(ray r) {
    // camera position
    vec3 pos = CAMERA_POS;
    // camera direction (faces forward, not up)
    vec3 d = normalize(CAMERA_DIR);
    mat4 trans = translate(pos, d);
    return ray(apply_translate(trans, r.pos), apply_rotate(trans, r.dir));
}
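// The TODO above mentions the usual rectilinear (pinhole) projection.
// A minimal sketch of what that alternative could look like (hypothetical
// function and focal length; not wired into camera_project):
vec3 example_project_rectilinear(vec2 coord) {
    // Project the unit square onto a plane one focal length in front of
    // the eye. Straight lines stay straight, but unlike `project`, the
    // field of view must stay below 180 degrees.
    const float focal = 1.; // 90 degree FOV across the unit square
    return normalize(vec3(coord, focal));
}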
//////// --------------------------------
//////// LIGHT: Light simulation (path marching)
//////// --------------------------------

vec3 light(ray o) {
    // We handle this in two steps:
    //
    // 1. Trace the path a photon takes through the scene.
    // 2. Walk back through the path, computing what color the photon has
    //    by the time it reaches the camera.
    //
    // We do it this way because the color each material emits may be dependent
    // on the specific incoming color (think of how a prism splits light),
    // but because we trace each path from the camera instead of from the light
    // source, we can't know what the incoming color *is* until we've *already*
    // traced the entire path.
    ray path[uint(PATH_SEGMENTS + 1)];
    int seg = 0;

    ///
    /// Step 1: Trace path
    ///
    for (; seg < PATH_SEGMENTS + 1; seg++) {
        path[seg] = o;
        transmission tr = transmit(o);
        o = tr.outgoing;
        // This is an optimization. We can handle a material absorbing color
        // in two ways:
        //
        // 1. multiply the incoming color by the absorption
        // 2. simply randomly extinguish rays based on the absorption
        //
        // On average, these have the same effect (of removing some % of
        // light), but extinguishing rays is much more computationally
        // efficient. It *does*, however, slightly decrease the quality of
        // *individual* rays if you're using a small number of samples per
        // pixel.
        if (tr.extinction > rand()) break;
    }
    // If the loop ran to completion, `seg` is one past the end of the array;
    // clamp it back to the last stored segment.
    seg = min(seg, PATH_SEGMENTS);

    ///
    /// DEBUG:
    ///
    ray debug = path[int(mod(iTime, float(PATH_SEGMENTS)))];
    // Show the distances to objects on the first bounce (resp. every bounce)
    //return vec3(march(path[1]));
    //return vec3(march(debug));
    // Show the positions of the ray at every path segment
    //return (debug.pos + 2.) / 4.;
    // Show the directions of the ray at every path segment
    //return abs(debug.dir);
    // Show the mediums of every path segment
    //return vec3(float(medium(debug.pos))/3.);
    // Show the normals of every path segment
    //return abs(normal(debug.pos, medium(debug.pos)));
    // Show the total number of path segments a ray took
    //return vec3(float(seg))/float(PATH_SEGMENTS + 1);

    ///
    /// Step 2: Accumulate color
    ///
    vec3 color = vec3(0.);
    for (; seg >= 0; seg--) {
        ray i = path[seg];
        color = emit(i, o, color);
        o = i;
    }
    return color;
}
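// For comparison, a minimal sketch of option 1 from the optimization comment
// above (deterministic attenuation instead of Russian-roulette extinction).
// Every sample then contributes something, at the cost of always tracing
// full-length paths (hypothetical variant; the roulette version above is
// what's actually used):
vec3 example_light_attenuate(ray o) {
    ray path[uint(PATH_SEGMENTS + 1)];
    float extinction[uint(PATH_SEGMENTS + 1)];
    for (int seg = 0; seg < PATH_SEGMENTS + 1; seg++) {
        path[seg] = o;
        transmission tr = transmit(o);
        extinction[seg] = clamp(tr.extinction, 0., 1.);
        o = tr.outgoing;
    }
    vec3 color = vec3(0.);
    for (int seg = PATH_SEGMENTS; seg >= 0; seg--) {
        ray i = path[seg];
        // Light arriving from deeper in the path is scaled by the chance
        // it would have survived this segment's extinction check.
        color *= 1. - extinction[seg];
        color = emit(i, o, color);
        o = i;
    }
    return color;
}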
//
// It is generally preferable to use a weighted distribution rather than
// uniform extinction, so that the average ray returns more meaningful
// information. It is likewise preferable to use uniform extinction rather
// than color-biased extinction, so that we can probabilistically choose to
// extinguish a ray early and save computation time.
//
// Note that this function, in conjunction with `emit`, is designed to work
// only in one direction (from the camera to the light source). This is
// because some transformations are dependent on the color of the light
// involved (e.g. the BRDF), but we don't *know* the color of the light
// involved until the ray has reached a light source. Thus we first try to
// statistically choose a path based on the best weighted distribution we
// can come up with *without* knowing the specific wavelength of light, and
// then retroactively fix it up and compute the *specific* color of light
// using the `emit` function. This split would still *work* travelling from
// light to camera, but would not be as effective as if the two were simply
// combined in the first place.
//
// The combination of surface and volumetric lighting into `transmit`, and
// the `transmit`/`emit` dichotomy, are original to the best of my knowledge,
// but that's more likely because I'm new to the field than because no-one's
// come up with these things before.
//
// With regards to the rendering equation, this function should incorporate
// the following (and more, depending on the medium; this list is not
// exhaustive).
//
// If the medium is a surface:
//   * the cosine distribution
//   * the BSDF, to the extent that it is independent of wavelength
//
// If the medium is not a surface (e.g. a gas):
//   * the transmittance of the medium
//   * the extinction coefficient, to the extent that it is independent of wavelength
//   * the scattering coefficient
//   * the phase function
//
transmission transmit(ray r) {
    int m = medium(r.pos);
    vec3 norm = normal(r.pos, m);
    bool no_norm = any(isnan(norm));
    if (no_norm) norm = vec3(0.);
    // coordinate *outside* the medium
    vec3 np = no_norm ? r.pos : nudge(r.pos, m);
    switch (m) {
    case 0: { // air material
        // pass through, doing nothing
        vec3 p = intersect(ray(r.pos, r.dir));
        if (any(isinf(p))) {
            // we're sailing through the void!
            return transmission(ray(p, r.dir), INF);
        }
        return transmission(ray(p, r.dir), 0.);
    }
    case 1: // floor material
        if (rand() < 0.25) {
            // mirror reflection (equivalent to the built-in reflect(r.dir, norm))
            vec3 refl = 2.*dot(-r.dir, norm)*norm + r.dir;
            return transmission(ray(np, refl), 0.);
        }
        // fall through
    case 2: // sphere material
        if (rand() < 0.25) {
            vec3 refl = 2.*dot(-r.dir, norm)*norm + r.dir;
            return transmission(ray(np, normalize(cosine_direction(refl) + norm)), 0.);
        }
        return transmission(ray(np, cosine_direction(norm)), 0.);
    case 3: // light material
        // don't bother reflecting off the lights.
        return transmission(NULL_RAY, INF);
        //return transmission(ray(np, cosine_direction(norm)), 0.);
    case 4: // generic diffuse material
        return transmission(ray(np, cosine_direction(norm)), 0.5);
    }
    return transmission(ray(vec3(0.), vec3(0.)), 1.);
}

// This is affected primarily by the emittance and the color-biased extinction
// (e.g. the color of the object).
//
// With regards to the rendering equation, this function should incorporate
// the following:
//   * the emittance of the surface/medium
//   * the BSDF, to the extent that it is dependent on wavelength
//   * the extinction coefficient, to the extent that it is dependent on wavelength
//
// This function does not have to incorporate anything which is not
// color-related, and does not have to be probabilistic.
//
// Generally we want to move as much as possible from this function into
// `transmit`, so that the information can be used to better inform our
// path-making choices.
vec3 emit(ray i, ray o, vec3 color) {
    int m = medium(i.pos);
    switch (m) {
    case 0: // air material
        if (any(isinf(o.pos))) {
#if SCENE == 2
            return vec3(1.);
#else
            return vec3(0.);
#endif
        }
        return color;
    case 1: // floor material
        color.r *= 0.3;
        color.g *= 0.2;
        color.b *= 0.9;
        //color += vec3(0., 0., 0.01);
        return color;
    case 2: // sphere material
        color.gb *= 0.3;
        //color += vec3(0.004, 0., 0.);
        return color;
    case 3: // light material
        color += vec3(5.) * dot(-i.dir, normal(i.pos, m));
        return color;
    case 4: // generic diffuse material
        return color;
    }
    // unknown material
    return vec3(1., 0., 1.);
}
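// None of the media above is a participating medium like the gases described
// in the comment before `transmit`. As a sketch of how one could slot into
// this scheme, here is a hypothetical homogeneous fog that scatters but
// never absorbs (illustrative constants; not part of any scene):
transmission example_transmit_fog(ray r) {
    const float sigma_s = 0.2; // scattering coefficient of the fog
    // Sample a free-flight distance: the chance of travelling distance t
    // without scattering falls off as exp(-sigma_s * t).
    float t = -log(1. - rand()) / sigma_s;
    float boundary = march(r);
    if (t < boundary) {
        // Scatter inside the fog with an isotropic phase function:
        // pick a uniformly distributed direction on the sphere.
        float u = 2.*rand() - 1.;
        float a = 6.2831853 * rand();
        vec3 dir = vec3(sqrt(1. - u*u)*vec2(cos(a), sin(a)), u);
        return transmission(ray(r.pos + r.dir*t, dir), 0.);
    }
    // Reached the fog boundary before scattering; pass through unchanged.
    return transmission(ray(intersect(r), r.dir), 0.);
}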
//////// --------------------------------
//////// WORLD: World simulation (signed distance functions, or SDFs)
//////// --------------------------------

vec3 intersect(ray r) {
    return r.pos + r.dir*march(r);
}

float march(ray r) {
    int m = medium(r.pos);
    float total_dist = 0.;
    float delta = INF;
    vec3 pos = r.pos;
    int steps = 0;
    for (; steps < MAX_STEPS && total_dist < MAX_DIST && delta > MIN_DIST; steps++) {
        delta = dist(pos);
        total_dist += delta * IMPRECISION_FACTOR;
        pos = r.pos + r.dir * total_dist;
        // If the distance is less than the minimum distance, fudge our
        // position forward until we've actually *entered* the medium, or we
        // pass by the surface without entering it. MIN_DIST can have a
        // substantial impact on performance when travelling parallel to a
        // surface (small MIN_DISTs would take an eternity to get by).
        for (; delta <= MIN_DIST && total_dist < MAX_DIST && steps < MAX_STEPS && medium(pos) == m; steps++) {
            total_dist += MIN_DIST;
            pos = r.pos + r.dir * total_dist;
            delta = dist(pos);
            // NOTE: If you're parallel to a surface, you could waste a *lot*
            // of time making tiny steps without ever making contact or
            // passing by. It might be worth adding a FUDGE_DIST to
            // `intersect` so that if you're less than that distance away, it
            // simply computes the normal of the surface and fudges the
            // position *into* the surface. That's actually how MIN_DIST works
            // for normal ray tracers and path tracers, but the fact that we
            // support transparent media means that we need to be *inside* the
            // new medium at a medium/medium boundary so we can check what
            // medium we're actually *in*. We could work around this by
            // tracking what medium we're in and then writing functions around
            // "we're in the nearest medium *except* the last medium we were
            // in" and "march until we exit the medium we were last in, then
            // continue through the medium we're actually in until we
            // encounter a different medium, re-encounter the same medium, or
            // travel a certain distance" (the distance cap prevents bugs due
            // to parallel surfaces, again, in case you never actually *enter*
            // the medium you thought you were in and continue travelling a
            // long distance through the old medium). Hopefully we won't need
            // any of these workarounds, but I'm making note of it for later.
        }
    }
    return steps < MAX_STEPS && total_dist < MAX_DIST ? total_dist : INF;
}

vec3 normal(vec3 pos, int medium) {
    // Although it is often possible to compute a normal analytically
    // (which is usually more precise and more efficient), we just compute
    // an estimate, because it's good enough and it saves us a lot of math.
    vec2 delta = vec2(-NORMAL_DELTA, 0.);
    vec3 dq = (mdist(pos, medium) - vec3(
        mdist(pos - delta.xyy, medium),
        mdist(pos - delta.yxy, medium),
        mdist(pos - delta.yyx, medium)
    ))/delta.x;
    if (any(isnan(dq))) return vec3(0.);
    // NOTE: You can generally get away with removing either this division or
    // `normalize` (but not both) without significantly impacting accuracy.
    return normalize(dq);
}

vec3 nudge(vec3 pos, int m) {
    vec3 norm = normal(pos, m);
    for (int steps = 0; steps < MAX_STEPS && medium(pos) == m; steps++) {
        pos += MIN_DIST*norm;
    }
    return pos;
}

////
//// Cheat and calculate distance-related stuff with globals
//// to reduce the amount of argument-passing we have to do.
////

/// For computing `dist`
float _dist;   // The unsigned distance to the nearest surface.
/// For computing `mdist`
int _focus;    // The medium we're focusing on for mdist.
float _mdist;  // The nearest signed distance to the medium's surface.
/// For computing `medium`
int _medium;   // The innermost medium encountered so far.
float _sdist;  // The signed distance to the nearest surface of an object we are inside.
/// Cache the position to avoid re-computing distances between calls
/// (e.g. if you call `mdist` and then `medium`, or something).
float _time;
vec3 _pos;

/// A helper function to update the globals.
///
/// mnemonic: "Update Distances"
void ud(int m, float dist) {
    float adist = abs(dist);
    // `dist`
    if (adist < _dist) _dist = adist;
    // `mdist`
    if (m == _focus && adist < abs(_mdist)) _mdist = dist;
    // `medium`
    if (dist <= 0. && dist > _sdist) {
        _sdist = dist;
        _medium = m;
    }
}

////
//// Compute distances
////

void update_scene(vec3 pos, int focus);
void scene(vec3 pos);

float dist(vec3 pos) {
    update_scene(pos, -1);
    return _dist;
}

float mdist(vec3 pos, int focus) {
    update_scene(pos, focus);
    return _mdist;
}

int medium(vec3 pos) {
    update_scene(pos, -1);
    return _medium;
}

/// Initialize global variables and then compute distances.
void update_scene(vec3 pos, int focus) {
    bool peq = pos.x == _pos.x && pos.y == _pos.y && pos.z == _pos.z;
    if (peq && time == _time && (focus == -1 || focus == _focus)) return;
    _pos = pos;
    _time = time;
    // We haven't seen anything yet, so we are infinitely distant to nothing.
    _dist = INF;
    // Likewise for the nearest surface of the focused medium.
    _focus = focus;
    _mdist = INF;
    // We are infinitely interior to the surface of the vacuum/air.
    _medium = 0;
    _sdist = -INF;
    scene(pos);
    // If we didn't focus on any particular object for `mdist`,
    // then just pretend we were focusing on whatever we happen to be inside,
    // so we can cache it for later.
    if (focus == -1) {
        _focus = _medium;
        _mdist = _sdist;
    }
}

////
//// Combinators for creating signed distance functions and modelling objects.
////

float dist_plane(vec3 pos, mat4 trans) {
    vec4 p1w = trans * vec4(0., 0., 0., 1.);
    vec4 p2w = trans * vec4(1., 0., 0., 1.);
    vec4 p3w = trans * vec4(0., 0., 1., 1.);
    vec3 p1 = p1w.xyz / p1w.w;
    vec3 p2 = p2w.xyz / p2w.w;
    vec3 p3 = p3w.xyz / p3w.w;
    vec3 n = normalize(cross(p2 - p1, p3 - p1));
    vec3 rp = pos - p1;
    return dot(rp, n);
}

// a plane with infinite depth
float dist_floor(vec3 p, float y) {
    return p.y - y;
}

float dist_sphere(vec3 p, vec3 pos, float r) {
    return distance(p, pos) - r;
}

/// Model difference/subtraction (objects in `a` but not in `b`).
float sub(float a, float b) {
    return max(a, -b);
}

////
//// SCENE: Objects in the scene
////

void scene(vec3 p) {
    /// -------------
    /// SCENE 1
    /// -------------
#if SCENE == 1
    // the floor
    ud(1, dist_floor(p, -1.));
    // the sphere
    vec3 sphere_pos = vec3(0., -0.15, 8.);
    vec3 sphere_sub_pos = sphere_pos + .7*vec3(sin(time), 0., cos(time));
    ud(2, sub(dist_sphere(p, sphere_pos, 1.),
              dist_sphere(p, sphere_sub_pos, 0.5)));
    // the light sources
    ud(3, dist_sphere(p, vec3(1.5, 1.2, 7.), 0.7));
    ud(3, dist_sphere(p, vec3(-1.2, 0.5, 5.0), 0.5));
    /// -------------
    /// SCENE 2
    /// -------------
#elif SCENE == 2
#endif
}
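/// For illustration, here is how a new shape could be added with these
/// combinators: a standard axis-aligned box SDF (hypothetical; not used by
/// any scene above). Inside `scene`, one might then write, e.g.:
///     ud(4, example_dist_box(p - vec3(2., -0.5, 7.), vec3(0.5)));
float example_dist_box(vec3 p, vec3 half_extent) {
    vec3 q = abs(p) - half_extent;
    // Distance from the box: the length of the outside component, plus the
    // (negative) largest axis distance when the point is inside.
    return length(max(q, 0.)) + min(max(q.x, max(q.y, q.z)), 0.);
}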
/// Rotate a direction vector using a matrix.
vec3 apply_rotate(mat4 r, vec3 d) {
    return normalize((r*vec4(d, 0.)).xyz);
}

vec3 apply_rotate(mat4 r) {
    return apply_rotate(r, vec3(0., 0., 1.));
}

vec3 apply_translate(mat4 r, vec3 p) {
    vec4 p_ = r*vec4(p, 1.);
    return p_.xyz/p_.w;
}

vec3 apply_translate(mat4 r) {
    return apply_translate(r, vec3(0.));
}

mat4 translate(vec3 p, vec3 z, vec3 up) {
    z = normalize(z);
    up = normalize(up);
    vec3 x = cross(up, z);
    vec3 y = cross(z, x);
    // GLSL matrices are column-major, so the constructor arguments are
    // transposed from how the matrix reads on paper!
    // Argh, it took me *two hours* to figure out what was going wrong!!
    // What's worse is that I've been bitten by this before and *forgot*. Fuck.
    /*
    return mat4(
        x.x, y.x, z.x, p.x,
        x.y, y.y, z.y, p.y,
        x.z, y.z, z.z, p.z,
        0.0, 0.0, 0.0, 1.
    );
    */
    return mat4(
        x.x, x.y, x.z, 0.,
        y.x, y.y, y.z, 0.,
        z.x, z.y, z.z, 0.,
        p.x, p.y, p.z, 1.
    );
}

mat4 translate(vec3 p, vec3 z) {
    return translate(p, z, vec3(0., 1., 0.));
}

mat4 translate(vec3 p) {
    return translate(p, vec3(0., 0., 1.));
}

//////// ================================
//////// VENDOR: Vendored code
//////// ================================

////
//// AUTHOR: fizzer, via iq: http://www.amietia.com/lambertnotangent.html
////
vec3 cosine_direction(vec3 norm) {
    float u = rand();
    float v = rand();
    float a = 6.2831853 * v;
    u = 2.0*u - 1.0;
    return normalize(norm + vec3(sqrt(1.0 - u*u)*vec2(cos(a), sin(a)), u));
}