Add a bunch of quality of life features.
* Option presets, for demonstration. * Continuously average samples across multiple frames so you can render more than your GPU can handle in one go. * Saturation correction for overly-bright pixels. * Freeze and loop time. * Settings for TAA, camera position, and aspect ratio cropping.
parent
6906779065
commit
e73620c588
|
@ -0,0 +1,34 @@
|
|||
// All of the interesting code and settings are in Buffer A.
|
||||
|
||||
/// Convert a color from linear RGB to the sRGB color space.
|
||||
vec3 linear2srgb(vec3 color);
|
||||
|
||||
/// Image pass: fetch the linear-RGB result computed in Buffer A
/// (bound to iChannel0) and convert it to sRGB for display.
void mainImage(out vec4 fragColor, in vec2 fragCoord) {
    // Normalized texture coordinate for this pixel.
    vec2 texCoord = fragCoord / iResolution.xy;

    // Sample the accumulated color from Buffer A.
    vec3 linearColor = texture(iChannel0, texCoord).rgb;

    // Present in sRGB with full opacity.
    fragColor = vec4(linear2srgb(linearColor), 1.0);
}
|
||||
|
||||
//////// ================================
|
||||
//////// VENDOR: Vendored code
|
||||
//////// ================================
|
||||
|
||||
////
|
||||
//// AUTHOR: unknown
|
||||
////
|
||||
|
||||
/// Convert a color from linear RGB to the sRGB color space.
///
/// The piecewise version below (#if 1) is the technically-accurate sRGB
/// transfer function; the plain gamma-2.2 fallback is cheaper, and the
/// difference is usually negligible in practice.
vec3 linear2srgb(vec3 linear_rgb) {
#if 1
    // copied from somewhere on the internet
    // Linear segment near black, 2.4-exponent curve everywhere else.
    bvec3 below_cutoff = lessThan(linear_rgb, vec3(0.0031308));
    vec3 curved = vec3(1.055)*pow(linear_rgb, vec3(1.0/2.4)) - vec3(0.055);
    vec3 scaled = linear_rgb * vec3(12.92);

    return mix(curved, scaled, below_cutoff);
    // end copied from somewhere on the internet
#else
    // Cheap approximation: plain gamma 2.2.
    return pow(linear_rgb, vec3(1./2.2));
#endif
}
|
|
@ -1,35 +1,215 @@
|
|||
//////// ================================
|
||||
//////// Settings
|
||||
//////// SETTINGS: Settings
|
||||
//////// ================================
|
||||
|
||||
// See the descriptions for these in `project`. They're only relevant if you zoom out.
|
||||
//#define TILE_PERSPECTIVE
|
||||
//#define CLAMP_PERSPECTIVE
|
||||
// "FOV", poorly-defined. affects *zoom*.
|
||||
#define FOV (1.5)
|
||||
#define SAMPLES 3
|
||||
//////// --------------------------------
|
||||
//////// User settings
|
||||
//////// --------------------------------
|
||||
//////// Tweak these according to your preferences and the power of your graphics card.
|
||||
//////// Comment out a setting to restore it to its default value.
|
||||
|
||||
|
||||
////
|
||||
//// Sample settings
|
||||
////
|
||||
|
||||
// The number of color samples taken per pixel. Increasing this has a dramatic effect on
|
||||
// image quality, reducing graininess and preventing overly-bright pixels ("fireflies").
|
||||
// However, how much GPU power you need to render a frame scales linearly with
|
||||
// the number of samples.
|
||||
#define SAMPLES 1
|
||||
|
||||
// The maximum number of times light can reflect or scatter before it is extinguished.
|
||||
#define PATH_SEGMENTS 14
|
||||
|
||||
// If a pixel color is too bright for fit in sRGB, there are two ways to handle it:
|
||||
//
|
||||
// 1. Clamp the pixel within the limits of sRGB, resulting in (near-)maximum
|
||||
// brightness at the cost of the color's saturation. (If it's too bright, it'll
|
||||
// become entirely white.)
|
||||
// 2. Reduce the brightness of the color until it fits within sRGB, preserving
|
||||
// the color's saturation, but losing even *more* brightness.
|
||||
//
|
||||
// Correction for saturation generally looks better, but isn't usually necessary
|
||||
// for more than five or so samples (because the bright pixels will average out
|
||||
// with the dark pixels and fall back within sRGB), so this is *on* by default
|
||||
// with 1-2 samples and *off* by default with 5+ samples.
|
||||
//#define SATURATION_CORRECTION 1
|
||||
|
||||
////
|
||||
//// Perspective settings
|
||||
////
|
||||
|
||||
// This shader natively uses a square (circular?) aspect ratio. With ASPECT_RATIO_CROP
|
||||
// enabled, if you use a wide aspect ratio, the frame will have its height
|
||||
// cropped so that the image can take up the full width of the screen.
|
||||
#define ASPECT_RATIO_CROP 1
|
||||
|
||||
// This setting affects how far you zoom in on the scene.
|
||||
// Greater values = more zoom. Fractional values zoom out. Negative values mirror the scene.
|
||||
#define FOV 1.5
|
||||
|
||||
// Camera position and angle. (Feel free to reference `time` here.)
|
||||
#define CAMERA_POS vec3(0.)
|
||||
// (Don't worry, we call `normalize` for you.)
|
||||
#define CAMERA_DIR vec3(0., 0., 1.)
|
||||
|
||||
///
|
||||
/// TILE_PERSPECTIVE and CLAMP_PERSPECTIVE are only relevant if you zoom out
|
||||
/// (e.g. an FOV < ~1.15). For more information on how and why these settings
|
||||
/// behave the way they do, see their extended descriptions in the `project` function.
|
||||
///
|
||||
|
||||
// Points on the screen >1 or <-1 show the portion of the scene *behind* you,
|
||||
// mirrored so that the edges of each adjacent tile lines up (e.g. tiles above
|
||||
// and below are mirrored vertically, to the left and right horizontally).
|
||||
// This tiling is infinite. You might want to combine this with an IMAGE_OFFSET of
|
||||
// (-1, 0) so that you can see two whole hemispheres instead of one whole hemisphere
|
||||
// and two halves on opposite sides.
|
||||
#define TILE_PERSPECTIVE 0
|
||||
|
||||
// Points on the screen outside of the unit circle (within a tile) are clamped
|
||||
// to the nearest point on the unit circle. This doesn't look very good, but
|
||||
// might be preferable to just rendering black?
|
||||
#define CLAMP_PERSPECTIVE 0
|
||||
|
||||
// Slide the image around on the screen. Each tile is `2x2` centered on the
|
||||
// origin, so an offset of e.g. (2,0) with TILE_PERSPECTIVE enabled
|
||||
// will show you the portion of the scene *behind* you.
|
||||
#define IMAGE_OFFSET vec2(0., 0.)
|
||||
|
||||
////
|
||||
//// Simulation settings
|
||||
////
|
||||
|
||||
// The maximum number of steps a ray can take during marching before giving up
|
||||
// and colliding with nothing. This prevents scenes from taking infinite time to render.
|
||||
#define MAX_STEPS 200
|
||||
|
||||
// The maximum distance a ray can travel before we give up and just say it collides
|
||||
// with nothing. This helps prevent the background from appearing warped by the foreground
|
||||
// due to rays which march close to a foreground object running out of steps before
|
||||
// reaching their destination when slightly farther rays do reach their target.
|
||||
#define MAX_DIST 20.
|
||||
|
||||
// Average the color across frames by storing them in the buffer.
|
||||
// This is like supersampling, but across frames instead of within a pixel,
|
||||
// which lets you render with thousands of samples without crashing.
|
||||
// It's strongly advised that you enable FREEZE_TIME when this is enabled!
|
||||
// This uses iFrame, so if you want to enable this, make sure you hit the
|
||||
// "reset time" function or things will get screwed up.
|
||||
//#define AVERAGE_FRAMES 1
|
||||
|
||||
// Set a time in seconds. The simulation will be frozen at this point in time every frame.
|
||||
// Comment this out to allow time to pass normally.
|
||||
//#define FREEZE_TIME 2.75
|
||||
|
||||
// Loop time over an interval of this duration, beginning at FREEZE_TIME,
|
||||
// or 0, if FREEZE_TIME is not set.
|
||||
//#define LOOP_TIME 0.
|
||||
|
||||
// Set the maximum duration of temporal antialiasing (i.e. how much time
|
||||
// motion blur smears across). Note that this is a *maximum* time, and motion
|
||||
// blur will never be greater than the duration of a frame. That said, when rendering
|
||||
// a still image with FREEZE_TIME you probably want this set to 0., and if you're
|
||||
// stuttering a lot, the large variance in frame times can make objects in the image
|
||||
// appear to jerk back and forth, so this probably shouldn't be any higher
|
||||
// than (the reciprocal of) your average framerate. Comment this out to
|
||||
// remove any cap on the amount of motion blur.
|
||||
#define MAX_TAA_DIFF (1./30.)
|
||||
|
||||
|
||||
//////// --------------------------------
|
||||
//////// Internal settings
|
||||
//////// --------------------------------
|
||||
//////// If you're just viewing the shader, you shouldn't usually need to tweak these.
|
||||
|
||||
// The minimum distance between two points before they are considered the same point.
|
||||
// Setting a minimum distance prevents graphical glitches when ray marching parallel
|
||||
// to a surface, where the ray does not intersect an object, but comes close enough
|
||||
// that the march becomes so slow that it fails to reach its actual destination.
|
||||
#define MIN_DIST (0.001953125/128.)
|
||||
// The distance between samples when estimating a surface's normal.
|
||||
// Setting lower values increases the sharpness of the image at the cost of performance
|
||||
// and rounding errors at objects very far from 0.
|
||||
//
|
||||
// Ray marching halves the distance to the surface of an object each iteration, but the
|
||||
// end goal of ray marching is to pass slightly *inside* the object. Setting a minimum
|
||||
// distance prevents zeno's paradox. This also serves as a optimization
|
||||
// because the number of steps increases logarithmically as you decrease the minimum distance.
|
||||
//
|
||||
// Chosen to be 2^(-9), or about ~2mm, because that's the largest you can set it before
|
||||
// the quality of the image is significantly affected. You can set it as low as about
|
||||
// 2^(-19) before things begin to break. It's good to experiment with both high and low
|
||||
// values to help find bugs in the numerical precision of the light simulation.
|
||||
// If you have precision bugs, the simulation ends up getting affected pretty dramatically
|
||||
// by changes to MIN_DIST, whereas a numerically stable simulation is not affected much at all.
|
||||
//
|
||||
// I expect that a minimum distance of 2^(-9) would work until about 10km from the origin
|
||||
// with 32-bit floating point before starting to break down, but I have not tested it.
|
||||
#define MIN_DIST (0.001953125/8.)
|
||||
// The distance between samples when estimating a surface's normal. Smaller values result
|
||||
// in more precise calculations, but are more sensitive to numerical imprecision.
|
||||
// This should probably be less than MIN_DIST.
|
||||
#define NORMAL_DELTA (MIN_DIST/4.)
|
||||
// Only march this much of MIN_DIST at a time to account for imprecision in the distance
|
||||
// calculations. Chosen by experimentation. If you have to set this low, that often means
|
||||
// that there's a bug somewhere (e.g. you forgot to call `normalize`).
|
||||
#define IMPRECISION_FACTOR 0.9
|
||||
//
|
||||
// Right now, the simulation is numerically stable and I don't have to use it at all!
|
||||
// But I often find that it's necessary to set this to around ~0.92 when debugging
|
||||
// numerical issues.
|
||||
#define IMPRECISION_FACTOR 1.
|
||||
|
||||
//////// --------------------------------
|
||||
//////// Default settings
|
||||
//////// --------------------------------
|
||||
//////// So you can restore a setting to its default value by commenting it out.
|
||||
#ifndef SAMPLES
|
||||
#define SAMPLES 1
|
||||
#endif
|
||||
#ifndef PATH_SEGMENTS
|
||||
#define PATH_SEGMENTS 14
|
||||
|
||||
#endif
|
||||
#ifndef SATURATION_CORRECTION
|
||||
#if SAMPLES > 5
|
||||
#define SATURATION_CORRECTION 0
|
||||
#else
|
||||
#define SATURATION_CORRECTION 1
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef FOV
|
||||
#define FOV 1.5
|
||||
#endif
|
||||
#ifndef ASPECT_RATIO_CROP
|
||||
#define ASPECT_RATIO_CROP 1
|
||||
#endif
|
||||
#ifndef TILE_PERSPECTIVE
|
||||
#define TILE_PERSPECTIVE 0
|
||||
#endif
|
||||
#ifndef CLAMP_PERSPECTIVE
|
||||
#define CLAMP_PERSPECTIVE 0
|
||||
#endif
|
||||
#ifndef IMAGE_OFFSET
|
||||
#define IMAGE_OFFSET vec2(0., 0.)
|
||||
#endif
|
||||
#ifndef MAX_STEPS
|
||||
#define MAX_STEPS 200
|
||||
#endif
|
||||
#ifndef MAX_DIST
|
||||
#define MAX_DIST 20.
|
||||
#endif
|
||||
#ifndef AVERAGE_FRAMES
|
||||
#define AVERAGE_FRAMES 0
|
||||
#endif
|
||||
// FREEZE_TIME, LOOP_TIME, and MAX_TAA_DIFF are *undefined* by default.
|
||||
|
||||
#ifndef MIN_DIST
|
||||
#define MIN_DIST (0.001953125/8.)
|
||||
#endif
|
||||
#ifndef NORMAL_DELTA
|
||||
#define NORMAL_DELTA (MIN_DIST/4.)
|
||||
#endif
|
||||
#ifndef IMPRECISION_FACTOR
|
||||
#define IMPRECISION_FACTOR 1.
|
||||
#endif
|
||||
|
||||
//////// ================================
|
||||
//////// DOCS: Declarations & documentation
|
||||
|
@ -53,9 +233,6 @@
|
|||
/// Assign each pixel a color in the sRGB color space.
|
||||
void mainImage(out vec4 color, vec2 pixel);
|
||||
|
||||
/// Convert a color from linear RGB to the sRGB color space.
|
||||
vec3 linear2srgb(vec3 color);
|
||||
|
||||
/// Take the coordinate of a pixel on the screen and return a color
|
||||
/// in the linear RGBA color space.
|
||||
vec4 color_pixel(vec2 pixel);
|
||||
|
@ -153,6 +330,7 @@ float dist(vec3 pos);
|
|||
/// (This is used primarily when computing normals in the `normal` function.)
|
||||
float mdist(vec3 pos, int medium);
|
||||
|
||||
|
||||
/// The medium which encompasses a point.
|
||||
/// In the case of multiple overlapping media (e.g. one object inside another object),
|
||||
/// the innermost medium will be returned (i.e. the one it is *least* far from the surface of).
|
||||
|
@ -185,6 +363,10 @@ void seed_randoms(vec2 fragCoord);
|
|||
/// rendering equation.
|
||||
vec3 cosine_direction(vec3 norm);
|
||||
|
||||
// Convert between RGB and HSV. Used for SATURATION_CORRECTION.
|
||||
vec3 rgb2hsv(vec3 c);
|
||||
vec3 hsv2rgb(vec3 c);
|
||||
|
||||
//////// ================================
|
||||
//////// IMPL: Implementation
|
||||
//////// ================================
|
||||
|
@ -208,6 +390,20 @@ void mainImage(out vec4 fragColor, vec2 fragCoord) {
|
|||
// Random offsets are more common in path tracing.
|
||||
vec2 pixel = fragCoord + vec2(rand(), rand()) - 0.5;
|
||||
|
||||
#ifdef FREEZE_TIME
|
||||
time = FREEZE_TIME;
|
||||
#else
|
||||
time = 0.;
|
||||
#endif
|
||||
|
||||
#ifdef LOOP_TIME
|
||||
time += mod(iTime, LOOP_TIME);
|
||||
#else
|
||||
#ifndef FREEZE_TIME
|
||||
time = iTime;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
// Apply temporal antialiasing (motion blur) by slightly randomizing the time.
|
||||
// We distribute our samples across the time we estimate the frame will take
|
||||
// to render, which in this case, is simply the time the *last* frame took
|
||||
|
@ -215,11 +411,11 @@ void mainImage(out vec4 fragColor, vec2 fragCoord) {
|
|||
// changes suddenly (stutters).
|
||||
//
|
||||
// TODO: a more sophisticated frame time estimate
|
||||
#ifdef MOTION_BLUR
|
||||
time = iTime + rand() * iTimeDelta;
|
||||
#else
|
||||
time = iTime;
|
||||
float max_taa_diff = INF;
|
||||
#ifdef MAX_TAA_DIFF
|
||||
max_taa_diff = MAX_TAA_DIFF;
|
||||
#endif
|
||||
time += rand() * min(iTimeDelta, max_taa_diff);
|
||||
|
||||
vec4 samp = color_pixel(pixel);
|
||||
|
||||
|
@ -240,7 +436,7 @@ void mainImage(out vec4 fragColor, vec2 fragCoord) {
|
|||
|
||||
if (!any(isnan(_bug))) { color = vec4(_bug, 1.); }
|
||||
|
||||
// Note that it is possible for this renderer to emit colors brighter than 1.0,
|
||||
// NOTE: it is possible for this renderer to emit colors brighter than 1.0,
|
||||
// for example if you use very bright or many light sources. These colors will be
|
||||
// displayed incorrectly, appearing desaturated and having their brightness
|
||||
// clamped to whatever color output is supported.
|
||||
|
@ -252,36 +448,46 @@ void mainImage(out vec4 fragColor, vec2 fragCoord) {
|
|||
// pixel; the average brightness per pixel is generally less than 1.0 when averaged
|
||||
// out with the (more common) black pixels when no light source is encountered.
|
||||
//
|
||||
// Another mitigation approach would be to do color correction, where instead of
|
||||
// Another mitigation approach is to do color correction, where instead of
|
||||
// trying to preserve the brightness by clamping the RGB values and losing saturation,
|
||||
// you try to preserve the saturation by scaling down the brightness until the
|
||||
// full saturation of the colors is visible (or at least part of it).
|
||||
//
|
||||
// TODO: Implement that more sophisticated color correction (it'd be really helpful
|
||||
// when using only one sample per pixel).
|
||||
|
||||
#if SATURATION_CORRECTION
|
||||
// TODO: I'm sure there's a way to do this directly without having to
|
||||
// convert between color spaces twice. This was just more convenient in the moment.
|
||||
color.xyz = rgb2hsv(color.rgb);
|
||||
color.z = min(color.z, 1.); // clamp value to 1
|
||||
color.rgb = hsv2rgb(color.xyz);
|
||||
#else
|
||||
//color = clamp(vec4(0.), color, vec4(1.));
|
||||
#endif
|
||||
|
||||
#if AVERAGE_FRAMES
|
||||
vec2 uv = fragCoord/iResolution.xy;
|
||||
vec4 rest = texture(iChannel0, uv).rgba;
|
||||
|
||||
color = (rest*float(iFrame) + color) / (float(iFrame + 1));
|
||||
|
||||
// Don't output NaN or inf or it'll corrupt the buffer and leave you with
|
||||
// black pixels that never go away because they break the average!
|
||||
if (any(isnan(color)) || any(isinf(color))) {
|
||||
fragColor = rest;
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
// This shader operates in the linear RGB color space,
|
||||
// but fragColor is expected to be in sRGB, so we convert.
|
||||
fragColor = vec4(linear2srgb(color.rgb), color.a);
|
||||
// NOTE: We do this in the main image now. (It's basically the *only* thing we do
|
||||
// in the main image.)
|
||||
fragColor = color;
|
||||
}
|
||||
|
||||
// NOTE: linear2srgb is in the vendored code section at the bottom of the file
|
||||
|
||||
/// Map a pixel coordinate on the rectangular screen into the [-1, 1]^2
/// square coordinate space, correcting for aspect ratio by scaling
/// coordinates down along the screen's longer axis.
vec2 screen2square(vec2 screen) {
    // Normalize to [0, 1], then recenter so the screen spans [-1, 1].
    vec2 centered = ((screen / iResolution.xy) - 0.5) * 2.;

    // Shrink the longer axis so one unit means the same distance on both axes.
    if (iResolution.x > iResolution.y) {
        centered.y *= iResolution.y / iResolution.x;
    } else {
        centered.x *= iResolution.x / iResolution.y;
    }
    return centered;
}
|
||||
|
||||
vec4 color_pixel(vec2 pixel) {
|
||||
vec2 coord = screen2square(pixel) * 1.;
|
||||
vec2 coord = pixel2square(pixel) * 1.;
|
||||
|
||||
// Apply zoom.
|
||||
//
|
||||
|
@ -290,6 +496,10 @@ vec4 color_pixel(vec2 pixel) {
|
|||
// the maximum area defined under curvilinear projection.
|
||||
coord /= FOV;
|
||||
|
||||
#ifdef IMAGE_OFFSET
|
||||
coord += IMAGE_OFFSET;
|
||||
#endif
|
||||
|
||||
ray r = camera_project(coord);
|
||||
if (any(isnan(r.dir))) {
|
||||
// The projection is undefined at this pixel coordinate (see `project`);
|
||||
|
@ -303,6 +513,22 @@ vec4 color_pixel(vec2 pixel) {
|
|||
return vec4(light(r), 1.);
|
||||
}
|
||||
|
||||
/// Map a pixel coordinate on the rectangular screen into the [-1, 1]^2
/// square coordinate space.
///
/// Which axis gets rescaled depends on ASPECT_RATIO_CROP: with it enabled,
/// the longer screen axis keeps the full [-1, 1] range and the other axis
/// is scaled down (cropping the scene); with it disabled, the shorter axis
/// keeps the full range and the longer one extends past it.
vec2 pixel2square(vec2 screen) {
    // Normalize to [0, 1], then recenter so the screen spans [-1, 1].
    vec2 centered = ((screen / iResolution.xy) - 0.5) * 2.;

    // Note the flipped comparison between the two modes.
#if ASPECT_RATIO_CROP
    if (iResolution.x > iResolution.y) {
#else
    if (iResolution.x < iResolution.y) {
#endif
        return vec2(centered.x, centered.y * iResolution.y / iResolution.x);
    } else {
        return vec2(centered.x * iResolution.x / iResolution.y, centered.y);
    }
}
|
||||
|
||||
/// Build the world-space camera ray for a square-space coordinate:
/// project the coordinate into a direction at the origin, then apply
/// the camera transform.
ray camera_project(vec2 coord) {
    ray projected = ray(vec3(0.), project(coord));
    return camera(projected);
}
|
||||
|
@ -312,7 +538,7 @@ ray camera_project(vec2 coord) {
|
|||
vec3 project(vec2 coord) {
|
||||
// The sign of the direction we're facing. 1 is forward, -1 is backward.
|
||||
float dir = 1.;
|
||||
#ifdef TILE_PERSPECTIVE
|
||||
#if TILE_PERSPECTIVE
|
||||
// This projection only supports coordinates within the unit circle
|
||||
// and only projects into the unit hemisphere. Ideally we'd want
|
||||
// some sort of extension which takes points outside the unit circle
|
||||
|
@ -339,7 +565,7 @@ vec3 project(vec2 coord) {
|
|||
}
|
||||
#endif
|
||||
float z = dir*sqrt(1. - coord.x*coord.x - coord.y*coord.y);
|
||||
#ifdef CLAMP_PERSPECTIVE
|
||||
#if CLAMP_PERSPECTIVE
|
||||
// We can "define" the remaining undefined region of the screen
|
||||
// by clamping it to the nearest unit circle. This is sometimes
|
||||
// better than nothing, though it can also be a lot worse because
|
||||
|
@ -357,10 +583,10 @@ vec3 project(vec2 coord) {
|
|||
|
||||
ray camera(ray r) {
|
||||
// camera position
|
||||
vec3 pos = vec3(0.);
|
||||
vec3 pos = CAMERA_POS;
|
||||
|
||||
// camera direction (faces forward, not up)
|
||||
vec3 d = vec3(0., 0., 1.);
|
||||
vec3 d = normalize(CAMERA_DIR);
|
||||
|
||||
// point projection relative to direction
|
||||
// this really ought to be simplified,
|
||||
|
@ -518,7 +744,7 @@ transmission transmit(ray r) {
|
|||
|
||||
// fall-through
|
||||
case 2: // sphere material
|
||||
if (rand() < 0.2) {
|
||||
if (rand() < 0.25) {
|
||||
vec3 refl = 2.*dot(-r.dir, norm)*norm + r.dir;
|
||||
return transmission(ray(np, normalize(cosine_direction(refl) + norm)), 0.);
|
||||
}
|
||||
|
@ -561,12 +787,12 @@ vec3 emit(ray i, ray o, vec3 color) {
|
|||
color.r *= 0.3;
|
||||
color.g *= 0.2;
|
||||
color.b *= 0.9;
|
||||
color += vec3(0., 0., 0.01);
|
||||
//color += vec3(0., 0., 0.01);
|
||||
return color;
|
||||
|
||||
case 2: // sphere material
|
||||
color.gb *= 0.3;
|
||||
color += vec3(0.004, 0., 0.);
|
||||
//color += vec3(0.004, 0., 0.);
|
||||
return color;
|
||||
|
||||
case 3: // light material
|
||||
|
@ -792,29 +1018,10 @@ void scene(vec3 p) {
|
|||
//////// ================================
|
||||
|
||||
////
|
||||
//// AUTHOR: unknown
|
||||
////
|
||||
|
||||
/// Convert a color from linear RGB to the sRGB color space.
/// The #if 1 branch is the exact piecewise sRGB transfer function; the
/// #else branch is a cheaper gamma-2.2 approximation whose difference
/// is usually negligible in practice.
vec3 linear2srgb(vec3 linear_rgb) {
#if 1
    // copied from somewhere on the internet
    bvec3 is_dark = lessThan(linear_rgb, vec3(0.0031308));
    vec3 exp_part = vec3(1.055)*pow(linear_rgb, vec3(1.0/2.4)) - vec3(0.055);
    vec3 lin_part = linear_rgb * vec3(12.92);

    return mix(exp_part, lin_part, is_dark);
    // end copied from somewhere on the internet
#else
    return pow(linear_rgb, vec3(1./2.2));
#endif
}
|
||||
|
||||
////
|
||||
//// AUTHOR: iq (https://www.shadertoy.com/view/4sfGzS)
|
||||
//// AUTHOR: iq
|
||||
////
|
||||
|
||||
// Randoms (https://www.shadertoy.com/view/4sfGzS))
// oldschool rand() from Visual Studio
// Global PRNG state; reseeded per pixel/frame by seed_randoms.
int seed = 1;

// Advance the LCG (classic MSVC rand() constants) and return the top
// 15 bits of the new state, i.e. a value in [0, 32767].
int irand(void) {
    seed = seed*0x343fd + 0x269ec3;
    return (seed >> 16) & 32767;
}
||||
|
@ -831,6 +1038,16 @@ void seed_randoms(vec2 fragCoord) {
|
|||
seed = hash(q.x+hash(q.y+hash(iFrame)));
|
||||
}
|
||||
|
||||
// HSV (https://www.shadertoy.com/view/MsS3Wc), via nmz
|
||||
/// Convert a color from HSV to RGB.
/// (https://www.shadertoy.com/view/MsS3Wc, via nmz)
vec3 hsv2rgb( in vec3 c )
{
    // Piecewise hue ramp for each of the R, G, B channels.
    vec3 ramp = clamp( abs(mod(c.x*6.0+vec3(0.0,4.0,2.0),6.0)-3.0)-1.0, 0.0, 1.0 );

    // cubic smoothing of the hue transitions
    ramp = ramp*ramp*(3.0-2.0*ramp);

    // Apply saturation (mix toward white) and value (overall brightness).
    return c.z * mix( vec3(1.0), ramp, c.y);
}
|
||||
|
||||
////
|
||||
//// AUTHOR: fizzer, via iq: http://www.amietia.com/lambertnotangent.html
|
||||
////
|
||||
|
@ -843,3 +1060,18 @@ vec3 cosine_direction(vec3 norm) {
|
|||
u = 2.0*u - 1.0;
|
||||
return normalize(norm + vec3(sqrt(1.0 - u*u)*vec2(cos(a), sin(a)), u));
|
||||
}
|
||||
|
||||
////
|
||||
//// AUTHOR: Sam Hocevar, via nmz (http://lolengine.net/blog/2013/07/27/rgb-to-hsv-in-glsl)
|
||||
////
|
||||
|
||||
/// Convert a color from RGB to HSV. Branchless formulation by
/// Sam Hocevar (http://lolengine.net/blog/2013/07/27/rgb-to-hsv-in-glsl).
vec3 rgb2hsv(vec3 c)
{
    // K packs the hue offsets used by the channel-ordering selections below.
    vec4 K = vec4(0.0, -1.0 / 3.0, 2.0 / 3.0, -1.0);
    // Select between the g/b (and then r) orderings without branching,
    // using step() + mix() in place of comparisons.
    vec4 p = mix(vec4(c.bg, K.wz), vec4(c.gb, K.xy), step(c.b, c.g));
    vec4 q = mix(vec4(p.xyw, c.r), vec4(c.r, p.yzx), step(p.x, c.r));

    // d is the chroma (max - min); e guards the divisions against
    // zero for achromatic (gray) inputs.
    float d = q.x - min(q.w, q.y);
    float e = 1.0e-10;
    // Returns (hue, saturation, value).
    return vec3(abs(q.z + (q.w - q.y) / (6.0 * d + e)), d / (q.x + e), q.x);
}
|
||||
|
|
Loading…
Reference in New Issue