Update dependencies to latest versions.

Branch: master
Author: James T. Martin, 2023-07-23 20:19:25 -07:00
Parent: cb6322e379
Commit: 6e1c169952
Signed by: james (GPG Key ID: D6FB2F9892F9B225)
5 changed files with 988 additions and 690 deletions

Cargo.lock (generated): 1502 lines changed; diff suppressed because it is too large.

Cargo.toml
@@ -20,7 +20,7 @@ server = []
 # use Rust structs like C structs for wgpu
 [dependencies.bytemuck]
-version = "1.9"
+version = "1.13"
 features = ["derive"]
 # CBOR (serialization format)
@@ -33,7 +33,10 @@ version = "0.4.3"
 # audio output
 [dependencies.cpal]
-version = "0.13.5"
+version = "0.15.2"
+[dependencies.dasp_sample]
+version = "0.11"
 # locations of configuration/cache/etc paths
 #[dependencies.directories]
@@ -41,7 +44,7 @@ version = "0.13.5"
 # logging backend
 [dependencies.fern]
-version = "0.6.1"
+version = "0.6.2"
 features = ["colored"]
 # text rendering
@@ -59,14 +62,14 @@ features = ["colored"]
 # images
 [dependencies.image]
-version = "0.24.2"
+version = "0.24.6"
 default-features = false
 # for now, no formats; we just use this for manipulating image buffers internally.
 features = []
 # logging
 [dependencies.log]
-version = "0.4.17"
+version = "0.4.19"
 features = ["std"]
 # noise functions
@@ -87,7 +90,7 @@ features = ["std"]
 # async runtime
 [dependencies.tokio]
-version = "1.19"
+version = "1.29.1"
 # TODO: Is rt-multi-thread faster for our use case?
 features = ["rt", "macros"]
@@ -97,11 +100,11 @@ features = ["rt", "macros"]
 # graphics API
 [dependencies.wgpu]
-version = "0.13.1"
+version = "0.17.0"
 # window creation
 [dependencies.winit]
-version = "0.26.1"
+version = "0.28.6"
 features = ["x11", "wayland"]
 [profile.release]
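
The new dasp_sample entry accompanies the cpal bump: cpal 0.15 moved its sample-format conversions over to the dasp_sample crate and re-exports its conversion traits, which is presumably why it now appears as a direct dependency and why the audio changes below import FromSample. A minimal sketch of that conversion API; the values are purely illustrative:

    use cpal::Sample; // `from_sample` comes via the dasp_sample re-export

    fn main() {
        // Convert an f32 sample in -1.0..=1.0 into other PCM representations,
        // the same kind of conversion the audio callback below performs per sample.
        let x = 0.25_f32;
        let as_i16: i16 = i16::from_sample(x); // scaled into the signed 16-bit range
        let as_u16: u16 = u16::from_sample(x); // unsigned formats are offset; silence is 32768
        println!("{as_i16} {as_u16}");
    }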

(audio module)

@@ -1,6 +1,7 @@
 use cpal::*;
 use cpal::traits::*;
 use claxon::*;
+use cpal::FromSample;
 pub struct Audio {
     device: Device,
@@ -20,13 +21,21 @@ impl Audio {
         let stream = match config.sample_format() {
             SampleFormat::F32 => create_output_stream::<f32>(&device, &config.config()),
             SampleFormat::I16 => create_output_stream::<i16>(&device, &config.config()),
-            SampleFormat::U16 => create_output_stream::<u16>(&device, &config.config())
+            SampleFormat::U16 => create_output_stream::<u16>(&device, &config.config()),
+            SampleFormat::I8 => create_output_stream::<i8>(&device, &config.config()),
+            SampleFormat::I32 => create_output_stream::<i32>(&device, &config.config()),
+            SampleFormat::I64 => create_output_stream::<i64>(&device, &config.config()),
+            SampleFormat::U8 => create_output_stream::<u8>(&device, &config.config()),
+            SampleFormat::U32 => create_output_stream::<u32>(&device, &config.config()),
+            SampleFormat::U64 => create_output_stream::<u64>(&device, &config.config()),
+            SampleFormat::F64 => create_output_stream::<f64>(&device, &config.config()),
+            _ => panic!("Unknown sample format."),
         };
         Self { device, stream }
     }
 }
-fn create_output_stream<T: Sample>(device: &Device, config: &StreamConfig) -> Stream {
+fn create_output_stream<T: Sample + FromSample<f32> + SizedSample>(device: &Device, config: &StreamConfig) -> Stream {
     let sample_rate = config.sample_rate.0;
     let channels = config.channels as usize;
     let mut clock = 0;
@@ -40,16 +49,17 @@ fn create_output_stream<T: Sample>(device: &Device, config: &StreamConfig) -> Stream {
                 for sample in frame.iter_mut() {
                     clock += 1;
                     if clock >= music.len() {
-                        *sample = Sample::from(&0.0);
+                        *sample = Sample::from_sample(0.0);
                         return;
                     }
-                    *sample = Sample::from(&music[clock]);
+                    *sample = Sample::from_sample(music[clock]);
                 }
             }
         },
         move |err| {
             log::error!("Audio stream error: {}", err);
-        }
+        },
+        None
     ).expect("Failed to create audio output stream.")
 }
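
For reference, the audio changes above track cpal's 0.13 → 0.15 API: SampleFormat now has many more variants (the catch-all arm handles anything else), sample conversion goes through from_sample with a SizedSample + FromSample<f32> bound, and build_output_stream takes a new optional timeout as its fourth argument (the added None). A standalone sketch of that shape, assuming cpal 0.15; the 440 Hz generator and function names are made up for illustration:

    use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};
    use cpal::{FromSample, Sample, SizedSample, StreamConfig};

    fn run<T: SizedSample + FromSample<f32>>(device: &cpal::Device, config: &StreamConfig) -> cpal::Stream {
        let sample_rate = config.sample_rate.0 as f32;
        let channels = config.channels as usize;
        let mut phase = 0.0_f32;
        let stream = device
            .build_output_stream(
                config,
                move |data: &mut [T], _: &cpal::OutputCallbackInfo| {
                    for frame in data.chunks_mut(channels) {
                        phase = (phase + 440.0 / sample_rate) % 1.0;
                        // Generate in f32, then convert to whatever format the device wants.
                        let value = T::from_sample((phase * std::f32::consts::TAU).sin() * 0.2);
                        for sample in frame.iter_mut() {
                            *sample = value;
                        }
                    }
                },
                |err| eprintln!("audio stream error: {err}"),
                None, // new in cpal 0.15: optional stream-creation timeout
            )
            .expect("Failed to build audio output stream.");
        stream.play().expect("Failed to start audio stream.");
        stream
    }

    fn main() {
        let device = cpal::default_host().default_output_device().expect("no output device");
        let config = device.default_output_config().expect("no default output config");
        // Dispatch on the sample format like the match above; F32 is the common case.
        let _stream = match config.sample_format() {
            cpal::SampleFormat::F32 => run::<f32>(&device, &config.config()),
            cpal::SampleFormat::I16 => run::<i16>(&device, &config.config()),
            cpal::SampleFormat::U16 => run::<u16>(&device, &config.config()),
            other => panic!("unhandled sample format {other:?}"),
        };
        std::thread::sleep(std::time::Duration::from_secs(1));
    }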

(graphics module)

@@ -48,8 +48,8 @@ impl Graphics {
         // TODO: I don't think there's any reason we can't support ALL, but with ALL it defaults to OpenGL
         // on my machine for some reason. We should support ALL, so long as the PRIMARY backends
         // are used by default.
-        let instance = Instance::new(Backends::PRIMARY);
-        let surface = unsafe { instance.create_surface(&window) };
+        let instance = Instance::new(InstanceDescriptor { backends: Backends::PRIMARY, dx12_shader_compiler: Dx12Compiler::Fxc });
+        let surface = unsafe { instance.create_surface(&window) }.expect("Failed to create wgpu surface.");
         let adapter = instance.request_adapter(
             &RequestAdapterOptionsBase {
                 power_preference: PowerPreference::HighPerformance,
@@ -57,7 +57,7 @@ impl Graphics {
                 compatible_surface: Some(&surface)
             }
         ).await.expect("Failed to get wgpu adapter.");
-        let format = surface.get_supported_formats(&adapter)[0];
+        let format = surface.get_capabilities(&adapter).formats[0];
         let (device, queue) = adapter.request_device(&DeviceDescriptor {
             label: None,
             features: Features::default(),
@@ -76,9 +76,10 @@ impl Graphics {
                 mip_level_count: 1,
                 sample_count: 1,
                 dimension: TextureDimension::D2,
-                format: TextureFormat::Rgba32Float,
+                format: format,
+                view_formats: &[format],
                 usage: TextureUsages::TEXTURE_BINDING | TextureUsages::COPY_DST,
-                label: Some("dither texture")
+                label: Some("dither texture"),
             },
             bytemuck::cast_slice(&*dither::bayer_texture())
         );
@@ -211,7 +212,9 @@ impl Graphics {
             format: self.surface_format,
             width: size.width,
             height: size.height,
-            present_mode: PresentMode::Mailbox
+            present_mode: PresentMode::Mailbox,
+            alpha_mode: CompositeAlphaMode::Auto,
+            view_formats: Vec::from([self.surface_format]),
         });
         self.uniform_copy_buffer.slice(..).get_mapped_range_mut().copy_from_slice(bytemuck::cast_slice(&[Uniforms {
             dimensions: [self.desired_size.width as f32, self.desired_size.height as f32],
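
The wgpu edits in this file track the 0.13 → 0.17 API: Instance::new now takes an InstanceDescriptor, create_surface returns a Result, supported surface formats come from get_capabilities, and the texture/surface descriptors gained view_formats (plus alpha_mode on SurfaceConfiguration). A condensed sketch of that setup flow under wgpu 0.17 follows; it is not the project's actual Graphics::new, and the function name and winit 0.28 window argument are assumptions:

    use wgpu::*;

    // Sketch of wgpu 0.17 instance/surface/device setup (assumes a winit 0.28 Window).
    async fn init_wgpu(window: &winit::window::Window) -> (Surface, Device, Queue) {
        let instance = Instance::new(InstanceDescriptor {
            backends: Backends::PRIMARY,
            dx12_shader_compiler: Dx12Compiler::Fxc,
        });
        // create_surface is still unsafe in 0.17 and now returns a Result.
        let surface = unsafe { instance.create_surface(window) }
            .expect("Failed to create wgpu surface.");
        let adapter = instance
            .request_adapter(&RequestAdapterOptions {
                power_preference: PowerPreference::HighPerformance,
                force_fallback_adapter: false,
                compatible_surface: Some(&surface),
            })
            .await
            .expect("Failed to get wgpu adapter.");
        // get_supported_formats() was replaced by get_capabilities().formats.
        let format = surface.get_capabilities(&adapter).formats[0];
        let (device, queue) = adapter
            .request_device(
                &DeviceDescriptor {
                    label: None,
                    features: Features::default(),
                    limits: Limits::default(),
                },
                None,
            )
            .await
            .expect("Failed to get wgpu device.");
        let size = window.inner_size();
        surface.configure(&device, &SurfaceConfiguration {
            usage: TextureUsages::RENDER_ATTACHMENT,
            format,
            width: size.width,
            height: size.height,
            present_mode: PresentMode::Fifo,
            // New required fields relative to 0.13:
            alpha_mode: CompositeAlphaMode::Auto,
            view_formats: vec![format],
        });
        (surface, device, queue)
    }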

(WGSL shader)

@@ -7,103 +7,57 @@ struct Uniforms {
 @binding(0)
 var<uniform> uniforms: Uniforms;
-let PI: f32 = 3.14159265358979323846264338327950288; // 3.14159274
+const PI: f32 = 3.14159265358979323846264338327950288; // 3.14159274
 struct Ray {
     pos: vec3<f32>, // POSition (aka the origin)
     dir: vec3<f32>, // DIRection (normalized)
 }
+/// Map a rectangle with the provided dimensions onto a square from (-1,-1) to (1,1).
+/// This is a linear scaling transformation. Some of the output square will
+/// be cropped if the rectangle dimensions are not square.
+///
-/// Convert from pixel coordinates to window-independent square coordinates.
-///
-/// Input coordinates:
-/// x: from 0 (left) to dimensions.x (right)
-/// y: from 0 (bottom) to dimensions.y (top)
-///
-/// Output coordinates:
-/// x: from -1 (left) to 1 (right)
-/// y: from -1 (down) to 1 (up)
-///
-/// The output coordinates are square and independent of the
-/// window's dimensions and aspect ratio. Some of the image
-/// will be cropped if the window's aspect ratio is not square.
-fn pixel_to_square(pixel: vec2<f32>) -> vec2<f32> {
-    let square = ((pixel / uniforms.dimensions) - 0.5) * 2.0;
-    // Scale the window's smaller aspect ratio to make the coordinates square.
-    // For example, a 16:9 window will have an x coordinate from -1 to 1 and
+/// We use this function because the coordinates provided to our shader are
+/// pixel coordinates, but we want the shader to behave the same way
+/// regardless of the shape or size of the window.
+fn rectangle_to_square(rect: vec2<f32>, dims: vec2<f32>) -> vec2<f32> {
+    var sq = rect / dims * 2.0 - 1.0;
+    // Scale the rectangle's smaller aspect ratio to make the coordinates square.
+    // For example, a 16:9 rectangle will have an x coordinate from -1 to 1 and
     // a y coordinate from -9/16ths to 9/16ths. The rest of the image lying outside
     // of that range will be cropped out.
-    if (uniforms.dimensions.x > uniforms.dimensions.y) {
-        return vec2<f32>(square.x, square.y * uniforms.dimensions.y / uniforms.dimensions.x);
+    if (dims.x > dims.y) {
+        return vec2<f32>(sq.x, sq.y * dims.y / dims.x);
     } else {
-        return vec2<f32>(square.x * uniforms.dimensions.x / uniforms.dimensions.y, square.y);
+        return vec2<f32>(sq.x * dims.x / dims.y, sq.y);
     }
 }
-/// Project a coordinate on the unit circle onto the unit hemisphere.
-/// This is used for curvilinear perspective.
-///
-/// Coordinates:
-/// x: from -1 (90 degrees left) to 1 (90 degrees right)
-/// y: from -1 (90 degrees down) to 1 (90 degrees up)
-///
-/// TODO: add support for the usual, non-curvilinear perspective projection
-/// (and possibly other projections, just for fun?)
-fn project(coord_: vec2<f32>) -> vec3<f32> {
-    var coord = coord_;
-    // This projection only supports coordinates within the unit circle
-    // and only projects into the unit hemisphere. Ideally we'd want
-    // some sort of extension which takes points outside the unit circle
-    // and projects them somewhere behind you (with the point at infinity
-    // being directly behind you), but I haven't come up with any reasonable
-    // extension of this perspective system which behaves in that manner.
+/// Map from a square grid to the surface of a (double-covered) sphere.
+/// We use this as a (curvilinear) perspective projection for the camera.
+fn project(grid: vec2<f32>) -> vec3<f32> {
+    // The real plane is the product of two lines, R x R, and the torus
+    // is the product of two circles, S^1 x S^1. Therefore, we can map
+    // from the real plane to the torus by taking each axis modulo tau.
     //
-    // What we can do instead is *tile* the projection so that adjacent projections
-    // are a mirrored projection of the unit hemisphere *behind* you.
-    // This is a logical extension because the projection becomes continuous
-    // along the x and y axis (you're just looking around in perfect circles),
-    // and it allows you to view the entire space. The main problem to this approach
-    // is that all of the space between the tiled circles is still undefined,
-    // but this is still the best solution which I'm aware of.
+    // If we set the major radius of the torus to 0, then the torus
+    // becomes a double-covered sphere. The points on the double-covered
+    // sphere are identical to the points on the regular sphere,
+    // but the coordinate system is different:
     //
+    // Rotate left-right and what you see behind you is backwards; rotate
+    // up-down and what you see is upside-down. This means that the projection
+    // is continuous, and a translation on the grid corresponds with
+    // a rotation of the camera. (Compare with the behavior of a mirror,
+    // which is z-inverted, resulting in text appearing backwards.)
-    var dir: f32 = 1.; // the sign of the direction we're facing: 1 forward, -1 backward.
-    // Tile coordinates:
-    // (0-2, 0-2): forward
-    // (2-4, 0-2): backward, left/right mirrored
-    // (0-2, 2-4): backward, up/down mirrored
-    // (2-4, 2-4): forward, left/right and up/down mirrored
-    // FIXME: Use modulus which handles negatives properly so I don't have to arbitrarily add 8.
-    coord = (coord + 1. + 8.) % 4.;
-    // mirror/reverse and map back into 0 to 2 range
-    if (coord.x > 2.) {
-        coord.x = 4. - coord.x;
-        dir = -dir;
-    }
-    if (coord.y > 2.) {
-        coord.y = 4. - coord.y;
-        dir = -dir;
-    }
-    // map back into -1 to 1 range
-    coord = coord - 1.;
-    // Avoid NaN because implementations are allowed to assume it won't occur.
-    let preZ = 1. - coord.x*coord.x - coord.y*coord.y;
-    // We can "define" the remaining undefined region of the screen
-    // by clamping it to the nearest unit circle. This is sometimes
-    // better than nothing, though it can also be a lot worse because
-    // we still have to actually *render* all of those pixels.
-    // TODO: Add an option to allow stretching into a square instead of clamping?
-    // I imagine things could get pretty badly warped, but maybe it could be useful?
-    // TODO: Is this clamping behavior correct? It doesn't look like it actually is, tbh.
-    if (preZ < 0.) {
-        return vec3<f32>(normalize(coord), 0.);
-    }
-    return normalize(vec3<f32>(coord, dir*sqrt(preZ)));
+    // The parametric definition of a torus with R = 0 and r = 1.
+    return vec3<f32>(
+        cos(grid.x) * cos(grid.y),
+        cos(grid.x) * sin(grid.y),
+        sin(grid.x)
+    );
 }
 /// After converting pixel coordinates to screen coordinates, we still have a problem:
@@ -114,7 +68,7 @@ fn camera_project(square: vec2<f32>) -> Ray {
     // Our coordinates already range from -1 to 1, corresponding with the
     // edges of the window, but we want the edges of the window to correspond
     // with the angle of the FOV instead.
-    let circle = square * uniforms.field_of_view / PI;
+    let circle = square * uniforms.field_of_view;
     let sphere = project(circle);
     return Ray(vec3<f32>(0.), sphere);
 }
@@ -167,7 +121,7 @@ fn clamp_value(_color: vec3<f32>) -> vec3<f32> {
 @fragment
 fn fs_main(@builtin(position) position: vec4<f32>) -> @location(0) vec4<f32> {
-    let ray = camera_project(pixel_to_square(position.xy));
+    let ray = camera_project(rectangle_to_square(position.xy, uniforms.dimensions));
     var color = ray.dir / 2.0 + 0.5;
     // TODO: Separate postprocessing pass.
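
The shader comments above carry the real reasoning behind the new projection, so here is a CPU-side Rust mirror of rectangle_to_square and project (not part of the commit; the helper signatures are made up) that can be used to spot-check the math: every projected direction should land on the unit sphere, and a 16:9 rectangle should map to a y range of plus or minus 9/16.

    /// Mirror of the WGSL `rectangle_to_square`: map pixel coordinates in a
    /// `dims`-sized rectangle to square [-1, 1] coordinates, cropping the longer axis.
    fn rectangle_to_square(rect: [f32; 2], dims: [f32; 2]) -> [f32; 2] {
        let sq = [rect[0] / dims[0] * 2.0 - 1.0, rect[1] / dims[1] * 2.0 - 1.0];
        if dims[0] > dims[1] {
            [sq[0], sq[1] * dims[1] / dims[0]]
        } else {
            [sq[0] * dims[0] / dims[1], sq[1]]
        }
    }

    /// Mirror of the WGSL `project`: a torus with major radius 0 and minor radius 1,
    /// i.e. a double-covered unit sphere, so translating the grid rotates the view
    /// and the mapping is continuous in both axes.
    fn project(grid: [f32; 2]) -> [f32; 3] {
        [
            grid[0].cos() * grid[1].cos(),
            grid[0].cos() * grid[1].sin(),
            grid[0].sin(),
        ]
    }

    fn main() {
        // Every output of `project` should have length 1, no matter the input.
        for &(x, y) in &[(0.0, 0.0), (1.0, 2.0), (-3.5, 7.25)] {
            let p = project([x, y]);
            let len2 = p[0] * p[0] + p[1] * p[1] + p[2] * p[2];
            assert!((len2 - 1.0).abs() < 1e-5);
        }
        // A 16:9 rectangle's vertical extent shrinks to 9/16, as the comment above says.
        let corner = rectangle_to_square([1600.0, 900.0], [1600.0, 900.0]);
        println!("{corner:?}"); // [1.0, 0.5625]
    }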