Compare commits


1 Commit
master ... wasm

James T. Martin (a0ae573258)
Ported to WASM + WebGPU. (Untested.)
I was not able to test this because:

* Firefox nightly does not support the WebGPU spec of wgpu 0.13
* Chrome Dev WebGPU does not work consistently on AMD+Linux
* I don't feel like setting up a Windows VM or reverting wgpu

Pushing onto a separate branch for whenever Firefox gets updated,
so I can test it then.
Date: 2022-07-09 00:19:58 -07:00
16 changed files with 8594 additions and 1135 deletions

.editorconfig

@ -7,3 +7,6 @@ charset = utf-8
indent_style = space
trim_trailing_whitespace = true
insert_final_newline = true
[*.{js, json, html}]
indent_size = 2

.gitignore (vendored): 10 lines changed

@ -11,3 +11,13 @@
!/Cargo.lock
!/Cargo.toml
!/LICENSE.txt
# www source code
!/www/bootstrap.js
!/www/index.js
!/www/index.html
# www configuration
!/www/package.json
!/www/package-lock.json
!/www/webpack.config.js

Cargo.lock (generated): 1529 lines changed

File diff suppressed because it is too large.

Cargo.toml

@ -6,13 +6,14 @@ description = ""
repository = "https://github.com/jamestmartin/pathland"
license = "0BSD"
publish = false
autobins = false
[features]
client = []
[lib]
crate-type = ["cdylib", "rlib"]
server = []
[dependencies]
[[bin]]
name = "pathland"
path = "src/main.rs"
# brotli (compression format)
#[dependencies.brotli]
@ -20,7 +21,7 @@ server = []
# use Rust structs like C structs for wgpu
[dependencies.bytemuck]
version = "1.13"
version = "1.9"
features = ["derive"]
# CBOR (serialization format)
@ -33,20 +34,12 @@ version = "0.4.3"
# audio output
[dependencies.cpal]
version = "0.15.2"
[dependencies.dasp_sample]
version = "0.11"
version = "0.13.5"
# locations of configuration/cache/etc paths
#[dependencies.directories]
#version = "4.0"
# logging backend
[dependencies.fern]
version = "0.6.2"
features = ["colored"]
# text rendering
#[dependencies.fontdue]
#version = "0.7.2"
@ -62,14 +55,14 @@ features = ["colored"]
# images
[dependencies.image]
version = "0.24.6"
version = "0.24.2"
default-features = false
# for now, no formats; we just use this for manipulating image buffers internally.
features = []
# logging
[dependencies.log]
version = "0.4.19"
version = "0.4.17"
features = ["std"]
# noise functions
@ -88,26 +81,39 @@ features = ["std"]
#[dependencies.serde]
#version = "1.0"
# async runtime
[dependencies.tokio]
version = "1.29.1"
# TODO: Is rt-multi-thread faster for our use case?
features = ["rt", "macros"]
# TOML (configuration format)
#[dependencies.toml_edit]
#version = "0.14.4"
# graphics API
[dependencies.wgpu]
version = "0.17.0"
version = "0.13.1"
# window creation
[dependencies.winit]
version = "0.28.6"
version = "0.26.1"
features = ["x11", "wayland"]
# logging backend
[target.'cfg(not(target_arch = "wasm32"))'.dependencies.fern]
version = "0.6.1"
features = ["colored"]
# async runtime
[target.'cfg(not(target_arch = "wasm32"))'.dependencies.tokio]
version = "1.19"
# TODO: Is rt-multi-thread faster for our use case?
features = ["rt"]
[target.'cfg(target_arch = "wasm32")'.dependencies]
console_log = "0.2.0"
console_error_panic_hook = "0.1.7"
wasm-bindgen = "0.2.81"
wasm-bindgen-futures = "0.4.31"
web-sys = { version = "0.3.58", features = ["HtmlCanvasElement"] }
[profile.release]
strip = "symbols"
lto = "fat"
codegen-units = 1
opt-level = "s"

src/audio.rs

@ -1,7 +1,6 @@
use cpal::*;
use cpal::traits::*;
use claxon::*;
use cpal::FromSample;
pub struct Audio {
device: Device,
@ -21,21 +20,13 @@ impl Audio {
let stream = match config.sample_format() {
SampleFormat::F32 => create_output_stream::<f32>(&device, &config.config()),
SampleFormat::I16 => create_output_stream::<i16>(&device, &config.config()),
SampleFormat::U16 => create_output_stream::<u16>(&device, &config.config()),
SampleFormat::I8 => create_output_stream::<i8>(&device, &config.config()),
SampleFormat::I32 => create_output_stream::<i32>(&device, &config.config()),
SampleFormat::I64 => create_output_stream::<i64>(&device, &config.config()),
SampleFormat::U8 => create_output_stream::<u8>(&device, &config.config()),
SampleFormat::U32 => create_output_stream::<u32>(&device, &config.config()),
SampleFormat::U64 => create_output_stream::<u64>(&device, &config.config()),
SampleFormat::F64 => create_output_stream::<f64>(&device, &config.config()),
_ => panic!("Unknown sample format."),
SampleFormat::U16 => create_output_stream::<u16>(&device, &config.config())
};
Self { device, stream }
}
}
fn create_output_stream<T: Sample + FromSample<f32> + SizedSample>(device: &Device, config: &StreamConfig) -> Stream {
fn create_output_stream<T: Sample>(device: &Device, config: &StreamConfig) -> Stream {
let sample_rate = config.sample_rate.0;
let channels = config.channels as usize;
let mut clock = 0;
@ -49,17 +40,16 @@ fn create_output_stream<T: Sample + FromSample<f32> + SizedSample>(device: &Devi
for sample in frame.iter_mut() {
clock += 1;
if clock >= music.len() {
*sample = Sample::from_sample(0.0);
*sample = Sample::from(&0.0);
return;
}
*sample = Sample::from_sample(music[clock]);
*sample = Sample::from(&music[clock]);
}
}
},
move |err| {
log::error!("Audio stream error: {}", err);
},
None
}
).expect("Failed to create audio output stream.")
}

src/graphics.rs

@ -27,7 +27,7 @@ const VERTICES: &[Vertex] = &[
pub struct Graphics {
instance: Instance,
pub window: Window,
window: Window,
surface: Surface,
adapter: Adapter,
surface_format: TextureFormat,
@ -35,6 +35,7 @@ pub struct Graphics {
queue: Queue,
shader: ShaderModule,
pipeline: RenderPipeline,
vertex_buffer: Buffer,
surface_stale: bool,
desired_size: winit::dpi::PhysicalSize<u32>,
dither_bind_group: BindGroup,
@ -48,8 +49,8 @@ impl Graphics {
// TODO: I don't think there's any reason we can't support ALL, but with ALL it defaults to OpenGL
// on my machine for some resason. We should support ALL, so long as the PRIMARY backends
// are used by default.
let instance = Instance::new(InstanceDescriptor { backends: Backends::PRIMARY, dx12_shader_compiler: Dx12Compiler::Fxc });
let surface = unsafe { instance.create_surface(&window) }.expect("Failed to create wgpu surface.");
let instance = Instance::new(Backends::PRIMARY);
let surface = unsafe { instance.create_surface(&window) };
let adapter = instance.request_adapter(
&RequestAdapterOptionsBase {
power_preference: PowerPreference::HighPerformance,
@ -57,14 +58,13 @@ impl Graphics {
compatible_surface: Some(&surface)
}
).await.expect("Failed to get wgpu adapter.");
let format = surface.get_capabilities(&adapter).formats[0];
let format = surface.get_supported_formats(&adapter)[0];
let (device, queue) = adapter.request_device(&DeviceDescriptor {
label: None,
features: Features::default(),
limits: Limits::downlevel_defaults()
}, None).await.expect("Failed to get wgpu device.");
let cover_screen_shader = device.create_shader_module(include_wgsl!("graphics/cover_screen.wgsl"));
let shader = device.create_shader_module(include_wgsl!("graphics/shader.wgsl"));
let shader = device.create_shader_module(include_wgsl!("shader.wgsl"));
let dither_texture = device.create_texture_with_data(
&queue,
&TextureDescriptor {
@ -76,10 +76,9 @@ impl Graphics {
mip_level_count: 1,
sample_count: 1,
dimension: TextureDimension::D2,
format: format,
view_formats: &[format],
format: TextureFormat::Rgba32Float,
usage: TextureUsages::TEXTURE_BINDING | TextureUsages::COPY_DST,
label: Some("dither texture"),
label: Some("dither texture")
},
bytemuck::cast_slice(&*dither::bayer_texture())
);
@ -154,9 +153,15 @@ impl Graphics {
push_constant_ranges: &[]
})),
vertex: VertexState {
module: &cover_screen_shader,
module: &shader,
entry_point: "vs_main",
buffers: &[]
buffers: &[
VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as BufferAddress,
step_mode: VertexStepMode::Vertex,
attributes: &vertex_attr_array![0 => Float32x2]
}
]
},
fragment: Some(FragmentState {
module: &shader,
@ -186,6 +191,11 @@ impl Graphics {
},
multiview: None
});
let vertex_buffer = device.create_buffer_init(&BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(VERTICES),
usage: BufferUsages::VERTEX
});
let desired_size = window.inner_size();
Self {
instance,
@ -197,6 +207,7 @@ impl Graphics {
queue,
shader,
pipeline,
vertex_buffer,
surface_stale: true,
desired_size,
dither_bind_group,
@ -207,14 +218,13 @@ impl Graphics {
}
fn reconfigure_surface(&self, size: winit::dpi::PhysicalSize<u32>) {
log::debug!("Reconfiguring wgpu surface.");
self.surface.configure(&self.device, &SurfaceConfiguration {
usage: TextureUsages::RENDER_ATTACHMENT,
format: self.surface_format,
width: size.width,
height: size.height,
present_mode: PresentMode::Mailbox,
alpha_mode: CompositeAlphaMode::Auto,
view_formats: Vec::from([self.surface_format]),
present_mode: PresentMode::AutoVsync
});
self.uniform_copy_buffer.slice(..).get_mapped_range_mut().copy_from_slice(bytemuck::cast_slice(&[Uniforms {
dimensions: [self.desired_size.width as f32, self.desired_size.height as f32],
@ -225,10 +235,15 @@ impl Graphics {
let mut encoder = self.device.create_command_encoder(&CommandEncoderDescriptor::default());
encoder.copy_buffer_to_buffer(&self.uniform_copy_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as u64);
self.queue.submit(std::iter::once(encoder.finish()));
self.uniform_copy_buffer.slice(..).map_async(MapMode::Write, |err| err.unwrap());
self.uniform_copy_buffer.slice(..).map_async(MapMode::Write, |err| {
if let Err(err) = err {
log::error!("buffer async error: {}", err);
}
});
}
fn reconfigure_surface_if_stale(&mut self) {
log::info!("reconfigure");
if self.surface_stale {
self.reconfigure_surface(self.desired_size);
self.surface_stale = false;
@ -241,6 +256,7 @@ impl Graphics {
}
pub fn draw(&mut self) {
log::info!("redraw");
self.reconfigure_surface_if_stale();
let frame = self.surface.get_current_texture().expect("Failed to get surface texture");
let view = frame.texture.create_view(&TextureViewDescriptor::default());
@ -267,8 +283,8 @@ impl Graphics {
render_pass.set_pipeline(&self.pipeline);
render_pass.set_bind_group(0, &self.dither_bind_group, &[]);
render_pass.set_bind_group(1, &self.uniform_bind_group, &[]);
//render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
render_pass.draw(0..5 as u32, 0..1);
render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
render_pass.draw(0..VERTICES.len() as u32, 0..1);
}
self.queue.submit(std::iter::once(encoder.finish()));
frame.present();

src/graphics/cover_screen.wgsl (deleted)

@ -1,17 +0,0 @@
@vertex
fn vs_main(@builtin(vertex_index) ix: u32) -> @builtin(position) vec4<f32> {
var x: f32;
var y: f32;
// (-1, 1), (-1, -1), (1, -1), (1, 1), (-1, 1), triangle strip.
if ix == u32(2) || ix == u32(3) {
x = 1.0;
} else {
x = -1.0;
}
if ix == u32(1) || ix == u32(2) {
y = -1.0;
} else {
y = 1.0;
}
return vec4<f32>(x, y, 0.0, 1.0);
}

src/lib.rs (new file): 122 lines added

@ -0,0 +1,122 @@
mod audio;
mod graphics;
use winit::window::WindowBuilder;
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen::prelude::wasm_bindgen]
pub fn main(canvas: web_sys::HtmlCanvasElement) {
console_error_panic_hook::set_once();
wasm_bindgen_futures::spawn_local(_main(move |wb| {
log::info!("callback");
use winit::platform::web::WindowBuilderExtWebSys;
wb.with_canvas(Some(canvas))
}));
}
pub async fn _main<F>(wb_platform_specific: F)
where F: FnOnce(WindowBuilder) -> WindowBuilder
{
setup_logger();
log::info!("main");
use winit::event_loop::EventLoop;
// TODO: class and app id on unix
//use winit::platform::unix::WindowBuilderExtUnix;
let event_loop = EventLoop::new();
let window = wb_platform_specific(WindowBuilder::new())
// Arbitrarily chosen as the minimum resolution the game is designed to support (for e.g. UI scaling).
.with_min_inner_size(winit::dpi::LogicalSize { height: 360, width: 640 })
.with_title("Pathland")
.with_maximized(true)
// TODO: hide window until first frame is drawn (default behavior on wayland)
.with_visible(true)
.with_decorations(true)
.build(&event_loop)
.expect("Failed to create window.");
// TODO: window icon, fullscreen, IME position, cursor grab, cursor visibility
let mut graphics = graphics::Graphics::setup(window).await;
//let audio = audio::Audio::setup();
event_loop.run(move |event, target, control_flow| {
use winit::event::*;
*control_flow = winit::event_loop::ControlFlow::Wait;
match event {
Event::WindowEvent { window_id, event } => {
match event {
WindowEvent::CloseRequested => {
std::process::exit(0);
},
WindowEvent::Destroyed => {
std::process::exit(0);
},
WindowEvent::Focused(focused) => {
// TODO: handle focus/unfocus (e.g. pause, resume)
},
WindowEvent::Resized(new_size) => {
graphics.window_resized(new_size)
},
WindowEvent::ScaleFactorChanged { new_inner_size: new_size, .. } => {
graphics.window_resized(*new_size)
},
// TODO: handle user input
_ => {}
}
},
Event::DeviceEvent { device_id, event } => {
// TODO: handle user input
},
Event::MainEventsCleared => {
// TODO: main event loop. queue simulation calculations, screen redrawing, etc.
},
Event::RedrawRequested(_) => {
graphics.draw();
},
Event::LoopDestroyed => {
std::process::exit(0);
},
_ => {}
}
// TODO: What is suspending/resuming? Do I want to support it?
});
}
#[cfg(not(target_arch = "wasm32"))]
fn setup_logger() {
use fern::Dispatch;
use fern::colors::ColoredLevelConfig;
use log::LevelFilter;
Dispatch::new()
.chain(
Dispatch::new()
.format(|out, message, record| {
out.finish(format_args!(
"[{}] {}",
ColoredLevelConfig::default().color(record.level()),
message
));
})
.level(LevelFilter::Warn)
.level_for("pathland", LevelFilter::Info)
.chain(std::io::stderr()))
.chain(
fern::Dispatch::new()
.format(|out, message, record| {
out.finish(format_args!(
"[{}] {}",
record.level(),
message
))
})
.level(LevelFilter::Debug)
.level_for("pathland", LevelFilter::Trace)
// FIXME: linux-specific path
.chain(std::fs::OpenOptions::new().write(true).create(true).truncate(true).open("/tmp/pathland.log").unwrap()))
.apply().unwrap();
}
#[cfg(target_arch = "wasm32")]
fn setup_logger() {
console_log::init().unwrap();
}

src/main.rs

@ -1,104 +1,19 @@
mod audio;
mod graphics;
use winit::window::WindowBuilder;
use std::sync::{Arc, RwLock};
#[tokio::main(flavor = "current_thread")]
async fn main() {
setup_logger();
use winit::event_loop::EventLoop;
//use winit::platform::unix::WindowBuilderExtUnix;
let event_loop = EventLoop::new();
let window = winit::window::WindowBuilder::new()
// Arbitrarily chosen as the minimum resolution the game is designed to support (for e.g. UI scaling).
.with_min_inner_size(winit::dpi::LogicalSize { height: 360, width: 640 })
.with_title("Pathland")
.with_maximized(true)
// TODO: hide window until first frame is drawn (default behavior on wayland)
.with_visible(true)
.with_decorations(true)
//.with_class("pathland".to_string(), "pathland".to_string())
//.with_app_id("pathland".to_string())
.build(&event_loop)
.expect("Failed to create window.");
// TODO: window icon, fullscreen, IME position, cursor grab, cursor visibility
let mut graphics = graphics::Graphics::setup(window).await;
//let audio = audio::Audio::setup();
event_loop.run(move |event, target, control_flow| {
use winit::event::*;
*control_flow = winit::event_loop::ControlFlow::Wait;
match event {
Event::WindowEvent { window_id, event } => {
match event {
WindowEvent::CloseRequested => {
std::process::exit(0);
},
WindowEvent::Destroyed => {
std::process::exit(0);
},
WindowEvent::Focused(focused) => {
// TODO: handle focus/unfocus (e.g. pause, resume)
},
WindowEvent::Resized(new_size) => {
graphics.window_resized(new_size)
},
WindowEvent::ScaleFactorChanged { new_inner_size: new_size, .. } => {
graphics.window_resized(*new_size)
},
// TODO: handle user input
_ => {}
}
},
Event::DeviceEvent { device_id, event } => {
// TODO: handle user input
},
Event::MainEventsCleared => {
// TODO: main event loop. queue simulation calculations, screen redrawing, etc.
},
Event::RedrawRequested(_) => {
graphics.draw();
},
Event::LoopDestroyed => {
std::process::exit(0);
},
_ => {}
}
// TODO: What is suspending/resuming? Do I want to support it?
});
fn main() {
tokio::runtime::Builder::new_current_thread().build().unwrap().block_on(pathland::_main(wb_platform_specific));
}
fn setup_logger() {
use fern::Dispatch;
use fern::colors::ColoredLevelConfig;
use log::LevelFilter;
#[cfg(unix)]
fn wb_platform_specific(wb: WindowBuilder) -> WindowBuilder {
use winit::platform::unix::WindowBuilderExtUnix;
wb
.with_class("pathland".to_string(), "pathland".to_string())
.with_app_id("pathland".to_string())
Dispatch::new()
.chain(
Dispatch::new()
.format(|out, message, record| {
out.finish(format_args!(
"[{}] {}",
ColoredLevelConfig::default().color(record.level()),
message
));
})
.level(LevelFilter::Warn)
.level_for("pathland", LevelFilter::Info)
.chain(std::io::stderr()))
.chain(
fern::Dispatch::new()
.format(|out, message, record| {
out.finish(format_args!(
"[{}] {}",
record.level(),
message
))
})
.level(LevelFilter::Debug)
.level_for("pathland", LevelFilter::Trace)
.chain(std::fs::OpenOptions::new().write(true).create(true).truncate(true).open("/tmp/pathland.log").unwrap()))
.apply().unwrap();
}
#[cfg(not(unix))]
fn wb_platform_specific(wb: WindowBuilder) -> WindowBuilder {
wb
}

shader.wgsl

@ -1,3 +1,20 @@
struct VertexInput {
@location(0) position: vec2<f32>,
}
struct VertexOutput {
@builtin(position) position: vec4<f32>,
}
@vertex
fn vs_main(
in: VertexInput,
) -> VertexOutput {
var out: VertexOutput;
out.position = vec4<f32>(in.position, 0.0, 1.0);
return out;
}
struct Uniforms {
dimensions: vec2<f32>,
field_of_view: f32,
@ -7,57 +24,103 @@ struct Uniforms {
@binding(0)
var<uniform> uniforms: Uniforms;
const PI: f32 = 3.14159265358979323846264338327950288; // 3.14159274
let PI: f32 = 3.14159265358979323846264338327950288; // 3.14159274
struct Ray {
pos: vec3<f32>, // POSition (aka the origin)
dir: vec3<f32>, // DIRection (normalized)
}
/// Map a rectangle with the provided dimensions onto a square from (-1,-1) to (1,1).
/// This is a linear scaling transformation. Some of the output square will
/// be cropped if the rectangle dimensions are not square.
///
/// We use this function because the coordinates provided to our shader are
/// pixel coordinates, but we want the shader to behave the same way
/// regardless of the shape or size of the window.
fn rectangle_to_square(rect: vec2<f32>, dims: vec2<f32>) -> vec2<f32> {
var sq = rect / dims * 2.0 - 1.0;
// Scale the rectangle's smaller aspect ratio to make the coordinates square.
// For example, a 16:9 rectangle will have an x coordinate from -1 to 1 and
/// Convert from pixel coordinates to window-independent square coordinates.
///
/// Input coordinates:
/// x: from 0 (left) to dimensions.x (right)
/// y: from 0 (bottom) to dimensions.y (top)
///
/// Output coordinates:
/// x: from -1 (left) to 1 (right)
/// y: from -1 (down) to 1 (up)
///
/// The output coordinates are square and independent of the
/// window's dimensions and aspect ratio. Some of the image
/// will be cropped if the window's aspect ratio is not square.
fn pixel_to_square(pixel: vec2<f32>) -> vec2<f32> {
let square = ((pixel / uniforms.dimensions) - 0.5) * 2.0;
// Scale the window's smaller aspect ratio to make the coordinates square.
// For example, a 16:9 window will have an x coordinate from -1 to 1 and
// a y coordinate from -9/16ths to 9/16ths. The rest of the image lying outside
// of that range will be cropped out.
if (dims.x > dims.y) {
return vec2<f32>(sq.x, sq.y * dims.y / dims.x);
if (uniforms.dimensions.x > uniforms.dimensions.y) {
return vec2<f32>(square.x, square.y * uniforms.dimensions.y / uniforms.dimensions.x);
} else {
return vec2<f32>(sq.x * dims.x / dims.y, sq.y);
return vec2<f32>(square.x * uniforms.dimensions.x / uniforms.dimensions.y, square.y);
}
}
/// Map from a square grid to the surface of a (double-covered) sphere.
/// We use this as a (curvilinear) perspective projection for the camera.
fn project(grid: vec2<f32>) -> vec3<f32> {
// The real plane is the product of two lines, R x R, and the torus
// is the product of two circles, S^1 x S^1. Therefore, we can map
// from the real plane to the torus by taking each axis modulo tau.
/// Project a coordinate on the unit circle onto the unit hemisphere.
/// This is used for curvilinear perspective.
///
/// Coordinates:
/// x: from -1 (90 degrees left) to 1 (90 degrees right)
/// y: from -1 (90 degrees down) to 1 (90 degrees up)
///
/// TODO: add support for the usual, non-curvilinear perspective projection
/// (and possibly other projections, just for fun?)
fn project(coord_: vec2<f32>) -> vec3<f32> {
var coord = coord_;
// This projection only supports coordinates within the unit circle
// and only projects into the unit hemisphere. Ideally we'd want
// some sort of extension which takes points outside the unit circle
// and projects them somewhere behind you (with the point at infinity
// being directly behind you), but I haven't come up with any reasonable
// extension of this perspective system which behaves in that manner.
//
// If we set the major radius of the torus to 0, then the torus
// becomes a double-covered sphere. The points on the double-covered
// sphere are identical to the points on the regular sphere,
// but the coordinate system is different:
//
// Rotate left-right and what you see behind you is backwards; rotate
// up-down and what you see is upside-down. This means that the projection
// is continous, and a translation on the grid corresponds with
// a rotation of the camera. (Compare with the behavior of a mirror,
// which is z-inverted, resulting in text appearing backwards.)
// What we can do instead is *tile* the projection so that adjacent projections
// are a mirrored projection of the unit hemisphere *behind* you.
// This is a logical extension because the projection becomes continuous
// along the x and y axis (you're just looking around in perfect circles),
// and it allows you to view the entire space. The main problem to this approach
// is that all of the space between the tiled circles is still undefined,
// but this is still the best solution which I'm aware of.
// The parametric definition of a torus with R = 0 and r = 1.
return vec3<f32>(
cos(grid.x) * cos(grid.y),
cos(grid.x) * sin(grid.y),
sin(grid.x)
);
var dir: f32 = 1.; // the sign of the direction we're facing: 1 forward, -1 backward.
// Tile coordinates:
// (0-2, 0-2): forward
// (2-4, 0-2): backward, left/right mirrored
// (0-2, 2-4): backward, up/down mirrored
// (2-4, 2-4): forward, left/right and up/down mirrored
// FIXME: Use modulus which handles negatives properly so I don't have to arbitrarily add 8.
coord = (coord + 1. + 8.) % 4.;
// mirror/reverse and map back into 0 to 2 range
if (coord.x > 2.) {
coord.x = 4. - coord.x;
dir = -dir;
}
if (coord.y > 2.) {
coord.y = 4. - coord.y;
dir = -dir;
}
// map back into -1 to 1 range
coord = coord - 1.;
// Avoid NaN because implementations are allowed to assume it won't occur.
let preZ = 1. - coord.x*coord.x - coord.y*coord.y;
// We can "define" the remaining undefined region of the screen
// by clamping it to the nearest unit circle. This is sometimes
// better than nothing, though it can also be a lot worse because
// we still have to actually *render* all of those pixels.
// TODO: Add an option to allow stretching into a square instead of clamping?
// I imagine things could get pretty badly warped, but maybe it could be useful?
// TODO: Is this clamping behavior correct? It doesn't look like it actually is, tbh.
if (preZ < 0.) {
return vec3<f32>(normalize(coord), 0.);
}
return normalize(vec3<f32>(coord, dir*sqrt(preZ)));
}
/// After converting pixel coordinates to screen coordinates, we still have a problem:
@ -68,7 +131,7 @@ fn camera_project(square: vec2<f32>) -> Ray {
// Our coordinates already range from -1 to 1, corresponding with the
// edges of the window, but we want the edges of the window to correspond
// with the angle of the FOV instead.
let circle = square * uniforms.field_of_view;
let circle = square * uniforms.field_of_view / PI;
let sphere = project(circle);
return Ray(vec3<f32>(0.), sphere);
}
@ -120,8 +183,8 @@ fn clamp_value(_color: vec3<f32>) -> vec3<f32> {
}
@fragment
fn fs_main(@builtin(position) position: vec4<f32>) -> @location(0) vec4<f32> {
let ray = camera_project(rectangle_to_square(position.xy, uniforms.dimensions));
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
let ray = camera_project(pixel_to_square(in.position.xy));
var color = ray.dir / 2.0 + 0.5;
// TODO: Separate postprocessing pass.
@ -151,7 +214,7 @@ fn fs_main(@builtin(position) position: vec4<f32>) -> @location(0) vec4<f32> {
//
// FIXME: This shader uses linear color space.
return dither(
vec2<u32>(u32(position.x), u32(position.y)),
vec2<u32>(u32(in.position.x), u32(in.position.y)),
vec4<f32>(color, 1.0)
);
}
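
Written out, the projection on the wasm side of this diff (pixel_to_square -> camera_project -> project) works as follows. With D the window dimensions in pixels, p the fragment's pixel coordinate, and fov the field_of_view uniform:

    s = 2*p/D - 1, with the coordinate of the smaller window axis then scaled by min(D_x, D_y)/max(D_x, D_y)    (pixel_to_square)
    c = s * fov / pi                                                                                            (camera_project)
    c -> (c + 1) mod 4 per axis; components in (2, 4) are mirrored back into (0, 2), each mirror flipping the facing sign dir; then shift back to [-1, 1]    (project: tiling)
    d = (c_x, c_y, dir * sqrt(1 - c_x^2 - c_y^2))    if c_x^2 + c_y^2 <= 1, already unit length                 (project: hemisphere)
    d = (c_x, c_y, 0) / sqrt(c_x^2 + c_y^2)          otherwise, clamped to the horizon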

www/bootstrap.js (new file): 5 lines added

@ -0,0 +1,5 @@
// A dependency graph that contains any wasm must all be imported
// asynchronously. This `bootstrap.js` file does the single async import, so
// that no one else needs to worry about it again.
import("./index.js")
.catch(e => console.error("Error importing `index.js`:", e));

www/index.html (new file): 7 lines added

@ -0,0 +1,7 @@
<!DOCTYPE html>
<html lang="en">
<meta charset="utf-8">
<title>pathland</title>
<noscript>This page contains webassembly and javascript content, please enable javascript in your browser.</noscript>
<canvas id="canvas"></canvas>
<script src="bootstrap.js"></script>

www/index.js (new file): 4 lines added

@ -0,0 +1,4 @@
import * as pathland from "pathland";
let canvas = document.getElementById("canvas");
pathland.main(document.getElementById("canvas"));

www/package-lock.json (generated, new file): 7604 lines added

File diff suppressed because it is too large.

www/package.json (new file): 28 lines added

@ -0,0 +1,28 @@
{
"name": "pathland",
"version": "0.1.0",
"main": "index.js",
"scripts": {
"build": "webpack --config webpack.config.js",
"start": "webpack-dev-server"
},
"repository": {
"type": "git",
"url": "git+https://github.com/jamestmartin/pathland"
},
"keywords": [
"webassembly",
"wasm",
"rust",
"webpack"
],
"dependencies": {
"pathland": "file:../pkg"
},
"devDependencies": {
"webpack": "^5.73.0",
"webpack-cli": "^4.10.0",
"webpack-dev-server": "^4.9.3",
"copy-webpack-plugin": "^5.1.1"
}
}

www/webpack.config.js (new file): 18 lines added

@ -0,0 +1,18 @@
const CopyWebpackPlugin = require("copy-webpack-plugin");
const path = require('path');
module.exports = {
entry: "./bootstrap.js",
output: {
path: path.resolve(__dirname, "dist"),
filename: "bootstrap.js",
hashFunction: "xxhash64",
},
mode: "development",
plugins: [
new CopyWebpackPlugin(['index.html'])
],
experiments: {
syncWebAssembly: true
}
};