You can not select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
474 lines
18 KiB
474 lines
18 KiB
const std = @import("std");

// Image loading (PNG decode) and pixel formats.
const zigimg = @import("zigimg");
const Rgb24 = zigimg.color.Rgb24;
const Rgba32 = zigimg.color.Rgba32;

// mach engine: windowing, device and GPU abstraction.
const core = @import("mach").core;
const gpu = core.gpu;

// SIMD math library (row-major matrices).
const zm = @import("zmath");
const vec = zm.f32x4;
const Mat = zm.Mat;

const App = @import("./main.zig");

// Mesh/shape helpers shared with the rest of the project.
const primitives = @import("./primitives.zig");
const VertexData = primitives.VertexData;
const PrimitiveData = primitives.PrimitiveData;

// ECS (flecs) bindings; the world context carries no extra state (void).
const flecszigble = @import("flecs-zig-ble");
const Context = flecszigble.Context(void);
const Entity = Context.Entity;
const Iter = Context.Iter;

const flecs = flecszigble.flecs;
const OnStore = flecs.pipeline.OnStore;
|
/// ECS component holding an entity's transformation matrix.
const Transform = struct { value: Mat };
|
/// ECS component describing a perspective camera's projection parameters.
const CameraPerspective = struct {
    /// Vertical field of view (in degrees).
    field_of_view: f32,
    /// The near clip plane distance. Objects closer than this value are
    /// "cut off". Should be set to a positive value close to zero, depending
    /// on how close rendered objects typically get to the camera.
    near_plane: f32,
    /// The far clip plane distance. Objects further than this value are
    /// "cut off" and therefore won't be visible. Should be set to a larger
    /// positive value, depending on how far objects can get from the camera.
    far_plane: f32,
};
|
|
|
/// Holds data needed to render an object in a rendering pass.
const ObjectData = struct {
    /// Bind group which associates model-related buffers with parameters
    /// in our shader. This one is accessible via `@group(1)` in our shader.
    model_bind_group: *gpu.BindGroup,
    /// Reference to the primitive (shape or model) to render for this object.
    primitive: *PrimitiveData,
};
|
|
|
/// This file is itself a struct: the renderer's instance data.
const Renderer = @This();

/// Back-reference to the owning application (allocator, ECS world, RNG).
app: *App,
/// Accumulated time in seconds, advanced each frame in `render`.
time: f32 = 0.0,

pipeline: *gpu.RenderPipeline,
/// Uniform buffer holding the combined view-projection matrix.
view_proj_buffer: *gpu.Buffer,
/// Bind group exposing `view_proj_buffer` as `@group(0)` in the shader.
camera_bind_group: *gpu.BindGroup,

// Depth buffer resources; (re)created by `recreateDepthTexture`, so they
// start out null and are optional.
depth_texture: ?*gpu.Texture = null,
depth_texture_view: ?*gpu.TextureView = null,

/// Primitives (meshes) available for objects to reference.
primitive_data: []PrimitiveData,
/// Per-object rendering data; each entry points into `primitive_data`.
object_data: []ObjectData,
|
/// Creates and fully initializes the renderer: compiles the shader, builds
/// the render pipeline, uploads the default texture plus per-object uniform
/// buffers and bind groups for a grid of objects, and registers the camera
/// entity and the "Render" system with the ECS world.
///
/// Caller owns the returned pointer and must release it with `deinit`.
pub fn init(app: *App) !*Renderer {
    // A string buffer used to format objects' labels.
    var label_buffer: [256]u8 = undefined;

    // Embed `default.png` texture in the executable and upload it to the GPU.
    const texture_bytes = @embedFile("./gfx/default.png");
    const texture_view = try loadTexture(app.allocator, texture_bytes);
    defer texture_view.release();

    const shader_module = core.device.createShaderModuleWGSL("shader.wgsl", @embedFile("shader.wgsl"));
    defer shader_module.release();

    // Define layouts for our bind groups and pipeline.
    // This helps find errors with missing or mismatching shader properties.
    const camera_bind_group_layout = core.device.createBindGroupLayout(&gpu.BindGroupLayout.Descriptor.init(.{
        .label = "Camera Bind Group Layout",
        .entries = &.{
            gpu.BindGroupLayout.Entry.buffer(0, .{ .vertex = true }, .uniform, false, 0),
        },
    }));
    defer camera_bind_group_layout.release();

    const model_bind_group_layout = core.device.createBindGroupLayout(&gpu.BindGroupLayout.Descriptor.init(.{
        .label = "Model Bind Group Layout",
        .entries = &.{
            gpu.BindGroupLayout.Entry.sampler(0, .{ .fragment = true }, .filtering),
            gpu.BindGroupLayout.Entry.texture(1, .{ .fragment = true }, .float, .dimension_2d, false),
            gpu.BindGroupLayout.Entry.buffer(2, .{ .vertex = true }, .uniform, false, 0),
            gpu.BindGroupLayout.Entry.buffer(3, .{ .vertex = true }, .uniform, false, 0),
        },
    }));
    defer model_bind_group_layout.release();

    const pipeline_layout = core.device.createPipelineLayout(&gpu.PipelineLayout.Descriptor.init(.{
        .bind_group_layouts = &.{
            camera_bind_group_layout,
            model_bind_group_layout,
        },
    }));
    defer pipeline_layout.release();

    // Set up rendering pipeline.
    const pipeline = core.device.createRenderPipeline(&.{
        .layout = pipeline_layout,
        .vertex = gpu.VertexState.init(.{
            .module = shader_module,
            .entry_point = "vertex_main",
            .buffers = &.{
                gpu.VertexBufferLayout.init(.{
                    .array_stride = @sizeOf(VertexData),
                    .step_mode = .vertex,
                    .attributes = &.{
                        .{ .format = .float32x3, .shader_location = 0, .offset = @offsetOf(VertexData, "position") },
                        .{ .format = .float32x2, .shader_location = 1, .offset = @offsetOf(VertexData, "uv") },
                    },
                }),
            },
        }),
        .primitive = .{
            .topology = .triangle_list,
            .front_face = .ccw,
            .cull_mode = .back,
        },
        .depth_stencil = &.{
            .format = .depth24_plus,
            .depth_write_enabled = .true,
            .depth_compare = .less,
        },
        .fragment = &gpu.FragmentState.init(.{
            .module = shader_module,
            .entry_point = "frag_main",
            .targets = &.{.{ .format = core.descriptor.format }},
        }),
    });

    // Set up scene related uniform buffers and bind groups.
    const view_proj_buffer = createAndWriteBuffer(zm.Mat, &.{zm.identity()}, .{ .copy_dst = true, .uniform = true });

    // "Bind groups" are used to associate data from buffers with shader parameters.
    // So for example the `camera_bind_group` is accessible via `@group(0)` in our shader.
    const camera_bind_group = core.device.createBindGroup(&gpu.BindGroup.Descriptor.init(.{
        .label = "Camera Bind Group",
        .layout = camera_bind_group_layout,
        .entries = &.{
            gpu.BindGroup.Entry.buffer(0, view_proj_buffer, 0, @sizeOf(zm.Mat)),
        },
    }));

    // Create a sampler that tells the GPU how to sample pixels from a texture.
    // Includes filtering (nearest-neighbor, linear, ..) and wrapping behavior.
    const texture_sampler = core.device.createSampler(&.{});
    defer texture_sampler.release();

    // Set up the primitives we want to render.
    //
    // Using `dupe` to allocate a slice here allows easily adjusting the
    // primitives to use, without changing the type of `primitive_data`.
    const primitive_data = try app.allocator.dupe(PrimitiveData, &.{
        // primitives.createTrianglePrimitive(1.0),
        // primitives.createSquarePrimitive(0.8),
        // primitives.createCirclePrimitive(0.5, 24),
        primitives.createCubePrimitive(0.65),
        primitives.createPyramidPrimitive(0.75),
    });

    // Set up object related uniform buffers and bind groups.
    // This uploads data to the GPU about all the object we
    // want to render, such as their location and color.
    const grid_size = 8;

    // Allocate a slice to store as many ObjectData as we want to create.
    //
    // Using a slice instead of an array means that we could change how
    // many object we want to render at compile time, however it requires
    // allocating, and later freeing, memory to store the slice.
    const object_data = try app.allocator.alloc(ObjectData, grid_size * grid_size);

    // Note that for loops in Zig are a little different than you might
    // know from other languages. They only look over arrays, slices,
    // tuples and ranges, potentially multiple at once.
    for (object_data, 0..) |*object, i| {
        // Normalized grid coordinates in [0, 1] for this object's cell.
        const grid_max: f32 = @floatFromInt(grid_size - 1);
        const x = @as(f32, @floatFromInt(i % grid_size)) / grid_max;
        const z = @as(f32, @floatFromInt(i / grid_size)) / grid_max;

        const rotation = zm.rotationY(std.math.tau * (x + z) / 2.0);
        const translation = zm.translation((x - 0.5) * grid_size, 0, (z - 0.5) * grid_size);
        const model_matrix = zm.mul(rotation, translation);

        // Make the object have a color depending on its location in the grid.
        // These values are layed out so each corner is red, green, blue and black.
        const model_color = .{
            std.math.clamp(1.0 - x - z, 0.0, 1.0),
            std.math.clamp(x - z, 0.0, 1.0),
            std.math.clamp(z - x, 0.0, 1.0),
        };

        // Transposed because WebGPU expects column-major matrices while
        // zmath produces row-major ones (see also the note in `render`).
        const model_matrix_buffer = createAndWriteBuffer(zm.Mat, &.{zm.transpose(model_matrix)}, .{ .copy_dst = true, .uniform = true });
        defer model_matrix_buffer.release();

        const model_color_buffer = createAndWriteBuffer([3]f32, &.{model_color}, .{ .copy_dst = true, .uniform = true });
        defer model_color_buffer.release();

        const model_bind_group_label = try std.fmt.bufPrintZ(&label_buffer, "Model Bind Group {d}", .{i});
        const model_bind_group = core.device.createBindGroup(&gpu.BindGroup.Descriptor.init(.{
            .label = model_bind_group_label,
            .layout = model_bind_group_layout,
            .entries = &.{
                gpu.BindGroup.Entry.sampler(0, texture_sampler),
                gpu.BindGroup.Entry.textureView(1, texture_view),
                gpu.BindGroup.Entry.buffer(2, model_matrix_buffer, 0, @sizeOf(zm.Mat)),
                gpu.BindGroup.Entry.buffer(3, model_color_buffer, 0, @sizeOf([3]f32)),
            },
        }));

        // Pick a "random" primitive to use for this object.
        const primitive_index = app.random.int(usize) % primitive_data.len;
        const primitive = &primitive_data[primitive_index];

        // The `*object` syntax gets us a pointer to each element in the
        // `object_data` slice, allowing us to override it within the loop.
        object.* = .{
            .model_bind_group = model_bind_group,
            .primitive = primitive,
        };
    }

    // Register components necessary for the camera.
    _ = try app.world.component("Transform", Transform);
    _ = try app.world.component("CameraPerspective", CameraPerspective);

    const camera_entity = try app.world.entity(
        .{ .name = "Camera", .symbol = "Camera" },
        .{ Transform, CameraPerspective },
    );
    camera_entity.set(CameraPerspective, .{
        .field_of_view = 45.0,
        .near_plane = 0.05,
        .far_plane = 80.0,
    });

    // Run `render` during the OnStore phase, reading the camera's
    // perspective and writing back its transform each frame.
    const render_expr = "App, [in] CameraPerspective(Camera), [out] Transform(Camera)";
    _ = try app.world.system("Render", render, OnStore, render_expr);

    const result = try app.allocator.create(Renderer);
    result.* = .{
        .app = app,
        .pipeline = pipeline,
        .view_proj_buffer = view_proj_buffer,
        .camera_bind_group = camera_bind_group,
        .primitive_data = primitive_data,
        .object_data = object_data,
    };

    // Initialize the depth texture.
    // This is called also whenever the window is resized.
    result.recreateDepthTexture();

    return result;
}
|
|
|
/// Releases all GPU resources and heap allocations owned by the renderer,
/// including the Renderer allocation itself. `self` is invalid afterwards.
pub fn deinit(self: *Renderer) void {
    // Using `defer` here, so we can specify resources we
    // want to free in the order they were created in `init`.
    // (defers run in reverse, so destruction happens last-to-first.)
    defer self.app.allocator.destroy(self);

    defer self.pipeline.release();
    defer self.view_proj_buffer.release();
    defer self.camera_bind_group.release();

    // Free the primitives' GPU buffers before freeing the slice itself.
    defer self.app.allocator.free(self.primitive_data);
    defer for (self.primitive_data) |p| {
        p.vertex_buffer.release();
        p.index_buffer.release();
    };
    defer self.app.allocator.free(self.object_data);
    defer for (self.object_data) |o| {
        o.model_bind_group.release();
    };

    // Depth resources are optional: they may be null if never created.
    defer if (self.depth_texture) |t| t.release();
    defer if (self.depth_texture_view) |v| v.release();
}
|
|
|
/// Called when the window is resized.
pub fn resize(self: *Renderer) void {
    // Recreate depth texture with the proper size, otherwise
    // the application may crash when the window is resized.
    self.recreateDepthTexture();
}
|
|
|
/// ECS system callback (registered in `init` for the OnStore phase) that
/// renders one frame: orbits the camera, updates the view-projection
/// uniform, and draws every object into the swap chain's back buffer.
pub fn render(it: Iter) void {
    // Fields match the system expression:
    // "App, [in] CameraPerspective(Camera), [out] Transform(Camera)".
    const app = it.field(*App, 1)[0];
    const camera_perspective = it.field(CameraPerspective, 2)[0];
    const camera_transform = &it.field(Transform, 3)[0];

    const self = app.renderer;
    self.time += it.deltaTime();

    // Set up a view matrix from the camera transform.
    // This moves everything to be relative to the camera.
    // TODO: Actually implement camera transform instead of hardcoding a look-at matrix.
    // const view_matrix = zm.inverse(app.camera_transform);
    // Orbit the camera around the origin, one revolution every 20 seconds.
    const camera_distance = 8.0;
    const x = @cos(self.time * std.math.tau / 20) * camera_distance;
    const z = @sin(self.time * std.math.tau / 20) * camera_distance;
    const camera_pos = vec(x, 2.0, z, 1.0);
    const view_matrix = zm.lookAtLh(camera_pos, vec(0, 0, 0, 1), vec(0, 1, 0, 1));

    // Setting the transform here doesn't do anything because it's not used
    // anywhere. In the future we would want to set the camera transform
    // outside of the rendering step, and then get and use it here, instead.
    camera_transform.* = .{ .value = view_matrix };
    // TODO: Not sure if this is the proper transform, or actually inverted.

    // Set up a projection matrix using the size of the window.
    // The perspective projection will make things further away appear smaller.
    const width: f32 = @floatFromInt(core.descriptor.width);
    const height: f32 = @floatFromInt(core.descriptor.height);
    const proj_matrix = zm.perspectiveFovLh(
        std.math.degreesToRadians(f32, camera_perspective.field_of_view),
        width / height,
        camera_perspective.near_plane,
        camera_perspective.far_plane,
    );

    const view_proj_matrix = zm.mul(view_matrix, proj_matrix);

    // Get back buffer texture to render to.
    const back_buffer_view = core.swap_chain.getCurrentTextureView().?;
    defer back_buffer_view.release();
    // Once rendering is done (hence `defer`), swap back buffer to the front to display.
    defer core.swap_chain.present();

    const render_pass_info = gpu.RenderPassDescriptor.init(.{
        .color_attachments = &.{.{
            .view = back_buffer_view,
            // Clear the screen to black before drawing.
            .clear_value = std.mem.zeroes(gpu.Color),
            .load_op = .clear,
            .store_op = .store,
        }},
        .depth_stencil_attachment = &.{
            .view = self.depth_texture_view.?,
            .depth_load_op = .clear,
            .depth_store_op = .store,
            // 1.0 is the "farthest" depth value with `.less` comparison.
            .depth_clear_value = 1.0,
        },
    });

    // Create a `WGPUCommandEncoder` which provides an interface for recording GPU commands.
    const encoder = core.device.createCommandEncoder(null);
    defer encoder.release();

    // Write to the scene uniform buffer for this set of commands.
    encoder.writeBuffer(self.view_proj_buffer, 0, &[_]zm.Mat{
        // All matrices the GPU has to work with need to be transposed,
        // because WebGPU uses column-major matrices while zmath is row-major.
        zm.transpose(view_proj_matrix),
    });

    {
        const pass = encoder.beginRenderPass(&render_pass_info);
        defer pass.release();
        defer pass.end();

        pass.setPipeline(self.pipeline);
        pass.setBindGroup(0, self.camera_bind_group, &.{});

        for (self.object_data) |object| {
            // Set the vertex and index buffer used to render this
            // object to the ones from the primitive it wants to use.
            const prim = object.primitive;
            pass.setVertexBuffer(0, prim.vertex_buffer, 0, prim.vertex_count * @sizeOf(VertexData));
            pass.setIndexBuffer(prim.index_buffer, .uint32, 0, prim.index_count * @sizeOf(u32));

            // Set the bind group for the object we want to render.
            pass.setBindGroup(1, object.model_bind_group, &.{});

            // Draw a number of triangles as specified in the index buffer.
            pass.drawIndexed(prim.index_count, 1, 0, 0, 0);
        }
    }

    // Finish recording commands, creating a `WGPUCommandBuffer`.
    const command = encoder.finish(null);
    defer command.release();

    // Submit the command(s) to the GPU.
    core.queue.submit(&.{command});
}
|
|
|
/// Loads a texture from the provided buffer and uploads it to the GPU.
///
/// Supports rgba32 and rgb24 source images (rgb24 is converted on the fly);
/// panics on any other pixel format. The `allocator` is only used for
/// temporary decode/conversion storage. Caller owns the returned view.
pub fn loadTexture(allocator: std.mem.Allocator, buffer: []const u8) !*gpu.TextureView {
    var img = try zigimg.Image.fromMemory(allocator, buffer);
    defer img.deinit();
    const img_size = gpu.Extent3D{
        .width = @intCast(img.width),
        .height = @intCast(img.height),
    };

    const texture = core.device.createTexture(&.{
        .size = img_size,
        .format = .rgba8_unorm,
        .usage = .{
            .texture_binding = true,
            .copy_dst = true,
            .render_attachment = true,
        },
    });
    // The returned view keeps the texture alive; release our own handle.
    defer texture.release();

    const data_layout = gpu.Texture.DataLayout{
        // Texture is uploaded as rgba8, hence 4 bytes per pixel.
        .bytes_per_row = @intCast(img.width * 4),
        .rows_per_image = @intCast(img.height),
    };
    switch (img.pixels) {
        .rgba32 => |pixels| {
            core.queue.writeTexture(&.{ .texture = texture }, &data_layout, &img_size, pixels);
        },
        .rgb24 => |pixels_rgb24| {
            // Convert to rgba32 first, since the texture format has an alpha channel.
            const pixels = try rgb24ToRgba32(allocator, pixels_rgb24);
            defer allocator.free(pixels);
            core.queue.writeTexture(&.{ .texture = texture }, &data_layout, &img_size, pixels);
        },
        else => std.debug.panic("Unsupported image color format {s}", .{@tagName(img.pixels)}),
    }

    return texture.createView(&.{});
}
|
|
|
/// Converts a raw 24-bit RGB pixel buffer to 32-bit RGBA (alpha = 255).
/// Caller owns the returned slice and must free it with `allocator`.
fn rgb24ToRgba32(allocator: std.mem.Allocator, in: []const Rgb24) ![]Rgba32 {
    const out = try allocator.alloc(Rgba32, in.len);
    // Iterate both slices in parallel (they have equal length by construction).
    for (in, out) |src, *dest|
        dest.* = .{ .r = src.r, .g = src.g, .b = src.b, .a = 255 };
    return out;
}
|
|
|
/// Creates a depth texture. This is used to ensure that when things are
/// rendered, an object behind another won't draw over one in front, simply
/// because it was rendered at a later point in time.
pub fn recreateDepthTexture(self: *Renderer) void {
    // Release previous depth buffer and view, if any.
    if (self.depth_texture) |t| t.release();
    if (self.depth_texture_view) |v| v.release();

    // Size must match the window's current framebuffer dimensions,
    // which is why this is re-run on every resize.
    self.depth_texture = core.device.createTexture(&.{
        .label = "Depth Texture",
        .usage = .{ .render_attachment = true },
        .size = .{ .width = core.descriptor.width, .height = core.descriptor.height },
        .format = .depth24_plus,
    });
    self.depth_texture_view = self.depth_texture.?.createView(null);
}
|
|
|
/// Creates a buffer on the GPU with the specified usage
/// flags and immediately fills it with the provided data.
///
/// `usage` must include `.copy_dst` for the queued write to be valid.
/// Caller owns the returned buffer and must `release()` it.
pub fn createAndWriteBuffer(
    comptime T: type,
    data: []const T,
    usage: gpu.Buffer.UsageFlags,
) *gpu.Buffer {
    const buffer = core.device.createBuffer(&.{
        .size = data.len * @sizeOf(T),
        .usage = usage,
        .mapped_at_creation = .false,
    });
    // Queued write: data is uploaded when the queue processes it.
    core.queue.writeBuffer(buffer, 0, data);
    return buffer;
}
|
|
|