@@ -11,20 +11,19 @@ const zm = @import("zmath");
 const vec = zm.f32x4;
 const Mat = zm.Mat;
 
-const App = @import("./main.zig");
+const App = @import("./App.zig");
 const primitives = @import("./primitives.zig");
 const VertexData = primitives.VertexData;
 const PrimitiveData = primitives.PrimitiveData;
 
 const flecszigble = @import("flecs-zig-ble");
+const flecs = flecszigble.flecs;
 const Context = flecszigble.Context(void);
 const Entity = Context.Entity;
 const Iter = Context.Iter;
 
-const flecs = flecszigble.flecs;
-const OnStore = flecs.pipeline.OnStore;
 
 const Transform = struct { value: Mat };
 
 const CameraPerspective = struct {
     /// Vertical field of view (in degrees).
@@ -231,8 +230,8 @@ pub fn init(app: *App) !*Renderer {
     }
 
     // Register components necessary for the camera.
-    _ = try app.world.component("Transform", Transform);
-    _ = try app.world.component("CameraPerspective", CameraPerspective);
+    _ = try app.world.component(Transform);
+    _ = try app.world.component(CameraPerspective);
 
     const camera_entity = try app.world.entity(
         .{ .name = "Camera", .symbol = "Camera" },
|
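The component registration above no longer takes a name string: passing just the type implies the name is now derived from the Zig type itself. A minimal sketch of how such a helper might do that (the `componentName` function and the trimming logic are hypothetical, not the actual flecs-zig-ble internals):

    const std = @import("std");

    // Hypothetical: derive a component's registration name from its type,
    // so call sites pass `Transform` rather than ("Transform", Transform).
    fn componentName(comptime T: type) []const u8 {
        const full = @typeName(T); // e.g. "Renderer.Transform"
        const start = if (std.mem.lastIndexOfScalar(u8, full, '.')) |dot| dot + 1 else 0;
        return full[start..];
    }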
@@ -244,8 +243,7 @@ pub fn init(app: *App) !*Renderer {
         .far_plane = 80.0,
     });
 
-    const render_expr = "App, [in] CameraPerspective(Camera), [out] Transform(Camera)";
-    _ = try app.world.system("Render", render, OnStore, render_expr);
+    _ = try app.world.system(Render);
 
     const result = try app.allocator.create(Renderer);
     result.* = .{
|
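Registering the system as a single struct bundles what the removed call passed as four positional arguments: the name, the callback, the pipeline phase, and the query expression. In the expression, `App($)` reads the `App` singleton, `(Camera)` sources a term from the entity named `Camera`, and `[in]`/`[out]` declare access so the scheduler knows what the system reads and writes. A rough sketch of how a `system(S)` helper could unpack such a struct with comptime reflection and forward it to the old four-argument form (illustrative only; the real flecs-zig-ble implementation may differ):

    fn registerSystem(world: anytype, comptime S: type) !void {
        // Each declaration stands in for one of the old positional arguments.
        comptime {
            if (!@hasDecl(S, "phase")) @compileError("system struct needs a `phase` decl");
            if (!@hasDecl(S, "expr")) @compileError("system struct needs an `expr` decl");
            if (!@hasDecl(S, "callback")) @compileError("system struct needs a `callback` decl");
        }
        _ = try world.system(@typeName(S), S.callback, S.phase, S.expr);
    }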
@@ -293,105 +291,110 @@ pub fn resize(self: *Renderer) void {
     self.recreateDepthTexture();
 }
 
-pub fn render(it: Iter) void {
-    const app = it.field(*App, 1)[0];
-    const camera_perspective = it.field(CameraPerspective, 2)[0];
-    const camera_transform = &it.field(Transform, 3)[0];
-
-    const self = app.renderer;
-    self.time += it.deltaTime();
-
-    // Set up a view matrix from the camera transform.
-    // This moves everything to be relative to the camera.
-    // TODO: Actually implement camera transform instead of hardcoding a look-at matrix.
-    // const view_matrix = zm.inverse(app.camera_transform);
-    const camera_distance = 8.0;
-    const x = @cos(self.time * std.math.tau / 20) * camera_distance;
-    const z = @sin(self.time * std.math.tau / 20) * camera_distance;
-    const camera_pos = vec(x, 2.0, z, 1.0);
-    const view_matrix = zm.lookAtLh(camera_pos, vec(0, 0, 0, 1), vec(0, 1, 0, 1));
-
-    // Setting the transform here doesn't do anything because it's not used
-    // anywhere. In the future we would want to set the camera transform
-    // outside of the rendering step, and then get and use it here, instead.
-    camera_transform.* = .{ .value = view_matrix };
-    // TODO: Not sure if this is the proper transform, or actually inverted.
-
-    // Set up a projection matrix using the size of the window.
-    // The perspective projection will make things further away appear smaller.
-    const width: f32 = @floatFromInt(core.descriptor.width);
-    const height: f32 = @floatFromInt(core.descriptor.height);
-    const proj_matrix = zm.perspectiveFovLh(
-        std.math.degreesToRadians(f32, camera_perspective.field_of_view),
-        width / height,
-        camera_perspective.near_plane,
-        camera_perspective.far_plane,
-    );
-
-    const view_proj_matrix = zm.mul(view_matrix, proj_matrix);
-
-    // Get back buffer texture to render to.
-    const back_buffer_view = core.swap_chain.getCurrentTextureView().?;
-    defer back_buffer_view.release();
-    // Once rendering is done (hence `defer`), swap back buffer to the front to display.
-    defer core.swap_chain.present();
-
-    const render_pass_info = gpu.RenderPassDescriptor.init(.{
-        .color_attachments = &.{.{
-            .view = back_buffer_view,
-            .clear_value = std.mem.zeroes(gpu.Color),
-            .load_op = .clear,
-            .store_op = .store,
-        }},
-        .depth_stencil_attachment = &.{
-            .view = self.depth_texture_view.?,
-            .depth_load_op = .clear,
-            .depth_store_op = .store,
-            .depth_clear_value = 1.0,
-        },
-    });
-
-    // Create a `WGPUCommandEncoder` which provides an interface for recording GPU commands.
-    const encoder = core.device.createCommandEncoder(null);
-    defer encoder.release();
-
-    // Write to the scene uniform buffer for this set of commands.
-    encoder.writeBuffer(self.view_proj_buffer, 0, &[_]zm.Mat{
-        // All matrices the GPU has to work with need to be transposed,
-        // because WebGPU uses column-major matrices while zmath is row-major.
-        zm.transpose(view_proj_matrix),
-    });
-
-    {
-        const pass = encoder.beginRenderPass(&render_pass_info);
-        defer pass.release();
-        defer pass.end();
-
-        pass.setPipeline(self.pipeline);
-        pass.setBindGroup(0, self.camera_bind_group, &.{});
-
-        for (self.object_data) |object| {
-            // Set the vertex and index buffer used to render this
-            // object to the ones from the primitive it wants to use.
-            const prim = object.primitive;
-            pass.setVertexBuffer(0, prim.vertex_buffer, 0, prim.vertex_count * @sizeOf(VertexData));
-            pass.setIndexBuffer(prim.index_buffer, .uint32, 0, prim.index_count * @sizeOf(u32));
-
-            // Set the bind group for the object we want to render.
-            pass.setBindGroup(1, object.model_bind_group, &.{});
-
-            // Draw a number of triangles as specified in the index buffer.
-            pass.drawIndexed(prim.index_count, 1, 0, 0, 0);
-        }
-    }
-
-    // Finish recording commands, creating a `WGPUCommandBuffer`.
-    var command = encoder.finish(null);
-    defer command.release();
-
-    // Submit the command(s) to the GPU.
-    core.queue.submit(&.{command});
-}
+/// System which renders the game world from the camera entity's perspective.
+pub const Render = struct {
+    pub const phase = flecs.pipeline.OnStore;
+    pub const expr = "App($), [in] CameraPerspective(Camera), [out] Transform(Camera)";
+
+    pub fn callback(it: Iter) void {
+        const app = it.field(*App, 1)[0];
+        const camera_perspective = it.field(CameraPerspective, 2)[0];
+        const camera_transform = &it.field(Transform, 3)[0];
+
+        const self = app.renderer;
+        self.time += it.deltaTime();
+
+        // Set up a view matrix from the camera transform.
+        // This moves everything to be relative to the camera.
+        // TODO: Actually implement camera transform instead of hardcoding a look-at matrix.
+        // const view_matrix = zm.inverse(app.camera_transform);
+        const camera_distance = 8.0;
+        const x = @cos(self.time * std.math.tau / 20) * camera_distance;
+        const z = @sin(self.time * std.math.tau / 20) * camera_distance;
+        const camera_pos = vec(x, 2.0, z, 1.0);
+        const view_matrix = zm.lookAtLh(camera_pos, vec(0, 0, 0, 1), vec(0, 1, 0, 1));
+
+        // Setting the transform here doesn't do anything because it's not used
+        // anywhere. In the future we would want to set the camera transform
+        // outside of the rendering step, and then get and use it here, instead.
+        camera_transform.* = .{ .value = view_matrix };
+        // TODO: Not sure if this is the proper transform, or actually inverted.
+
+        // Set up a projection matrix using the size of the window.
+        // The perspective projection will make things further away appear smaller.
+        const width: f32 = @floatFromInt(core.descriptor.width);
+        const height: f32 = @floatFromInt(core.descriptor.height);
+        const proj_matrix = zm.perspectiveFovLh(
+            std.math.degreesToRadians(f32, camera_perspective.field_of_view),
+            width / height,
+            camera_perspective.near_plane,
+            camera_perspective.far_plane,
+        );
+
+        const view_proj_matrix = zm.mul(view_matrix, proj_matrix);
+
+        // Get back buffer texture to render to.
+        const back_buffer_view = core.swap_chain.getCurrentTextureView().?;
+        defer back_buffer_view.release();
+        // Once rendering is done (hence `defer`), swap back buffer to the front to display.
+        defer core.swap_chain.present();
+
+        const render_pass_info = gpu.RenderPassDescriptor.init(.{
+            .color_attachments = &.{.{
+                .view = back_buffer_view,
+                .clear_value = std.mem.zeroes(gpu.Color),
+                .load_op = .clear,
+                .store_op = .store,
+            }},
+            .depth_stencil_attachment = &.{
+                .view = self.depth_texture_view.?,
+                .depth_load_op = .clear,
+                .depth_store_op = .store,
+                .depth_clear_value = 1.0,
+            },
+        });
+
+        // Create a `WGPUCommandEncoder` which provides an interface for recording GPU commands.
+        const encoder = core.device.createCommandEncoder(null);
+        defer encoder.release();
+
+        // Write to the scene uniform buffer for this set of commands.
+        encoder.writeBuffer(self.view_proj_buffer, 0, &[_]zm.Mat{
+            // All matrices the GPU has to work with need to be transposed,
+            // because WebGPU uses column-major matrices while zmath is row-major.
+            zm.transpose(view_proj_matrix),
+        });
+
+        {
+            const pass = encoder.beginRenderPass(&render_pass_info);
+            defer pass.release();
+            defer pass.end();
+
+            pass.setPipeline(self.pipeline);
+            pass.setBindGroup(0, self.camera_bind_group, &.{});
+
+            for (self.object_data) |object| {
+                // Set the vertex and index buffer used to render this
+                // object to the ones from the primitive it wants to use.
+                const prim = object.primitive;
+                pass.setVertexBuffer(0, prim.vertex_buffer, 0, prim.vertex_count * @sizeOf(VertexData));
+                pass.setIndexBuffer(prim.index_buffer, .uint32, 0, prim.index_count * @sizeOf(u32));
+
+                // Set the bind group for the object we want to render.
+                pass.setBindGroup(1, object.model_bind_group, &.{});
+
+                // Draw a number of triangles as specified in the index buffer.
+                pass.drawIndexed(prim.index_count, 1, 0, 0, 0);
+            }
+        }
+
+        // Finish recording commands, creating a `WGPUCommandBuffer`.
+        var command = encoder.finish(null);
+        defer command.release();
+
+        // Submit the command(s) to the GPU.
+        core.queue.submit(&.{command});
+    }
+};
 
 /// Loads a texture from the provided buffer and uploads it to the GPU.
 pub fn loadTexture(allocator: std.mem.Allocator, buffer: []const u8) !*gpu.TextureView {
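One detail worth calling out from the body above is the transpose before upload: zmath follows row-major, row-vector conventions, with translation in the last row, while WGSL reads a `mat4x4<f32>` uniform as column-major, expecting translation in the last column, so each matrix is transposed once as it is written to the uniform buffer. A small illustrative test of the layout difference (assuming zmath's `translation` and `transpose` helpers):

    const std = @import("std");
    const zm = @import("zmath");

    test "row-major translation transposes into column-major layout" {
        const m = zm.translation(1.0, 2.0, 3.0);
        // zmath is row-major: the translation occupies the last row.
        try std.testing.expectEqual(@as(f32, 1.0), m[3][0]);
        const t = zm.transpose(m);
        // Transposed, it occupies the last column, which is where a
        // column-major mat4x4<f32> in a WGSL uniform buffer expects it.
        try std.testing.expectEqual(@as(f32, 1.0), t[0][3]);
    }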