@@ -11,19 +11,20 @@ const zm = @import("zmath");
 const vec = zm.f32x4;
 const Mat = zm.Mat;
 
-const App = @import("./App.zig");
+const App = @import("./main.zig");
 
 const primitives = @import("./primitives.zig");
 const VertexData = primitives.VertexData;
 const PrimitiveData = primitives.PrimitiveData;
 
 const flecszigble = @import("flecs-zig-ble");
-const flecs = flecszigble.flecs;
-
 const Context = flecszigble.Context(void);
 const Entity = Context.Entity;
 const Iter = Context.Iter;
 
+const flecs = flecszigble.flecs;
+const OnStore = flecs.pipeline.OnStore;
+
 const Transform = struct { value: Mat };
 const CameraPerspective = struct {
     /// Vertical field of view (in degrees).
@@ -230,8 +231,8 @@ pub fn init(app: *App) !*Renderer {
     }
 
     // Register components necessary for the camera.
-    _ = try app.world.component(Transform);
-    _ = try app.world.component(CameraPerspective);
+    _ = try app.world.component("Transform", Transform);
+    _ = try app.world.component("CameraPerspective", CameraPerspective);
 
     const camera_entity = try app.world.entity(
         .{ .name = "Camera", .symbol = "Camera" },
@@ -243,7 +244,8 @@ pub fn init(app: *App) !*Renderer {
         .far_plane = 80.0,
     });
 
-    _ = try app.world.system(Render);
+    const render_expr = "App, [in] CameraPerspective(Camera), [out] Transform(Camera)";
+    _ = try app.world.system("Render", render, OnStore, render_expr);
 
     const result = try app.allocator.create(Renderer);
     result.* = .{
@@ -291,110 +293,105 @@ pub fn resize(self: *Renderer) void {
     self.recreateDepthTexture();
 }
 
-pub const Render = struct {
-    pub const phase = flecs.pipeline.OnStore;
-    pub const expr = "App($), [in] CameraPerspective(Camera), [out] Transform(Camera)";
-
-    pub fn callback(it: Iter) void {
-        const app = it.field(*App, 1)[0];
-        const camera_perspective = it.field(CameraPerspective, 2)[0];
-        const camera_transform = &it.field(Transform, 3)[0];
-
-        const self = app.renderer;
-        self.time += it.deltaTime();
-
-        // Set up a view matrix from the camera transform.
-        // This moves everything to be relative to the camera.
-        // TODO: Actually implement camera transform instead of hardcoding a look-at matrix.
-        // const view_matrix = zm.inverse(app.camera_transform);
-        const camera_distance = 8.0;
-        const x = @cos(self.time * std.math.tau / 20) * camera_distance;
-        const z = @sin(self.time * std.math.tau / 20) * camera_distance;
-        const camera_pos = vec(x, 2.0, z, 1.0);
-        const view_matrix = zm.lookAtLh(camera_pos, vec(0, 0, 0, 1), vec(0, 1, 0, 1));
-
-        // Setting the transform here doesn't do anything because it's not used
-        // anywhere. In the future we would want to set the camera transform
-        // outside of the rendering step, and then get and use it here, instead.
-        camera_transform.* = .{ .value = view_matrix };
-        // TODO: Not sure if this is the proper transform, or actually inverted.
-
-        // Set up a projection matrix using the size of the window.
-        // The perspective projection will make things further away appear smaller.
-        const width: f32 = @floatFromInt(core.descriptor.width);
-        const height: f32 = @floatFromInt(core.descriptor.height);
-        const proj_matrix = zm.perspectiveFovLh(
-            std.math.degreesToRadians(f32, camera_perspective.field_of_view),
-            width / height,
-            camera_perspective.near_plane,
-            camera_perspective.far_plane,
-        );
-
-        const view_proj_matrix = zm.mul(view_matrix, proj_matrix);
-
-        // Get back buffer texture to render to.
-        const back_buffer_view = core.swap_chain.getCurrentTextureView().?;
-        defer back_buffer_view.release();
-        // Once rendering is done (hence `defer`), swap back buffer to the front to display.
-        defer core.swap_chain.present();
-
-        const render_pass_info = gpu.RenderPassDescriptor.init(.{
-            .color_attachments = &.{.{
-                .view = back_buffer_view,
-                .clear_value = std.mem.zeroes(gpu.Color),
-                .load_op = .clear,
-                .store_op = .store,
-            }},
-            .depth_stencil_attachment = &.{
-                .view = self.depth_texture_view.?,
-                .depth_load_op = .clear,
-                .depth_store_op = .store,
-                .depth_clear_value = 1.0,
-            },
-        });
-
-        // Create a `WGPUCommandEncoder` which provides an interface for recording GPU commands.
-        const encoder = core.device.createCommandEncoder(null);
-        defer encoder.release();
-
-        // Write to the scene uniform buffer for this set of commands.
-        encoder.writeBuffer(self.view_proj_buffer, 0, &[_]zm.Mat{
-            // All matrices the GPU has to work with need to be transposed,
-            // because WebGPU uses column-major matrices while zmath is row-major.
-            zm.transpose(view_proj_matrix),
-        });
-
-        {
-            const pass = encoder.beginRenderPass(&render_pass_info);
-            defer pass.release();
-            defer pass.end();
-
-            pass.setPipeline(self.pipeline);
-            pass.setBindGroup(0, self.camera_bind_group, &.{});
-
-            for (self.object_data) |object| {
-                // Set the vertex and index buffer used to render this
-                // object to the ones from the primitive it wants to use.
-                const prim = object.primitive;
-                pass.setVertexBuffer(0, prim.vertex_buffer, 0, prim.vertex_count * @sizeOf(VertexData));
-                pass.setIndexBuffer(prim.index_buffer, .uint32, 0, prim.index_count * @sizeOf(u32));
-
-                // Set the bind group for the object we want to render.
-                pass.setBindGroup(1, object.model_bind_group, &.{});
-
-                // Draw a number of triangles as specified in the index buffer.
-                pass.drawIndexed(prim.index_count, 1, 0, 0, 0);
-            }
-        }
-
-        // Finish recording commands, creating a `WGPUCommandBuffer`.
-        var command = encoder.finish(null);
-        defer command.release();
-
-        // Submit the command(s) to the GPU.
-        core.queue.submit(&.{command});
-    }
-};
+/// System which renders the game world from the camera entity's perspective.
+pub fn render(it: Iter) void {
+    const app = it.field(*App, 1)[0];
+    const camera_perspective = it.field(CameraPerspective, 2)[0];
+    const camera_transform = &it.field(Transform, 3)[0];
+
+    const self = app.renderer;
+    self.time += it.deltaTime();
+
+    // Set up a view matrix from the camera transform.
+    // This moves everything to be relative to the camera.
+    // TODO: Actually implement camera transform instead of hardcoding a look-at matrix.
+    // const view_matrix = zm.inverse(app.camera_transform);
+    const camera_distance = 8.0;
+    const x = @cos(self.time * std.math.tau / 20) * camera_distance;
+    const z = @sin(self.time * std.math.tau / 20) * camera_distance;
+    const camera_pos = vec(x, 2.0, z, 1.0);
+    const view_matrix = zm.lookAtLh(camera_pos, vec(0, 0, 0, 1), vec(0, 1, 0, 1));
+
+    // Setting the transform here doesn't do anything because it's not used
+    // anywhere. In the future we would want to set the camera transform
+    // outside of the rendering step, and then get and use it here, instead.
+    camera_transform.* = .{ .value = view_matrix };
+    // TODO: Not sure if this is the proper transform, or actually inverted.
+
+    // Set up a projection matrix using the size of the window.
+    // The perspective projection will make things further away appear smaller.
+    const width: f32 = @floatFromInt(core.descriptor.width);
+    const height: f32 = @floatFromInt(core.descriptor.height);
+    const proj_matrix = zm.perspectiveFovLh(
+        std.math.degreesToRadians(f32, camera_perspective.field_of_view),
+        width / height,
+        camera_perspective.near_plane,
+        camera_perspective.far_plane,
+    );
+
+    const view_proj_matrix = zm.mul(view_matrix, proj_matrix);
+
+    // Get back buffer texture to render to.
+    const back_buffer_view = core.swap_chain.getCurrentTextureView().?;
+    defer back_buffer_view.release();
+    // Once rendering is done (hence `defer`), swap back buffer to the front to display.
+    defer core.swap_chain.present();
+
+    const render_pass_info = gpu.RenderPassDescriptor.init(.{
+        .color_attachments = &.{.{
+            .view = back_buffer_view,
+            .clear_value = std.mem.zeroes(gpu.Color),
+            .load_op = .clear,
+            .store_op = .store,
+        }},
+        .depth_stencil_attachment = &.{
+            .view = self.depth_texture_view.?,
+            .depth_load_op = .clear,
+            .depth_store_op = .store,
+            .depth_clear_value = 1.0,
+        },
+    });
+
+    // Create a `WGPUCommandEncoder` which provides an interface for recording GPU commands.
+    const encoder = core.device.createCommandEncoder(null);
+    defer encoder.release();
+
+    // Write to the scene uniform buffer for this set of commands.
+    encoder.writeBuffer(self.view_proj_buffer, 0, &[_]zm.Mat{
+        // All matrices the GPU has to work with need to be transposed,
+        // because WebGPU uses column-major matrices while zmath is row-major.
+        zm.transpose(view_proj_matrix),
+    });
+
+    {
+        const pass = encoder.beginRenderPass(&render_pass_info);
+        defer pass.release();
+        defer pass.end();
+
+        pass.setPipeline(self.pipeline);
+        pass.setBindGroup(0, self.camera_bind_group, &.{});
+
+        for (self.object_data) |object| {
+            // Set the vertex and index buffer used to render this
+            // object to the ones from the primitive it wants to use.
+            const prim = object.primitive;
+            pass.setVertexBuffer(0, prim.vertex_buffer, 0, prim.vertex_count * @sizeOf(VertexData));
+            pass.setIndexBuffer(prim.index_buffer, .uint32, 0, prim.index_count * @sizeOf(u32));
+
+            // Set the bind group for the object we want to render.
+            pass.setBindGroup(1, object.model_bind_group, &.{});
+
+            // Draw a number of triangles as specified in the index buffer.
+            pass.drawIndexed(prim.index_count, 1, 0, 0, 0);
+        }
+    }
+
+    // Finish recording commands, creating a `WGPUCommandBuffer`.
+    var command = encoder.finish(null);
+    defer command.release();
+
+    // Submit the command(s) to the GPU.
+    core.queue.submit(&.{command});
+}
 
 /// Loads a texture from the provided buffer and uploads it to the GPU.
 pub fn loadTexture(allocator: std.mem.Allocator, buffer: []const u8) !*gpu.TextureView {