@@ -11,20 +11,19 @@ const zm = @import("zmath");
 const vec = zm.f32x4;
 const Mat = zm.Mat;
 
-const App = @import("./main.zig");
+const App = @import("./App.zig");
 
 const primitives = @import("./primitives.zig");
 const VertexData = primitives.VertexData;
 const PrimitiveData = primitives.PrimitiveData;
 
 const flecszigble = @import("flecs-zig-ble");
+const flecs = flecszigble.flecs;
+
 const Context = flecszigble.Context(void);
 const Entity = Context.Entity;
 const Iter = Context.Iter;
 
-const flecs = flecszigble.flecs;
-const OnStore = flecs.pipeline.OnStore;
-
 const Transform = struct { value: Mat };
 const CameraPerspective = struct {
     /// Vertical field of view (in degrees).
@@ -231,8 +230,8 @@ pub fn init(app: *App) !*Renderer {
     }
 
     // Register components necessary for the camera.
-    _ = try app.world.component("Transform", Transform);
-    _ = try app.world.component("CameraPerspective", CameraPerspective);
+    _ = try app.world.component(Transform);
+    _ = try app.world.component(CameraPerspective);
 
     const camera_entity = try app.world.entity(
         .{ .name = "Camera", .symbol = "Camera" },
@@ -244,8 +243,7 @@ pub fn init(app: *App) !*Renderer {
         .far_plane = 80.0,
     });
 
-    const render_expr = "App, [in] CameraPerspective(Camera), [out] Transform(Camera)";
-    _ = try app.world.system("Render", render, OnStore, render_expr);
+    _ = try app.world.system(Render);
 
     const result = try app.allocator.create(Renderer);
     result.* = .{
@@ -293,105 +291,110 @@ pub fn resize(self: *Renderer) void {
     self.recreateDepthTexture();
 }
 
-pub fn render(it: Iter) void {
-    const app = it.field(*App, 1)[0];
-    const camera_perspective = it.field(CameraPerspective, 2)[0];
-    const camera_transform = &it.field(Transform, 3)[0];
-
-    const self = app.renderer;
-    self.time += it.deltaTime();
-
-    // Set up a view matrix from the camera transform.
-    // This moves everything to be relative to the camera.
-    // TODO: Actually implement camera transform instead of hardcoding a look-at matrix.
-    // const view_matrix = zm.inverse(app.camera_transform);
-    const camera_distance = 8.0;
-    const x = @cos(self.time * std.math.tau / 20) * camera_distance;
-    const z = @sin(self.time * std.math.tau / 20) * camera_distance;
-    const camera_pos = vec(x, 2.0, z, 1.0);
-    const view_matrix = zm.lookAtLh(camera_pos, vec(0, 0, 0, 1), vec(0, 1, 0, 1));
-
-    // Setting the transform here doesn't do anything because it's not used
-    // anywhere. In the future we would want to set the camera transform
-    // outside of the rendering step, and then get and use it here, instead.
-    camera_transform.* = .{ .value = view_matrix };
-    // TODO: Not sure if this is the proper transform, or actually inverted.
-
-    // Set up a projection matrix using the size of the window.
-    // The perspective projection will make things further away appear smaller.
-    const width: f32 = @floatFromInt(core.descriptor.width);
-    const height: f32 = @floatFromInt(core.descriptor.height);
-    const proj_matrix = zm.perspectiveFovLh(
-        std.math.degreesToRadians(f32, camera_perspective.field_of_view),
-        width / height,
-        camera_perspective.near_plane,
-        camera_perspective.far_plane,
-    );
-
-    const view_proj_matrix = zm.mul(view_matrix, proj_matrix);
-
-    // Get back buffer texture to render to.
-    const back_buffer_view = core.swap_chain.getCurrentTextureView().?;
-    defer back_buffer_view.release();
-    // Once rendering is done (hence `defer`), swap back buffer to the front to display.
-    defer core.swap_chain.present();
-
-    const render_pass_info = gpu.RenderPassDescriptor.init(.{
-        .color_attachments = &.{.{
-            .view = back_buffer_view,
-            .clear_value = std.mem.zeroes(gpu.Color),
-            .load_op = .clear,
-            .store_op = .store,
-        }},
-        .depth_stencil_attachment = &.{
-            .view = self.depth_texture_view.?,
-            .depth_load_op = .clear,
-            .depth_store_op = .store,
-            .depth_clear_value = 1.0,
-        },
-    });
-
-    // Create a `WGPUCommandEncoder` which provides an interface for recording GPU commands.
-    const encoder = core.device.createCommandEncoder(null);
-    defer encoder.release();
-
-    // Write to the scene uniform buffer for this set of commands.
-    encoder.writeBuffer(self.view_proj_buffer, 0, &[_]zm.Mat{
-        // All matrices the GPU has to work with need to be transposed,
-        // because WebGPU uses column-major matrices while zmath is row-major.
-        zm.transpose(view_proj_matrix),
-    });
-
-    {
-        const pass = encoder.beginRenderPass(&render_pass_info);
-        defer pass.release();
-        defer pass.end();
-
-        pass.setPipeline(self.pipeline);
-        pass.setBindGroup(0, self.camera_bind_group, &.{});
-
-        for (self.object_data) |object| {
-            // Set the vertex and index buffer used to render this
-            // object to the ones from the primitive it wants to use.
-            const prim = object.primitive;
-            pass.setVertexBuffer(0, prim.vertex_buffer, 0, prim.vertex_count * @sizeOf(VertexData));
-            pass.setIndexBuffer(prim.index_buffer, .uint32, 0, prim.index_count * @sizeOf(u32));
-
-            // Set the bind group for the object we want to render.
-            pass.setBindGroup(1, object.model_bind_group, &.{});
-
-            // Draw a number of triangles as specified in the index buffer.
-            pass.drawIndexed(prim.index_count, 1, 0, 0, 0);
-        }
-    }
-
-    // Finish recording commands, creating a `WGPUCommandBuffer`.
-    var command = encoder.finish(null);
-    defer command.release();
-
-    // Submit the command(s) to the GPU.
-    core.queue.submit(&.{command});
-}
+/// System which renders the game world from the camera entity's perspective.
+pub const Render = struct {
+    pub const phase = flecs.pipeline.OnStore;
+    pub const expr = "App($), [in] CameraPerspective(Camera), [out] Transform(Camera)";
+    pub fn callback(it: Iter) void {
+        const app = it.field(*App, 1)[0];
+        const camera_perspective = it.field(CameraPerspective, 2)[0];
+        const camera_transform = &it.field(Transform, 3)[0];
+
+        const self = app.renderer;
+        self.time += it.deltaTime();
+
+        // Set up a view matrix from the camera transform.
+        // This moves everything to be relative to the camera.
+        // TODO: Actually implement camera transform instead of hardcoding a look-at matrix.
+        // const view_matrix = zm.inverse(app.camera_transform);
+        const camera_distance = 8.0;
+        const x = @cos(self.time * std.math.tau / 20) * camera_distance;
+        const z = @sin(self.time * std.math.tau / 20) * camera_distance;
+        const camera_pos = vec(x, 2.0, z, 1.0);
+        const view_matrix = zm.lookAtLh(camera_pos, vec(0, 0, 0, 1), vec(0, 1, 0, 1));
+
+        // Setting the transform here doesn't do anything because it's not used
+        // anywhere. In the future we would want to set the camera transform
+        // outside of the rendering step, and then get and use it here, instead.
+        camera_transform.* = .{ .value = view_matrix };
+        // TODO: Not sure if this is the proper transform, or actually inverted.
+
+        // Set up a projection matrix using the size of the window.
+        // The perspective projection will make things further away appear smaller.
+        const width: f32 = @floatFromInt(core.descriptor.width);
+        const height: f32 = @floatFromInt(core.descriptor.height);
+        const proj_matrix = zm.perspectiveFovLh(
+            std.math.degreesToRadians(f32, camera_perspective.field_of_view),
+            width / height,
+            camera_perspective.near_plane,
+            camera_perspective.far_plane,
+        );
+
+        const view_proj_matrix = zm.mul(view_matrix, proj_matrix);
+
+        // Get back buffer texture to render to.
+        const back_buffer_view = core.swap_chain.getCurrentTextureView().?;
+        defer back_buffer_view.release();
+        // Once rendering is done (hence `defer`), swap back buffer to the front to display.
+        defer core.swap_chain.present();
+
+        const render_pass_info = gpu.RenderPassDescriptor.init(.{
+            .color_attachments = &.{.{
+                .view = back_buffer_view,
+                .clear_value = std.mem.zeroes(gpu.Color),
+                .load_op = .clear,
+                .store_op = .store,
+            }},
+            .depth_stencil_attachment = &.{
+                .view = self.depth_texture_view.?,
+                .depth_load_op = .clear,
+                .depth_store_op = .store,
+                .depth_clear_value = 1.0,
+            },
+        });
+
+        // Create a `WGPUCommandEncoder` which provides an interface for recording GPU commands.
+        const encoder = core.device.createCommandEncoder(null);
+        defer encoder.release();
+
+        // Write to the scene uniform buffer for this set of commands.
+        encoder.writeBuffer(self.view_proj_buffer, 0, &[_]zm.Mat{
+            // All matrices the GPU has to work with need to be transposed,
+            // because WebGPU uses column-major matrices while zmath is row-major.
+            zm.transpose(view_proj_matrix),
+        });
+
+        {
+            const pass = encoder.beginRenderPass(&render_pass_info);
+            defer pass.release();
+            defer pass.end();
+
+            pass.setPipeline(self.pipeline);
+            pass.setBindGroup(0, self.camera_bind_group, &.{});
+
+            for (self.object_data) |object| {
+                // Set the vertex and index buffer used to render this
+                // object to the ones from the primitive it wants to use.
+                const prim = object.primitive;
+                pass.setVertexBuffer(0, prim.vertex_buffer, 0, prim.vertex_count * @sizeOf(VertexData));
+                pass.setIndexBuffer(prim.index_buffer, .uint32, 0, prim.index_count * @sizeOf(u32));
+
+                // Set the bind group for the object we want to render.
+                pass.setBindGroup(1, object.model_bind_group, &.{});
+
+                // Draw a number of triangles as specified in the index buffer.
+                pass.drawIndexed(prim.index_count, 1, 0, 0, 0);
+            }
+        }
+
+        // Finish recording commands, creating a `WGPUCommandBuffer`.
+        var command = encoder.finish(null);
+        defer command.release();
+
+        // Submit the command(s) to the GPU.
+        core.queue.submit(&.{command});
+    }
+};
 
 /// Loads a texture from the provided buffer and uploads it to the GPU.
 pub fn loadTexture(allocator: std.mem.Allocator, buffer: []const u8) !*gpu.TextureView {