Split rendering code into its own file

main
copygirl 9 months ago
parent e978a98265
commit 9109bd7328
  1. src/main.zig (306)
  2. src/primitives.zig (11)
  3. src/renderer.zig (326)

@@ -2,36 +2,7 @@ const std = @import("std");
const GeneralPurposeAllocator = std.heap.GeneralPurposeAllocator(.{});
const core = @import("mach-core");
const gpu = core.gpu;
const Renderer = @import("./renderer.zig");
const zm = @import("zmath");
const vec = zm.f32x4;
const Mat = zm.Mat;
const primitives = @import("./primitives.zig");
const VertexData = primitives.VertexData;
const PrimitiveData = primitives.PrimitiveData;
/// Holds information about how a particular scene should be rendered.
const SceneUniformBuffer = struct {
view_proj_matrix: zm.Mat,
};
/// Holds information about where and how an object should be rendered.
const ObjectUniformBuffer = struct {
model_matrix: zm.Mat,
color: [3]f32,
};
/// Holds data on what is needed to render an object in a rendering pass.
const ObjectData = struct {
/// Reference to data stored on the GPU of type `ObjectUniformBuffer`.
uniform_buffer: *gpu.Buffer,
/// Bind group used to associate the buffer to the `object` shader parameter.
uniform_bind_group: *gpu.BindGroup,
/// Reference to the primitive (shape or model) to render for this object.
primitive: *PrimitiveData,
};
pub const App = @This();
@@ -39,17 +10,11 @@ gpa: GeneralPurposeAllocator,
allocator: std.mem.Allocator,
random: std.rand.Random,
renderer: *Renderer,
app_timer: core.Timer,
title_timer: core.Timer,
depth_texture: *gpu.Texture,
depth_texture_view: *gpu.TextureView,
pipeline: *gpu.RenderPipeline,
scene_uniform_buffer: *gpu.Buffer,
scene_uniform_bind_group: *gpu.BindGroup,
primitive_data: []PrimitiveData,
object_data: []ObjectData,
pub fn init(app: *App) !void {
try core.init(.{});
@@ -68,170 +33,10 @@ pub fn init(app: *App) !void {
var prng = std.rand.DefaultPrng.init(0);
app.random = prng.random();
app.renderer = try Renderer.init(app);
app.app_timer = try core.Timer.start();
app.title_timer = try core.Timer.start();
// Create a depth texture. This is used to ensure that when things are
// rendered, an object behind another won't draw over one in front, simply
// because it was rendered at a later point in time.
app.depth_texture = core.device.createTexture(&.{
.usage = .{ .render_attachment = true },
.size = .{ .width = core.descriptor.width, .height = core.descriptor.height },
.format = .depth24_plus,
});
app.depth_texture_view = app.depth_texture.createView(null);
const shader_module = core.device.createShaderModuleWGSL("shader.wgsl", @embedFile("shader.wgsl"));
defer shader_module.release();
// Set up rendering pipeline.
app.pipeline = core.device.createRenderPipeline(&.{
.vertex = gpu.VertexState.init(.{
.module = shader_module,
.entry_point = "vertex_main",
.buffers = &.{
gpu.VertexBufferLayout.init(.{
.array_stride = @sizeOf(VertexData),
.step_mode = .vertex,
.attributes = &.{
.{ .format = .float32x3, .shader_location = 0, .offset = @offsetOf(VertexData, "position") },
},
}),
},
}),
.primitive = .{
.topology = .triangle_list,
.front_face = .ccw,
.cull_mode = .back,
},
.depth_stencil = &.{
.format = .depth24_plus,
.depth_write_enabled = .true,
.depth_compare = .less,
},
.fragment = &gpu.FragmentState.init(.{
.module = shader_module,
.entry_point = "frag_main",
.targets = &.{.{ .format = core.descriptor.format }},
}),
});
// Set up scene related uniform buffers and bind groups.
{
const result = createAndWriteUniformBuffer(
app.pipeline.getBindGroupLayout(0),
SceneUniformBuffer{ .view_proj_matrix = zm.identity() },
);
app.scene_uniform_buffer = result.buffer;
app.scene_uniform_bind_group = result.bind_group;
}
// Set up the primitives we want to render.
// Using `dupe` to allocate a slice here allows easily adjusting the
// primitives to use, without changing the type of `primitive_data`.
app.primitive_data = try app.allocator.dupe(PrimitiveData, &.{
// primitives.createTrianglePrimitive(1.0),
// primitives.createSquarePrimitive(0.8),
// primitives.createCirclePrimitive(0.5, 24),
primitives.createCubePrimitive(0.65),
primitives.createPyramidPrimitive(0.75),
});
// Set up object related uniform buffers and bind groups.
// This uploads data to the GPU about all the objects we
// want to render, such as their location and color.
{
const grid_size = 8;
// Allocate a slice to store as many ObjectData as we want to create.
//
// Using a slice instead of an array means that we could change how
// many objects we want to render at compile time; however, it requires
// allocating, and later freeing, memory to store the slice.
app.object_data = try app.allocator.alloc(ObjectData, grid_size * grid_size);
// Note that for loops in Zig are a little different from what you might
// know from other languages. They only loop over arrays, slices,
// tuples and ranges, potentially multiple at once.
for (app.object_data, 0..) |*object, i| {
const grid_max: f32 = @floatFromInt(grid_size - 1);
const x = @as(f32, @floatFromInt(i % grid_size)) / grid_max;
const z = @as(f32, @floatFromInt(i / grid_size)) / grid_max;
const rotation = zm.rotationY(std.math.tau * (x + z) / 2.0);
const translation = zm.translation((x - 0.5) * grid_size, 0, (z - 0.5) * grid_size);
const model_matrix = zm.mul(rotation, translation);
// Make the object have a color depending on its location in the grid.
// These values are laid out so the corners are red, green, blue and black.
const color = .{
std.math.clamp(1.0 - x - z, 0.0, 1.0),
std.math.clamp(x - z, 0.0, 1.0),
std.math.clamp(z - x, 0.0, 1.0),
};
const result = createAndWriteUniformBuffer(
app.pipeline.getBindGroupLayout(1),
ObjectUniformBuffer{
.model_matrix = zm.transpose(model_matrix),
.color = color,
},
);
// Pick a "random" primitive to use for this object.
const primitive_index = app.random.int(usize) % app.primitive_data.len;
const primitive = &app.primitive_data[primitive_index];
// The `*object` syntax gets us a pointer to each element in the
// `object_data` slice, allowing us to overwrite it within the loop.
object.* = .{
.uniform_buffer = result.buffer,
.uniform_bind_group = result.bind_group,
.primitive = primitive,
};
}
}
}
/// Creates a buffer on the GPU to store uniform parameter information as
/// well as a bind group with the specified layout pointing to that buffer.
/// Additionally, immediately fills the buffer with the provided data.
pub fn createAndWriteUniformBuffer(
layout: *gpu.BindGroupLayout,
data: anytype,
) struct {
buffer: *gpu.Buffer,
bind_group: *gpu.BindGroup,
} {
const T = @TypeOf(data);
const usage = gpu.Buffer.UsageFlags{ .copy_dst = true, .uniform = true };
const buffer = createAndWriteBuffer(T, &.{data}, usage);
// "Bind groups" are used to associate data from buffers with shader parameters.
// So for example the `scene_uniform_bind_group` is accessible via `scene` in our shader.
// Essentially, buffer = data, and bind group = binding parameter to that data.
const bind_group_entry = gpu.BindGroup.Entry.buffer(0, buffer, 0, @sizeOf(T));
const bind_group_desc = gpu.BindGroup.Descriptor.init(.{ .layout = layout, .entries = &.{bind_group_entry} });
const bind_group = core.device.createBindGroup(&bind_group_desc);
return .{ .buffer = buffer, .bind_group = bind_group };
}
/// Creates a buffer on the GPU with the specified usage
/// flags and immediately fills it with the provided data.
pub fn createAndWriteBuffer(
comptime T: type,
data: []const T,
usage: gpu.Buffer.UsageFlags,
) *gpu.Buffer {
const buffer = core.device.createBuffer(&.{
.size = data.len * @sizeOf(T),
.usage = usage,
.mapped_at_creation = .false,
});
core.queue.writeBuffer(buffer, 0, data);
return buffer;
}
pub fn deinit(app: *App) void { pub fn deinit(app: *App) void {
@@ -239,113 +44,22 @@ pub fn deinit(app: *App) void {
// in the order they were created in `init`.
defer core.deinit();
defer _ = app.gpa.deinit(); // TODO: Check for memory leaks?
defer app.depth_texture.release();
defer app.renderer.deinit();
defer app.depth_texture_view.release();
defer app.pipeline.release();
defer app.scene_uniform_buffer.release();
defer app.scene_uniform_bind_group.release();
defer app.allocator.free(app.primitive_data);
defer for (app.primitive_data) |p| {
p.vertex_buffer.release();
p.index_buffer.release();
};
defer app.allocator.free(app.object_data);
defer for (app.object_data) |o| {
o.uniform_buffer.release();
o.uniform_bind_group.release();
};
}
pub fn update(app: *App) !bool {
// Read events from the OS such as input.
var iter = core.pollEvents();
while (iter.next()) |event| {
switch (event) {
// Close the window when requested, such as when
// pressing the X button in the window title bar.
.close => return true,
else => {},
}
}
// Set up a view matrix from the camera transform.
app.renderer.update();
// This moves everything to be relative to the camera.
// TODO: Actually implement camera transform instead of hardcoding a look-at matrix.
// const view_matrix = zm.inverse(app.camera_transform);
const time = app.app_timer.read();
const camera_distance = 8.0;
const x = @cos(time * std.math.tau / 20) * camera_distance;
const z = @sin(time * std.math.tau / 20) * camera_distance;
const camera_pos = vec(x, 2.0, z, 1.0);
const view_matrix = zm.lookAtLh(camera_pos, vec(0, 0, 0, 1), vec(0, 1, 0, 1));
// Set up a projection matrix using the size of the window.
// The perspective projection will make things further away appear smaller.
const width: f32 = @floatFromInt(core.descriptor.width);
const height: f32 = @floatFromInt(core.descriptor.height);
const field_of_view = std.math.degreesToRadians(f32, 45.0);
const proj_matrix = zm.perspectiveFovLh(field_of_view, width / height, 0.05, 80.0);
const view_proj_matrix = zm.mul(view_matrix, proj_matrix);
// Get back buffer texture to render to.
const back_buffer_view = core.swap_chain.getCurrentTextureView().?;
defer back_buffer_view.release();
// Once rendering is done (hence `defer`), swap back buffer to the front to display.
defer core.swap_chain.present();
const render_pass_info = gpu.RenderPassDescriptor.init(.{
.color_attachments = &.{.{
.view = back_buffer_view,
.clear_value = std.mem.zeroes(gpu.Color),
.load_op = .clear,
.store_op = .store,
}},
.depth_stencil_attachment = &.{
.view = app.depth_texture_view,
.depth_load_op = .clear,
.depth_store_op = .store,
.depth_clear_value = 1.0,
},
});
// Create a `WGPUCommandEncoder` which provides an interface for recording GPU commands.
const encoder = core.device.createCommandEncoder(null);
defer encoder.release();
// Write to the scene uniform buffer for this set of commands.
encoder.writeBuffer(app.scene_uniform_buffer, 0, &[_]SceneUniformBuffer{.{
// All matrices the GPU has to work with need to be transposed,
// because WebGPU uses column-major matrices while zmath is row-major.
.view_proj_matrix = zm.transpose(view_proj_matrix),
}});
{
const pass = encoder.beginRenderPass(&render_pass_info);
defer pass.release();
defer pass.end();
pass.setPipeline(app.pipeline);
pass.setBindGroup(0, app.scene_uniform_bind_group, &.{});
for (app.object_data) |object| {
// Set the vertex and index buffer used to render this object
// to the primitive it wants to use (in this case, a cube or a pyramid).
const prim = object.primitive;
pass.setVertexBuffer(0, prim.vertex_buffer, 0, prim.vertex_count * @sizeOf(VertexData));
pass.setIndexBuffer(prim.index_buffer, .uint32, 0, prim.index_count * @sizeOf(u32));
// Set the bind group for an object we want to render.
pass.setBindGroup(1, object.uniform_bind_group, &.{});
// Draw a number of triangles as specified in the index buffer.
pass.drawIndexed(prim.index_count, 1, 0, 0, 0);
}
}
// Finish recording commands, creating a `WGPUCommandBuffer`.
var command = encoder.finish(null);
defer command.release();
// Submit the command(s) to the GPU.
core.queue.submit(&.{command});
// Update the window title to show FPS and input frequency.
if (app.title_timer.read() >= 1.0) {

@@ -1,10 +1,11 @@
const std = @import("std");
const tau = std.math.tau;
const core = @import("mach-core");
const gpu = core.gpu;
const main = @import("./main.zig");
const Renderer = @import("./renderer.zig");
const createAndWriteBuffer = main.createAndWriteBuffer;
const createAndWriteBuffer = Renderer.createAndWriteBuffer;
/// Describes the layout of each vertex that a primitive is made of.
pub const VertexData = struct {
@@ -49,8 +50,8 @@ fn vert(x: f32, y: f32, z: f32) VertexData {
pub fn createTrianglePrimitive(length: f32) PrimitiveData {
const radius = length / @sqrt(3.0);
const a0 = 0.0;
const a1 = std.math.tau / 3.0;
const a1 = tau / 3.0;
const a2 = std.math.tau / 3.0 * 2.0;
const a2 = tau / 3.0 * 2.0;
return createPrimitive(
// A triangle is made up of 3 vertices.
//
@@ -106,7 +107,7 @@ pub fn createCirclePrimitive(radius: f32, comptime sides: usize) PrimitiveData {
var vertices: [sides]VertexData = undefined;
for (&vertices, 0..) |*vertex, i| {
const angle = std.math.tau / @as(f32, @floatFromInt(sides)) * @as(f32, @floatFromInt(i));
const angle = tau / @as(f32, @floatFromInt(sides)) * @as(f32, @floatFromInt(i));
vertex.* = vert(@sin(angle) * radius, @cos(angle) * radius, 0.0);
}

@@ -0,0 +1,326 @@
const std = @import("std");
const core = @import("mach-core");
const gpu = core.gpu;
const zm = @import("zmath");
const vec = zm.f32x4;
const Mat = zm.Mat;
const App = @import("./main.zig");
const primitives = @import("./primitives.zig");
const VertexData = primitives.VertexData;
const PrimitiveData = primitives.PrimitiveData;
/// Holds information about how a particular scene should be rendered.
const SceneUniformBuffer = struct {
view_proj_matrix: Mat,
};
/// Holds information about where and how an object should be rendered.
const ObjectUniformBuffer = struct {
model_matrix: Mat,
color: [3]f32,
};
/// Holds data needed to render an object in a rendering pass.
const ObjectData = struct {
/// Reference to data stored on the GPU of type `ObjectUniformBuffer`.
uniform_buffer: *gpu.Buffer,
/// Bind group used to associate the buffer to the `object` shader parameter.
uniform_bind_group: *gpu.BindGroup,
/// Reference to the primitive (shape or model) to render for this object.
primitive: *PrimitiveData,
};
const Renderer = @This();
app: *App,
depth_texture: *gpu.Texture,
depth_texture_view: *gpu.TextureView,
pipeline: *gpu.RenderPipeline,
scene_uniform_buffer: *gpu.Buffer,
scene_uniform_bind_group: *gpu.BindGroup,
primitive_data: []PrimitiveData,
object_data: []ObjectData,
pub fn init(app: *App) !*Renderer {
// Create a depth texture. This is used to ensure that when things are
// rendered, an object behind another won't draw over one in front, simply
// because it was rendered at a later point in time.
const depth_texture = core.device.createTexture(&.{
.usage = .{ .render_attachment = true },
.size = .{ .width = core.descriptor.width, .height = core.descriptor.height },
.format = .depth24_plus,
});
const depth_texture_view = depth_texture.createView(null);
const shader_module = core.device.createShaderModuleWGSL("shader.wgsl", @embedFile("shader.wgsl"));
defer shader_module.release();
// Set up rendering pipeline.
const pipeline = core.device.createRenderPipeline(&.{
.vertex = gpu.VertexState.init(.{
.module = shader_module,
.entry_point = "vertex_main",
.buffers = &.{
gpu.VertexBufferLayout.init(.{
.array_stride = @sizeOf(VertexData),
.step_mode = .vertex,
.attributes = &.{
.{ .format = .float32x3, .shader_location = 0, .offset = @offsetOf(VertexData, "position") },
},
}),
},
}),
.primitive = .{
.topology = .triangle_list,
.front_face = .ccw,
.cull_mode = .back,
},
.depth_stencil = &.{
.format = .depth24_plus,
.depth_write_enabled = .true,
.depth_compare = .less,
},
.fragment = &gpu.FragmentState.init(.{
.module = shader_module,
.entry_point = "frag_main",
.targets = &.{.{ .format = core.descriptor.format }},
}),
});
// Set up scene related uniform buffers and bind groups.
const scene_uniform = createAndWriteUniformBuffer(
pipeline.getBindGroupLayout(0),
SceneUniformBuffer{ .view_proj_matrix = zm.identity() },
);
// Set up the primitives we want to render.
//
// Using `dupe` to allocate a slice here allows easily adjusting the
// primitives to use, without changing the type of `primitive_data`.
const primitive_data = try app.allocator.dupe(PrimitiveData, &.{
// primitives.createTrianglePrimitive(1.0),
// primitives.createSquarePrimitive(0.8),
// primitives.createCirclePrimitive(0.5, 24),
primitives.createCubePrimitive(0.65),
primitives.createPyramidPrimitive(0.75),
});
// Set up object related uniform buffers and bind groups.
// This uploads data to the GPU about all the objects we
// want to render, such as their location and color.
const grid_size = 8;
// Allocate a slice to store as many ObjectData as we want to create.
//
// Using a slice instead of an array means that we could change how
// many objects we want to render at compile time; however, it requires
// allocating, and later freeing, memory to store the slice.
const object_data = try app.allocator.alloc(ObjectData, grid_size * grid_size);
// Note that for loops in Zig are a little different from what you might
// know from other languages. They only loop over arrays, slices,
// tuples and ranges, potentially multiple at once.
for (object_data, 0..) |*object, i| {
const grid_max: f32 = @floatFromInt(grid_size - 1);
const x = @as(f32, @floatFromInt(i % grid_size)) / grid_max;
const z = @as(f32, @floatFromInt(i / grid_size)) / grid_max;
const rotation = zm.rotationY(std.math.tau * (x + z) / 2.0);
const translation = zm.translation((x - 0.5) * grid_size, 0, (z - 0.5) * grid_size);
const model_matrix = zm.mul(rotation, translation);
// Make the object have a color depending on its location in the grid.
// These values are laid out so the corners are red, green, blue and black.
const color = .{
std.math.clamp(1.0 - x - z, 0.0, 1.0),
std.math.clamp(x - z, 0.0, 1.0),
std.math.clamp(z - x, 0.0, 1.0),
};
const object_uniform = createAndWriteUniformBuffer(
pipeline.getBindGroupLayout(1),
ObjectUniformBuffer{
.model_matrix = zm.transpose(model_matrix),
.color = color,
},
);
// Pick a "random" primitive to use for this object.
const primitive_index = app.random.int(usize) % primitive_data.len;
const primitive = &primitive_data[primitive_index];
// The `*object` syntax gets us a pointer to each element in the
// `object_data` slice, allowing us to overwrite it within the loop.
object.* = .{
.uniform_buffer = object_uniform.buffer,
.uniform_bind_group = object_uniform.bind_group,
.primitive = primitive,
};
}
const result = try app.allocator.create(Renderer);
result.* = .{
.app = app,
.depth_texture = depth_texture,
.depth_texture_view = depth_texture_view,
.pipeline = pipeline,
.scene_uniform_buffer = scene_uniform.buffer,
.scene_uniform_bind_group = scene_uniform.bind_group,
.primitive_data = primitive_data,
.object_data = object_data,
};
return result;
}
pub fn deinit(self: *Renderer) void {
// Using `defer` here, so we can specify them
// in the order they were created in `init`.
defer self.app.allocator.destroy(self);
defer self.depth_texture.release();
defer self.depth_texture_view.release();
defer self.pipeline.release();
defer self.scene_uniform_buffer.release();
defer self.scene_uniform_bind_group.release();
defer self.app.allocator.free(self.primitive_data);
defer for (self.primitive_data) |p| {
p.vertex_buffer.release();
p.index_buffer.release();
};
defer self.app.allocator.free(self.object_data);
defer for (self.object_data) |o| {
o.uniform_buffer.release();
o.uniform_bind_group.release();
};
}
pub fn update(self: *Renderer) void {
// Set up a view matrix from the camera transform.
// This moves everything to be relative to the camera.
// TODO: Actually implement camera transform instead of hardcoding a look-at matrix.
// const view_matrix = zm.inverse(app.camera_transform);
const time = self.app.app_timer.read();
const camera_distance = 8.0;
const x = @cos(time * std.math.tau / 20) * camera_distance;
const z = @sin(time * std.math.tau / 20) * camera_distance;
const camera_pos = vec(x, 2.0, z, 1.0);
const view_matrix = zm.lookAtLh(camera_pos, vec(0, 0, 0, 1), vec(0, 1, 0, 1));
// Set up a projection matrix using the size of the window.
// The perspective projection will make things further away appear smaller.
const width: f32 = @floatFromInt(core.descriptor.width);
const height: f32 = @floatFromInt(core.descriptor.height);
const field_of_view = std.math.degreesToRadians(f32, 45.0);
const proj_matrix = zm.perspectiveFovLh(field_of_view, width / height, 0.05, 80.0);
const view_proj_matrix = zm.mul(view_matrix, proj_matrix);
// Get back buffer texture to render to.
const back_buffer_view = core.swap_chain.getCurrentTextureView().?;
defer back_buffer_view.release();
// Once rendering is done (hence `defer`), swap back buffer to the front to display.
defer core.swap_chain.present();
const render_pass_info = gpu.RenderPassDescriptor.init(.{
.color_attachments = &.{.{
.view = back_buffer_view,
.clear_value = std.mem.zeroes(gpu.Color),
.load_op = .clear,
.store_op = .store,
}},
.depth_stencil_attachment = &.{
.view = self.depth_texture_view,
.depth_load_op = .clear,
.depth_store_op = .store,
.depth_clear_value = 1.0,
},
});
// Create a `WGPUCommandEncoder` which provides an interface for recording GPU commands.
const encoder = core.device.createCommandEncoder(null);
defer encoder.release();
// Write to the scene uniform buffer for this set of commands.
encoder.writeBuffer(self.scene_uniform_buffer, 0, &[_]SceneUniformBuffer{.{
// All matrices the GPU has to work with need to be transposed,
// because WebGPU uses column-major matrices while zmath is row-major.
.view_proj_matrix = zm.transpose(view_proj_matrix),
}});
{
const pass = encoder.beginRenderPass(&render_pass_info);
defer pass.release();
defer pass.end();
pass.setPipeline(self.pipeline);
pass.setBindGroup(0, self.scene_uniform_bind_group, &.{});
for (self.object_data) |object| {
// Set the vertex and index buffer used to render this object
// to the primitive it wants to use (in this case, a cube or a pyramid).
const prim = object.primitive;
pass.setVertexBuffer(0, prim.vertex_buffer, 0, prim.vertex_count * @sizeOf(VertexData));
pass.setIndexBuffer(prim.index_buffer, .uint32, 0, prim.index_count * @sizeOf(u32));
// Set the bind group for an object we want to render.
pass.setBindGroup(1, object.uniform_bind_group, &.{});
// Draw a number of triangles as specified in the index buffer.
pass.drawIndexed(prim.index_count, 1, 0, 0, 0);
}
}
// Finish recording commands, creating a `WGPUCommandBuffer`.
var command = encoder.finish(null);
defer command.release();
// Submit the command(s) to the GPU.
core.queue.submit(&.{command});
}
/// Creates a buffer on the GPU to store uniform parameter information as
/// well as a bind group with the specified layout pointing to that buffer.
/// Additionally, immediately fills the buffer with the provided data.
pub fn createAndWriteUniformBuffer(
layout: *gpu.BindGroupLayout,
data: anytype,
) struct {
buffer: *gpu.Buffer,
bind_group: *gpu.BindGroup,
} {
const T = @TypeOf(data);
const usage = gpu.Buffer.UsageFlags{ .copy_dst = true, .uniform = true };
const buffer = createAndWriteBuffer(T, &.{data}, usage);
// "Bind groups" are used to associate data from buffers with shader parameters.
// So for example the `scene_uniform_bind_group` is accessible via `scene` in our shader.
// Essentially, buffer = data, and bind group = binding parameter to that data.
const bind_group_entry = gpu.BindGroup.Entry.buffer(0, buffer, 0, @sizeOf(T));
const bind_group_desc = gpu.BindGroup.Descriptor.init(.{ .layout = layout, .entries = &.{bind_group_entry} });
const bind_group = core.device.createBindGroup(&bind_group_desc);
return .{ .buffer = buffer, .bind_group = bind_group };
}
/// Creates a buffer on the GPU with the specified usage
/// flags and immediately fills it with the provided data.
pub fn createAndWriteBuffer(
comptime T: type,
data: []const T,
usage: gpu.Buffer.UsageFlags,
) *gpu.Buffer {
const buffer = core.device.createBuffer(&.{
.size = data.len * @sizeOf(T),
.usage = usage,
.mapped_at_creation = .false,
});
core.queue.writeBuffer(buffer, 0, data);
return buffer;
}
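For quick reference, the app-facing surface of the new Renderer boils down to three calls. The sketch below condenses the main.zig changes shown above into one place; it is not a complete file. The allocator, RNG and timer setup, the gpa.deinit() call, and the window-title/FPS update are elided, and the trailing `return false;` in `update` is assumed from the unchanged remainder of main.zig rather than shown in this diff.

const core = @import("mach-core");
const Renderer = @import("./renderer.zig");

pub const App = @This();

renderer: *Renderer,
// ...gpa, allocator, random, app_timer and title_timer as before...

pub fn init(app: *App) !void {
    try core.init(.{});
    // ...allocator, RNG and timer setup as before...
    app.renderer = try Renderer.init(app);
}

pub fn deinit(app: *App) void {
    defer core.deinit();
    // ...gpa.deinit() as before...
    defer app.renderer.deinit();
}

pub fn update(app: *App) !bool {
    var iter = core.pollEvents();
    while (iter.next()) |event| {
        switch (event) {
            .close => return true,
            else => {},
        }
    }
    // All per-frame rendering (camera, render pass, draw calls) now lives
    // in Renderer.update; main.zig only forwards to it.
    app.renderer.update();
    // ...window title / FPS update as before...
    return false;
}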