{examples,shaderexp}: update to new mach/gpu API
Signed-off-by: Stephen Gutekanst <stephen@hexops.com>
This commit is contained in:
parent
852d232335
commit
f299d87aa2
39 changed files with 473 additions and 338 deletions
|
|
@ -33,7 +33,7 @@ struct Light {
|
|||
@group(1) @binding(1) var s_diffuse: sampler;
|
||||
@group(2) @binding(0) var<uniform> light: Light;
|
||||
|
||||
@stage(vertex)
|
||||
@vertex
|
||||
fn vs_main(model: VertexInput, instance: InstanceInput) -> VertexOutput {
|
||||
let model_matrix = mat4x4<f32>(
|
||||
instance.model_matrix_0,
|
||||
|
|
@ -50,7 +50,7 @@ fn vs_main(model: VertexInput, instance: InstanceInput) -> VertexOutput {
|
|||
return out;
|
||||
}
|
||||
|
||||
@stage(fragment)
|
||||
@fragment
|
||||
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
|
||||
let object_color = textureSample(t_diffuse, s_diffuse, in.tex_coords);
|
||||
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ struct Light {
|
|||
@group(0) @binding(0) var<uniform> camera: CameraUniform;
|
||||
@group(1) @binding(0) var<uniform> light: Light;
|
||||
|
||||
@stage(vertex)
|
||||
@vertex
|
||||
fn vs_main(model: VertexInput) -> VertexOutput {
|
||||
var out: VertexOutput;
|
||||
let world_pos = vec4<f32>(model.position + light.position.xyz, 1.0);
|
||||
|
|
@ -29,7 +29,7 @@ fn vs_main(model: VertexInput) -> VertexOutput {
|
|||
return out;
|
||||
}
|
||||
|
||||
@stage(fragment)
|
||||
@fragment
|
||||
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
|
||||
return vec4<f32>(1.0, 1.0, 1.0, 0.5);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ const Quat = zm.Quat;
|
|||
|
||||
pub const App = @This();
|
||||
|
||||
queue: gpu.Queue,
|
||||
queue: *gpu.Queue,
|
||||
cube: Cube,
|
||||
camera: Camera,
|
||||
light: Light,
|
||||
|
|
@ -124,14 +124,15 @@ pub fn update(app: *App, core: *mach.Core) !void {
|
|||
.store_op = .store,
|
||||
};
|
||||
|
||||
const render_pass_descriptor = gpu.RenderPassEncoder.Descriptor{
|
||||
.color_attachments = &.{color_attachment},
|
||||
const render_pass_descriptor = gpu.RenderPassDescriptor{
|
||||
.color_attachment_count = 1,
|
||||
.color_attachments = &[_]gpu.RenderPassColorAttachment{color_attachment},
|
||||
.depth_stencil_attachment = &.{
|
||||
.view = app.depth.?.view,
|
||||
.depth_load_op = .clear,
|
||||
.depth_store_op = .store,
|
||||
.stencil_load_op = .none,
|
||||
.stencil_store_op = .none,
|
||||
.stencil_load_op = .undef,
|
||||
.stencil_store_op = .undef,
|
||||
.depth_clear_value = 1.0,
|
||||
},
|
||||
};
|
||||
|
|
@ -193,7 +194,7 @@ const Camera = struct {
|
|||
fovy: f32,
|
||||
near: f32,
|
||||
far: f32,
|
||||
bind_group: gpu.BindGroup,
|
||||
bind_group: *gpu.BindGroup,
|
||||
buffer: Buffer,
|
||||
|
||||
const Uniform = struct {
|
||||
|
|
@ -201,7 +202,7 @@ const Camera = struct {
|
|||
mat: Mat,
|
||||
};
|
||||
|
||||
fn init(device: gpu.Device, eye: Vec, target: Vec, up: Vec, aspect: f32, fovy: f32, near: f32, far: f32) Self {
|
||||
fn init(device: *gpu.Device, eye: Vec, target: Vec, up: Vec, aspect: f32, fovy: f32, near: f32, far: f32) Self {
|
||||
var self: Self = .{
|
||||
.eye = eye,
|
||||
.target = target,
|
||||
|
|
@ -228,6 +229,7 @@ const Camera = struct {
|
|||
|
||||
const bind_group = device.createBindGroup(&gpu.BindGroup.Descriptor{
|
||||
.layout = Self.bindGroupLayout(device),
|
||||
.entry_count = 1,
|
||||
.entries = &[_]gpu.BindGroup.Entry{
|
||||
gpu.BindGroup.Entry.buffer(0, buffer.buffer, 0, buffer.size),
|
||||
},
|
||||
|
|
@ -239,14 +241,14 @@ const Camera = struct {
|
|||
return self;
|
||||
}
|
||||
|
||||
fn update(self: *Self, queue: gpu.Queue) void {
|
||||
fn update(self: *Self, queue: *gpu.Queue) void {
|
||||
const mat = self.buildViewProjMatrix();
|
||||
const uniform = .{
|
||||
.pos = self.eye,
|
||||
.mat = mat,
|
||||
};
|
||||
|
||||
queue.writeBuffer(self.buffer.buffer, 0, Uniform, &.{uniform});
|
||||
queue.writeBuffer(self.buffer.buffer, 0, &[_]Uniform{uniform});
|
||||
}
|
||||
|
||||
inline fn buildViewProjMatrix(s: *const Camera) Mat {
|
||||
|
|
@ -255,9 +257,10 @@ const Camera = struct {
|
|||
return zm.mul(view, proj);
|
||||
}
|
||||
|
||||
inline fn bindGroupLayout(device: gpu.Device) gpu.BindGroupLayout {
|
||||
inline fn bindGroupLayout(device: *gpu.Device) *gpu.BindGroupLayout {
|
||||
const visibility = .{ .vertex = true, .fragment = true };
|
||||
return device.createBindGroupLayout(&gpu.BindGroupLayout.Descriptor{
|
||||
.entry_count = 1,
|
||||
.entries = &[_]gpu.BindGroupLayout.Entry{
|
||||
gpu.BindGroupLayout.Entry.buffer(0, visibility, .uniform, false, 0),
|
||||
},
|
||||
|
|
@ -266,7 +269,7 @@ const Camera = struct {
|
|||
};
|
||||
|
||||
const Buffer = struct {
|
||||
buffer: gpu.Buffer,
|
||||
buffer: *gpu.Buffer,
|
||||
size: usize,
|
||||
len: u32 = 0,
|
||||
};
|
||||
|
|
@ -274,7 +277,7 @@ const Buffer = struct {
|
|||
const Cube = struct {
|
||||
const Self = @This();
|
||||
|
||||
pipeline: gpu.RenderPipeline,
|
||||
pipeline: *gpu.RenderPipeline,
|
||||
mesh: Buffer,
|
||||
instance: Buffer,
|
||||
texture: Texture,
|
||||
|
|
@ -326,11 +329,12 @@ const Cube = struct {
|
|||
};
|
||||
}
|
||||
|
||||
fn pipeline(core: *mach.Core) gpu.RenderPipeline {
|
||||
fn pipeline(core: *mach.Core) *gpu.RenderPipeline {
|
||||
const device = core.device;
|
||||
|
||||
const layout_descriptor = gpu.PipelineLayout.Descriptor{
|
||||
.bind_group_layouts = &.{
|
||||
.bind_group_layout_count = 3,
|
||||
.bind_group_layouts = &[_]*gpu.BindGroupLayout{
|
||||
Camera.bindGroupLayout(device),
|
||||
Texture.bindGroupLayout(device),
|
||||
Light.bindGroupLayout(device),
|
||||
|
|
@ -341,7 +345,9 @@ const Cube = struct {
|
|||
defer layout.release();
|
||||
|
||||
const shader = device.createShaderModule(&.{
|
||||
.code = .{ .wgsl = @embedFile("cube.wgsl") },
|
||||
.next_in_chain = .{ .wgsl_descriptor = &.{
|
||||
.source = @embedFile("cube.wgsl"),
|
||||
} },
|
||||
});
|
||||
defer shader.release();
|
||||
|
||||
|
|
@ -360,14 +366,15 @@ const Cube = struct {
|
|||
|
||||
const color_target = gpu.ColorTargetState{
|
||||
.format = core.swap_chain_format,
|
||||
.write_mask = gpu.ColorWriteMask.all,
|
||||
.write_mask = gpu.ColorWriteMaskFlags.all,
|
||||
.blend = &blend,
|
||||
};
|
||||
|
||||
const fragment = gpu.FragmentState{
|
||||
.module = shader,
|
||||
.entry_point = "fs_main",
|
||||
.targets = &.{color_target},
|
||||
.target_count = 1,
|
||||
.targets = &[_]gpu.ColorTargetState{color_target},
|
||||
.constants = null,
|
||||
};
|
||||
|
||||
|
|
@ -377,7 +384,8 @@ const Cube = struct {
|
|||
.vertex = .{
|
||||
.module = shader,
|
||||
.entry_point = "vs_main",
|
||||
.buffers = &.{
|
||||
.buffer_count = 2,
|
||||
.buffers = &[_]gpu.VertexBufferLayout{
|
||||
Self.vertexBufferLayout(),
|
||||
Self.instanceLayout(),
|
||||
},
|
||||
|
|
@ -392,14 +400,14 @@ const Cube = struct {
|
|||
.cull_mode = .back,
|
||||
// .cull_mode = .none,
|
||||
.topology = .triangle_strip,
|
||||
.strip_index_format = .none,
|
||||
.strip_index_format = .undef,
|
||||
},
|
||||
};
|
||||
|
||||
return device.createRenderPipeline(&descriptor);
|
||||
}
|
||||
|
||||
fn mesh(device: gpu.Device) Buffer {
|
||||
fn mesh(device: *gpu.Device) Buffer {
|
||||
// generated texture has aspect ratio of 1:2
|
||||
// `h` reflects that ratio
|
||||
// `v` sets how many times texture repeats across surface
|
||||
|
|
@ -517,7 +525,7 @@ const Brick = struct {
|
|||
const W = 12;
|
||||
const H = 6;
|
||||
|
||||
fn texture(device: gpu.Device) Texture {
|
||||
fn texture(device: *gpu.Device) Texture {
|
||||
const slice: []const u8 = &data();
|
||||
return Texture.fromData(device, W, H, u8, slice);
|
||||
}
|
||||
|
|
@ -576,10 +584,10 @@ const Brick = struct {
|
|||
const Texture = struct {
|
||||
const Self = @This();
|
||||
|
||||
texture: gpu.Texture,
|
||||
view: gpu.TextureView,
|
||||
sampler: gpu.Sampler,
|
||||
bind_group: gpu.BindGroup,
|
||||
texture: *gpu.Texture,
|
||||
view: *gpu.TextureView,
|
||||
sampler: *gpu.Sampler,
|
||||
bind_group: *gpu.BindGroup,
|
||||
|
||||
const DEPTH_FORMAT = .depth32_float;
|
||||
const FORMAT = .rgba8_unorm;
|
||||
|
|
@ -590,7 +598,7 @@ const Texture = struct {
|
|||
self.sampler.release();
|
||||
}
|
||||
|
||||
fn fromData(device: gpu.Device, width: u32, height: u32, comptime T: type, data: []const T) Self {
|
||||
fn fromData(device: *gpu.Device, width: u32, height: u32, comptime T: type, data: []const T) Self {
|
||||
const extent = gpu.Extent3D{
|
||||
.width = width,
|
||||
.height = height,
|
||||
|
|
@ -622,7 +630,7 @@ const Texture = struct {
|
|||
.mag_filter = .linear,
|
||||
.min_filter = .linear,
|
||||
.mipmap_filter = .linear,
|
||||
.compare = .none,
|
||||
.compare = .undef,
|
||||
.lod_min_clamp = 0.0,
|
||||
.lod_max_clamp = std.math.f32_max,
|
||||
.max_anisotropy = 1, // 1,2,4,8,16
|
||||
|
|
@ -637,13 +645,13 @@ const Texture = struct {
|
|||
.rows_per_image = height,
|
||||
},
|
||||
&extent,
|
||||
T,
|
||||
data,
|
||||
);
|
||||
|
||||
const bind_group_layout = Self.bindGroupLayout(device);
|
||||
const bind_group = device.createBindGroup(&gpu.BindGroup.Descriptor{
|
||||
.layout = bind_group_layout,
|
||||
.entry_count = 2,
|
||||
.entries = &[_]gpu.BindGroup.Entry{
|
||||
gpu.BindGroup.Entry.textureView(0, view),
|
||||
gpu.BindGroup.Entry.sampler(1, sampler),
|
||||
|
|
@ -658,7 +666,7 @@ const Texture = struct {
|
|||
};
|
||||
}
|
||||
|
||||
fn depth(device: gpu.Device, width: u32, height: u32) Self {
|
||||
fn depth(device: *gpu.Device, width: u32, height: u32) Self {
|
||||
const extent = gpu.Extent3D{
|
||||
.width = width,
|
||||
.height = height,
|
||||
|
|
@ -678,7 +686,7 @@ const Texture = struct {
|
|||
|
||||
const view = texture.createView(&gpu.TextureView.Descriptor{
|
||||
.aspect = .all,
|
||||
.format = .none,
|
||||
.format = .undef,
|
||||
.dimension = .dimension_2d,
|
||||
.base_array_layer = 0,
|
||||
.array_layer_count = 1,
|
||||
|
|
@ -707,10 +715,11 @@ const Texture = struct {
|
|||
};
|
||||
}
|
||||
|
||||
inline fn bindGroupLayout(device: gpu.Device) gpu.BindGroupLayout {
|
||||
inline fn bindGroupLayout(device: *gpu.Device) *gpu.BindGroupLayout {
|
||||
const visibility = .{ .fragment = true };
|
||||
const Entry = gpu.BindGroupLayout.Entry;
|
||||
return device.createBindGroupLayout(&gpu.BindGroupLayout.Descriptor{
|
||||
.entry_count = 2,
|
||||
.entries = &[_]Entry{
|
||||
Entry.texture(0, visibility, .float, .dimension_2d, false),
|
||||
Entry.sampler(1, visibility, .filtering),
|
||||
|
|
@ -724,8 +733,8 @@ const Light = struct {
|
|||
|
||||
uniform: Uniform,
|
||||
buffer: Buffer,
|
||||
bind_group: gpu.BindGroup,
|
||||
pipeline: gpu.RenderPipeline,
|
||||
bind_group: *gpu.BindGroup,
|
||||
pipeline: *gpu.RenderPipeline,
|
||||
|
||||
const Uniform = struct {
|
||||
position: Vec,
|
||||
|
|
@ -746,6 +755,7 @@ const Light = struct {
|
|||
|
||||
const bind_group = device.createBindGroup(&gpu.BindGroup.Descriptor{
|
||||
.layout = Self.bindGroupLayout(device),
|
||||
.entry_count = 1,
|
||||
.entries = &[_]gpu.BindGroup.Entry{
|
||||
gpu.BindGroup.Entry.buffer(0, buffer.buffer, 0, buffer.size),
|
||||
},
|
||||
|
|
@ -759,31 +769,33 @@ const Light = struct {
|
|||
};
|
||||
}
|
||||
|
||||
fn update(self: *Self, queue: gpu.Queue, delta: f32) void {
|
||||
fn update(self: *Self, queue: *gpu.Queue, delta: f32) void {
|
||||
const old = self.uniform;
|
||||
const new = Light.Uniform{
|
||||
.position = zm.qmul(zm.quatFromAxisAngle(vec3u(0, 1, 0), delta), old.position),
|
||||
.color = old.color,
|
||||
};
|
||||
queue.writeBuffer(self.buffer.buffer, 0, Light.Uniform, &.{new});
|
||||
queue.writeBuffer(self.buffer.buffer, 0, &[_]Light.Uniform{new});
|
||||
self.uniform = new;
|
||||
}
|
||||
|
||||
inline fn bindGroupLayout(device: gpu.Device) gpu.BindGroupLayout {
|
||||
inline fn bindGroupLayout(device: *gpu.Device) *gpu.BindGroupLayout {
|
||||
const visibility = .{ .vertex = true, .fragment = true };
|
||||
const Entry = gpu.BindGroupLayout.Entry;
|
||||
return device.createBindGroupLayout(&gpu.BindGroupLayout.Descriptor{
|
||||
.entry_count = 1,
|
||||
.entries = &[_]Entry{
|
||||
Entry.buffer(0, visibility, .uniform, false, 0),
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
fn pipeline(core: *mach.Core) gpu.RenderPipeline {
|
||||
fn pipeline(core: *mach.Core) *gpu.RenderPipeline {
|
||||
const device = core.device;
|
||||
|
||||
const layout_descriptor = gpu.PipelineLayout.Descriptor{
|
||||
.bind_group_layouts = &.{
|
||||
.bind_group_layout_count = 2,
|
||||
.bind_group_layouts = &[_]*gpu.BindGroupLayout{
|
||||
Camera.bindGroupLayout(device),
|
||||
Light.bindGroupLayout(device),
|
||||
},
|
||||
|
|
@ -792,8 +804,10 @@ const Light = struct {
|
|||
const layout = device.createPipelineLayout(&layout_descriptor);
|
||||
defer layout.release();
|
||||
|
||||
const shader = device.createShaderModule(&.{
|
||||
.code = .{ .wgsl = @embedFile("light.wgsl") },
|
||||
const shader = core.device.createShaderModule(&.{
|
||||
.next_in_chain = .{ .wgsl_descriptor = &.{
|
||||
.source = @embedFile("light.wgsl"),
|
||||
} },
|
||||
});
|
||||
defer shader.release();
|
||||
|
||||
|
|
@ -812,14 +826,15 @@ const Light = struct {
|
|||
|
||||
const color_target = gpu.ColorTargetState{
|
||||
.format = core.swap_chain_format,
|
||||
.write_mask = gpu.ColorWriteMask.all,
|
||||
.write_mask = gpu.ColorWriteMaskFlags.all,
|
||||
.blend = &blend,
|
||||
};
|
||||
|
||||
const fragment = gpu.FragmentState{
|
||||
.module = shader,
|
||||
.entry_point = "fs_main",
|
||||
.targets = &.{color_target},
|
||||
.target_count = 1,
|
||||
.targets = &[_]gpu.ColorTargetState{color_target},
|
||||
.constants = null,
|
||||
};
|
||||
|
||||
|
|
@ -829,7 +844,8 @@ const Light = struct {
|
|||
.vertex = .{
|
||||
.module = shader,
|
||||
.entry_point = "vs_main",
|
||||
.buffers = &.{
|
||||
.buffer_count = 1,
|
||||
.buffers = &[_]gpu.VertexBufferLayout{
|
||||
Cube.vertexBufferLayout(),
|
||||
},
|
||||
},
|
||||
|
|
@ -843,7 +859,7 @@ const Light = struct {
|
|||
.cull_mode = .back,
|
||||
// .cull_mode = .none,
|
||||
.topology = .triangle_strip,
|
||||
.strip_index_format = .none,
|
||||
.strip_index_format = .undef,
|
||||
},
|
||||
};
|
||||
|
||||
|
|
@ -851,7 +867,7 @@ const Light = struct {
|
|||
}
|
||||
};
|
||||
|
||||
inline fn initBuffer(device: gpu.Device, usage: gpu.BufferUsage, data: anytype) gpu.Buffer {
|
||||
inline fn initBuffer(device: *gpu.Device, usage: gpu.Buffer.UsageFlags, data: anytype) *gpu.Buffer {
|
||||
std.debug.assert(@typeInfo(@TypeOf(data)) == .Pointer);
|
||||
const T = std.meta.Elem(@TypeOf(data));
|
||||
|
||||
|
|
@ -864,7 +880,7 @@ inline fn initBuffer(device: gpu.Device, usage: gpu.BufferUsage, data: anytype)
|
|||
});
|
||||
|
||||
var mapped = buffer.getMappedRange(T, 0, data.len);
|
||||
std.mem.copy(T, mapped, data);
|
||||
std.mem.copy(T, mapped.?, data);
|
||||
buffer.unmap();
|
||||
return buffer;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,12 +4,12 @@ const std = @import("std");
|
|||
const mach = @import("mach");
|
||||
const gpu = @import("gpu");
|
||||
|
||||
compute_pipeline: gpu.ComputePipeline,
|
||||
render_pipeline: gpu.RenderPipeline,
|
||||
sprite_vertex_buffer: gpu.Buffer,
|
||||
particle_buffers: [2]gpu.Buffer,
|
||||
particle_bind_groups: [2]gpu.BindGroup,
|
||||
sim_param_buffer: gpu.Buffer,
|
||||
compute_pipeline: *gpu.ComputePipeline,
|
||||
render_pipeline: *gpu.RenderPipeline,
|
||||
sprite_vertex_buffer: *gpu.Buffer,
|
||||
particle_buffers: [2]*gpu.Buffer,
|
||||
particle_bind_groups: [2]*gpu.BindGroup,
|
||||
sim_param_buffer: *gpu.Buffer,
|
||||
frame_counter: usize,
|
||||
|
||||
pub const App = @This();
|
||||
|
|
@ -28,13 +28,17 @@ var sim_params = [_]f32{
|
|||
|
||||
pub fn init(app: *App, core: *mach.Core) !void {
|
||||
const sprite_shader_module = core.device.createShaderModule(&.{
|
||||
.next_in_chain = .{ .wgsl_descriptor = &.{
|
||||
.source = @embedFile("sprite.wgsl"),
|
||||
} },
|
||||
.label = "sprite shader module",
|
||||
.code = .{ .wgsl = @embedFile("sprite.wgsl") },
|
||||
});
|
||||
|
||||
const update_sprite_shader_module = core.device.createShaderModule(&.{
|
||||
.next_in_chain = .{ .wgsl_descriptor = &.{
|
||||
.source = @embedFile("updateSprites.wgsl"),
|
||||
} },
|
||||
.label = "update sprite shader module",
|
||||
.code = .{ .wgsl = @embedFile("updateSprites.wgsl") },
|
||||
});
|
||||
|
||||
const instanced_particles_attributes = [_]gpu.VertexAttribute{
|
||||
|
|
@ -65,6 +69,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
.vertex = .{
|
||||
.module = sprite_shader_module,
|
||||
.entry_point = "vert_main",
|
||||
.buffer_count = 2,
|
||||
.buffers = &[_]gpu.VertexBufferLayout{
|
||||
.{
|
||||
// instanced particles buffer
|
||||
|
|
@ -82,11 +87,14 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
},
|
||||
},
|
||||
},
|
||||
.fragment = &gpu.FragmentState{ .module = sprite_shader_module, .entry_point = "frag_main", .targets = &[_]gpu.ColorTargetState{
|
||||
.{
|
||||
.fragment = &gpu.FragmentState{
|
||||
.module = sprite_shader_module,
|
||||
.entry_point = "frag_main",
|
||||
.target_count = 1,
|
||||
.targets = &[_]gpu.ColorTargetState{.{
|
||||
.format = core.swap_chain_format,
|
||||
},
|
||||
}},
|
||||
},
|
||||
});
|
||||
|
||||
const compute_pipeline = core.device.createComputePipeline(&gpu.ComputePipeline.Descriptor{ .compute = gpu.ProgrammableStageDescriptor{
|
||||
|
|
@ -100,16 +108,21 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
};
|
||||
|
||||
const sprite_vertex_buffer = core.device.createBuffer(&gpu.Buffer.Descriptor{
|
||||
.usage = .{ .vertex = true, .copy_dst = true },
|
||||
.label = "sprite_vertex_buffer",
|
||||
.usage = .{ .vertex = true },
|
||||
.mapped_at_creation = true,
|
||||
.size = vert_buffer_data.len * @sizeOf(f32),
|
||||
});
|
||||
core.device.getQueue().writeBuffer(sprite_vertex_buffer, 0, f32, &vert_buffer_data);
|
||||
var vertex_mapped = sprite_vertex_buffer.getMappedRange(f32, 0, vert_buffer_data.len);
|
||||
std.mem.copy(f32, vertex_mapped.?, vert_buffer_data[0..]);
|
||||
sprite_vertex_buffer.unmap();
|
||||
|
||||
const sim_param_buffer = core.device.createBuffer(&gpu.Buffer.Descriptor{
|
||||
.label = "sim_param_buffer",
|
||||
.usage = .{ .uniform = true, .copy_dst = true },
|
||||
.size = sim_params.len * @sizeOf(f32),
|
||||
});
|
||||
core.device.getQueue().writeBuffer(sim_param_buffer, 0, f32, &sim_params);
|
||||
core.device.getQueue().writeBuffer(sim_param_buffer, 0, sim_params[0..]);
|
||||
|
||||
var initial_particle_data: [num_particle * 4]f32 = undefined;
|
||||
var rng = std.rand.DefaultPrng.init(0);
|
||||
|
|
@ -122,28 +135,35 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
initial_particle_data[4 * i + 3] = 2 * (random.float(f32) - 0.5) * 0.1;
|
||||
}
|
||||
|
||||
var particle_buffers: [2]gpu.Buffer = undefined;
|
||||
var particle_bind_groups: [2]gpu.BindGroup = undefined;
|
||||
var particle_buffers: [2]*gpu.Buffer = undefined;
|
||||
var particle_bind_groups: [2]*gpu.BindGroup = undefined;
|
||||
i = 0;
|
||||
while (i < 2) : (i += 1) {
|
||||
particle_buffers[i] = core.device.createBuffer(&gpu.Buffer.Descriptor{
|
||||
.label = "particle_buffer",
|
||||
.mapped_at_creation = true,
|
||||
.usage = .{
|
||||
.vertex = true,
|
||||
.copy_dst = true,
|
||||
.storage = true,
|
||||
},
|
||||
.size = initial_particle_data.len * @sizeOf(f32),
|
||||
});
|
||||
core.device.getQueue().writeBuffer(particle_buffers[i], 0, f32, &initial_particle_data);
|
||||
var mapped = particle_buffers[i].getMappedRange(f32, 0, initial_particle_data.len);
|
||||
std.mem.copy(f32, mapped.?, initial_particle_data[0..]);
|
||||
particle_buffers[i].unmap();
|
||||
}
|
||||
|
||||
i = 0;
|
||||
while (i < 2) : (i += 1) {
|
||||
particle_bind_groups[i] = core.device.createBindGroup(&gpu.BindGroup.Descriptor{ .layout = compute_pipeline.getBindGroupLayout(0), .entries = &[_]gpu.BindGroup.Entry{
|
||||
particle_bind_groups[i] = core.device.createBindGroup(&gpu.BindGroup.Descriptor{
|
||||
.layout = compute_pipeline.getBindGroupLayout(0),
|
||||
.entry_count = 3,
|
||||
.entries = &[_]gpu.BindGroup.Entry{
|
||||
gpu.BindGroup.Entry.buffer(0, sim_param_buffer, 0, sim_params.len * @sizeOf(f32)),
|
||||
gpu.BindGroup.Entry.buffer(1, particle_buffers[i], 0, initial_particle_data.len * @sizeOf(f32)),
|
||||
gpu.BindGroup.Entry.buffer(2, particle_buffers[(i + 1) % 2], 0, initial_particle_data.len * @sizeOf(f32)),
|
||||
} });
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
app.compute_pipeline = compute_pipeline;
|
||||
|
|
@ -167,19 +187,22 @@ pub fn update(app: *App, core: *mach.Core) !void {
|
|||
.store_op = .store,
|
||||
};
|
||||
|
||||
const render_pass_descriptor = gpu.RenderPassEncoder.Descriptor{ .color_attachments = &[_]gpu.RenderPassColorAttachment{
|
||||
const render_pass_descriptor = gpu.RenderPassDescriptor{
|
||||
.color_attachment_count = 1,
|
||||
.color_attachments = &[_]gpu.RenderPassColorAttachment{
|
||||
color_attachment,
|
||||
} };
|
||||
},
|
||||
};
|
||||
|
||||
sim_params[0] = @floatCast(f32, core.delta_time);
|
||||
core.device.getQueue().writeBuffer(app.sim_param_buffer, 0, f32, &sim_params);
|
||||
core.device.getQueue().writeBuffer(app.sim_param_buffer, 0, sim_params[0..]);
|
||||
|
||||
const command_encoder = core.device.createCommandEncoder(null);
|
||||
{
|
||||
const pass_encoder = command_encoder.beginComputePass(null);
|
||||
pass_encoder.setPipeline(app.compute_pipeline);
|
||||
pass_encoder.setBindGroup(0, app.particle_bind_groups[app.frame_counter % 2], null);
|
||||
pass_encoder.dispatch(@floatToInt(u32, @ceil(@as(f32, num_particle) / 64)), 1, 1);
|
||||
pass_encoder.dispatchWorkgroups(@floatToInt(u32, @ceil(@as(f32, num_particle) / 64)), 1, 1);
|
||||
pass_encoder.end();
|
||||
pass_encoder.release();
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
@stage(vertex)
|
||||
@vertex
|
||||
fn vert_main(@location(0) a_particlePos : vec2<f32>,
|
||||
@location(1) a_particleVel : vec2<f32>,
|
||||
@location(2) a_pos : vec2<f32>) -> @builtin(position) vec4<f32> {
|
||||
|
|
@ -9,7 +9,7 @@ fn vert_main(@location(0) a_particlePos : vec2<f32>,
|
|||
return vec4<f32>(pos + a_particlePos, 0.0, 1.0);
|
||||
}
|
||||
|
||||
@stage(fragment)
|
||||
@fragment
|
||||
fn frag_main() -> @location(0) vec4<f32> {
|
||||
return vec4<f32>(1.0, 1.0, 1.0, 1.0);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,25 +1,25 @@
|
|||
struct Particle {
|
||||
pos : vec2<f32>;
|
||||
vel : vec2<f32>;
|
||||
pos : vec2<f32>,
|
||||
vel : vec2<f32>,
|
||||
};
|
||||
struct SimParams {
|
||||
deltaT : f32;
|
||||
rule1Distance : f32;
|
||||
rule2Distance : f32;
|
||||
rule3Distance : f32;
|
||||
rule1Scale : f32;
|
||||
rule2Scale : f32;
|
||||
rule3Scale : f32;
|
||||
deltaT : f32,
|
||||
rule1Distance : f32,
|
||||
rule2Distance : f32,
|
||||
rule3Distance : f32,
|
||||
rule1Scale : f32,
|
||||
rule2Scale : f32,
|
||||
rule3Scale : f32,
|
||||
};
|
||||
struct Particles {
|
||||
particles : array<Particle>;
|
||||
particles : array<Particle>,
|
||||
};
|
||||
@binding(0) @group(0) var<uniform> params : SimParams;
|
||||
@binding(1) @group(0) var<storage, read> particlesA : Particles;
|
||||
@binding(2) @group(0) var<storage, read_write> particlesB : Particles;
|
||||
|
||||
// https://github.com/austinEng/Project6-Vulkan-Flocking/blob/master/data/shaders/computeparticles/particle.comp
|
||||
@stage(compute) @workgroup_size(64)
|
||||
@compute @workgroup_size(64)
|
||||
fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
|
||||
var index : u32 = GlobalInvocationID.x;
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
@binding(1) @group(0) var mySampler: sampler;
|
||||
@binding(2) @group(0) var myTexture: texture_2d<f32>;
|
||||
|
||||
@stage(fragment) fn main(
|
||||
@fragment fn main(
|
||||
@location(0) fragUV: vec2<f32>,
|
||||
@location(1) fragPosition: vec4<f32>
|
||||
) -> @location(0) vec4<f32> {
|
||||
|
|
|
|||
|
|
@ -24,19 +24,19 @@ const UniformBufferObject = struct {
|
|||
|
||||
var timer: mach.Timer = undefined;
|
||||
|
||||
pipeline: gpu.RenderPipeline,
|
||||
queue: gpu.Queue,
|
||||
vertex_buffer: gpu.Buffer,
|
||||
uniform_buffer: gpu.Buffer,
|
||||
bind_group: gpu.BindGroup,
|
||||
depth_texture: ?gpu.Texture,
|
||||
depth_texture_view: gpu.TextureView,
|
||||
cube_texture: gpu.Texture,
|
||||
cube_texture_view: gpu.TextureView,
|
||||
cube_texture_render: gpu.Texture,
|
||||
cube_texture_view_render: gpu.TextureView,
|
||||
sampler: gpu.Sampler,
|
||||
bgl: gpu.BindGroupLayout,
|
||||
pipeline: *gpu.RenderPipeline,
|
||||
queue: *gpu.Queue,
|
||||
vertex_buffer: *gpu.Buffer,
|
||||
uniform_buffer: *gpu.Buffer,
|
||||
bind_group: *gpu.BindGroup,
|
||||
depth_texture: ?*gpu.Texture,
|
||||
depth_texture_view: *gpu.TextureView,
|
||||
cube_texture: *gpu.Texture,
|
||||
cube_texture_view: *gpu.TextureView,
|
||||
cube_texture_render: *gpu.Texture,
|
||||
cube_texture_view_render: *gpu.TextureView,
|
||||
sampler: *gpu.Sampler,
|
||||
bgl: *gpu.BindGroupLayout,
|
||||
|
||||
pub fn init(app: *App, core: *mach.Core) !void {
|
||||
timer = try mach.Timer.start();
|
||||
|
|
@ -46,8 +46,10 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
});
|
||||
|
||||
const vs_module = core.device.createShaderModule(&.{
|
||||
.next_in_chain = .{ .wgsl_descriptor = &.{
|
||||
.source = @embedFile("vert.wgsl"),
|
||||
} },
|
||||
.label = "my vertex shader",
|
||||
.code = .{ .wgsl = @embedFile("vert.wgsl") },
|
||||
});
|
||||
|
||||
const vertex_attributes = [_]gpu.VertexAttribute{
|
||||
|
|
@ -62,8 +64,10 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
};
|
||||
|
||||
const fs_module = core.device.createShaderModule(&.{
|
||||
.next_in_chain = .{ .wgsl_descriptor = &.{
|
||||
.source = @embedFile("frag.wgsl"),
|
||||
} },
|
||||
.label = "my fragment shader",
|
||||
.code = .{ .wgsl = @embedFile("frag.wgsl") },
|
||||
});
|
||||
|
||||
const blend = gpu.BlendState{
|
||||
|
|
@ -81,12 +85,13 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
const color_target = gpu.ColorTargetState{
|
||||
.format = core.swap_chain_format,
|
||||
.blend = &blend,
|
||||
.write_mask = gpu.ColorWriteMask.all,
|
||||
.write_mask = gpu.ColorWriteMaskFlags.all,
|
||||
};
|
||||
const fragment = gpu.FragmentState{
|
||||
.module = fs_module,
|
||||
.entry_point = "main",
|
||||
.targets = &.{color_target},
|
||||
.target_count = 1,
|
||||
.targets = &[_]gpu.ColorTargetState{color_target},
|
||||
.constants = null,
|
||||
};
|
||||
|
||||
|
|
@ -95,12 +100,14 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
const bgle_textureview = gpu.BindGroupLayout.Entry.texture(2, .{ .fragment = true }, .float, .dimension_2d, false);
|
||||
const bgl = core.device.createBindGroupLayout(
|
||||
&gpu.BindGroupLayout.Descriptor{
|
||||
.entries = &.{ bgle_buffer, bgle_sampler, bgle_textureview },
|
||||
.entry_count = 3,
|
||||
.entries = &[_]gpu.BindGroupLayout.Entry{ bgle_buffer, bgle_sampler, bgle_textureview },
|
||||
},
|
||||
);
|
||||
|
||||
const bind_group_layouts = [_]gpu.BindGroupLayout{bgl};
|
||||
const bind_group_layouts = [_]*gpu.BindGroupLayout{bgl};
|
||||
const pipeline_layout = core.device.createPipelineLayout(&.{
|
||||
.bind_group_layout_count = 1,
|
||||
.bind_group_layouts = &bind_group_layouts,
|
||||
});
|
||||
|
||||
|
|
@ -115,7 +122,8 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
.vertex = .{
|
||||
.module = vs_module,
|
||||
.entry_point = "main",
|
||||
.buffers = &.{vertex_buffer_layout},
|
||||
.buffer_count = 1,
|
||||
.buffers = &[_]gpu.VertexBufferLayout{vertex_buffer_layout},
|
||||
},
|
||||
.multisample = .{
|
||||
.count = 1,
|
||||
|
|
@ -126,7 +134,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
.front_face = .ccw,
|
||||
.cull_mode = .back,
|
||||
.topology = .triangle_list,
|
||||
.strip_index_format = .none,
|
||||
.strip_index_format = .undef,
|
||||
},
|
||||
};
|
||||
|
||||
|
|
@ -136,7 +144,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
.mapped_at_creation = true,
|
||||
});
|
||||
var vertex_mapped = vertex_buffer.getMappedRange(Vertex, 0, vertices.len);
|
||||
std.mem.copy(Vertex, vertex_mapped, vertices[0..]);
|
||||
std.mem.copy(Vertex, vertex_mapped.?, vertices[0..]);
|
||||
vertex_buffer.unmap();
|
||||
|
||||
const uniform_buffer = core.device.createBuffer(&.{
|
||||
|
|
@ -179,7 +187,8 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
const bind_group = core.device.createBindGroup(
|
||||
&gpu.BindGroup.Descriptor{
|
||||
.layout = bgl,
|
||||
.entries = &.{
|
||||
.entry_count = 3,
|
||||
.entries = &[_]gpu.BindGroup.Entry{
|
||||
gpu.BindGroup.Entry.buffer(0, uniform_buffer, 0, @sizeOf(UniformBufferObject)),
|
||||
gpu.BindGroup.Entry.sampler(1, sampler),
|
||||
gpu.BindGroup.Entry.textureView(2, cube_texture_view),
|
||||
|
|
@ -254,17 +263,19 @@ pub fn update(app: *App, core: *mach.Core) !void {
|
|||
.depth_load_op = .clear,
|
||||
.depth_store_op = .store,
|
||||
.depth_clear_value = 1.0,
|
||||
.stencil_load_op = .none,
|
||||
.stencil_store_op = .none,
|
||||
.stencil_load_op = .undef,
|
||||
.stencil_store_op = .undef,
|
||||
};
|
||||
|
||||
const encoder = core.device.createCommandEncoder(null);
|
||||
const cube_render_pass_info = gpu.RenderPassEncoder.Descriptor{
|
||||
.color_attachments = &.{cube_color_attachment},
|
||||
const cube_render_pass_info = gpu.RenderPassDescriptor{
|
||||
.color_attachment_count = 1,
|
||||
.color_attachments = &[_]gpu.RenderPassColorAttachment{cube_color_attachment},
|
||||
.depth_stencil_attachment = &depth_stencil_attachment,
|
||||
};
|
||||
const render_pass_info = gpu.RenderPassEncoder.Descriptor{
|
||||
.color_attachments = &.{color_attachment},
|
||||
const render_pass_info = gpu.RenderPassDescriptor{
|
||||
.color_attachment_count = 1,
|
||||
.color_attachments = &[_]gpu.RenderPassColorAttachment{color_attachment},
|
||||
.depth_stencil_attachment = &depth_stencil_attachment,
|
||||
};
|
||||
|
||||
|
|
@ -285,7 +296,7 @@ pub fn update(app: *App, core: *mach.Core) !void {
|
|||
const ubo = UniformBufferObject{
|
||||
.mat = zm.transpose(zm.mul(zm.mul(model, view), proj)),
|
||||
};
|
||||
encoder.writeBuffer(app.uniform_buffer, 0, UniformBufferObject, &.{ubo});
|
||||
encoder.writeBuffer(app.uniform_buffer, 0, &[_]UniformBufferObject{ubo});
|
||||
}
|
||||
|
||||
const pass = encoder.beginRenderPass(&render_pass_info);
|
||||
|
|
@ -372,7 +383,8 @@ pub fn resize(app: *App, core: *mach.Core, width: u32, height: u32) !void {
|
|||
app.bind_group = core.device.createBindGroup(
|
||||
&gpu.BindGroup.Descriptor{
|
||||
.layout = app.bgl,
|
||||
.entries = &.{
|
||||
.entry_count = 3,
|
||||
.entries = &[_]gpu.BindGroup.Entry{
|
||||
gpu.BindGroup.Entry.buffer(0, app.uniform_buffer, 0, @sizeOf(UniformBufferObject)),
|
||||
gpu.BindGroup.Entry.sampler(1, app.sampler),
|
||||
gpu.BindGroup.Entry.textureView(2, app.cube_texture_view),
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@ struct VertexOut {
|
|||
@location(1) fragPosition: vec4<f32>,
|
||||
}
|
||||
|
||||
@stage(vertex) fn main(
|
||||
@vertex fn main(
|
||||
@location(0) position : vec4<f32>,
|
||||
@location(1) uv: vec2<f32>
|
||||
) -> VertexOut {
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ struct FragUniform {
|
|||
@binding(2) @group(0) var mySampler: sampler;
|
||||
@binding(3) @group(0) var myTexture: texture_2d<f32>;
|
||||
|
||||
@stage(fragment) fn main(
|
||||
@fragment fn main(
|
||||
@location(0) uv: vec2<f32>,
|
||||
@interpolate(linear) @location(1) bary: vec2<f32>,
|
||||
@interpolate(flat) @location(2) triangle_index: u32,
|
||||
|
|
|
|||
|
|
@ -18,17 +18,17 @@ pub const App = @This();
|
|||
|
||||
const AtlasRGB8 = Atlas(zigimg.color.Rgba32);
|
||||
|
||||
pipeline: gpu.RenderPipeline,
|
||||
queue: gpu.Queue,
|
||||
vertex_buffer: gpu.Buffer,
|
||||
pipeline: *gpu.RenderPipeline,
|
||||
queue: *gpu.Queue,
|
||||
vertex_buffer: *gpu.Buffer,
|
||||
vertices: std.ArrayList(draw.Vertex),
|
||||
update_vertex_buffer: bool,
|
||||
vertex_uniform_buffer: gpu.Buffer,
|
||||
vertex_uniform_buffer: *gpu.Buffer,
|
||||
update_vertex_uniform_buffer: bool,
|
||||
frag_uniform_buffer: gpu.Buffer,
|
||||
frag_uniform_buffer: *gpu.Buffer,
|
||||
fragment_uniform_list: std.ArrayList(draw.FragUniform),
|
||||
update_frag_uniform_buffer: bool,
|
||||
bind_group: gpu.BindGroup,
|
||||
bind_group: *gpu.BindGroup,
|
||||
texture_atlas_data: AtlasRGB8,
|
||||
|
||||
pub fn init(app: *App, core: *mach.Core) !void {
|
||||
|
|
@ -110,7 +110,6 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
&.{ .texture = texture },
|
||||
&data_layout,
|
||||
&.{ .width = app.texture_atlas_data.size, .height = app.texture_atlas_data.size },
|
||||
zigimg.color.Rgba32,
|
||||
app.texture_atlas_data.data,
|
||||
);
|
||||
|
||||
|
|
@ -130,13 +129,17 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
// try draw.circle(app, .{ window_width / 2, window_height / 2 }, window_height / 2 - 10, .{ 0, 0.5, 0.75, 1.0 }, white_texture_uv_data);
|
||||
|
||||
const vs_module = core.device.createShaderModule(&.{
|
||||
.next_in_chain = .{ .wgsl_descriptor = &.{
|
||||
.source = @embedFile("vert.wgsl"),
|
||||
} },
|
||||
.label = "my vertex shader",
|
||||
.code = .{ .wgsl = @embedFile("vert.wgsl") },
|
||||
});
|
||||
|
||||
const fs_module = core.device.createShaderModule(&.{
|
||||
.next_in_chain = .{ .wgsl_descriptor = &.{
|
||||
.source = @embedFile("frag.wgsl"),
|
||||
} },
|
||||
.label = "my fragment shader",
|
||||
.code = .{ .wgsl = @embedFile("frag.wgsl") },
|
||||
});
|
||||
|
||||
const blend = gpu.BlendState{
|
||||
|
|
@ -155,12 +158,13 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
const color_target = gpu.ColorTargetState{
|
||||
.format = core.swap_chain_format,
|
||||
.blend = &blend,
|
||||
.write_mask = gpu.ColorWriteMask.all,
|
||||
.write_mask = gpu.ColorWriteMaskFlags.all,
|
||||
};
|
||||
const fragment = gpu.FragmentState{
|
||||
.module = fs_module,
|
||||
.entry_point = "main",
|
||||
.targets = &.{color_target},
|
||||
.target_count = 1,
|
||||
.targets = &[_]gpu.ColorTargetState{color_target},
|
||||
.constants = null,
|
||||
};
|
||||
|
||||
|
|
@ -170,11 +174,13 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
const tbgle = gpu.BindGroupLayout.Entry.texture(3, .{ .fragment = true }, .float, .dimension_2d, false);
|
||||
const bgl = core.device.createBindGroupLayout(
|
||||
&gpu.BindGroupLayout.Descriptor{
|
||||
.entries = &.{ vbgle, fbgle, sbgle, tbgle },
|
||||
.entry_count = 4,
|
||||
.entries = &[_]gpu.BindGroupLayout.Entry{ vbgle, fbgle, sbgle, tbgle },
|
||||
},
|
||||
);
|
||||
const bind_group_layouts = [_]gpu.BindGroupLayout{bgl};
|
||||
const bind_group_layouts = [_]*gpu.BindGroupLayout{bgl};
|
||||
const pipeline_layout = core.device.createPipelineLayout(&.{
|
||||
.bind_group_layout_count = 1,
|
||||
.bind_group_layouts = &bind_group_layouts,
|
||||
});
|
||||
|
||||
|
|
@ -185,7 +191,8 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
.vertex = .{
|
||||
.module = vs_module,
|
||||
.entry_point = "main",
|
||||
.buffers = &.{draw.VERTEX_BUFFER_LAYOUT},
|
||||
.buffer_count = 1,
|
||||
.buffers = &[_]gpu.VertexBufferLayout{draw.VERTEX_BUFFER_LAYOUT},
|
||||
},
|
||||
.multisample = .{
|
||||
.count = 1,
|
||||
|
|
@ -196,7 +203,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
.front_face = .ccw,
|
||||
.cull_mode = .none,
|
||||
.topology = .triangle_list,
|
||||
.strip_index_format = .none,
|
||||
.strip_index_format = .undef,
|
||||
},
|
||||
};
|
||||
|
||||
|
|
@ -226,7 +233,8 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
const bind_group = core.device.createBindGroup(
|
||||
&gpu.BindGroup.Descriptor{
|
||||
.layout = bgl,
|
||||
.entries = &.{
|
||||
.entry_count = 4,
|
||||
.entries = &[_]gpu.BindGroup.Entry{
|
||||
gpu.BindGroup.Entry.buffer(0, vertex_uniform_buffer, 0, @sizeOf(draw.VertexUniform)),
|
||||
gpu.BindGroup.Entry.buffer(1, frag_uniform_buffer, 0, @sizeOf(draw.FragUniform) * app.vertices.items.len / 3),
|
||||
gpu.BindGroup.Entry.sampler(2, sampler),
|
||||
|
|
@ -282,21 +290,22 @@ pub fn update(app: *App, core: *mach.Core) !void {
|
|||
};
|
||||
|
||||
const encoder = core.device.createCommandEncoder(null);
|
||||
const render_pass_info = gpu.RenderPassEncoder.Descriptor{
|
||||
.color_attachments = &.{color_attachment},
|
||||
const render_pass_info = gpu.RenderPassDescriptor{
|
||||
.color_attachment_count = 1,
|
||||
.color_attachments = &[_]gpu.RenderPassColorAttachment{color_attachment},
|
||||
};
|
||||
|
||||
{
|
||||
if (app.update_vertex_buffer) {
|
||||
encoder.writeBuffer(app.vertex_buffer, 0, draw.Vertex, app.vertices.items);
|
||||
encoder.writeBuffer(app.vertex_buffer, 0, app.vertices.items);
|
||||
app.update_vertex_buffer = false;
|
||||
}
|
||||
if (app.update_frag_uniform_buffer) {
|
||||
encoder.writeBuffer(app.frag_uniform_buffer, 0, draw.FragUniform, app.fragment_uniform_list.items);
|
||||
encoder.writeBuffer(app.frag_uniform_buffer, 0, app.fragment_uniform_list.items);
|
||||
app.update_frag_uniform_buffer = false;
|
||||
}
|
||||
if (app.update_vertex_uniform_buffer) {
|
||||
encoder.writeBuffer(app.vertex_uniform_buffer, 0, draw.VertexUniform, &.{try getVertexUniformBufferObject(core)});
|
||||
encoder.writeBuffer(app.vertex_uniform_buffer, 0, &[_]draw.VertexUniform{try getVertexUniformBufferObject(core)});
|
||||
app.update_vertex_uniform_buffer = false;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@ struct VertexOut {
|
|||
@interpolate(flat) @location(2) triangle_index: u32,
|
||||
}
|
||||
|
||||
@stage(vertex) fn main(
|
||||
@vertex fn main(
|
||||
@builtin(vertex_index) vertex_index: u32,
|
||||
@location(0) position: vec4<f32>,
|
||||
@location(1) uv: vec2<f32>,
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ struct Flip {
|
|||
|
||||
var<workgroup> tile : array<array<vec3<f32>, 128>, 4>;
|
||||
|
||||
@stage(compute) @workgroup_size(32, 1, 1)
|
||||
@compute @workgroup_size(32, 1, 1)
|
||||
fn main(
|
||||
@builtin(workgroup_id) WorkGroupID : vec3<u32>,
|
||||
@builtin(local_invocation_id) LocalInvocationID : vec3<u32>
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ struct VertexOutput {
|
|||
@location(0) fragUV : vec2<f32>,
|
||||
}
|
||||
|
||||
@stage(vertex)
|
||||
@vertex
|
||||
fn vert_main(@builtin(vertex_index) VertexIndex : u32) -> VertexOutput {
|
||||
var pos = array<vec2<f32>, 6>(
|
||||
vec2<f32>( 1.0, 1.0),
|
||||
|
|
@ -32,7 +32,7 @@ fn vert_main(@builtin(vertex_index) VertexIndex : u32) -> VertexOutput {
|
|||
return output;
|
||||
}
|
||||
|
||||
@stage(fragment)
|
||||
@fragment
|
||||
fn frag_main(@location(0) fragUV : vec2<f32>) -> @location(0) vec4<f32> {
|
||||
return textureSample(myTexture, mySampler, fragUV);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,17 +3,17 @@ const mach = @import("mach");
|
|||
const gpu = @import("gpu");
|
||||
const zigimg = @import("zigimg");
|
||||
|
||||
queue: gpu.Queue,
|
||||
blur_pipeline: gpu.ComputePipeline,
|
||||
fullscreen_quad_pipeline: gpu.RenderPipeline,
|
||||
cube_texture: gpu.Texture,
|
||||
textures: [2]gpu.Texture,
|
||||
blur_params_buffer: gpu.Buffer,
|
||||
compute_constants: gpu.BindGroup,
|
||||
compute_bind_group_0: gpu.BindGroup,
|
||||
compute_bind_group_1: gpu.BindGroup,
|
||||
compute_bind_group_2: gpu.BindGroup,
|
||||
show_result_bind_group: gpu.BindGroup,
|
||||
queue: *gpu.Queue,
|
||||
blur_pipeline: *gpu.ComputePipeline,
|
||||
fullscreen_quad_pipeline: *gpu.RenderPipeline,
|
||||
cube_texture: *gpu.Texture,
|
||||
textures: [2]*gpu.Texture,
|
||||
blur_params_buffer: *gpu.Buffer,
|
||||
compute_constants: *gpu.BindGroup,
|
||||
compute_bind_group_0: *gpu.BindGroup,
|
||||
compute_bind_group_1: *gpu.BindGroup,
|
||||
compute_bind_group_2: *gpu.BindGroup,
|
||||
show_result_bind_group: *gpu.BindGroup,
|
||||
img_size: gpu.Extent3D,
|
||||
|
||||
pub const App = @This();
|
||||
|
|
@ -35,8 +35,10 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
});
|
||||
|
||||
const blur_shader_module = core.device.createShaderModule(&.{
|
||||
.next_in_chain = .{ .wgsl_descriptor = &.{
|
||||
.source = @embedFile("blur.wgsl"),
|
||||
} },
|
||||
.label = "blur shader module",
|
||||
.code = .{ .wgsl = @embedFile("blur.wgsl") },
|
||||
});
|
||||
|
||||
const blur_pipeline_descriptor = gpu.ComputePipeline.Descriptor{
|
||||
|
|
@ -49,13 +51,17 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
const blur_pipeline = core.device.createComputePipeline(&blur_pipeline_descriptor);
|
||||
|
||||
const fullscreen_quad_vs_module = core.device.createShaderModule(&.{
|
||||
.next_in_chain = .{ .wgsl_descriptor = &.{
|
||||
.source = @embedFile("fullscreen_textured_quad.wgsl"),
|
||||
} },
|
||||
.label = "fullscreen quad vertex shader",
|
||||
.code = .{ .wgsl = @embedFile("fullscreen_textured_quad.wgsl") },
|
||||
});
|
||||
|
||||
const fullscreen_quad_fs_module = core.device.createShaderModule(&.{
|
||||
.next_in_chain = .{ .wgsl_descriptor = &.{
|
||||
.source = @embedFile("fullscreen_textured_quad.wgsl"),
|
||||
} },
|
||||
.label = "fullscreen quad fragment shader",
|
||||
.code = .{ .wgsl = @embedFile("fullscreen_textured_quad.wgsl") },
|
||||
});
|
||||
|
||||
const blend = gpu.BlendState{
|
||||
|
|
@ -74,13 +80,14 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
const color_target = gpu.ColorTargetState{
|
||||
.format = core.swap_chain_format,
|
||||
.blend = &blend,
|
||||
.write_mask = gpu.ColorWriteMask.all,
|
||||
.write_mask = gpu.ColorWriteMaskFlags.all,
|
||||
};
|
||||
|
||||
const fragment_state = gpu.FragmentState{
|
||||
.module = fullscreen_quad_fs_module,
|
||||
.entry_point = "frag_main",
|
||||
.targets = &.{color_target},
|
||||
.target_count = 1,
|
||||
.targets = &[_]gpu.ColorTargetState{color_target},
|
||||
.constants = null,
|
||||
};
|
||||
|
||||
|
|
@ -122,16 +129,16 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
};
|
||||
|
||||
switch (img.pixels.?) {
|
||||
.Rgba32 => |pixels| queue.writeTexture(&.{ .texture = cube_texture }, &data_layout, &img_size, zigimg.color.Rgba32, pixels),
|
||||
.Rgba32 => |pixels| queue.writeTexture(&.{ .texture = cube_texture }, &data_layout, &img_size, pixels),
|
||||
.Rgb24 => |pixels| {
|
||||
const data = try rgb24ToRgba32(core.allocator, pixels);
|
||||
defer data.deinit(core.allocator);
|
||||
queue.writeTexture(&.{ .texture = cube_texture }, &data_layout, &img_size, zigimg.color.Rgba32, data.Rgba32);
|
||||
queue.writeTexture(&.{ .texture = cube_texture }, &data_layout, &img_size, data.Rgba32);
|
||||
},
|
||||
else => @panic("unsupported image color format"),
|
||||
}
|
||||
|
||||
var textures: [2]gpu.Texture = undefined;
|
||||
var textures: [2]*gpu.Texture = undefined;
|
||||
for (textures) |_, i| {
|
||||
textures[i] = core.device.createTexture(&.{
|
||||
.size = img_size,
|
||||
|
|
@ -146,7 +153,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
|
||||
// the shader blurs the input texture in one direction,
|
||||
// depending on whether flip value is 0 or 1
|
||||
var flip: [2]gpu.Buffer = undefined;
|
||||
var flip: [2]*gpu.Buffer = undefined;
|
||||
for (flip) |_, i| {
|
||||
const buffer = core.device.createBuffer(&.{
|
||||
.usage = .{ .uniform = true },
|
||||
|
|
@ -155,7 +162,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
});
|
||||
|
||||
const buffer_mapped = buffer.getMappedRange(u32, 0, 1);
|
||||
buffer_mapped[0] = @intCast(u32, i);
|
||||
buffer_mapped.?[0] = @intCast(u32, i);
|
||||
buffer.unmap();
|
||||
|
||||
flip[i] = buffer;
|
||||
|
|
@ -168,6 +175,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
|
||||
const compute_constants = core.device.createBindGroup(&gpu.BindGroup.Descriptor{
|
||||
.layout = blur_pipeline.getBindGroupLayout(0),
|
||||
.entry_count = 2,
|
||||
.entries = &[_]gpu.BindGroup.Entry{
|
||||
gpu.BindGroup.Entry.sampler(0, sampler),
|
||||
gpu.BindGroup.Entry.buffer(1, blur_params_buffer, 0, 8),
|
||||
|
|
@ -176,6 +184,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
|
||||
const compute_bind_group_0 = core.device.createBindGroup(&gpu.BindGroup.Descriptor{
|
||||
.layout = blur_pipeline.getBindGroupLayout(1),
|
||||
.entry_count = 3,
|
||||
.entries = &[_]gpu.BindGroup.Entry{
|
||||
gpu.BindGroup.Entry.textureView(1, cube_texture.createView(&gpu.TextureView.Descriptor{})),
|
||||
gpu.BindGroup.Entry.textureView(2, textures[0].createView(&gpu.TextureView.Descriptor{})),
|
||||
|
|
@ -185,6 +194,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
|
||||
const compute_bind_group_1 = core.device.createBindGroup(&gpu.BindGroup.Descriptor{
|
||||
.layout = blur_pipeline.getBindGroupLayout(1),
|
||||
.entry_count = 3,
|
||||
.entries = &[_]gpu.BindGroup.Entry{
|
||||
gpu.BindGroup.Entry.textureView(1, textures[0].createView(&gpu.TextureView.Descriptor{})),
|
||||
gpu.BindGroup.Entry.textureView(2, textures[1].createView(&gpu.TextureView.Descriptor{})),
|
||||
|
|
@ -194,6 +204,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
|
||||
const compute_bind_group_2 = core.device.createBindGroup(&gpu.BindGroup.Descriptor{
|
||||
.layout = blur_pipeline.getBindGroupLayout(1),
|
||||
.entry_count = 3,
|
||||
.entries = &[_]gpu.BindGroup.Entry{
|
||||
gpu.BindGroup.Entry.textureView(1, textures[1].createView(&gpu.TextureView.Descriptor{})),
|
||||
gpu.BindGroup.Entry.textureView(2, textures[0].createView(&gpu.TextureView.Descriptor{})),
|
||||
|
|
@ -203,6 +214,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
|
||||
const show_result_bind_group = core.device.createBindGroup(&gpu.BindGroup.Descriptor{
|
||||
.layout = fullscreen_quad_pipeline.getBindGroupLayout(0),
|
||||
.entry_count = 2,
|
||||
.entries = &[_]gpu.BindGroup.Entry{
|
||||
gpu.BindGroup.Entry.sampler(0, sampler),
|
||||
gpu.BindGroup.Entry.textureView(1, textures[1].createView(&gpu.TextureView.Descriptor{})),
|
||||
|
|
@ -210,7 +222,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
});
|
||||
|
||||
const blur_params_buffer_data = [_]u32{ filter_size, block_dimension };
|
||||
queue.writeBuffer(blur_params_buffer, 0, u32, &blur_params_buffer_data);
|
||||
queue.writeBuffer(blur_params_buffer, 0, &blur_params_buffer_data);
|
||||
|
||||
app.queue = queue;
|
||||
app.blur_pipeline = blur_pipeline;
|
||||
|
|
@ -239,18 +251,18 @@ pub fn update(app: *App, core: *mach.Core) !void {
|
|||
const width: u32 = @intCast(u32, app.img_size.width);
|
||||
const height: u32 = @intCast(u32, app.img_size.height);
|
||||
compute_pass.setBindGroup(1, app.compute_bind_group_0, &.{});
|
||||
compute_pass.dispatch(try std.math.divCeil(u32, width, block_dimension), try std.math.divCeil(u32, height, batch[1]), 1);
|
||||
compute_pass.dispatchWorkgroups(try std.math.divCeil(u32, width, block_dimension), try std.math.divCeil(u32, height, batch[1]), 1);
|
||||
|
||||
compute_pass.setBindGroup(1, app.compute_bind_group_1, &.{});
|
||||
compute_pass.dispatch(try std.math.divCeil(u32, height, block_dimension), try std.math.divCeil(u32, width, batch[1]), 1);
|
||||
compute_pass.dispatchWorkgroups(try std.math.divCeil(u32, height, block_dimension), try std.math.divCeil(u32, width, batch[1]), 1);
|
||||
|
||||
var i: u32 = 0;
|
||||
while (i < iterations - 1) : (i += 1) {
|
||||
compute_pass.setBindGroup(1, app.compute_bind_group_2, &.{});
|
||||
compute_pass.dispatch(try std.math.divCeil(u32, width, block_dimension), try std.math.divCeil(u32, height, batch[1]), 1);
|
||||
compute_pass.dispatchWorkgroups(try std.math.divCeil(u32, width, block_dimension), try std.math.divCeil(u32, height, batch[1]), 1);
|
||||
|
||||
compute_pass.setBindGroup(1, app.compute_bind_group_1, &.{});
|
||||
compute_pass.dispatch(try std.math.divCeil(u32, height, block_dimension), try std.math.divCeil(u32, width, batch[1]), 1);
|
||||
compute_pass.dispatchWorkgroups(try std.math.divCeil(u32, height, block_dimension), try std.math.divCeil(u32, width, batch[1]), 1);
|
||||
}
|
||||
compute_pass.end();
|
||||
|
||||
|
|
@ -262,9 +274,12 @@ pub fn update(app: *App, core: *mach.Core) !void {
|
|||
.store_op = .store,
|
||||
};
|
||||
|
||||
const render_pass_descriptor = gpu.RenderPassEncoder.Descriptor{ .color_attachments = &[_]gpu.RenderPassColorAttachment{
|
||||
const render_pass_descriptor = gpu.RenderPassDescriptor{
|
||||
.color_attachment_count = 1,
|
||||
.color_attachments = &[_]gpu.RenderPassColorAttachment{
|
||||
color_attachment,
|
||||
} };
|
||||
},
|
||||
};
|
||||
|
||||
const render_pass = encoder.beginRenderPass(&render_pass_descriptor);
|
||||
render_pass.setPipeline(app.fullscreen_quad_pipeline);
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
@stage(fragment) fn main(
|
||||
@fragment fn main(
|
||||
@location(0) fragUV: vec2<f32>,
|
||||
@location(1) fragPosition: vec4<f32>
|
||||
) -> @location(0) vec4<f32> {
|
||||
|
|
|
|||
|
|
@ -12,11 +12,11 @@ const UniformBufferObject = struct {
|
|||
|
||||
var timer: mach.Timer = undefined;
|
||||
|
||||
pipeline: gpu.RenderPipeline,
|
||||
queue: gpu.Queue,
|
||||
vertex_buffer: gpu.Buffer,
|
||||
uniform_buffer: gpu.Buffer,
|
||||
bind_group: gpu.BindGroup,
|
||||
pipeline: *gpu.RenderPipeline,
|
||||
queue: *gpu.Queue,
|
||||
vertex_buffer: *gpu.Buffer,
|
||||
uniform_buffer: *gpu.Buffer,
|
||||
bind_group: *gpu.BindGroup,
|
||||
|
||||
pub const App = @This();
|
||||
|
||||
|
|
@ -28,8 +28,10 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
});
|
||||
|
||||
const vs_module = core.device.createShaderModule(&.{
|
||||
.next_in_chain = .{ .wgsl_descriptor = &.{
|
||||
.source = @embedFile("vert.wgsl"),
|
||||
} },
|
||||
.label = "my vertex shader",
|
||||
.code = .{ .wgsl = @embedFile("vert.wgsl") },
|
||||
});
|
||||
|
||||
const vertex_attributes = [_]gpu.VertexAttribute{
|
||||
|
|
@ -44,31 +46,36 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
};
|
||||
|
||||
const fs_module = core.device.createShaderModule(&.{
|
||||
.next_in_chain = .{ .wgsl_descriptor = &.{
|
||||
.source = @embedFile("frag.wgsl"),
|
||||
} },
|
||||
.label = "my fragment shader",
|
||||
.code = .{ .wgsl = @embedFile("frag.wgsl") },
|
||||
});
|
||||
|
||||
const color_target = gpu.ColorTargetState{
|
||||
.format = core.swap_chain_format,
|
||||
.blend = null,
|
||||
.write_mask = gpu.ColorWriteMask.all,
|
||||
.write_mask = gpu.ColorWriteMaskFlags.all,
|
||||
};
|
||||
const fragment = gpu.FragmentState{
|
||||
.module = fs_module,
|
||||
.entry_point = "main",
|
||||
.targets = &.{color_target},
|
||||
.target_count = 1,
|
||||
.targets = &[_]gpu.ColorTargetState{color_target},
|
||||
.constants = null,
|
||||
};
|
||||
|
||||
const bgle = gpu.BindGroupLayout.Entry.buffer(0, .{ .vertex = true }, .uniform, true, 0);
|
||||
const bgl = core.device.createBindGroupLayout(
|
||||
&gpu.BindGroupLayout.Descriptor{
|
||||
.entries = &.{bgle},
|
||||
.entry_count = 1,
|
||||
.entries = &[_]gpu.BindGroupLayout.Entry{bgle},
|
||||
},
|
||||
);
|
||||
|
||||
const bind_group_layouts = [_]gpu.BindGroupLayout{bgl};
|
||||
const bind_group_layouts = [_]*gpu.BindGroupLayout{bgl};
|
||||
const pipeline_layout = core.device.createPipelineLayout(&.{
|
||||
.bind_group_layout_count = 1,
|
||||
.bind_group_layouts = &bind_group_layouts,
|
||||
});
|
||||
|
||||
|
|
@ -79,7 +86,8 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
.vertex = .{
|
||||
.module = vs_module,
|
||||
.entry_point = "main",
|
||||
.buffers = &.{vertex_buffer_layout},
|
||||
.buffer_count = 1,
|
||||
.buffers = &[_]gpu.VertexBufferLayout{vertex_buffer_layout},
|
||||
},
|
||||
.multisample = .{
|
||||
.count = 1,
|
||||
|
|
@ -90,7 +98,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
.front_face = .ccw,
|
||||
.cull_mode = .back,
|
||||
.topology = .triangle_list,
|
||||
.strip_index_format = .none,
|
||||
.strip_index_format = .undef,
|
||||
},
|
||||
};
|
||||
|
||||
|
|
@ -100,7 +108,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
.mapped_at_creation = true,
|
||||
});
|
||||
var vertex_mapped = vertex_buffer.getMappedRange(Vertex, 0, vertices.len);
|
||||
std.mem.copy(Vertex, vertex_mapped, vertices[0..]);
|
||||
std.mem.copy(Vertex, vertex_mapped.?, vertices[0..]);
|
||||
vertex_buffer.unmap();
|
||||
|
||||
const x_count = 4;
|
||||
|
|
@ -115,7 +123,8 @@ pub fn init(app: *App, core: *mach.Core) !void {
|
|||
const bind_group = core.device.createBindGroup(
|
||||
&gpu.BindGroup.Descriptor{
|
||||
.layout = bgl,
|
||||
.entries = &.{
|
||||
.entry_count = 1,
|
||||
.entries = &[_]gpu.BindGroup.Entry{
|
||||
gpu.BindGroup.Entry.buffer(0, uniform_buffer, 0, @sizeOf(UniformBufferObject) * num_instances),
|
||||
},
|
||||
},
|
||||
|
|
@ -160,8 +169,9 @@ pub fn update(app: *App, core: *mach.Core) !void {
|
|||
};
|
||||
|
||||
const encoder = core.device.createCommandEncoder(null);
|
||||
const render_pass_info = gpu.RenderPassEncoder.Descriptor{
|
||||
.color_attachments = &.{color_attachment},
|
||||
const render_pass_info = gpu.RenderPassDescriptor{
|
||||
.color_attachment_count = 1,
|
||||
.color_attachments = &[_]gpu.RenderPassColorAttachment{color_attachment},
|
||||
};
|
||||
|
||||
{
|
||||
|
|
@ -191,7 +201,7 @@ pub fn update(app: *App, core: *mach.Core) !void {
|
|||
m += 1;
|
||||
}
|
||||
}
|
||||
encoder.writeBuffer(app.uniform_buffer, 0, UniformBufferObject, &ubos);
|
||||
encoder.writeBuffer(app.uniform_buffer, 0, &ubos);
|
||||
}
|
||||
|
||||
const pass = encoder.beginRenderPass(&render_pass_info);
|
||||
|
|
|
|||
|
|
@ -1,12 +1,12 @@
|
|||
@binding(0) @group(0) var<uniform> ubos : array<mat4x4<f32>, 16>;
|
||||
|
||||
struct VertexOutput {
|
||||
@builtin(position) position_clip : vec4<f32>;
|
||||
@location(0) fragUV : vec2<f32>;
|
||||
@location(1) fragPosition: vec4<f32>;
|
||||
@builtin(position) position_clip : vec4<f32>,
|
||||
@location(0) fragUV : vec2<f32>,
|
||||
@location(1) fragPosition: vec4<f32>,
|
||||
};
|
||||
|
||||
@stage(vertex)
|
||||
@vertex
|
||||
fn main(@builtin(instance_index) instanceIdx : u32,
|
||||
@location(0) position : vec4<f32>,
|
||||
@location(1) uv : vec2<f32>) -> VertexOutput {
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
@group(0) @binding(0) var<storage, write> output: array<f32>;
|
||||
@group(0) @binding(0) var<storage, read_write> output: array<f32>;
|
||||
|
||||
@stage(compute) @workgroup_size(64, 1, 1)
|
||||
@compute @workgroup_size(64, 1, 1)
|
||||
fn main(
|
||||
@builtin(global_invocation_id)
|
||||
global_id : vec3<u32>,
|
||||
|
|
|
|||
|
|
@@ -21,8 +21,10 @@ pub fn init(_: *App, core: *mach.Core) !void {
});

const compute_module = core.device.createShaderModule(&.{
.next_in_chain = .{ .wgsl_descriptor = &.{
.source = @embedFile("main.wgsl"),
} },
.label = "shader module",
.code = .{ .wgsl = @embedFile("main.wgsl") },
});

const compute_pipeline = core.device.createComputePipeline(&gpu.ComputePipeline.Descriptor{ .compute = gpu.ProgrammableStageDescriptor{

@@ -32,6 +34,7 @@ pub fn init(_: *App, core: *mach.Core) !void {
const compute_bind_group = core.device.createBindGroup(&gpu.BindGroup.Descriptor{
.layout = compute_pipeline.getBindGroupLayout(0),
.entry_count = 1,
.entries = &[_]gpu.BindGroup.Entry{
gpu.BindGroup.Entry.buffer(0, output, 0, buffer_size),
},

@@ -44,7 +47,7 @@ pub fn init(_: *App, core: *mach.Core) !void {
const compute_pass = encoder.beginComputePass(null);
compute_pass.setPipeline(compute_pipeline);
compute_pass.setBindGroup(0, compute_bind_group, &.{});
compute_pass.dispatch(try std.math.divCeil(u32, buffer_size, workgroup_size), 1, 1);
compute_pass.dispatchWorkgroups(try std.math.divCeil(u32, buffer_size, workgroup_size), 1, 1);
compute_pass.end();

encoder.copyBufferToBuffer(output, 0, staging, 0, buffer_size);

@@ -53,16 +56,16 @@ pub fn init(_: *App, core: *mach.Core) !void {
encoder.release();

var response: gpu.Buffer.MapAsyncStatus = undefined;
var callback = gpu.Buffer.MapCallback.init(*gpu.Buffer.MapAsyncStatus, &response, (struct {
pub fn callback(ctx: *gpu.Buffer.MapAsyncStatus, callback_response: gpu.Buffer.MapAsyncStatus) void {
ctx.* = callback_response;
const callback = (struct {
pub inline fn callback(ctx: *gpu.Buffer.MapAsyncStatus, status: gpu.Buffer.MapAsyncStatus) void {
ctx.* = status;
}
}).callback);
}).callback;

var queue = core.device.getQueue();
queue.submit(&.{command});

staging.mapAsync(gpu.Buffer.MapMode.read, 0, buffer_size, &callback);
staging.mapAsync(.{ .read = true }, 0, buffer_size, &response, callback);
while (true) {
if (response == gpu.Buffer.MapAsyncStatus.success) {
break;

@@ -72,7 +75,7 @@ pub fn init(_: *App, core: *mach.Core) !void {
}

const staging_mapped = staging.getConstMappedRange(f32, 0, buffer_size / @sizeOf(f32));
for (staging_mapped) |v| {
for (staging_mapped.?) |v| {
std.debug.print("{d} ", .{v});
}
std.debug.print("\n", .{});
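Taken together, the compute hunks above show the new dispatch and buffer-mapping flow: `dispatch` becomes `dispatchWorkgroups`, and `mapAsync` now takes a `MapMode` flags literal, a context pointer, and a plain callback instead of a `gpu.Buffer.MapCallback` wrapper. A minimal sketch assembled only from the calls visible above (the `staging` buffer and `buffer_size` are the example's own names):

    const callback = (struct {
        pub inline fn callback(ctx: *gpu.Buffer.MapAsyncStatus, status: gpu.Buffer.MapAsyncStatus) void {
            ctx.* = status; // record the map result so the caller can poll it
        }
    }).callback;

    var response: gpu.Buffer.MapAsyncStatus = undefined;
    staging.mapAsync(.{ .read = true }, 0, buffer_size, &response, callback);
    while (response != gpu.Buffer.MapAsyncStatus.success) {
        // the example keeps polling until the callback above has fired
    }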
@@ -1,4 +1,4 @@
@stage(fragment) fn main(
@fragment fn main(
@location(0) fragUV: vec2<f32>,
@location(1) fragPosition: vec4<f32>
) -> @location(0) vec4<f32> {
@@ -14,11 +14,11 @@ const UniformBufferObject = struct {
var timer: mach.Timer = undefined;

pipeline: gpu.RenderPipeline,
queue: gpu.Queue,
vertex_buffer: gpu.Buffer,
uniform_buffer: gpu.Buffer,
bind_group: gpu.BindGroup,
pipeline: *gpu.RenderPipeline,
queue: *gpu.Queue,
vertex_buffer: *gpu.Buffer,
uniform_buffer: *gpu.Buffer,
bind_group: *gpu.BindGroup,

pub fn init(app: *App, core: *mach.Core) !void {
timer = try mach.Timer.start();

@@ -28,8 +28,10 @@ pub fn init(app: *App, core: *mach.Core) !void {
});

const vs_module = core.device.createShaderModule(&.{
.next_in_chain = .{ .wgsl_descriptor = &.{
.source = @embedFile("vert.wgsl"),
} },
.label = "my vertex shader",
.code = .{ .wgsl = @embedFile("vert.wgsl") },
});

const vertex_attributes = [_]gpu.VertexAttribute{

@@ -44,8 +46,10 @@ pub fn init(app: *App, core: *mach.Core) !void {
};

const fs_module = core.device.createShaderModule(&.{
.next_in_chain = .{ .wgsl_descriptor = &.{
.source = @embedFile("frag.wgsl"),
} },
.label = "my fragment shader",
.code = .{ .wgsl = @embedFile("frag.wgsl") },
});

const blend = gpu.BlendState{
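Both shader modules above are built the same way under the updated API: the chained `wgsl_descriptor` is dropped and the WGSL source goes straight into `.code`. A minimal sketch using only the fields shown in these hunks (the label text is illustrative):

    const module = core.device.createShaderModule(&.{
        .label = "my shader",                          // optional debug label
        .code = .{ .wgsl = @embedFile("vert.wgsl") },  // WGSL source embedded at build time
    });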
@@ -63,24 +67,27 @@ pub fn init(app: *App, core: *mach.Core) !void {
const color_target = gpu.ColorTargetState{
.format = core.swap_chain_format,
.blend = &blend,
.write_mask = gpu.ColorWriteMask.all,
.write_mask = gpu.ColorWriteMaskFlags.all,
};
const fragment = gpu.FragmentState{
.module = fs_module,
.entry_point = "main",
.targets = &.{color_target},
.target_count = 1,
.targets = &[_]gpu.ColorTargetState{color_target},
.constants = null,
};

const bgle = gpu.BindGroupLayout.Entry.buffer(0, .{ .vertex = true }, .uniform, true, 0);
const bgl = core.device.createBindGroupLayout(
&gpu.BindGroupLayout.Descriptor{
.entries = &.{bgle},
.entry_count = 1,
.entries = &[_]gpu.BindGroupLayout.Entry{bgle},
},
);

const bind_group_layouts = [_]gpu.BindGroupLayout{bgl};
const bind_group_layouts = [_]*gpu.BindGroupLayout{bgl};
const pipeline_layout = core.device.createPipelineLayout(&.{
.bind_group_layout_count = 1,
.bind_group_layouts = &bind_group_layouts,
});
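The layout hunks above illustrate the descriptor convention used throughout this update: anonymous `&.{...}` slices become explicitly typed arrays, element counts are spelled out (`entry_count`, `bind_group_layout_count`), and layout handles are pointers. A condensed sketch using only the identifiers from these hunks:

    const bgle = gpu.BindGroupLayout.Entry.buffer(0, .{ .vertex = true }, .uniform, true, 0);
    const bgl = core.device.createBindGroupLayout(&gpu.BindGroupLayout.Descriptor{
        .entry_count = 1,                               // counts are now explicit
        .entries = &[_]gpu.BindGroupLayout.Entry{bgle}, // typed array instead of &.{bgle}
    });
    const bind_group_layouts = [_]*gpu.BindGroupLayout{bgl}; // layouts are pointer handles
    const pipeline_layout = core.device.createPipelineLayout(&.{
        .bind_group_layout_count = 1,
        .bind_group_layouts = &bind_group_layouts,
    });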
@@ -91,7 +98,8 @@ pub fn init(app: *App, core: *mach.Core) !void {
.vertex = .{
.module = vs_module,
.entry_point = "main",
.buffers = &.{vertex_buffer_layout},
.buffer_count = 1,
.buffers = &[_]gpu.VertexBufferLayout{vertex_buffer_layout},
},
.multisample = .{
.count = 1,

@@ -102,7 +110,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
.front_face = .ccw,
.cull_mode = .back,
.topology = .triangle_list,
.strip_index_format = .none,
.strip_index_format = .undef,
},
};

@@ -112,7 +120,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
.mapped_at_creation = true,
});
var vertex_mapped = vertex_buffer.getMappedRange(Vertex, 0, vertices.len);
std.mem.copy(Vertex, vertex_mapped, vertices[0..]);
std.mem.copy(Vertex, vertex_mapped.?, vertices[0..]);
vertex_buffer.unmap();

const uniform_buffer = core.device.createBuffer(&.{

@@ -123,7 +131,8 @@ pub fn init(app: *App, core: *mach.Core) !void {
const bind_group = core.device.createBindGroup(
&gpu.BindGroup.Descriptor{
.layout = bgl,
.entries = &.{
.entry_count = 1,
.entries = &[_]gpu.BindGroup.Entry{
gpu.BindGroup.Entry.buffer(0, uniform_buffer, 0, @sizeOf(UniformBufferObject)),
},
},
@@ -168,8 +177,9 @@ pub fn update(app: *App, core: *mach.Core) !void {
};

const encoder = core.device.createCommandEncoder(null);
const render_pass_info = gpu.RenderPassEncoder.Descriptor{
.color_attachments = &.{color_attachment},
const render_pass_info = gpu.RenderPassDescriptor{
.color_attachment_count = 1,
.color_attachments = &[_]gpu.RenderPassColorAttachment{color_attachment},
.depth_stencil_attachment = null,
};
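The render-pass hunk follows the same pattern: `gpu.RenderPassEncoder.Descriptor` is replaced by `gpu.RenderPassDescriptor` with an explicit attachment count and a typed attachment array. A minimal sketch built from the fields shown above:

    const render_pass_info = gpu.RenderPassDescriptor{
        .color_attachment_count = 1,
        .color_attachments = &[_]gpu.RenderPassColorAttachment{color_attachment},
        .depth_stencil_attachment = null, // this example draws without a depth buffer
    };
    const pass = encoder.beginRenderPass(&render_pass_info);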
@@ -191,7 +201,7 @@ pub fn update(app: *App, core: *mach.Core) !void {
const ubo = UniformBufferObject{
.mat = zm.transpose(mvp),
};
encoder.writeBuffer(app.uniform_buffer, 0, UniformBufferObject, &.{ubo});
encoder.writeBuffer(app.uniform_buffer, 0, &[_]UniformBufferObject{ubo});
}

const pass = encoder.beginRenderPass(&render_pass_info);
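`writeBuffer` likewise drops its explicit type parameter; the element type is now inferred from the slice argument. A one-line sketch of the updated call, reusing the uniform object from the hunk above:

    encoder.writeBuffer(app.uniform_buffer, 0, &[_]UniformBufferObject{ubo});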
@@ -1,11 +1,11 @@
@group(0) @binding(0) var<uniform> ubo : mat4x4<f32>;
struct VertexOut {
@builtin(position) position_clip : vec4<f32>;
@location(0) fragUV : vec2<f32>;
@location(1) fragPosition: vec4<f32>;
@builtin(position) position_clip : vec4<f32>,
@location(0) fragUV : vec2<f32>,
@location(1) fragPosition: vec4<f32>,
}

@stage(vertex) fn main(
@vertex fn main(
@location(0) position : vec4<f32>,
@location(1) uv: vec2<f32>
) -> VertexOut {
@@ -1,7 +1,7 @@
@group(0) @binding(1) var mySampler: sampler;
@group(0) @binding(2) var myTexture: texture_2d<f32>;

@stage(fragment)
@fragment
fn main(@location(0) fragUV: vec2<f32>,
@location(1) fragPosition: vec4<f32>) -> @location(0) vec4<f32> {
return textureSample(myTexture, mySampler, fragUV);
@@ -13,13 +13,13 @@ const UniformBufferObject = struct {
var timer: mach.Timer = undefined;

pipeline: gpu.RenderPipeline,
queue: gpu.Queue,
vertex_buffer: gpu.Buffer,
uniform_buffer: gpu.Buffer,
bind_group: gpu.BindGroup,
depth_texture: ?gpu.Texture,
depth_texture_view: gpu.TextureView,
pipeline: *gpu.RenderPipeline,
queue: *gpu.Queue,
vertex_buffer: *gpu.Buffer,
uniform_buffer: *gpu.Buffer,
bind_group: *gpu.BindGroup,
depth_texture: ?*gpu.Texture,
depth_texture_view: *gpu.TextureView,

pub const App = @This();

@@ -31,8 +31,10 @@ pub fn init(app: *App, core: *mach.Core) !void {
});

const vs_module = core.device.createShaderModule(&.{
.next_in_chain = .{ .wgsl_descriptor = &.{
.source = @embedFile("vert.wgsl"),
} },
.label = "my vertex shader",
.code = .{ .wgsl = @embedFile("vert.wgsl") },
});

const vertex_attributes = [_]gpu.VertexAttribute{

@@ -47,8 +49,10 @@ pub fn init(app: *App, core: *mach.Core) !void {
};

const fs_module = core.device.createShaderModule(&.{
.next_in_chain = .{ .wgsl_descriptor = &.{
.source = @embedFile("frag.wgsl"),
} },
.label = "my fragment shader",
.code = .{ .wgsl = @embedFile("frag.wgsl") },
});

const blend = gpu.BlendState{

@@ -66,12 +70,13 @@ pub fn init(app: *App, core: *mach.Core) !void {
const color_target = gpu.ColorTargetState{
.format = core.swap_chain_format,
.blend = &blend,
.write_mask = gpu.ColorWriteMask.all,
.write_mask = gpu.ColorWriteMaskFlags.all,
};
const fragment = gpu.FragmentState{
.module = fs_module,
.entry_point = "main",
.targets = &.{color_target},
.target_count = 1,
.targets = &[_]gpu.ColorTargetState{color_target},
.constants = null,
};

@@ -87,7 +92,8 @@ pub fn init(app: *App, core: *mach.Core) !void {
.vertex = .{
.module = vs_module,
.entry_point = "main",
.buffers = &.{vertex_buffer_layout},
.buffer_count = 1,
.buffers = &[_]gpu.VertexBufferLayout{vertex_buffer_layout},
},
.primitive = .{
.topology = .triangle_list,

@@ -106,7 +112,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
.mapped_at_creation = true,
});
var vertex_mapped = vertex_buffer.getMappedRange(Vertex, 0, vertices.len);
std.mem.copy(Vertex, vertex_mapped, vertices[0..]);
std.mem.copy(Vertex, vertex_mapped.?, vertices[0..]);
vertex_buffer.unmap();

// Create a sampler with linear filtering for smooth interpolation.

@@ -132,11 +138,11 @@ pub fn init(app: *App, core: *mach.Core) !void {
.rows_per_image = @intCast(u32, img.height),
};
switch (img.pixels.?) {
.Rgba32 => |pixels| queue.writeTexture(&.{ .texture = cube_texture }, &data_layout, &img_size, zigimg.color.Rgba32, pixels),
.Rgba32 => |pixels| queue.writeTexture(&.{ .texture = cube_texture }, &data_layout, &img_size, pixels),
.Rgb24 => |pixels| {
const data = try rgb24ToRgba32(core.allocator, pixels);
defer data.deinit(core.allocator);
queue.writeTexture(&.{ .texture = cube_texture }, &data_layout, &img_size, zigimg.color.Rgba32, data.Rgba32),
queue.writeTexture(&.{ .texture = cube_texture }, &data_layout, &img_size, data.Rgba32);
},
else => @panic("unsupported image color format"),
}
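`queue.writeTexture` changes in the same direction: the explicit pixel type (`zigimg.color.Rgba32`) is dropped and inferred from the pixel slice. A minimal sketch, assuming `pixels` is the decoded RGBA slice as in the switch above:

    queue.writeTexture(
        &.{ .texture = cube_texture },
        &data_layout,
        &img_size,
        pixels, // element type inferred from the slice; previously passed explicitly
    );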
@@ -150,7 +156,8 @@ pub fn init(app: *App, core: *mach.Core) !void {
const bind_group = core.device.createBindGroup(
&gpu.BindGroup.Descriptor{
.layout = pipeline.getBindGroupLayout(0),
.entries = &.{
.entry_count = 3,
.entries = &[_]gpu.BindGroup.Entry{
gpu.BindGroup.Entry.buffer(0, uniform_buffer, 0, @sizeOf(UniformBufferObject)),
gpu.BindGroup.Entry.sampler(1, sampler),
gpu.BindGroup.Entry.textureView(2, cube_texture.createView(&gpu.TextureView.Descriptor{})),

@@ -198,8 +205,9 @@ pub fn update(app: *App, core: *mach.Core) !void {
};

const encoder = core.device.createCommandEncoder(null);
const render_pass_info = gpu.RenderPassEncoder.Descriptor{
.color_attachments = &.{color_attachment},
const render_pass_info = gpu.RenderPassDescriptor{
.color_attachment_count = 1,
.color_attachments = &[_]gpu.RenderPassColorAttachment{color_attachment},
.depth_stencil_attachment = &.{
.view = app.depth_texture_view,
.depth_clear_value = 1.0,

@@ -226,7 +234,7 @@ pub fn update(app: *App, core: *mach.Core) !void {
const ubo = UniformBufferObject{
.mat = zm.transpose(mvp),
};
encoder.writeBuffer(app.uniform_buffer, 0, UniformBufferObject, &.{ubo});
encoder.writeBuffer(app.uniform_buffer, 0, &[_]UniformBufferObject{ubo});
}

const pass = encoder.beginRenderPass(&render_pass_info);
@@ -1,15 +1,15 @@
struct Uniforms {
modelViewProjectionMatrix : mat4x4<f32>;
modelViewProjectionMatrix : mat4x4<f32>,
};
@binding(0) @group(0) var<uniform> uniforms : Uniforms;

struct VertexOutput {
@builtin(position) Position : vec4<f32>;
@location(0) fragUV : vec2<f32>;
@location(1) fragPosition: vec4<f32>;
@builtin(position) Position : vec4<f32>,
@location(0) fragUV : vec2<f32>,
@location(1) fragPosition: vec4<f32>,
};

@stage(vertex)
@vertex
fn main(@location(0) position : vec4<f32>,
@location(1) uv : vec2<f32>) -> VertexOutput {
var output : VertexOutput;
@@ -1,3 +1,3 @@
@stage(fragment) fn main() -> @location(0) vec4<f32> {
@fragment fn main() -> @location(0) vec4<f32> {
return vec4<f32>(1.0, 0.0, 0.0, 1.0);
}
@@ -4,18 +4,22 @@ const gpu = @import("gpu");
pub const App = @This();

pipeline: gpu.RenderPipeline,
queue: gpu.Queue,
pipeline: *gpu.RenderPipeline,
queue: *gpu.Queue,

pub fn init(app: *App, core: *mach.Core) !void {
const vs_module = core.device.createShaderModule(&.{
.next_in_chain = .{ .wgsl_descriptor = &.{
.source = @embedFile("vert.wgsl"),
} },
.label = "my vertex shader",
.code = .{ .wgsl = @embedFile("vert.wgsl") },
});

const fs_module = core.device.createShaderModule(&.{
.next_in_chain = .{ .wgsl_descriptor = &.{
.source = @embedFile("frag.wgsl"),
} },
.label = "my fragment shader",
.code = .{ .wgsl = @embedFile("frag.wgsl") },
});

// Fragment state

@@ -34,12 +38,13 @@ pub fn init(app: *App, core: *mach.Core) !void {
const color_target = gpu.ColorTargetState{
.format = core.swap_chain_format,
.blend = &blend,
.write_mask = gpu.ColorWriteMask.all,
.write_mask = gpu.ColorWriteMaskFlags.all,
};
const fragment = gpu.FragmentState{
.module = fs_module,
.entry_point = "main",
.targets = &.{color_target},
.target_count = 1,
.targets = &[_]gpu.ColorTargetState{color_target},
.constants = null,
};
const pipeline_descriptor = gpu.RenderPipeline.Descriptor{

@@ -60,7 +65,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
.front_face = .ccw,
.cull_mode = .none,
.topology = .triangle_list,
.strip_index_format = .none,
.strip_index_format = .undef,
},
};

@@ -84,8 +89,9 @@ pub fn update(app: *App, core: *mach.Core) !void {
};

const encoder = core.device.createCommandEncoder(null);
const render_pass_info = gpu.RenderPassEncoder.Descriptor{
.color_attachments = &.{color_attachment},
const render_pass_info = gpu.RenderPassDescriptor{
.color_attachment_count = 1,
.color_attachments = &[_]gpu.RenderPassColorAttachment{color_attachment},
.depth_stencil_attachment = null,
};
const pass = encoder.beginRenderPass(&render_pass_info);
@@ -1,4 +1,4 @@
@stage(vertex) fn main(
@vertex fn main(
@builtin(vertex_index) VertexIndex : u32
) -> @builtin(position) vec4<f32> {
var pos = array<vec2<f32>, 3>(
@@ -1,4 +1,4 @@
@stage(fragment) fn main(
@fragment fn main(
@location(0) fragUV: vec2<f32>,
@location(1) fragPosition: vec4<f32>
) -> @location(0) vec4<f32> {
@@ -12,12 +12,12 @@ const UniformBufferObject = struct {
var timer: mach.Timer = undefined;

pipeline: gpu.RenderPipeline,
queue: gpu.Queue,
vertex_buffer: gpu.Buffer,
uniform_buffer: gpu.Buffer,
bind_group1: gpu.BindGroup,
bind_group2: gpu.BindGroup,
pipeline: *gpu.RenderPipeline,
queue: *gpu.Queue,
vertex_buffer: *gpu.Buffer,
uniform_buffer: *gpu.Buffer,
bind_group1: *gpu.BindGroup,
bind_group2: *gpu.BindGroup,

pub const App = @This();

@@ -29,8 +29,10 @@ pub fn init(app: *App, core: *mach.Core) !void {
});

const vs_module = core.device.createShaderModule(&.{
.next_in_chain = .{ .wgsl_descriptor = &.{
.source = @embedFile("vert.wgsl"),
} },
.label = "my vertex shader",
.code = .{ .wgsl = @embedFile("vert.wgsl") },
});

const vertex_attributes = [_]gpu.VertexAttribute{

@@ -45,8 +47,10 @@ pub fn init(app: *App, core: *mach.Core) !void {
};

const fs_module = core.device.createShaderModule(&.{
.next_in_chain = .{ .wgsl_descriptor = &.{
.source = @embedFile("frag.wgsl"),
} },
.label = "my fragment shader",
.code = .{ .wgsl = @embedFile("frag.wgsl") },
});

const blend = gpu.BlendState{

@@ -64,24 +68,27 @@ pub fn init(app: *App, core: *mach.Core) !void {
const color_target = gpu.ColorTargetState{
.format = core.swap_chain_format,
.blend = &blend,
.write_mask = gpu.ColorWriteMask.all,
.write_mask = gpu.ColorWriteMaskFlags.all,
};
const fragment = gpu.FragmentState{
.module = fs_module,
.entry_point = "main",
.targets = &.{color_target},
.target_count = 1,
.targets = &[_]gpu.ColorTargetState{color_target},
.constants = null,
};

const bgle = gpu.BindGroupLayout.Entry.buffer(0, .{ .vertex = true }, .uniform, true, 0);
const bgl = core.device.createBindGroupLayout(
&gpu.BindGroupLayout.Descriptor{
.entries = &.{bgle},
.entry_count = 1,
.entries = &[_]gpu.BindGroupLayout.Entry{bgle},
},
);

const bind_group_layouts = [_]gpu.BindGroupLayout{bgl};
const bind_group_layouts = [_]*gpu.BindGroupLayout{bgl};
const pipeline_layout = core.device.createPipelineLayout(&.{
.bind_group_layout_count = 1,
.bind_group_layouts = &bind_group_layouts,
});

@@ -92,7 +99,8 @@ pub fn init(app: *App, core: *mach.Core) !void {
.vertex = .{
.module = vs_module,
.entry_point = "main",
.buffers = &.{vertex_buffer_layout},
.buffer_count = 1,
.buffers = &[_]gpu.VertexBufferLayout{vertex_buffer_layout},
},
.multisample = .{
.count = 1,

@@ -103,7 +111,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
.front_face = .ccw,
.cull_mode = .back,
.topology = .triangle_list,
.strip_index_format = .none,
.strip_index_format = .undef,
},
};

@@ -115,7 +123,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
.mapped_at_creation = true,
});
var vertex_mapped = vertex_buffer.getMappedRange(Vertex, 0, vertices.len);
std.mem.copy(Vertex, vertex_mapped, vertices[0..]);
std.mem.copy(Vertex, vertex_mapped.?, vertices[0..]);
vertex_buffer.unmap();

// uniformBindGroup offset must be 256-byte aligned

@@ -129,7 +137,8 @@ pub fn init(app: *App, core: *mach.Core) !void {
const bind_group1 = core.device.createBindGroup(
&gpu.BindGroup.Descriptor{
.layout = bgl,
.entries = &.{
.entry_count = 1,
.entries = &[_]gpu.BindGroup.Entry{
gpu.BindGroup.Entry.buffer(0, uniform_buffer, 0, @sizeOf(UniformBufferObject)),
},
},

@@ -138,7 +147,8 @@ pub fn init(app: *App, core: *mach.Core) !void {
const bind_group2 = core.device.createBindGroup(
&gpu.BindGroup.Descriptor{
.layout = bgl,
.entries = &.{
.entry_count = 1,
.entries = &[_]gpu.BindGroup.Entry{
gpu.BindGroup.Entry.buffer(0, uniform_buffer, uniform_offset, @sizeOf(UniformBufferObject)),
},
},
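The two bind groups above share one uniform buffer, binding it at byte offsets 0 and 256 because uniform bindings must be 256-byte aligned; only the entry syntax changes in this update. A compact sketch of the second bind group in the new typed-entry form (`uniform_offset` is the example's own 256-byte constant):

    const bind_group2 = core.device.createBindGroup(&gpu.BindGroup.Descriptor{
        .layout = bgl,
        .entry_count = 1,
        .entries = &[_]gpu.BindGroup.Entry{
            // same buffer as bind_group1, starting at the 256-byte-aligned offset
            gpu.BindGroup.Entry.buffer(0, uniform_buffer, uniform_offset, @sizeOf(UniformBufferObject)),
        },
    });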
@@ -185,8 +195,9 @@ pub fn update(app: *App, core: *mach.Core) !void {
};

const encoder = core.device.createCommandEncoder(null);
const render_pass_info = gpu.RenderPassEncoder.Descriptor{
.color_attachments = &.{color_attachment},
const render_pass_info = gpu.RenderPassDescriptor{
.color_attachment_count = 1,
.color_attachments = &[_]gpu.RenderPassColorAttachment{color_attachment},
.depth_stencil_attachment = null,
};

@@ -216,10 +227,10 @@ pub fn update(app: *App, core: *mach.Core) !void {
.mat = zm.transpose(mvp2),
};

encoder.writeBuffer(app.uniform_buffer, 0, UniformBufferObject, &.{ubo1});
encoder.writeBuffer(app.uniform_buffer, 0, &[_]UniformBufferObject{ubo1});

// bind_group2 offset
encoder.writeBuffer(app.uniform_buffer, 256, UniformBufferObject, &.{ubo2});
encoder.writeBuffer(app.uniform_buffer, 256, &[_]UniformBufferObject{ubo2});
}

const pass = encoder.beginRenderPass(&render_pass_info);
@@ -1,11 +1,11 @@
@group(0) @binding(0) var<uniform> ubo : mat4x4<f32>;
struct VertexOut {
@builtin(position) position_clip : vec4<f32>;
@location(0) fragUV : vec2<f32>;
@location(1) fragPosition: vec4<f32>;
@builtin(position) position_clip : vec4<f32>,
@location(0) fragUV : vec2<f32>,
@location(1) fragPosition: vec4<f32>,
}

@stage(vertex) fn main(
@vertex fn main(
@location(0) position : vec4<f32>,
@location(1) uv: vec2<f32>
) -> VertexOut {
@@ -4,7 +4,7 @@ struct UniformBufferObject {
}
@group(0) @binding(0) var<uniform> ubo : UniformBufferObject;

@stage(fragment) fn main(
@fragment fn main(
@location(0) uv : vec2<f32>
) -> @location(0) vec4<f32> {
return vec4<f32>( 0.0, 0.0, 0.0, 1.0);

@@ -4,7 +4,7 @@ struct UniformBufferObject {
}
@group(0) @binding(0) var<uniform> ubo : UniformBufferObject;

@stage(fragment) fn main(
@fragment fn main(
@location(0) uv : vec2<f32>
) -> @location(0) vec4<f32> {
let aspect = ubo.resolution / min(ubo.resolution.x,ubo.resolution.y);

@@ -79,7 +79,7 @@ fn getLight(p:vec3<f32>) -> f32{
return dif;
}

@stage(fragment) fn main(
@fragment fn main(
@location(0) uv : vec2<f32>
) -> @location(0) vec4<f32> {
let aspect = ubo.resolution / min(ubo.resolution.x,ubo.resolution.y);

@@ -4,7 +4,7 @@ struct UniformBufferObject {
}
@group(0) @binding(0) var<uniform> ubo : UniformBufferObject;

@stage(fragment) fn main(
@fragment fn main(
@location(0) uv : vec2<f32>
) -> @location(0) vec4<f32> {
let aspect = ubo.resolution.xy / ubo.resolution.y;

@@ -75,7 +75,7 @@ fn getLight(p:vec3<f32>) -> f32{
return dif;
}

@stage(fragment) fn main(
@fragment fn main(
@location(0) uv : vec2<f32>
) -> @location(0) vec4<f32> {
let aspect = ubo.resolution / min(ubo.resolution.x,ubo.resolution.y);
@@ -25,12 +25,12 @@ const UniformBufferObject = struct {
var timer: std.time.Timer = undefined;

pipeline: gpu.RenderPipeline,
queue: gpu.Queue,
vertex_buffer: gpu.Buffer,
index_buffer: gpu.Buffer,
uniform_buffer: gpu.Buffer,
bind_group: gpu.BindGroup,
pipeline: *gpu.RenderPipeline,
queue: *gpu.Queue,
vertex_buffer: *gpu.Buffer,
index_buffer: *gpu.Buffer,
uniform_buffer: *gpu.Buffer,
bind_group: *gpu.BindGroup,

fragment_shader_file: std.fs.File,
fragment_shader_code: [:0]const u8,

@@ -70,7 +70,7 @@ pub fn init(app: *App, core: *mach.Core) !void {
.mapped_at_creation = true,
});
var vertex_mapped = vertex_buffer.getMappedRange(Vertex, 0, vertices.len);
std.mem.copy(Vertex, vertex_mapped, vertices[0..]);
std.mem.copy(Vertex, vertex_mapped.?, vertices[0..]);
vertex_buffer.unmap();

const index_buffer = core.device.createBuffer(&.{

@@ -79,12 +79,12 @@ pub fn init(app: *App, core: *mach.Core) !void {
.mapped_at_creation = true,
});
var index_mapped = index_buffer.getMappedRange(@TypeOf(indices[0]), 0, indices.len);
std.mem.copy(u16, index_mapped, indices[0..]);
std.mem.copy(u16, index_mapped.?, indices[0..]);
index_buffer.unmap();

// We need a bgl to bind the UniformBufferObject, but it is also needed for creating
// the RenderPipeline, so we pass it to recreatePipeline as a pointer
var bgl: gpu.BindGroupLayout = undefined;
var bgl: *gpu.BindGroupLayout = undefined;
const pipeline = recreatePipeline(core, code, &bgl);

const uniform_buffer = core.device.createBuffer(&.{

@@ -95,7 +95,8 @@ pub fn init(app: *App, core: *mach.Core) !void {
const bind_group = core.device.createBindGroup(
&gpu.BindGroup.Descriptor{
.layout = bgl,
.entries = &.{
.entry_count = 1,
.entries = &[_]gpu.BindGroup.Entry{
gpu.BindGroup.Entry.buffer(0, uniform_buffer, 0, @sizeOf(UniformBufferObject)),
},
},

@@ -161,8 +162,9 @@ pub fn update(app: *App, core: *mach.Core) !void {
};

const encoder = core.device.createCommandEncoder(null);
const render_pass_info = gpu.RenderPassEncoder.Descriptor{
.color_attachments = &.{color_attachment},
const render_pass_info = gpu.RenderPassDescriptor{
.color_attachment_count = 1,
.color_attachments = &[_]gpu.RenderPassColorAttachment{color_attachment},
.depth_stencil_attachment = null,
};

@@ -171,7 +173,7 @@ pub fn update(app: *App, core: *mach.Core) !void {
.resolution = .{ @intToFloat(f32, core.current_desc.width), @intToFloat(f32, core.current_desc.height) },
.time = time,
};
encoder.writeBuffer(app.uniform_buffer, 0, UniformBufferObject, &.{ubo});
encoder.writeBuffer(app.uniform_buffer, 0, &[_]UniformBufferObject{ubo});

const pass = encoder.beginRenderPass(&render_pass_info);
pass.setVertexBuffer(0, app.vertex_buffer, 0, @sizeOf(Vertex) * vertices.len);

@@ -191,10 +193,12 @@ pub fn update(app: *App, core: *mach.Core) !void {
back_buffer_view.release();
}

fn recreatePipeline(core: *mach.Core, fragment_shader_code: [:0]const u8, bgl: ?*gpu.BindGroupLayout) gpu.RenderPipeline {
fn recreatePipeline(core: *mach.Core, fragment_shader_code: [:0]const u8, bgl: ?**gpu.BindGroupLayout) *gpu.RenderPipeline {
const vs_module = core.device.createShaderModule(&.{
.next_in_chain = .{ .wgsl_descriptor = &.{
.source = @embedFile("vert.wgsl"),
} },
.label = "my vertex shader",
.code = .{ .wgsl = @embedFile("vert.wgsl") },
});
defer vs_module.release();
const vertex_attributes = [_]gpu.VertexAttribute{

@@ -212,23 +216,27 @@ fn recreatePipeline(core: *mach.Core, fragment_shader_code: [:0]const u8, bgl: ?
// print the validation layer error and show a black screen
core.device.pushErrorScope(.validation);
var fs_module = core.device.createShaderModule(&gpu.ShaderModule.Descriptor{
.next_in_chain = .{ .wgsl_descriptor = &.{
.source = fragment_shader_code,
} },
.label = "my fragment shader",
.code = .{ .wgsl = fragment_shader_code },
});
var error_occurred: bool = false;
// popErrorScope() returns always true, (unless maybe it fails to capture the error scope?)
_ = core.device.popErrorScope(&gpu.ErrorCallback.init(*bool, &error_occurred, struct {
fn callback(ctx: *bool, typ: gpu.ErrorType, message: [*:0]const u8) void {
if (typ != .noError) {
_ = core.device.popErrorScope(&error_occurred, struct {
inline fn callback(ctx: *bool, typ: gpu.ErrorType, message: [*:0]const u8) void {
if (typ != .no_error) {
std.debug.print("🔴🔴🔴🔴:\n{s}\n", .{message});
ctx.* = true;
}
}
}.callback));
}.callback);
if (error_occurred) {
fs_module = core.device.createShaderModule(&gpu.ShaderModule.Descriptor{
.label = "my fragment shader",
.code = .{ .wgsl = @embedFile("black_screen_frag.wgsl") },
.next_in_chain = .{ .wgsl_descriptor = &.{
.source = @embedFile("black_screen_frag.wgsl"),
} },
.label = "black screen fragment shader",
});
}
defer fs_module.release();
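`popErrorScope` now follows the same callback convention as `mapAsync`: the `gpu.ErrorCallback.init` wrapper goes away and a context pointer plus a plain (here `inline`) function are passed directly. A minimal sketch of the validation check around shader-module creation, using only the names from the hunk above (the printed text is illustrative):

    var error_occurred: bool = false;
    core.device.pushErrorScope(.validation);
    var fs_module = core.device.createShaderModule(&gpu.ShaderModule.Descriptor{
        .label = "my fragment shader",
        .code = .{ .wgsl = fragment_shader_code },
    });
    _ = core.device.popErrorScope(&error_occurred, struct {
        inline fn callback(ctx: *bool, typ: gpu.ErrorType, message: [*:0]const u8) void {
            if (typ != .no_error) {
                std.debug.print("shader validation error:\n{s}\n", .{message});
                ctx.* = true; // signal the caller to fall back to a known-good shader
            }
        }
    }.callback);
    // the full example rebuilds fs_module from black_screen_frag.wgsl when error_occurred is set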
@@ -248,12 +256,13 @@ fn recreatePipeline(core: *mach.Core, fragment_shader_code: [:0]const u8, bgl: ?
const color_target = gpu.ColorTargetState{
.format = core.swap_chain_format,
.blend = &blend,
.write_mask = gpu.ColorWriteMask.all,
.write_mask = gpu.ColorWriteMaskFlags.all,
};
const fragment = gpu.FragmentState{
.module = fs_module,
.entry_point = "main",
.targets = &.{color_target},
.target_count = 1,
.targets = &[_]gpu.ColorTargetState{color_target},
.constants = null,
};

@@ -261,7 +270,8 @@ fn recreatePipeline(core: *mach.Core, fragment_shader_code: [:0]const u8, bgl: ?
// bgl is needed outside, for the creation of the uniform_buffer in main
const bgl_tmp = core.device.createBindGroupLayout(
&gpu.BindGroupLayout.Descriptor{
.entries = &.{bgle},
.entry_count = 1,
.entries = &[_]gpu.BindGroupLayout.Entry{bgle},
},
);
defer {

@@ -273,8 +283,9 @@ fn recreatePipeline(core: *mach.Core, fragment_shader_code: [:0]const u8, bgl: ?
}
}

const bind_group_layouts = [_]gpu.BindGroupLayout{bgl_tmp};
const bind_group_layouts = [_]*gpu.BindGroupLayout{bgl_tmp};
const pipeline_layout = core.device.createPipelineLayout(&.{
.bind_group_layout_count = 1,
.bind_group_layouts = &bind_group_layouts,
});
defer pipeline_layout.release();

@@ -286,7 +297,8 @@ fn recreatePipeline(core: *mach.Core, fragment_shader_code: [:0]const u8, bgl: ?
.vertex = .{
.module = vs_module,
.entry_point = "main",
.buffers = &.{vertex_buffer_layout},
.buffer_count = 1,
.buffers = &[_]gpu.VertexBufferLayout{vertex_buffer_layout},
},
.multisample = .{
.count = 1,

@@ -297,7 +309,7 @@ fn recreatePipeline(core: *mach.Core, fragment_shader_code: [:0]const u8, bgl: ?
.front_face = .ccw,
.cull_mode = .none,
.topology = .triangle_list,
.strip_index_format = .none,
.strip_index_format = .undef,
},
};

@@ -306,14 +318,14 @@ fn recreatePipeline(core: *mach.Core, fragment_shader_code: [:0]const u8, bgl: ?
core.device.pushErrorScope(.validation);
const pipeline = core.device.createRenderPipeline(&pipeline_descriptor);
// popErrorScope() returns always true, (unless maybe it fails to capture the error scope?)
_ = core.device.popErrorScope(&gpu.ErrorCallback.init(*bool, &error_occurred, struct {
fn callback(ctx: *bool, typ: gpu.ErrorType, message: [*:0]const u8) void {
if (typ != .noError) {
_ = core.device.popErrorScope(&error_occurred, struct {
inline fn callback(ctx: *bool, typ: gpu.ErrorType, message: [*:0]const u8) void {
if (typ != .no_error) {
std.debug.print("🔴🔴🔴🔴:\n{s}\n", .{message});
ctx.* = true;
}
}
}.callback));
}.callback);
if (error_occurred) {
// Retry with black_screen_frag which we know will work.
return recreatePipeline(core, @embedFile("black_screen_frag.wgsl"), bgl);
@@ -1,9 +1,9 @@
struct VertexOut {
@builtin(position) position_clip : vec4<f32>;
@location(0) frag_uv : vec2<f32>;
@builtin(position) position_clip : vec4<f32>,
@location(0) frag_uv : vec2<f32>,
}

@stage(vertex) fn main(
@vertex fn main(
@location(0) position : vec4<f32>,
@location(1) uv : vec2<f32>
) -> VertexOut {