examples: add map-async example
Signed-off-by: Lucas Romanó <9062026+lucasromanosantos@users.noreply.github.com>
This commit is contained in:
parent
a825965208
commit
7de47a8f2d
4 changed files with 104 additions and 0 deletions
16
examples/map-async/main.wgsl
Normal file
16
examples/map-async/main.wgsl
Normal file
|
|
@ -0,0 +1,16 @@
|
|||
// Compute shader: writes output[i] = f32(global index) * 1000 + f32(local index),
// so the CPU side can verify both the global and the within-workgroup position
// of every invocation after mapping the buffer back.
//
// NOTE(review): `var<storage, write>` and `@stage(compute)` are pre-final WGSL
// syntax (the finalized spec uses the `read_write` access mode and the
// `@compute` attribute). This matches the gpu/dawn version the example was
// written against — confirm before upgrading the toolchain.
@group(0) @binding(0) var<storage, write> output: array<f32>;

@stage(compute) @workgroup_size(64, 1, 1)
fn main(
    @builtin(global_invocation_id)
    global_id : vec3<u32>,

    @builtin(local_invocation_id)
    local_id : vec3<u32>,
) {
    // Guard the tail: the dispatch is rounded up to whole workgroups, so the
    // last workgroup may contain invocations past the end of the buffer.
    if (global_id.x >= arrayLength(&output)) {
        return;
    }
    output[global_id.x] =
        f32(global_id.x) * 1000. + f32(local_id.x);
}
|
||||
86
examples/map-async/main.zig
Normal file
86
examples/map-async/main.zig
Normal file
|
|
@ -0,0 +1,86 @@
|
|||
const std = @import("std");
const mach = @import("mach");
const gpu = @import("gpu");

/// Mach application entry type: this file itself is the App struct, and
/// mach drives it through the `init`/`deinit`/`update` hooks below.
pub const App = @This();

/// Invocations per workgroup; must match `@workgroup_size` in main.wgsl.
const workgroup_size = 64;
/// Number of f32 elements in the storage buffer (an element count, not bytes).
const buffer_size = 1000;
|
||||
|
||||
/// Runs a compute shader that fills a storage buffer, copies the result into a
/// CPU-visible staging buffer, maps it asynchronously, and prints the floats.
///
/// Fixes over the original:
/// - Byte/element confusion: the bind-group entry size, the buffer-to-buffer
///   copy size, and the mapAsync size were all given `buffer_size` (the f32
///   element count, 1000) although the buffers are `buffer_size * @sizeOf(f32)`
///   bytes, so only a quarter of the data was bound, copied, and mapped — and
///   `getConstMappedRange(f32, 0, buffer_size / @sizeOf(f32))` then read only
///   250 of the 1000 floats.
/// - `response` was `undefined` and compared against `.success` before the map
///   callback could have fired (a read of undefined memory). It is now an
///   optional that stays `null` until the callback delivers a status.
/// - The command buffer and GPU objects were never released.
pub fn init(_: *App, core: *mach.Core) !void {
    // One consistent byte size for creation, binding, copying, and mapping.
    const buffer_size_bytes = buffer_size * @sizeOf(f32);

    // GPU-local buffer the compute shader writes into.
    const output = core.device.createBuffer(&.{
        .usage = .{ .storage = true, .copy_src = true },
        .size = buffer_size_bytes,
        .mapped_at_creation = false,
    });

    // CPU-visible staging buffer the result is copied into and then mapped.
    const staging = core.device.createBuffer(&.{
        .usage = .{ .map_read = true, .copy_dst = true },
        .size = buffer_size_bytes,
        .mapped_at_creation = false,
    });

    const compute_module = core.device.createShaderModule(&.{
        .label = "shader module",
        .code = .{ .wgsl = @embedFile("main.wgsl") },
    });

    const compute_pipeline = core.device.createComputePipeline(&gpu.ComputePipeline.Descriptor{ .compute = gpu.ProgrammableStageDescriptor{
        .module = compute_module,
        .entry_point = "main",
    } });

    const compute_bind_group = core.device.createBindGroup(&gpu.BindGroup.Descriptor{
        .layout = compute_pipeline.getBindGroupLayout(0),
        .entries = &[_]gpu.BindGroup.Entry{
            // Binding size is in bytes; `buffer_size` alone exposed only the
            // first 250 floats to the shader's arrayLength().
            gpu.BindGroup.Entry.buffer(0, output, 0, buffer_size_bytes),
        },
    });

    // The pipeline holds its own reference to the module.
    compute_module.release();

    const encoder = core.device.createCommandEncoder(null);

    const compute_pass = encoder.beginComputePass(null);
    compute_pass.setPipeline(compute_pipeline);
    compute_pass.setBindGroup(0, compute_bind_group, &.{});
    // One invocation per element, rounded up to whole workgroups; the shader
    // guards against the overhanging tail invocations.
    compute_pass.dispatch(try std.math.divCeil(u32, buffer_size, workgroup_size), 1, 1);
    compute_pass.end();

    // Copy size is in bytes; `buffer_size` alone copied only 1000 of the
    // 4000 bytes.
    encoder.copyBufferToBuffer(output, 0, staging, 0, buffer_size_bytes);

    var command = encoder.finish(null);
    encoder.release();

    // Stays null until the map callback fires; the original compared an
    // `undefined` status before the callback could possibly have run.
    var response: ?gpu.Buffer.MapAsyncStatus = null;
    var callback = gpu.Buffer.MapCallback.init(*?gpu.Buffer.MapAsyncStatus, &response, (struct {
        pub fn callback(ctx: *?gpu.Buffer.MapAsyncStatus, callback_response: gpu.Buffer.MapAsyncStatus) void {
            ctx.* = callback_response;
        }
    }).callback);

    var queue = core.device.getQueue();
    queue.submit(&.{command});
    command.release();

    // Map the full buffer (size in bytes) and pump the device until the
    // callback has delivered a status.
    staging.mapAsync(gpu.Buffer.MapMode.read, 0, buffer_size_bytes, &callback);
    while (response == null) {
        core.device.tick();
    }
    if (response.? != .success) return error.BufferMapFailed;

    // Length here is an element count: read all `buffer_size` floats, not the
    // original's `buffer_size / @sizeOf(f32)` (which double-converted to 250).
    const staging_mapped = staging.getConstMappedRange(f32, 0, buffer_size);
    for (staging_mapped) |v| {
        std.debug.print("{d} ", .{v});
    }
    std.debug.print("\n", .{});
    staging.unmap();

    // Release GPU objects created above (the original leaked all of them).
    compute_bind_group.release();
    compute_pipeline.release();
    staging.release();
    output.release();
}
|
||||
|
||||
/// Teardown hook required by mach; this example keeps no per-app state, so
/// there is nothing to free here.
pub fn deinit(_: *App, _: *mach.Core) void {}
|
||||
|
||||
/// Per-frame hook: all the work happens in `init`, so request shutdown
/// immediately and exit after the first frame.
pub fn update(_: *App, core: *mach.Core) !void {
    core.setShouldClose(true);
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue