From 2cf68adcc79b3a6d538a9539c41300857d4597bd Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Sat, 13 Apr 2024 11:25:41 -0700 Subject: [PATCH] src/gpu: move github.com/hexops/mach-gpu here This moves github.com/hexops/mach-gpu@528dad0823dafeae5d474c88cc658b091bf2e605 to this repository in the src/gpu directory. It can be imported via `@import("mach").gpu`. Soon we will move away from mach-gpu entirely as part of #1166, but in the meantime I am giving a workshop at https://sycl.it, and it would be nice for people using the `mach.gpu.*` API to be able to search the API in this single repository. There's not much harm in moving this code here. Signed-off-by: Stephen Gutekanst --- build.zig | 62 +- build.zig.zon | 6 +- src/core/main.zig | 6 +- src/gpu/adapter.zig | 122 ++ src/gpu/bind_group.zig | 88 + src/gpu/bind_group_layout.zig | 131 ++ src/gpu/buffer.zig | 166 ++ src/gpu/command_buffer.zig | 21 + src/gpu/command_encoder.zig | 111 ++ src/gpu/compute_pass_encoder.zig | 64 + src/gpu/compute_pipeline.zig | 30 + src/gpu/dawn.zig | 75 + src/gpu/dawn_impl.zig | 1270 ++++++++++++++ src/gpu/device.zig | 368 ++++ src/gpu/example/main.zig | 245 +++ src/gpu/example/objc_message.zig | 7 + src/gpu/example/util.zig | 201 +++ src/gpu/external_texture.zig | 56 + src/gpu/instance.zig | 65 + src/gpu/interface.zig | 2702 +++++++++++++++++++++++++++++ src/gpu/mach_dawn.cpp | 28 + src/gpu/mach_dawn.h | 36 + src/gpu/main.zig | 1025 +++++++++++ src/gpu/pipeline_layout.zig | 38 + src/gpu/query_set.zig | 57 + src/gpu/queue.zig | 101 ++ src/gpu/render_bundle.zig | 21 + src/gpu/render_bundle_encoder.zig | 122 ++ src/gpu/render_pass_encoder.zig | 128 ++ src/gpu/render_pipeline.zig | 38 + src/gpu/sampler.zig | 52 + src/gpu/shader_module.zig | 69 + src/gpu/shared_fence.zig | 91 + src/gpu/shared_texture_memory.zig | 124 ++ src/gpu/surface.zig | 72 + src/gpu/swap_chain.zig | 37 + src/gpu/texture.zig | 266 +++ src/gpu/texture_view.zig | 40 + 38 files changed, 8123 insertions(+), 18 deletions(-) create mode 100644 src/gpu/adapter.zig create mode 100644 src/gpu/bind_group.zig create mode 100644 src/gpu/bind_group_layout.zig create mode 100644 src/gpu/buffer.zig create mode 100644 src/gpu/command_buffer.zig create mode 100644 src/gpu/command_encoder.zig create mode 100644 src/gpu/compute_pass_encoder.zig create mode 100644 src/gpu/compute_pipeline.zig create mode 100644 src/gpu/dawn.zig create mode 100644 src/gpu/dawn_impl.zig create mode 100644 src/gpu/device.zig create mode 100644 src/gpu/example/main.zig create mode 100644 src/gpu/example/objc_message.zig create mode 100644 src/gpu/example/util.zig create mode 100644 src/gpu/external_texture.zig create mode 100644 src/gpu/instance.zig create mode 100644 src/gpu/interface.zig create mode 100644 src/gpu/mach_dawn.cpp create mode 100644 src/gpu/mach_dawn.h create mode 100644 src/gpu/main.zig create mode 100644 src/gpu/pipeline_layout.zig create mode 100644 src/gpu/query_set.zig create mode 100644 src/gpu/queue.zig create mode 100644 src/gpu/render_bundle.zig create mode 100644 src/gpu/render_bundle_encoder.zig create mode 100644 src/gpu/render_pass_encoder.zig create mode 100644 src/gpu/render_pipeline.zig create mode 100644 src/gpu/sampler.zig create mode 100644 src/gpu/shader_module.zig create mode 100644 src/gpu/shared_fence.zig create mode 100644 src/gpu/shared_texture_memory.zig create mode 100644 src/gpu/surface.zig create mode 100644 src/gpu/swap_chain.zig create mode 100644 src/gpu/texture.zig create mode 100644 src/gpu/texture_view.zig diff --git a/build.zig
b/build.zig index 1786a078..011262f3 100644 --- a/build.zig +++ b/build.zig @@ -1,8 +1,6 @@ const std = @import("std"); const builtin = @import("builtin"); const glfw = @import("mach_glfw"); -const gpu = @import("mach_gpu"); -const sysgpu = @import("mach_sysgpu"); pub const SysgpuBackend = enum { default, @@ -101,12 +99,6 @@ pub fn build(b: *std.Build) !void { try buildExamples(b, optimize, target, module); } if (want_core) { - const mach_gpu_dep = b.dependency("mach_gpu", .{ - .target = target, - .optimize = optimize, - }); - module.addImport("mach-gpu", mach_gpu_dep.module("mach-gpu")); - if (target.result.cpu.arch == .wasm32) { const sysjs_dep = b.dependency("mach_sysjs", .{ .target = target, @@ -265,6 +257,37 @@ pub fn build(b: *std.Build) !void { b.installArtifact(lib); } + if (true) { // want_gpu + const gpu_dawn = @import("mach_gpu_dawn"); + gpu_dawn.addPathsToModule(b, module, .{}); + module.addIncludePath(.{ .path = sdkPath("/src/gpu") }); + + const example_exe = b.addExecutable(.{ + .name = "dawn-gpu-hello-triangle", + .root_source_file = .{ .path = "src/gpu/example/main.zig" }, + .target = target, + .optimize = optimize, + }); + example_exe.root_module.addImport("mach", module); + link(b, example_exe, &example_exe.root_module); + + const mach_glfw_dep = b.dependency("mach_glfw", .{ + .target = target, + .optimize = optimize, + }); + example_exe.root_module.addImport("mach-glfw", mach_glfw_dep.module("mach-glfw")); + + const example_compile_step = b.step("dawn-gpu-hello-triangle", "Install 'dawn-gpu-hello-triangle'"); + example_compile_step.dependOn(b.getInstallStep()); + + const example_run_cmd = b.addRunArtifact(example_exe); + example_run_cmd.step.dependOn(b.getInstallStep()); + if (b.args) |args| example_run_cmd.addArgs(args); + + const example_run_step = b.step("run-dawn-gpu-hello-triangle", "Run 'dawn-gpu-hello-triangle' example"); + example_run_step.dependOn(&example_run_cmd.step); + } + if (target.result.cpu.arch != .wasm32) { // Creates a step for unit testing. This only builds the test executable // but does not run it. 
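For orientation, here is a minimal, hypothetical sketch of how a downstream project could consume the wiring added above, assuming a `mach` entry in the consumer's build.zig.zon; the app name and module lookup are illustrative and not part of this patch:

    const std = @import("std");
    const mach = @import("mach"); // the dependency's build.zig

    pub fn build(b: *std.Build) !void {
        const target = b.standardTargetOptions(.{});
        const optimize = b.standardOptimizeOption(.{});
        const mach_dep = b.dependency("mach", .{ .target = target, .optimize = optimize });

        const exe = b.addExecutable(.{
            .name = "my-app", // illustrative
            .root_source_file = .{ .path = "src/main.zig" },
            .target = target,
            .optimize = optimize,
        });
        exe.root_module.addImport("mach", mach_dep.module("mach"));
        // link() attaches Dawn and the mach_dawn.cpp glue to the compile step.
        mach.link(mach_dep.builder, exe, &exe.root_module);
        b.installArtifact(exe);
    }

Within this repository itself, the example added above runs via `zig build run-dawn-gpu-hello-triangle`.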
@@ -288,7 +311,7 @@ pub fn build(b: *std.Build) !void { const test_step = b.step("test", "Run unit tests"); test_step.dependOn(&run_unit_tests.step); - if (want_sysgpu) linkSysgpu(b, &unit_tests.root_module); + if (want_sysgpu) linkSysgpu(b, &unit_tests.root_module) else link(b, unit_tests, &unit_tests.root_module); } } @@ -507,10 +530,23 @@ pub const CoreApp = struct { // TODO(sysgpu): remove this once we switch to sysgpu fully pub fn link(mach_builder: *std.Build, step: *std.Build.Step.Compile, mod: *std.Build.Module) void { - gpu.link(mach_builder.dependency("mach_gpu", .{ - .target = step.root_module.resolved_target orelse mach_builder.host, - .optimize = step.root_module.optimize.?, - }).builder, step, mod, .{}) catch unreachable; + const gpu_dawn = @import("mach_gpu_dawn"); + const Options = struct { + gpu_dawn_options: gpu_dawn.Options = .{}, + }; + const options: Options = .{}; + + gpu_dawn.link( + mach_builder.dependency("mach_gpu_dawn", .{ + .target = step.root_module.resolved_target.?, + .optimize = step.root_module.optimize.?, + }).builder, + step, + mod, + options.gpu_dawn_options, + ); + step.addCSourceFile(.{ .file = .{ .path = sdkPath("/src/gpu/mach_dawn.cpp") }, .flags = &.{"-std=c++17"} }); + step.addIncludePath(.{ .path = sdkPath("/src/gpu") }); } fn linkSysgpu(b: *std.Build, module: *std.Build.Module) void { diff --git a/build.zig.zon b/build.zig.zon index 716d99e6..d0e4e4a4 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -27,9 +27,9 @@ .url = "https://pkg.machengine.org/mach-sysjs/eeef024f79beae189b7a4ed85e64ed076e76d538.tar.gz", .hash = "1220db6845ce34743ae2a1ab0222efc942496adde2736c20e3443d4fde4ef64b11b9", }, - .mach_gpu = .{ - .url = "https://pkg.machengine.org/mach-gpu/528dad0823dafeae5d474c88cc658b091bf2e605.tar.gz", - .hash = "1220fe2e555ca66741539bc0f97769b2513c5e609c968d27eb8997f577a1d195f048", + .mach_gpu_dawn = .{ + .url = "https://pkg.machengine.org/mach-gpu-dawn/cce4d19945ca6102162b0cbbc546648edb38dc41.tar.gz", + .hash = "1220a6e3f4772fed665bb5b1792cf5cff8ac51af42a57ad8d276e394ae19f310a92e", }, .mach_glfw = .{ .url = "https://pkg.machengine.org/mach-glfw/26e8af73d7d4fbdac3ff60492c44294fc0d139b7.tar.gz", diff --git a/src/core/main.zig b/src/core/main.zig index af2b1d67..27fd705a 100644 --- a/src/core/main.zig +++ b/src/core/main.zig @@ -57,7 +57,7 @@ pub const options = if (@hasDecl(@import("root"), "mach_core_options")) else ComptimeOptions{}; -pub const wgpu = @import("mach-gpu"); +pub const wgpu = @import("../gpu/main.zig"); pub const gpu = if (options.use_sysgpu) sysgpu.sysgpu else wgpu; @@ -154,7 +154,9 @@ pub const Options = struct { power_preference: gpu.PowerPreference = .undefined, required_features: ?[]const gpu.FeatureName = null, required_limits: ?gpu.Limits = null, - swap_chain_usage: gpu.Texture.UsageFlags = .{ .render_attachment = true, }, + swap_chain_usage: gpu.Texture.UsageFlags = .{ + .render_attachment = true, + }, }; pub fn init(options_in: Options) !void { diff --git a/src/gpu/adapter.zig b/src/gpu/adapter.zig new file mode 100644 index 00000000..4ee090df --- /dev/null +++ b/src/gpu/adapter.zig @@ -0,0 +1,122 @@ +const std = @import("std"); +const testing = std.testing; +const dawn = @import("dawn.zig"); +const Bool32 = @import("main.zig").Bool32; +const ChainedStructOut = @import("main.zig").ChainedStructOut; +const Device = @import("device.zig").Device; +const Instance = @import("instance.zig").Instance; +const FeatureName = @import("main.zig").FeatureName; +const SupportedLimits = @import("main.zig").SupportedLimits; +const 
RequestDeviceStatus = @import("main.zig").RequestDeviceStatus; +const BackendType = @import("main.zig").BackendType; +const RequestDeviceCallback = @import("main.zig").RequestDeviceCallback; +const Impl = @import("interface.zig").Impl; + +pub const Adapter = opaque { + pub const Type = enum(u32) { + discrete_gpu, + integrated_gpu, + cpu, + unknown, + + pub fn name(t: Type) []const u8 { + return switch (t) { + .discrete_gpu => "Discrete GPU", + .integrated_gpu => "Integrated GPU", + .cpu => "CPU", + .unknown => "Unknown", + }; + } + }; + + pub const Properties = extern struct { + pub const NextInChain = extern union { + generic: ?*const ChainedStructOut, + dawn_adapter_properties_power_preference: *const dawn.AdapterPropertiesPowerPreference, + }; + + next_in_chain: NextInChain = .{ .generic = null }, + vendor_id: u32, + vendor_name: [*:0]const u8, + architecture: [*:0]const u8, + device_id: u32, + name: [*:0]const u8, + driver_description: [*:0]const u8, + adapter_type: Type, + backend_type: BackendType, + compatibility_mode: Bool32 = .false, + }; + + pub inline fn createDevice(adapter: *Adapter, descriptor: ?*const Device.Descriptor) ?*Device { + return Impl.adapterCreateDevice(adapter, descriptor); + } + + /// Call once with null to determine the array length, and again to fetch the feature list. + /// + /// Consider using the enumerateFeaturesOwned helper. + pub inline fn enumerateFeatures(adapter: *Adapter, features: ?[*]FeatureName) usize { + return Impl.adapterEnumerateFeatures(adapter, features); + } + + /// Enumerates the adapter features, storing the result in an allocated slice which is owned by + /// the caller. + pub inline fn enumerateFeaturesOwned(adapter: *Adapter, allocator: std.mem.Allocator) ![]FeatureName { + const count = adapter.enumerateFeatures(null); + const data = try allocator.alloc(FeatureName, count); + _ = adapter.enumerateFeatures(data.ptr); + return data; + } + + pub inline fn getInstance(adapter: *Adapter) *Instance { + return Impl.adapterGetInstance(adapter); + } + + pub inline fn getLimits(adapter: *Adapter, limits: *SupportedLimits) bool { + return Impl.adapterGetLimits(adapter, limits) != 0; + } + + pub inline fn getProperties(adapter: *Adapter, properties: *Adapter.Properties) void { + Impl.adapterGetProperties(adapter, properties); + } + + pub inline fn hasFeature(adapter: *Adapter, feature: FeatureName) bool { + return Impl.adapterHasFeature(adapter, feature) != 0; + } + + pub inline fn requestDevice( + adapter: *Adapter, + descriptor: ?*const Device.Descriptor, + context: anytype, + comptime callback: fn ( + ctx: @TypeOf(context), + status: RequestDeviceStatus, + device: *Device, + message: ?[*:0]const u8, + ) callconv(.Inline) void, + ) void { + const Context = @TypeOf(context); + const Helper = struct { + pub fn cCallback(status: RequestDeviceStatus, device: *Device, message: ?[*:0]const u8, userdata: ?*anyopaque) callconv(.C) void { + callback( + if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))), + status, + device, + message, + ); + } + }; + Impl.adapterRequestDevice(adapter, descriptor, Helper.cCallback, if (Context == void) null else context); + } + + pub inline fn reference(adapter: *Adapter) void { + Impl.adapterReference(adapter); + } + + pub inline fn release(adapter: *Adapter) void { + Impl.adapterRelease(adapter); + } +}; + +test "Adapter.Type name" { + try testing.expectEqualStrings("Discrete GPU", Adapter.Type.discrete_gpu.name()); +} diff --git a/src/gpu/bind_group.zig b/src/gpu/bind_group.zig new file mode 
100644 index 00000000..8428f61e --- /dev/null +++ b/src/gpu/bind_group.zig @@ -0,0 +1,88 @@ +const Buffer = @import("buffer.zig").Buffer; +const Sampler = @import("sampler.zig").Sampler; +const TextureView = @import("texture_view.zig").TextureView; +const ChainedStruct = @import("main.zig").ChainedStruct; +const BindGroupLayout = @import("bind_group_layout.zig").BindGroupLayout; +const ExternalTexture = @import("external_texture.zig").ExternalTexture; +const Impl = @import("interface.zig").Impl; + +pub const BindGroup = opaque { + pub const Entry = extern struct { + pub const NextInChain = extern union { + generic: ?*const ChainedStruct, + external_texture_binding_entry: *const ExternalTexture.BindingEntry, + }; + + next_in_chain: NextInChain = .{ .generic = null }, + binding: u32, + buffer: ?*Buffer = null, + offset: u64 = 0, + size: u64, + sampler: ?*Sampler = null, + texture_view: ?*TextureView = null, + + /// Helper to create a buffer BindGroup.Entry. + pub fn buffer(binding: u32, buf: *Buffer, offset: u64, size: u64) Entry { + return .{ + .binding = binding, + .buffer = buf, + .offset = offset, + .size = size, + }; + } + + /// Helper to create a sampler BindGroup.Entry. + pub fn sampler(binding: u32, _sampler: *Sampler) Entry { + return .{ + .binding = binding, + .sampler = _sampler, + .size = 0, + }; + } + + /// Helper to create a texture view BindGroup.Entry. + pub fn textureView(binding: u32, texture_view: *TextureView) Entry { + return .{ + .binding = binding, + .texture_view = texture_view, + .size = 0, + }; + } + }; + + pub const Descriptor = extern struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + layout: *BindGroupLayout, + entry_count: usize = 0, + entries: ?[*]const Entry = null, + + /// Provides a slightly friendlier Zig API to initialize this structure. 
+ pub inline fn init(v: struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + layout: *BindGroupLayout, + entries: ?[]const Entry = null, + }) Descriptor { + return .{ + .next_in_chain = v.next_in_chain, + .label = v.label, + .layout = v.layout, + .entry_count = if (v.entries) |e| e.len else 0, + .entries = if (v.entries) |e| e.ptr else null, + }; + } + }; + + pub inline fn setLabel(bind_group: *BindGroup, label: [*:0]const u8) void { + Impl.bindGroupSetLabel(bind_group, label); + } + + pub inline fn reference(bind_group: *BindGroup) void { + Impl.bindGroupReference(bind_group); + } + + pub inline fn release(bind_group: *BindGroup) void { + Impl.bindGroupRelease(bind_group); + } +}; diff --git a/src/gpu/bind_group_layout.zig b/src/gpu/bind_group_layout.zig new file mode 100644 index 00000000..cc295d85 --- /dev/null +++ b/src/gpu/bind_group_layout.zig @@ -0,0 +1,131 @@ +const Bool32 = @import("main.zig").Bool32; +const ChainedStruct = @import("main.zig").ChainedStruct; +const ShaderStageFlags = @import("main.zig").ShaderStageFlags; +const Buffer = @import("buffer.zig").Buffer; +const Sampler = @import("sampler.zig").Sampler; +const Texture = @import("texture.zig").Texture; +const TextureView = @import("texture_view.zig").TextureView; +const StorageTextureBindingLayout = @import("main.zig").StorageTextureBindingLayout; +const StorageTextureAccess = @import("main.zig").StorageTextureAccess; +const ExternalTexture = @import("external_texture.zig").ExternalTexture; +const Impl = @import("interface.zig").Impl; + +pub const BindGroupLayout = opaque { + pub const Entry = extern struct { + pub const NextInChain = extern union { + generic: ?*const ChainedStruct, + external_texture_binding_layout: *const ExternalTexture.BindingLayout, + }; + + next_in_chain: NextInChain = .{ .generic = null }, + binding: u32, + visibility: ShaderStageFlags, + buffer: Buffer.BindingLayout = .{}, + sampler: Sampler.BindingLayout = .{}, + texture: Texture.BindingLayout = .{}, + storage_texture: StorageTextureBindingLayout = .{}, + + /// Helper to create a buffer BindGroupLayout.Entry. + pub fn buffer( + binding: u32, + visibility: ShaderStageFlags, + binding_type: Buffer.BindingType, + has_dynamic_offset: bool, + min_binding_size: u64, + ) Entry { + return .{ + .binding = binding, + .visibility = visibility, + .buffer = .{ + .type = binding_type, + .has_dynamic_offset = Bool32.from(has_dynamic_offset), + .min_binding_size = min_binding_size, + }, + }; + } + + /// Helper to create a sampler BindGroupLayout.Entry. + pub fn sampler( + binding: u32, + visibility: ShaderStageFlags, + binding_type: Sampler.BindingType, + ) Entry { + return .{ + .binding = binding, + .visibility = visibility, + .sampler = .{ .type = binding_type }, + }; + } + + /// Helper to create a texture BindGroupLayout.Entry. + pub fn texture( + binding: u32, + visibility: ShaderStageFlags, + sample_type: Texture.SampleType, + view_dimension: TextureView.Dimension, + multisampled: bool, + ) Entry { + return .{ + .binding = binding, + .visibility = visibility, + .texture = .{ + .sample_type = sample_type, + .view_dimension = view_dimension, + .multisampled = Bool32.from(multisampled), + }, + }; + } + + /// Helper to create a storage texture BindGroupLayout.Entry. 
+ pub fn storageTexture( + binding: u32, + visibility: ShaderStageFlags, + access: StorageTextureAccess, + format: Texture.Format, + view_dimension: TextureView.Dimension, + ) Entry { + return .{ + .binding = binding, + .visibility = visibility, + .storage_texture = .{ + .access = access, + .format = format, + .view_dimension = view_dimension, + }, + }; + } + }; + + pub const Descriptor = extern struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + entry_count: usize = 0, + entries: ?[*]const Entry = null, + + /// Provides a slightly friendlier Zig API to initialize this structure. + pub inline fn init(v: struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + entries: ?[]const Entry = null, + }) Descriptor { + return .{ + .next_in_chain = v.next_in_chain, + .label = v.label, + .entry_count = if (v.entries) |e| e.len else 0, + .entries = if (v.entries) |e| e.ptr else null, + }; + } + }; + + pub inline fn setLabel(bind_group_layout: *BindGroupLayout, label: [*:0]const u8) void { + Impl.bindGroupLayoutSetLabel(bind_group_layout, label); + } + + pub inline fn reference(bind_group_layout: *BindGroupLayout) void { + Impl.bindGroupLayoutReference(bind_group_layout); + } + + pub inline fn release(bind_group_layout: *BindGroupLayout) void { + Impl.bindGroupLayoutRelease(bind_group_layout); + } +}; diff --git a/src/gpu/buffer.zig b/src/gpu/buffer.zig new file mode 100644 index 00000000..ada4ddbb --- /dev/null +++ b/src/gpu/buffer.zig @@ -0,0 +1,166 @@ +const std = @import("std"); +const Bool32 = @import("main.zig").Bool32; +const ChainedStruct = @import("main.zig").ChainedStruct; +const dawn = @import("dawn.zig"); +const MapModeFlags = @import("main.zig").MapModeFlags; +const Impl = @import("interface.zig").Impl; + +pub const Buffer = opaque { + pub const MapCallback = *const fn (status: MapAsyncStatus, userdata: ?*anyopaque) callconv(.C) void; + + pub const BindingType = enum(u32) { + undefined = 0x00000000, + uniform = 0x00000001, + storage = 0x00000002, + read_only_storage = 0x00000003, + }; + + pub const MapState = enum(u32) { + unmapped = 0x00000000, + pending = 0x00000001, + mapped = 0x00000002, + }; + + pub const MapAsyncStatus = enum(u32) { + success = 0x00000000, + validation_error = 0x00000001, + unknown = 0x00000002, + device_lost = 0x00000003, + destroyed_before_callback = 0x00000004, + unmapped_before_callback = 0x00000005, + mapping_already_pending = 0x00000006, + offset_out_of_range = 0x00000007, + size_out_of_range = 0x00000008, + }; + + pub const UsageFlags = packed struct(u32) { + map_read: bool = false, + map_write: bool = false, + copy_src: bool = false, + copy_dst: bool = false, + index: bool = false, + vertex: bool = false, + uniform: bool = false, + storage: bool = false, + indirect: bool = false, + query_resolve: bool = false, + + _padding: u22 = 0, + + comptime { + std.debug.assert( + @sizeOf(@This()) == @sizeOf(u32) and + @bitSizeOf(@This()) == @bitSizeOf(u32), + ); + } + + pub const none = UsageFlags{}; + + pub fn equal(a: UsageFlags, b: UsageFlags) bool { + return @as(u10, @truncate(@as(u32, @bitCast(a)))) == @as(u10, @truncate(@as(u32, @bitCast(b)))); + } + }; + + pub const BindingLayout = extern struct { + next_in_chain: ?*const ChainedStruct = null, + type: BindingType = .undefined, + has_dynamic_offset: Bool32 = .false, + min_binding_size: u64 = 0, + }; + + pub const Descriptor = extern struct { + pub const NextInChain = extern union { + generic: ?*const ChainedStruct, + 
dawn_buffer_descriptor_error_info_from_wire_client: *const dawn.BufferDescriptorErrorInfoFromWireClient, + }; + + next_in_chain: NextInChain = .{ .generic = null }, + label: ?[*:0]const u8 = null, + usage: UsageFlags, + size: u64, + mapped_at_creation: Bool32 = .false, + }; + + pub inline fn destroy(buffer: *Buffer) void { + Impl.bufferDestroy(buffer); + } + + pub inline fn getMapState(buffer: *Buffer) MapState { + return Impl.bufferGetMapState(buffer); + } + + /// Default `offset_bytes`: 0 + /// Default `len`: `gpu.whole_map_size` / `std.math.maxInt(usize)` (whole range) + pub inline fn getConstMappedRange( + buffer: *Buffer, + comptime T: type, + offset_bytes: usize, + len: usize, + ) ?[]const T { + const size = @sizeOf(T) * len; + const data = Impl.bufferGetConstMappedRange( + buffer, + offset_bytes, + size + size % 4, + ); + return if (data) |d| @as([*]const T, @ptrCast(@alignCast(d)))[0..len] else null; + } + + /// Default `offset_bytes`: 0 + /// Default `len`: `gpu.whole_map_size` / `std.math.maxInt(usize)` (whole range) + pub inline fn getMappedRange( + buffer: *Buffer, + comptime T: type, + offset_bytes: usize, + len: usize, + ) ?[]T { + const size = @sizeOf(T) * len; + const data = Impl.bufferGetMappedRange( + buffer, + offset_bytes, + size + size % 4, + ); + return if (data) |d| @as([*]T, @ptrCast(@alignCast(d)))[0..len] else null; + } + + pub inline fn getSize(buffer: *Buffer) u64 { + return Impl.bufferGetSize(buffer); + } + + pub inline fn getUsage(buffer: *Buffer) Buffer.UsageFlags { + return Impl.bufferGetUsage(buffer); + } + + pub inline fn mapAsync( + buffer: *Buffer, + mode: MapModeFlags, + offset: usize, + size: usize, + context: anytype, + comptime callback: fn (ctx: @TypeOf(context), status: MapAsyncStatus) callconv(.Inline) void, + ) void { + const Context = @TypeOf(context); + const Helper = struct { + pub fn cCallback(status: MapAsyncStatus, userdata: ?*anyopaque) callconv(.C) void { + callback(if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))), status); + } + }; + Impl.bufferMapAsync(buffer, mode, offset, size, Helper.cCallback, if (Context == void) null else context); + } + + pub inline fn setLabel(buffer: *Buffer, label: [*:0]const u8) void { + Impl.bufferSetLabel(buffer, label); + } + + pub inline fn unmap(buffer: *Buffer) void { + Impl.bufferUnmap(buffer); + } + + pub inline fn reference(buffer: *Buffer) void { + Impl.bufferReference(buffer); + } + + pub inline fn release(buffer: *Buffer) void { + Impl.bufferRelease(buffer); + } +}; diff --git a/src/gpu/command_buffer.zig b/src/gpu/command_buffer.zig new file mode 100644 index 00000000..03a744f1 --- /dev/null +++ b/src/gpu/command_buffer.zig @@ -0,0 +1,21 @@ +const ChainedStruct = @import("main.zig").ChainedStruct; +const Impl = @import("interface.zig").Impl; + +pub const CommandBuffer = opaque { + pub const Descriptor = extern struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + }; + + pub inline fn setLabel(command_buffer: *CommandBuffer, label: [*:0]const u8) void { + Impl.commandBufferSetLabel(command_buffer, label); + } + + pub inline fn reference(command_buffer: *CommandBuffer) void { + Impl.commandBufferReference(command_buffer); + } + + pub inline fn release(command_buffer: *CommandBuffer) void { + Impl.commandBufferRelease(command_buffer); + } +}; diff --git a/src/gpu/command_encoder.zig b/src/gpu/command_encoder.zig new file mode 100644 index 00000000..ae4bfd1d --- /dev/null +++ b/src/gpu/command_encoder.zig @@ -0,0 +1,111 @@ +const std =
@import("std"); +const ComputePassEncoder = @import("compute_pass_encoder.zig").ComputePassEncoder; +const RenderPassEncoder = @import("render_pass_encoder.zig").RenderPassEncoder; +const CommandBuffer = @import("command_buffer.zig").CommandBuffer; +const Buffer = @import("buffer.zig").Buffer; +const QuerySet = @import("query_set.zig").QuerySet; +const RenderPassDescriptor = @import("main.zig").RenderPassDescriptor; +const ComputePassDescriptor = @import("main.zig").ComputePassDescriptor; +const ChainedStruct = @import("main.zig").ChainedStruct; +const ImageCopyBuffer = @import("main.zig").ImageCopyBuffer; +const ImageCopyTexture = @import("main.zig").ImageCopyTexture; +const Extent3D = @import("main.zig").Extent3D; +const Impl = @import("interface.zig").Impl; +const dawn = @import("dawn.zig"); + +pub const CommandEncoder = opaque { + pub const Descriptor = extern struct { + pub const NextInChain = extern union { + generic: ?*const ChainedStruct, + dawn_encoder_internal_usage_descriptor: *const dawn.EncoderInternalUsageDescriptor, + }; + + next_in_chain: NextInChain = .{ .generic = null }, + label: ?[*:0]const u8 = null, + }; + + pub inline fn beginComputePass(command_encoder: *CommandEncoder, descriptor: ?*const ComputePassDescriptor) *ComputePassEncoder { + return Impl.commandEncoderBeginComputePass(command_encoder, descriptor); + } + + pub inline fn beginRenderPass(command_encoder: *CommandEncoder, descriptor: *const RenderPassDescriptor) *RenderPassEncoder { + return Impl.commandEncoderBeginRenderPass(command_encoder, descriptor); + } + + /// Default `offset`: 0 + /// Default `size`: `gpu.whole_size` + pub inline fn clearBuffer(command_encoder: *CommandEncoder, buffer: *Buffer, offset: u64, size: u64) void { + Impl.commandEncoderClearBuffer(command_encoder, buffer, offset, size); + } + + pub inline fn copyBufferToBuffer(command_encoder: *CommandEncoder, source: *Buffer, source_offset: u64, destination: *Buffer, destination_offset: u64, size: u64) void { + Impl.commandEncoderCopyBufferToBuffer(command_encoder, source, source_offset, destination, destination_offset, size); + } + + pub inline fn copyBufferToTexture(command_encoder: *CommandEncoder, source: *const ImageCopyBuffer, destination: *const ImageCopyTexture, copy_size: *const Extent3D) void { + Impl.commandEncoderCopyBufferToTexture(command_encoder, source, destination, copy_size); + } + + pub inline fn copyTextureToBuffer(command_encoder: *CommandEncoder, source: *const ImageCopyTexture, destination: *const ImageCopyBuffer, copy_size: *const Extent3D) void { + Impl.commandEncoderCopyTextureToBuffer(command_encoder, source, destination, copy_size); + } + + pub inline fn copyTextureToTexture(command_encoder: *CommandEncoder, source: *const ImageCopyTexture, destination: *const ImageCopyTexture, copy_size: *const Extent3D) void { + Impl.commandEncoderCopyTextureToTexture(command_encoder, source, destination, copy_size); + } + + pub inline fn finish(command_encoder: *CommandEncoder, descriptor: ?*const CommandBuffer.Descriptor) *CommandBuffer { + return Impl.commandEncoderFinish(command_encoder, descriptor); + } + + pub inline fn injectValidationError(command_encoder: *CommandEncoder, message: [*:0]const u8) void { + Impl.commandEncoderInjectValidationError(command_encoder, message); + } + + pub inline fn insertDebugMarker(command_encoder: *CommandEncoder, marker_label: [*:0]const u8) void { + Impl.commandEncoderInsertDebugMarker(command_encoder, marker_label); + } + + pub inline fn popDebugGroup(command_encoder: *CommandEncoder) 
void { + Impl.commandEncoderPopDebugGroup(command_encoder); + } + + pub inline fn pushDebugGroup(command_encoder: *CommandEncoder, group_label: [*:0]const u8) void { + Impl.commandEncoderPushDebugGroup(command_encoder, group_label); + } + + pub inline fn resolveQuerySet(command_encoder: *CommandEncoder, query_set: *QuerySet, first_query: u32, query_count: u32, destination: *Buffer, destination_offset: u64) void { + Impl.commandEncoderResolveQuerySet(command_encoder, query_set, first_query, query_count, destination, destination_offset); + } + + pub inline fn setLabel(command_encoder: *CommandEncoder, label: [*:0]const u8) void { + Impl.commandEncoderSetLabel(command_encoder, label); + } + + pub inline fn writeBuffer( + command_encoder: *CommandEncoder, + buffer: *Buffer, + buffer_offset_bytes: u64, + data_slice: anytype, + ) void { + Impl.commandEncoderWriteBuffer( + command_encoder, + buffer, + buffer_offset_bytes, + @as([*]const u8, @ptrCast(std.mem.sliceAsBytes(data_slice).ptr)), + @as(u64, @intCast(data_slice.len)) * @sizeOf(std.meta.Elem(@TypeOf(data_slice))), + ); + } + + pub inline fn writeTimestamp(command_encoder: *CommandEncoder, query_set: *QuerySet, query_index: u32) void { + Impl.commandEncoderWriteTimestamp(command_encoder, query_set, query_index); + } + + pub inline fn reference(command_encoder: *CommandEncoder) void { + Impl.commandEncoderReference(command_encoder); + } + + pub inline fn release(command_encoder: *CommandEncoder) void { + Impl.commandEncoderRelease(command_encoder); + } +}; diff --git a/src/gpu/compute_pass_encoder.zig b/src/gpu/compute_pass_encoder.zig new file mode 100644 index 00000000..64e443f9 --- /dev/null +++ b/src/gpu/compute_pass_encoder.zig @@ -0,0 +1,64 @@ +const Buffer = @import("buffer.zig").Buffer; +const BindGroup = @import("bind_group.zig").BindGroup; +const ComputePipeline = @import("compute_pipeline.zig").ComputePipeline; +const QuerySet = @import("query_set.zig").QuerySet; +const Impl = @import("interface.zig").Impl; + +pub const ComputePassEncoder = opaque { + /// Default `workgroup_count_y`: 1 + /// Default `workgroup_count_z`: 1 + pub inline fn dispatchWorkgroups(compute_pass_encoder: *ComputePassEncoder, workgroup_count_x: u32, workgroup_count_y: u32, workgroup_count_z: u32) void { + Impl.computePassEncoderDispatchWorkgroups(compute_pass_encoder, workgroup_count_x, workgroup_count_y, workgroup_count_z); + } + + pub inline fn dispatchWorkgroupsIndirect(compute_pass_encoder: *ComputePassEncoder, indirect_buffer: *Buffer, indirect_offset: u64) void { + Impl.computePassEncoderDispatchWorkgroupsIndirect(compute_pass_encoder, indirect_buffer, indirect_offset); + } + + pub inline fn end(compute_pass_encoder: *ComputePassEncoder) void { + Impl.computePassEncoderEnd(compute_pass_encoder); + } + + pub inline fn insertDebugMarker(compute_pass_encoder: *ComputePassEncoder, marker_label: [*:0]const u8) void { + Impl.computePassEncoderInsertDebugMarker(compute_pass_encoder, marker_label); + } + + pub inline fn popDebugGroup(compute_pass_encoder: *ComputePassEncoder) void { + Impl.computePassEncoderPopDebugGroup(compute_pass_encoder); + } + + pub inline fn pushDebugGroup(compute_pass_encoder: *ComputePassEncoder, group_label: [*:0]const u8) void { + Impl.computePassEncoderPushDebugGroup(compute_pass_encoder, group_label); + } + + /// Default `dynamic_offsets`: null + pub inline fn setBindGroup(compute_pass_encoder: *ComputePassEncoder, group_index: u32, group: *BindGroup, dynamic_offsets: ?[]const u32) void { + Impl.computePassEncoderSetBindGroup( + 
compute_pass_encoder, + group_index, + group, + if (dynamic_offsets) |v| v.len else 0, + if (dynamic_offsets) |v| v.ptr else null, + ); + } + + pub inline fn setLabel(compute_pass_encoder: *ComputePassEncoder, label: [*:0]const u8) void { + Impl.computePassEncoderSetLabel(compute_pass_encoder, label); + } + + pub inline fn setPipeline(compute_pass_encoder: *ComputePassEncoder, pipeline: *ComputePipeline) void { + Impl.computePassEncoderSetPipeline(compute_pass_encoder, pipeline); + } + + pub inline fn writeTimestamp(compute_pass_encoder: *ComputePassEncoder, query_set: *QuerySet, query_index: u32) void { + Impl.computePassEncoderWriteTimestamp(compute_pass_encoder, query_set, query_index); + } + + pub inline fn reference(compute_pass_encoder: *ComputePassEncoder) void { + Impl.computePassEncoderReference(compute_pass_encoder); + } + + pub inline fn release(compute_pass_encoder: *ComputePassEncoder) void { + Impl.computePassEncoderRelease(compute_pass_encoder); + } +}; diff --git a/src/gpu/compute_pipeline.zig b/src/gpu/compute_pipeline.zig new file mode 100644 index 00000000..ac18de5e --- /dev/null +++ b/src/gpu/compute_pipeline.zig @@ -0,0 +1,30 @@ +const ChainedStruct = @import("main.zig").ChainedStruct; +const ProgrammableStageDescriptor = @import("main.zig").ProgrammableStageDescriptor; +const PipelineLayout = @import("pipeline_layout.zig").PipelineLayout; +const BindGroupLayout = @import("bind_group_layout.zig").BindGroupLayout; +const Impl = @import("interface.zig").Impl; + +pub const ComputePipeline = opaque { + pub const Descriptor = extern struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + layout: ?*PipelineLayout = null, + compute: ProgrammableStageDescriptor, + }; + + pub inline fn getBindGroupLayout(compute_pipeline: *ComputePipeline, group_index: u32) *BindGroupLayout { + return Impl.computePipelineGetBindGroupLayout(compute_pipeline, group_index); + } + + pub inline fn setLabel(compute_pipeline: *ComputePipeline, label: [*:0]const u8) void { + Impl.computePipelineSetLabel(compute_pipeline, label); + } + + pub inline fn reference(compute_pipeline: *ComputePipeline) void { + Impl.computePipelineReference(compute_pipeline); + } + + pub inline fn release(compute_pipeline: *ComputePipeline) void { + Impl.computePipelineRelease(compute_pipeline); + } +}; diff --git a/src/gpu/dawn.zig b/src/gpu/dawn.zig new file mode 100644 index 00000000..d8203415 --- /dev/null +++ b/src/gpu/dawn.zig @@ -0,0 +1,75 @@ +const Bool32 = @import("main.zig").Bool32; +const ChainedStruct = @import("main.zig").ChainedStruct; +const ChainedStructOut = @import("main.zig").ChainedStructOut; +const PowerPreference = @import("main.zig").PowerPreference; +const Texture = @import("texture.zig").Texture; +pub const Interface = @import("dawn_impl.zig").Interface; + +pub const CacheDeviceDescriptor = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .dawn_cache_device_descriptor }, + isolation_key: [*:0]const u8 = "", +}; + +pub const EncoderInternalUsageDescriptor = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .dawn_encoder_internal_usage_descriptor }, + use_internal_usages: Bool32 = .false, +}; + +pub const MultisampleStateRenderToSingleSampled = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .dawn_multisample_state_render_to_single_sampled }, + enabled: Bool32 = .false, +}; + +pub const RenderPassColorAttachmentRenderToSingleSampled = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = 
.dawn_render_pass_color_attachment_render_to_single_sampled }, + implicit_sample_count: u32 = 1, +}; + +pub const TextureInternalUsageDescriptor = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .dawn_texture_internal_usage_descriptor }, + internal_usage: Texture.UsageFlags = Texture.UsageFlags.none, +}; + +pub const TogglesDescriptor = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .dawn_toggles_descriptor }, + enabled_toggles_count: usize = 0, + enabled_toggles: ?[*]const [*:0]const u8 = null, + disabled_toggles_count: usize = 0, + disabled_toggles: ?[*]const [*:0]const u8 = null, + + /// Provides a slightly friendlier Zig API to initialize this structure. + pub inline fn init(v: struct { + chain: ChainedStruct = .{ .next = null, .s_type = .dawn_toggles_descriptor }, + enabled_toggles: ?[]const [*:0]const u8 = null, + disabled_toggles: ?[]const [*:0]const u8 = null, + }) TogglesDescriptor { + return .{ + .chain = v.chain, + .enabled_toggles_count = if (v.enabled_toggles) |e| e.len else 0, + .enabled_toggles = if (v.enabled_toggles) |e| e.ptr else null, + .disabled_toggles_count = if (v.disabled_toggles) |e| e.len else 0, + .disabled_toggles = if (v.disabled_toggles) |e| e.ptr else null, + }; + } +}; + +pub const ShaderModuleSPIRVOptionsDescriptor = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .dawn_shader_module_spirv_options_descriptor }, + allow_non_uniform_derivatives: Bool32 = .false, +}; + +pub const AdapterPropertiesPowerPreference = extern struct { + chain: ChainedStructOut = .{ + .next = null, + .s_type = .dawn_adapter_properties_power_preference, + }, + power_preference: PowerPreference = .undefined, +}; + +pub const BufferDescriptorErrorInfoFromWireClient = extern struct { + chain: ChainedStruct = .{ + .next = null, + .s_type = .dawn_buffer_descriptor_error_info_from_wire_client, + }, + out_of_memory: Bool32 = .false, +}; diff --git a/src/gpu/dawn_impl.zig b/src/gpu/dawn_impl.zig new file mode 100644 index 00000000..31b718ec --- /dev/null +++ b/src/gpu/dawn_impl.zig @@ -0,0 +1,1270 @@ +const gpu = @import("main.zig"); +const builtin = @import("builtin"); +const std = @import("std"); + +const c = @cImport({ + @cInclude("dawn/webgpu.h"); + @cInclude("mach_dawn.h"); +}); + +var didInit = false; +var procs: c.DawnProcTable = undefined; + +/// A Dawn implementation of the gpu.Interface, which merely directs calls to the Dawn proc table. +/// +/// Before use, it must be `.init()`ialized in order to set the global proc table. 
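A minimal call-order sketch of that init-before-use requirement (an editorial assumption modeled on the src/gpu/example/main.zig added by this patch; the allocator choice is illustrative):

    const std = @import("std");
    const gpu = @import("mach").gpu;

    // Root declaration selecting the Dawn implementation for gpu.Impl.
    pub const GPUInterface = gpu.dawn.Interface;

    pub fn main() !void {
        // Must happen before any other gpu.* call: fetches the Dawn proc table.
        try gpu.Impl.init(std.heap.c_allocator, .{});
        const instance = gpu.createInstance(null) orelse return error.InstanceCreationFailed;
        defer instance.release();
    }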
+pub const Interface = struct { + pub fn init(allocator: std.mem.Allocator, _: struct {}) error{}!void { + _ = allocator; + didInit = true; + procs = c.machDawnGetProcTable(); + } + + pub inline fn createInstance(descriptor: ?*const gpu.Instance.Descriptor) ?*gpu.Instance { + if (builtin.mode == .Debug and !didInit) @panic("dawn: not initialized; did you forget to call gpu.Impl.init()?"); + return @ptrCast(procs.createInstance.?(@ptrCast(descriptor))); + } + + pub inline fn getProcAddress(device: *gpu.Device, proc_name: [*:0]const u8) ?gpu.Proc { + return procs.getProcAddress.?(@ptrCast(device), proc_name); + } + + pub inline fn adapterCreateDevice(adapter: *gpu.Adapter, descriptor: ?*const gpu.Device.Descriptor) ?*gpu.Device { + return @ptrCast(procs.adapterCreateDevice.?(@ptrCast(adapter), @ptrCast(descriptor))); + } + + pub inline fn adapterEnumerateFeatures(adapter: *gpu.Adapter, features: ?[*]gpu.FeatureName) usize { + return procs.adapterEnumerateFeatures.?(@ptrCast(adapter), @ptrCast(features)); + } + + pub inline fn adapterGetInstance(adapter: *gpu.Adapter) *gpu.Instance { + return @ptrCast(procs.adapterGetInstance.?(@ptrCast(adapter))); + } + + pub inline fn adapterGetLimits(adapter: *gpu.Adapter, limits: *gpu.SupportedLimits) u32 { + return procs.adapterGetLimits.?(@ptrCast(adapter), @ptrCast(limits)); + } + + pub inline fn adapterGetProperties(adapter: *gpu.Adapter, properties: *gpu.Adapter.Properties) void { + return procs.adapterGetProperties.?(@ptrCast(adapter), @ptrCast(properties)); + } + + pub inline fn adapterHasFeature(adapter: *gpu.Adapter, feature: gpu.FeatureName) u32 { + return procs.adapterHasFeature.?(@ptrCast(adapter), @intFromEnum(feature)); + } + + pub inline fn adapterPropertiesFreeMembers(value: gpu.Adapter.Properties) void { + procs.adapterPropertiesFreeMembers.?(@bitCast(value)); + } + + pub inline fn adapterRequestDevice(adapter: *gpu.Adapter, descriptor: ?*const gpu.Device.Descriptor, callback: gpu.RequestDeviceCallback, userdata: ?*anyopaque) void { + return procs.adapterRequestDevice.?( + @ptrCast(adapter), + @ptrCast(descriptor), + @ptrCast(callback), + userdata, + ); + } + + pub inline fn adapterReference(adapter: *gpu.Adapter) void { + procs.adapterReference.?(@ptrCast(adapter)); + } + + pub inline fn adapterRelease(adapter: *gpu.Adapter) void { + procs.adapterRelease.?(@ptrCast(adapter)); + } + + pub inline fn bindGroupSetLabel(bind_group: *gpu.BindGroup, label: [*:0]const u8) void { + procs.bindGroupSetLabel.?(@ptrCast(bind_group), label); + } + + pub inline fn bindGroupReference(bind_group: *gpu.BindGroup) void { + procs.bindGroupReference.?(@ptrCast(bind_group)); + } + + pub inline fn bindGroupRelease(bind_group: *gpu.BindGroup) void { + procs.bindGroupRelease.?(@ptrCast(bind_group)); + } + + pub inline fn bindGroupLayoutSetLabel(bind_group_layout: *gpu.BindGroupLayout, label: [*:0]const u8) void { + procs.bindGroupLayoutSetLabel.?(@ptrCast(bind_group_layout), label); + } + + pub inline fn bindGroupLayoutReference(bind_group_layout: *gpu.BindGroupLayout) void { + procs.bindGroupLayoutReference.?(@ptrCast(bind_group_layout)); + } + + pub inline fn bindGroupLayoutRelease(bind_group_layout: *gpu.BindGroupLayout) void { + procs.bindGroupLayoutRelease.?(@ptrCast(bind_group_layout)); + } + + pub inline fn bufferDestroy(buffer: *gpu.Buffer) void { + procs.bufferDestroy.?(@ptrCast(buffer)); + } + + pub inline fn bufferGetMapState(buffer: *gpu.Buffer) gpu.Buffer.MapState { + return @enumFromInt(procs.bufferGetMapState.?(@ptrCast(buffer))); + } + + // TODO: 
dawn: return value not marked as nullable in dawn.json but in fact is. + pub inline fn bufferGetConstMappedRange(buffer: *gpu.Buffer, offset: usize, size: usize) ?*const anyopaque { + return procs.bufferGetConstMappedRange.?(@ptrCast(buffer), offset, size); + } + + // TODO: dawn: return value not marked as nullable in dawn.json but in fact is. + pub inline fn bufferGetMappedRange(buffer: *gpu.Buffer, offset: usize, size: usize) ?*anyopaque { + return procs.bufferGetMappedRange.?(@ptrCast(buffer), offset, size); + } + + pub inline fn bufferGetSize(buffer: *gpu.Buffer) u64 { + return procs.bufferGetSize.?(@ptrCast(buffer)); + } + + pub inline fn bufferGetUsage(buffer: *gpu.Buffer) gpu.Buffer.UsageFlags { + return @bitCast(procs.bufferGetUsage.?(@ptrCast(buffer))); + } + + pub inline fn bufferMapAsync(buffer: *gpu.Buffer, mode: gpu.MapModeFlags, offset: usize, size: usize, callback: gpu.Buffer.MapCallback, userdata: ?*anyopaque) void { + procs.bufferMapAsync.?( + @ptrCast(buffer), + @bitCast(mode), + offset, + size, + @ptrCast(callback), + userdata, + ); + } + + pub inline fn bufferSetLabel(buffer: *gpu.Buffer, label: [*:0]const u8) void { + procs.bufferSetLabel.?(@ptrCast(buffer), label); + } + + pub inline fn bufferUnmap(buffer: *gpu.Buffer) void { + procs.bufferUnmap.?(@ptrCast(buffer)); + } + + pub inline fn bufferReference(buffer: *gpu.Buffer) void { + procs.bufferReference.?(@ptrCast(buffer)); + } + + pub inline fn bufferRelease(buffer: *gpu.Buffer) void { + procs.bufferRelease.?(@ptrCast(buffer)); + } + + pub inline fn commandBufferSetLabel(command_buffer: *gpu.CommandBuffer, label: [*:0]const u8) void { + procs.commandBufferSetLabel.?(@ptrCast(command_buffer), label); + } + + pub inline fn commandBufferReference(command_buffer: *gpu.CommandBuffer) void { + procs.commandBufferReference.?(@ptrCast(command_buffer)); + } + + pub inline fn commandBufferRelease(command_buffer: *gpu.CommandBuffer) void { + procs.commandBufferRelease.?(@ptrCast(command_buffer)); + } + + pub inline fn commandEncoderBeginComputePass(command_encoder: *gpu.CommandEncoder, descriptor: ?*const gpu.ComputePassDescriptor) *gpu.ComputePassEncoder { + return @ptrCast(procs.commandEncoderBeginComputePass.?(@ptrCast(command_encoder), @ptrCast(descriptor))); + } + + pub inline fn commandEncoderBeginRenderPass(command_encoder: *gpu.CommandEncoder, descriptor: *const gpu.RenderPassDescriptor) *gpu.RenderPassEncoder { + return @ptrCast(procs.commandEncoderBeginRenderPass.?(@ptrCast(command_encoder), @ptrCast(descriptor))); + } + + pub inline fn commandEncoderClearBuffer(command_encoder: *gpu.CommandEncoder, buffer: *gpu.Buffer, offset: u64, size: u64) void { + procs.commandEncoderClearBuffer.?( + @ptrCast(command_encoder), + @ptrCast(buffer), + offset, + size, + ); + } + + pub inline fn commandEncoderCopyBufferToBuffer(command_encoder: *gpu.CommandEncoder, source: *gpu.Buffer, source_offset: u64, destination: *gpu.Buffer, destination_offset: u64, size: u64) void { + procs.commandEncoderCopyBufferToBuffer.?( + @ptrCast(command_encoder), + @ptrCast(source), + source_offset, + @ptrCast(destination), + destination_offset, + size, + ); + } + + pub inline fn commandEncoderCopyBufferToTexture(command_encoder: *gpu.CommandEncoder, source: *const gpu.ImageCopyBuffer, destination: *const gpu.ImageCopyTexture, copy_size: *const gpu.Extent3D) void { + procs.commandEncoderCopyBufferToTexture.?( + @ptrCast(command_encoder), + @ptrCast(source), + @ptrCast(destination), + @ptrCast(copy_size), + ); + } + + pub inline fn 
commandEncoderCopyTextureToBuffer(command_encoder: *gpu.CommandEncoder, source: *const gpu.ImageCopyTexture, destination: *const gpu.ImageCopyBuffer, copy_size: *const gpu.Extent3D) void { + procs.commandEncoderCopyTextureToBuffer.?( + @ptrCast(command_encoder), + @ptrCast(source), + @ptrCast(destination), + @ptrCast(copy_size), + ); + } + + pub inline fn commandEncoderCopyTextureToTexture(command_encoder: *gpu.CommandEncoder, source: *const gpu.ImageCopyTexture, destination: *const gpu.ImageCopyTexture, copy_size: *const gpu.Extent3D) void { + procs.commandEncoderCopyTextureToTexture.?( + @ptrCast(command_encoder), + @ptrCast(source), + @ptrCast(destination), + @ptrCast(copy_size), + ); + } + + pub inline fn commandEncoderFinish(command_encoder: *gpu.CommandEncoder, descriptor: ?*const gpu.CommandBuffer.Descriptor) *gpu.CommandBuffer { + return @ptrCast(procs.commandEncoderFinish.?( + @ptrCast(command_encoder), + @ptrCast(descriptor), + )); + } + + pub inline fn commandEncoderInjectValidationError(command_encoder: *gpu.CommandEncoder, message: [*:0]const u8) void { + procs.commandEncoderInjectValidationError.?( + @ptrCast(command_encoder), + message, + ); + } + + pub inline fn commandEncoderInsertDebugMarker(command_encoder: *gpu.CommandEncoder, marker_label: [*:0]const u8) void { + procs.commandEncoderInsertDebugMarker.?( + @ptrCast(command_encoder), + marker_label, + ); + } + + pub inline fn commandEncoderPopDebugGroup(command_encoder: *gpu.CommandEncoder) void { + procs.commandEncoderPopDebugGroup.?(@ptrCast(command_encoder)); + } + + pub inline fn commandEncoderPushDebugGroup(command_encoder: *gpu.CommandEncoder, group_label: [*:0]const u8) void { + procs.commandEncoderPushDebugGroup.?( + @ptrCast(command_encoder), + group_label, + ); + } + + pub inline fn commandEncoderResolveQuerySet(command_encoder: *gpu.CommandEncoder, query_set: *gpu.QuerySet, first_query: u32, query_count: u32, destination: *gpu.Buffer, destination_offset: u64) void { + procs.commandEncoderResolveQuerySet.?( + @ptrCast(command_encoder), + @ptrCast(query_set), + first_query, + query_count, + @ptrCast(destination), + destination_offset, + ); + } + + pub inline fn commandEncoderSetLabel(command_encoder: *gpu.CommandEncoder, label: [*:0]const u8) void { + procs.commandEncoderSetLabel.?(@ptrCast(command_encoder), label); + } + + pub inline fn commandEncoderWriteBuffer(command_encoder: *gpu.CommandEncoder, buffer: *gpu.Buffer, buffer_offset: u64, data: [*]const u8, size: u64) void { + procs.commandEncoderWriteBuffer.?( + @ptrCast(command_encoder), + @ptrCast(buffer), + buffer_offset, + data, + size, + ); + } + + pub inline fn commandEncoderWriteTimestamp(command_encoder: *gpu.CommandEncoder, query_set: *gpu.QuerySet, query_index: u32) void { + procs.commandEncoderWriteTimestamp.?( + @ptrCast(command_encoder), + @ptrCast(query_set), + query_index, + ); + } + + pub inline fn commandEncoderReference(command_encoder: *gpu.CommandEncoder) void { + procs.commandEncoderReference.?(@ptrCast(command_encoder)); + } + + pub inline fn commandEncoderRelease(command_encoder: *gpu.CommandEncoder) void { + procs.commandEncoderRelease.?(@ptrCast(command_encoder)); + } + + pub inline fn computePassEncoderDispatchWorkgroups(compute_pass_encoder: *gpu.ComputePassEncoder, workgroup_count_x: u32, workgroup_count_y: u32, workgroup_count_z: u32) void { + procs.computePassEncoderDispatchWorkgroups.?( + @ptrCast(compute_pass_encoder), + workgroup_count_x, + workgroup_count_y, + workgroup_count_z, + ); + } + + pub inline fn 
computePassEncoderDispatchWorkgroupsIndirect(compute_pass_encoder: *gpu.ComputePassEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) void { + procs.computePassEncoderDispatchWorkgroupsIndirect.?( + @ptrCast(compute_pass_encoder), + @ptrCast(indirect_buffer), + indirect_offset, + ); + } + + pub inline fn computePassEncoderEnd(compute_pass_encoder: *gpu.ComputePassEncoder) void { + procs.computePassEncoderEnd.?(@ptrCast(compute_pass_encoder)); + } + + pub inline fn computePassEncoderInsertDebugMarker(compute_pass_encoder: *gpu.ComputePassEncoder, marker_label: [*:0]const u8) void { + procs.computePassEncoderInsertDebugMarker.?( + @ptrCast(compute_pass_encoder), + marker_label, + ); + } + + pub inline fn computePassEncoderPopDebugGroup(compute_pass_encoder: *gpu.ComputePassEncoder) void { + procs.computePassEncoderPopDebugGroup.?(@ptrCast(compute_pass_encoder)); + } + + pub inline fn computePassEncoderPushDebugGroup(compute_pass_encoder: *gpu.ComputePassEncoder, group_label: [*:0]const u8) void { + procs.computePassEncoderPushDebugGroup.?( + @ptrCast(compute_pass_encoder), + group_label, + ); + } + + pub inline fn computePassEncoderSetBindGroup(compute_pass_encoder: *gpu.ComputePassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void { + procs.computePassEncoderSetBindGroup.?( + @ptrCast(compute_pass_encoder), + group_index, + @ptrCast(group), + dynamic_offset_count, + dynamic_offsets, + ); + } + + pub inline fn computePassEncoderSetLabel(compute_pass_encoder: *gpu.ComputePassEncoder, label: [*:0]const u8) void { + procs.computePassEncoderSetLabel.?(@ptrCast(compute_pass_encoder), label); + } + + pub inline fn computePassEncoderSetPipeline(compute_pass_encoder: *gpu.ComputePassEncoder, pipeline: *gpu.ComputePipeline) void { + procs.computePassEncoderSetPipeline.?( + @ptrCast(compute_pass_encoder), + @ptrCast(pipeline), + ); + } + + pub inline fn computePassEncoderWriteTimestamp(compute_pass_encoder: *gpu.ComputePassEncoder, query_set: *gpu.QuerySet, query_index: u32) void { + procs.computePassEncoderWriteTimestamp.?( + @ptrCast(compute_pass_encoder), + @ptrCast(query_set), + query_index, + ); + } + + pub inline fn computePassEncoderReference(compute_pass_encoder: *gpu.ComputePassEncoder) void { + procs.computePassEncoderReference.?(@ptrCast(compute_pass_encoder)); + } + + pub inline fn computePassEncoderRelease(compute_pass_encoder: *gpu.ComputePassEncoder) void { + procs.computePassEncoderRelease.?(@ptrCast(compute_pass_encoder)); + } + + pub inline fn computePipelineGetBindGroupLayout(compute_pipeline: *gpu.ComputePipeline, group_index: u32) *gpu.BindGroupLayout { + return @ptrCast(procs.computePipelineGetBindGroupLayout.?( + @ptrCast(compute_pipeline), + group_index, + )); + } + + pub inline fn computePipelineSetLabel(compute_pipeline: *gpu.ComputePipeline, label: [*:0]const u8) void { + procs.computePipelineSetLabel.?(@ptrCast(compute_pipeline), label); + } + + pub inline fn computePipelineReference(compute_pipeline: *gpu.ComputePipeline) void { + procs.computePipelineReference.?(@ptrCast(compute_pipeline)); + } + + pub inline fn computePipelineRelease(compute_pipeline: *gpu.ComputePipeline) void { + procs.computePipelineRelease.?(@ptrCast(compute_pipeline)); + } + + pub inline fn deviceCreateBindGroup(device: *gpu.Device, descriptor: *const gpu.BindGroup.Descriptor) *gpu.BindGroup { + return @ptrCast(procs.deviceCreateBindGroup.?( + @ptrCast(device), + @ptrCast(descriptor), + )); + } + + pub inline fn 
deviceCreateBindGroupLayout(device: *gpu.Device, descriptor: *const gpu.BindGroupLayout.Descriptor) *gpu.BindGroupLayout { + return @ptrCast(procs.deviceCreateBindGroupLayout.?( + @ptrCast(device), + @ptrCast(descriptor), + )); + } + + pub inline fn deviceCreateBuffer(device: *gpu.Device, descriptor: *const gpu.Buffer.Descriptor) *gpu.Buffer { + return @ptrCast(procs.deviceCreateBuffer.?( + @ptrCast(device), + @ptrCast(descriptor), + )); + } + + pub inline fn deviceCreateCommandEncoder(device: *gpu.Device, descriptor: ?*const gpu.CommandEncoder.Descriptor) *gpu.CommandEncoder { + return @ptrCast(procs.deviceCreateCommandEncoder.?( + @ptrCast(device), + @ptrCast(descriptor), + )); + } + + pub inline fn deviceCreateComputePipeline(device: *gpu.Device, descriptor: *const gpu.ComputePipeline.Descriptor) *gpu.ComputePipeline { + return @ptrCast(procs.deviceCreateComputePipeline.?( + @ptrCast(device), + @ptrCast(descriptor), + )); + } + + pub inline fn deviceCreateComputePipelineAsync(device: *gpu.Device, descriptor: *const gpu.ComputePipeline.Descriptor, callback: gpu.CreateComputePipelineAsyncCallback, userdata: ?*anyopaque) void { + procs.deviceCreateComputePipelineAsync.?( + @ptrCast(device), + @ptrCast(descriptor), + @ptrCast(callback), + userdata, + ); + } + + pub inline fn deviceCreateErrorBuffer(device: *gpu.Device, descriptor: *const gpu.Buffer.Descriptor) *gpu.Buffer { + return @ptrCast(procs.deviceCreateErrorBuffer.?( + @ptrCast(device), + @ptrCast(descriptor), + )); + } + + pub inline fn deviceCreateErrorExternalTexture(device: *gpu.Device) *gpu.ExternalTexture { + return @ptrCast(procs.deviceCreateErrorExternalTexture.?(@ptrCast(device))); + } + + pub inline fn deviceCreateErrorTexture(device: *gpu.Device, descriptor: *const gpu.Texture.Descriptor) *gpu.Texture { + return @ptrCast(procs.deviceCreateErrorTexture.?( + @ptrCast(device), + @ptrCast(descriptor), + )); + } + + pub inline fn deviceCreateExternalTexture(device: *gpu.Device, external_texture_descriptor: *const gpu.ExternalTexture.Descriptor) *gpu.ExternalTexture { + return @ptrCast(procs.deviceCreateExternalTexture.?( + @ptrCast(device), + @ptrCast(external_texture_descriptor), + )); + } + + pub inline fn deviceCreatePipelineLayout(device: *gpu.Device, pipeline_layout_descriptor: *const gpu.PipelineLayout.Descriptor) *gpu.PipelineLayout { + return @ptrCast(procs.deviceCreatePipelineLayout.?( + @ptrCast(device), + @ptrCast(pipeline_layout_descriptor), + )); + } + + pub inline fn deviceCreateQuerySet(device: *gpu.Device, descriptor: *const gpu.QuerySet.Descriptor) *gpu.QuerySet { + return @ptrCast(procs.deviceCreateQuerySet.?( + @ptrCast(device), + @ptrCast(descriptor), + )); + } + + pub inline fn deviceCreateRenderBundleEncoder(device: *gpu.Device, descriptor: *const gpu.RenderBundleEncoder.Descriptor) *gpu.RenderBundleEncoder { + return @ptrCast(procs.deviceCreateRenderBundleEncoder.?( + @ptrCast(device), + @ptrCast(descriptor), + )); + } + + pub inline fn deviceCreateRenderPipeline(device: *gpu.Device, descriptor: *const gpu.RenderPipeline.Descriptor) *gpu.RenderPipeline { + return @ptrCast(procs.deviceCreateRenderPipeline.?( + @ptrCast(device), + @ptrCast(descriptor), + )); + } + + pub inline fn deviceCreateRenderPipelineAsync(device: *gpu.Device, descriptor: *const gpu.RenderPipeline.Descriptor, callback: gpu.CreateRenderPipelineAsyncCallback, userdata: ?*anyopaque) void { + procs.deviceCreateRenderPipelineAsync.?( + @ptrCast(device), + @ptrCast(descriptor), + @ptrCast(callback), + userdata, + ); + } + + // 
TODO(self-hosted): this cannot be marked as inline for some reason. + // https://github.com/ziglang/zig/issues/12545 + pub fn deviceCreateSampler(device: *gpu.Device, descriptor: ?*const gpu.Sampler.Descriptor) *gpu.Sampler { + return @ptrCast(procs.deviceCreateSampler.?( + @ptrCast(device), + @ptrCast(descriptor), + )); + } + + pub inline fn deviceCreateShaderModule(device: *gpu.Device, descriptor: *const gpu.ShaderModule.Descriptor) *gpu.ShaderModule { + return @ptrCast(procs.deviceCreateShaderModule.?( + @ptrCast(device), + @ptrCast(descriptor), + )); + } + + pub inline fn deviceCreateSwapChain(device: *gpu.Device, surface: ?*gpu.Surface, descriptor: *const gpu.SwapChain.Descriptor) *gpu.SwapChain { + return @ptrCast(procs.deviceCreateSwapChain.?( + @ptrCast(device), + @ptrCast(surface), + @ptrCast(descriptor), + )); + } + + pub inline fn deviceCreateTexture(device: *gpu.Device, descriptor: *const gpu.Texture.Descriptor) *gpu.Texture { + return @ptrCast(procs.deviceCreateTexture.?( + @ptrCast(device), + @ptrCast(descriptor), + )); + } + + pub inline fn deviceDestroy(device: *gpu.Device) void { + procs.deviceDestroy.?(@ptrCast(device)); + } + + pub inline fn deviceEnumerateFeatures(device: *gpu.Device, features: ?[*]gpu.FeatureName) usize { + return procs.deviceEnumerateFeatures.?(@ptrCast(device), @ptrCast(features)); + } + + pub inline fn forceLoss(device: *gpu.Device, reason: gpu.Device.LostReason, message: [*:0]const u8) void { + return procs.deviceForceLoss.?( + @ptrCast(device), + reason, + message, + ); + } + + pub inline fn deviceGetAdapter(device: *gpu.Device) *gpu.Adapter { + return procs.deviceGetAdapter.?(@ptrCast(device)); + } + + pub inline fn deviceGetLimits(device: *gpu.Device, limits: *gpu.SupportedLimits) u32 { + return procs.deviceGetLimits.?( + @ptrCast(device), + @ptrCast(limits), + ); + } + + pub inline fn deviceGetQueue(device: *gpu.Device) *gpu.Queue { + return @ptrCast(procs.deviceGetQueue.?(@ptrCast(device))); + } + + pub inline fn deviceHasFeature(device: *gpu.Device, feature: gpu.FeatureName) u32 { + return procs.deviceHasFeature.?( + @ptrCast(device), + @intFromEnum(feature), + ); + } + + pub inline fn deviceImportSharedFence(device: *gpu.Device, descriptor: *const gpu.SharedFence.Descriptor) *gpu.SharedFence { + return @ptrCast(procs.deviceImportSharedFence.?(@ptrCast(device), @ptrCast(descriptor))); + } + + pub inline fn deviceImportSharedTextureMemory(device: *gpu.Device, descriptor: *const gpu.SharedTextureMemory.Descriptor) *gpu.SharedTextureMemory { + return @ptrCast(procs.deviceImportSharedTextureMemory.?(@ptrCast(device), @ptrCast(descriptor))); + } + + pub inline fn deviceInjectError(device: *gpu.Device, typ: gpu.ErrorType, message: [*:0]const u8) void { + procs.deviceInjectError.?( + @ptrCast(device), + @intFromEnum(typ), + message, + ); + } + + pub inline fn devicePopErrorScope(device: *gpu.Device, callback: gpu.ErrorCallback, userdata: ?*anyopaque) void { + procs.devicePopErrorScope.?( + @ptrCast(device), + @ptrCast(callback), + userdata, + ); + } + + pub inline fn devicePushErrorScope(device: *gpu.Device, filter: gpu.ErrorFilter) void { + procs.devicePushErrorScope.?( + @ptrCast(device), + @intFromEnum(filter), + ); + } + + pub inline fn deviceSetDeviceLostCallback(device: *gpu.Device, callback: ?gpu.Device.LostCallback, userdata: ?*anyopaque) void { + procs.deviceSetDeviceLostCallback.?( + @ptrCast(device), + @ptrCast(callback), + userdata, + ); + } + + pub inline fn deviceSetLabel(device: *gpu.Device, label: [*:0]const u8) void { + 
+        procs.deviceSetLabel.?(@ptrCast(device), label);
+    }
+
+    pub inline fn deviceSetLoggingCallback(device: *gpu.Device, callback: ?gpu.LoggingCallback, userdata: ?*anyopaque) void {
+        procs.deviceSetLoggingCallback.?(
+            @ptrCast(device),
+            @ptrCast(callback),
+            userdata,
+        );
+    }
+
+    pub inline fn deviceSetUncapturedErrorCallback(device: *gpu.Device, callback: ?gpu.ErrorCallback, userdata: ?*anyopaque) void {
+        procs.deviceSetUncapturedErrorCallback.?(
+            @ptrCast(device),
+            @ptrCast(callback),
+            userdata,
+        );
+    }
+
+    pub inline fn deviceTick(device: *gpu.Device) void {
+        procs.deviceTick.?(@ptrCast(device));
+    }
+
+    pub inline fn machDeviceWaitForCommandsToBeScheduled(device: *gpu.Device) void {
+        c.machDawnDeviceWaitForCommandsToBeScheduled(@ptrCast(device));
+    }
+
+    pub inline fn deviceValidateTextureDescriptor(device: *gpu.Device, descriptor: *const gpu.Texture.Descriptor) void {
+        procs.deviceValidateTextureDescriptor.?(
+            @ptrCast(device),
+            @ptrCast(descriptor),
+        );
+    }
+
+    pub inline fn deviceReference(device: *gpu.Device) void {
+        procs.deviceReference.?(@ptrCast(device));
+    }
+
+    pub inline fn deviceRelease(device: *gpu.Device) void {
+        procs.deviceRelease.?(@ptrCast(device));
+    }
+
+    pub inline fn externalTextureDestroy(external_texture: *gpu.ExternalTexture) void {
+        procs.externalTextureDestroy.?(@ptrCast(external_texture));
+    }
+
+    pub inline fn externalTextureSetLabel(external_texture: *gpu.ExternalTexture, label: [*:0]const u8) void {
+        procs.externalTextureSetLabel.?(@ptrCast(external_texture), label);
+    }
+
+    pub inline fn externalTextureReference(external_texture: *gpu.ExternalTexture) void {
+        procs.externalTextureReference.?(@ptrCast(external_texture));
+    }
+
+    pub inline fn externalTextureRelease(external_texture: *gpu.ExternalTexture) void {
+        procs.externalTextureRelease.?(@ptrCast(external_texture));
+    }
+
+    pub inline fn instanceCreateSurface(instance: *gpu.Instance, descriptor: *const gpu.Surface.Descriptor) *gpu.Surface {
+        return @ptrCast(procs.instanceCreateSurface.?(
+            @ptrCast(instance),
+            @ptrCast(descriptor),
+        ));
+    }
+
+    pub inline fn instanceProcessEvents(instance: *gpu.Instance) void {
+        procs.instanceProcessEvents.?(
+            @ptrCast(instance),
+        );
+    }
+
+    pub inline fn instanceRequestAdapter(instance: *gpu.Instance, options: ?*const gpu.RequestAdapterOptions, callback: gpu.RequestAdapterCallback, userdata: ?*anyopaque) void {
+        procs.instanceRequestAdapter.?(
+            @ptrCast(instance),
+            @ptrCast(options),
+            @ptrCast(callback),
+            userdata,
+        );
+    }
+
+    pub inline fn instanceReference(instance: *gpu.Instance) void {
+        procs.instanceReference.?(@ptrCast(instance));
+    }
+
+    pub inline fn instanceRelease(instance: *gpu.Instance) void {
+        procs.instanceRelease.?(@ptrCast(instance));
+    }
+
+    pub inline fn pipelineLayoutSetLabel(pipeline_layout: *gpu.PipelineLayout, label: [*:0]const u8) void {
+        procs.pipelineLayoutSetLabel.?(@ptrCast(pipeline_layout), label);
+    }
+
+    pub inline fn pipelineLayoutReference(pipeline_layout: *gpu.PipelineLayout) void {
+        procs.pipelineLayoutReference.?(@ptrCast(pipeline_layout));
+    }
+
+    pub inline fn pipelineLayoutRelease(pipeline_layout: *gpu.PipelineLayout) void {
+        procs.pipelineLayoutRelease.?(@ptrCast(pipeline_layout));
+    }
+
+    pub inline fn querySetDestroy(query_set: *gpu.QuerySet) void {
+        procs.querySetDestroy.?(@ptrCast(query_set));
+    }
+
+    pub inline fn querySetGetCount(query_set: *gpu.QuerySet) u32 {
+        return procs.querySetGetCount.?(@ptrCast(query_set));
+    }
+
+    pub inline fn querySetGetType(query_set: *gpu.QuerySet) gpu.QueryType {
+        return 
@enumFromInt(procs.querySetGetType.?(@ptrCast(query_set))); + } + + pub inline fn querySetSetLabel(query_set: *gpu.QuerySet, label: [*:0]const u8) void { + procs.querySetSetLabel.?(@ptrCast(query_set), label); + } + + pub inline fn querySetReference(query_set: *gpu.QuerySet) void { + procs.querySetReference.?(@ptrCast(query_set)); + } + + pub inline fn querySetRelease(query_set: *gpu.QuerySet) void { + procs.querySetRelease.?(@ptrCast(query_set)); + } + + pub inline fn queueCopyExternalTextureForBrowser(queue: *gpu.Queue, source: *const gpu.ImageCopyExternalTexture, destination: *const gpu.ImageCopyTexture, copy_size: *const gpu.Extent3D, options: *const gpu.CopyTextureForBrowserOptions) void { + procs.queueCopyExternalTextureForBrowser.?( + @ptrCast(queue), + @ptrCast(source), + @ptrCast(destination), + @ptrCast(copy_size), + @ptrCast(options), + ); + } + + pub inline fn queueCopyTextureForBrowser(queue: *gpu.Queue, source: *const gpu.ImageCopyTexture, destination: *const gpu.ImageCopyTexture, copy_size: *const gpu.Extent3D, options: *const gpu.CopyTextureForBrowserOptions) void { + procs.queueCopyTextureForBrowser.?( + @ptrCast(queue), + @ptrCast(source), + @ptrCast(destination), + @ptrCast(copy_size), + @ptrCast(options), + ); + } + + pub inline fn queueOnSubmittedWorkDone(queue: *gpu.Queue, signal_value: u64, callback: gpu.Queue.WorkDoneCallback, userdata: ?*anyopaque) void { + procs.queueOnSubmittedWorkDone.?( + @ptrCast(queue), + signal_value, + @ptrCast(callback), + userdata, + ); + } + + pub inline fn queueSetLabel(queue: *gpu.Queue, label: [*:0]const u8) void { + procs.queueSetLabel.?(@ptrCast(queue), label); + } + + pub inline fn queueSubmit(queue: *gpu.Queue, command_count: usize, commands: [*]const *const gpu.CommandBuffer) void { + procs.queueSubmit.?( + @ptrCast(queue), + command_count, + @ptrCast(commands), + ); + } + + pub inline fn queueWriteBuffer(queue: *gpu.Queue, buffer: *gpu.Buffer, buffer_offset: u64, data: *const anyopaque, size: usize) void { + procs.queueWriteBuffer.?( + @ptrCast(queue), + @ptrCast(buffer), + buffer_offset, + data, + size, + ); + } + + pub inline fn queueWriteTexture(queue: *gpu.Queue, destination: *const gpu.ImageCopyTexture, data: *const anyopaque, data_size: usize, data_layout: *const gpu.Texture.DataLayout, write_size: *const gpu.Extent3D) void { + procs.queueWriteTexture.?( + @ptrCast(queue), + @ptrCast(destination), + data, + data_size, + @ptrCast(data_layout), + @ptrCast(write_size), + ); + } + + pub inline fn queueReference(queue: *gpu.Queue) void { + procs.queueReference.?(@ptrCast(queue)); + } + + pub inline fn queueRelease(queue: *gpu.Queue) void { + procs.queueRelease.?(@ptrCast(queue)); + } + + pub inline fn renderBundleSetLabel(render_bundle: *gpu.RenderBundle, label: [*:0]const u8) void { + procs.renderBundleSetLabel.?(@ptrCast(render_bundle), label); + } + + pub inline fn renderBundleReference(render_bundle: *gpu.RenderBundle) void { + procs.renderBundleReference.?(@ptrCast(render_bundle)); + } + + pub inline fn renderBundleRelease(render_bundle: *gpu.RenderBundle) void { + procs.renderBundleRelease.?(@ptrCast(render_bundle)); + } + + pub inline fn renderBundleEncoderDraw(render_bundle_encoder: *gpu.RenderBundleEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) void { + procs.renderBundleEncoderDraw.?(@ptrCast(render_bundle_encoder), vertex_count, instance_count, first_vertex, first_instance); + } + + pub inline fn renderBundleEncoderDrawIndexed(render_bundle_encoder: *gpu.RenderBundleEncoder, 
index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) void { + procs.renderBundleEncoderDrawIndexed.?( + @ptrCast(render_bundle_encoder), + index_count, + instance_count, + first_index, + base_vertex, + first_instance, + ); + } + + pub inline fn renderBundleEncoderDrawIndexedIndirect(render_bundle_encoder: *gpu.RenderBundleEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) void { + procs.renderBundleEncoderDrawIndexedIndirect.?( + @ptrCast(render_bundle_encoder), + @ptrCast(indirect_buffer), + indirect_offset, + ); + } + + pub inline fn renderBundleEncoderDrawIndirect(render_bundle_encoder: *gpu.RenderBundleEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) void { + procs.renderBundleEncoderDrawIndirect.?( + @ptrCast(render_bundle_encoder), + @ptrCast(indirect_buffer), + indirect_offset, + ); + } + + pub inline fn renderBundleEncoderFinish(render_bundle_encoder: *gpu.RenderBundleEncoder, descriptor: ?*const gpu.RenderBundle.Descriptor) *gpu.RenderBundle { + return @ptrCast(procs.renderBundleEncoderFinish.?( + @ptrCast(render_bundle_encoder), + @ptrCast(descriptor), + )); + } + + pub inline fn renderBundleEncoderInsertDebugMarker(render_bundle_encoder: *gpu.RenderBundleEncoder, marker_label: [*:0]const u8) void { + procs.renderBundleEncoderInsertDebugMarker.?( + @ptrCast(render_bundle_encoder), + marker_label, + ); + } + + pub inline fn renderBundleEncoderPopDebugGroup(render_bundle_encoder: *gpu.RenderBundleEncoder) void { + procs.renderBundleEncoderPopDebugGroup.?(@ptrCast(render_bundle_encoder)); + } + + pub inline fn renderBundleEncoderPushDebugGroup(render_bundle_encoder: *gpu.RenderBundleEncoder, group_label: [*:0]const u8) void { + procs.renderBundleEncoderPushDebugGroup.?(@ptrCast(render_bundle_encoder), group_label); + } + + pub inline fn renderBundleEncoderSetBindGroup(render_bundle_encoder: *gpu.RenderBundleEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void { + procs.renderBundleEncoderSetBindGroup.?( + @ptrCast(render_bundle_encoder), + group_index, + @ptrCast(group), + dynamic_offset_count, + dynamic_offsets, + ); + } + + pub inline fn renderBundleEncoderSetIndexBuffer(render_bundle_encoder: *gpu.RenderBundleEncoder, buffer: *gpu.Buffer, format: gpu.IndexFormat, offset: u64, size: u64) void { + procs.renderBundleEncoderSetIndexBuffer.?( + @ptrCast(render_bundle_encoder), + @ptrCast(buffer), + @intFromEnum(format), + offset, + size, + ); + } + + pub inline fn renderBundleEncoderSetLabel(render_bundle_encoder: *gpu.RenderBundleEncoder, label: [*:0]const u8) void { + procs.renderBundleEncoderSetLabel.?(@ptrCast(render_bundle_encoder), label); + } + + pub inline fn renderBundleEncoderSetPipeline(render_bundle_encoder: *gpu.RenderBundleEncoder, pipeline: *gpu.RenderPipeline) void { + procs.renderBundleEncoderSetPipeline.?( + @ptrCast(render_bundle_encoder), + @ptrCast(pipeline), + ); + } + + pub inline fn renderBundleEncoderSetVertexBuffer(render_bundle_encoder: *gpu.RenderBundleEncoder, slot: u32, buffer: *gpu.Buffer, offset: u64, size: u64) void { + procs.renderBundleEncoderSetVertexBuffer.?( + @ptrCast(render_bundle_encoder), + slot, + @ptrCast(buffer), + offset, + size, + ); + } + + pub inline fn renderBundleEncoderReference(render_bundle_encoder: *gpu.RenderBundleEncoder) void { + procs.renderBundleEncoderReference.?(@ptrCast(render_bundle_encoder)); + } + + pub inline fn renderBundleEncoderRelease(render_bundle_encoder: *gpu.RenderBundleEncoder) void { 
+ procs.renderBundleEncoderRelease.?(@ptrCast(render_bundle_encoder)); + } + + pub inline fn renderPassEncoderBeginOcclusionQuery(render_pass_encoder: *gpu.RenderPassEncoder, query_index: u32) void { + procs.renderPassEncoderBeginOcclusionQuery.?( + @ptrCast(render_pass_encoder), + query_index, + ); + } + + pub inline fn renderPassEncoderDraw(render_pass_encoder: *gpu.RenderPassEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) void { + procs.renderPassEncoderDraw.?( + @ptrCast(render_pass_encoder), + vertex_count, + instance_count, + first_vertex, + first_instance, + ); + } + + pub inline fn renderPassEncoderDrawIndexed(render_pass_encoder: *gpu.RenderPassEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) void { + procs.renderPassEncoderDrawIndexed.?( + @ptrCast(render_pass_encoder), + index_count, + instance_count, + first_index, + base_vertex, + first_instance, + ); + } + + pub inline fn renderPassEncoderDrawIndexedIndirect(render_pass_encoder: *gpu.RenderPassEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) void { + procs.renderPassEncoderDrawIndexedIndirect.?( + @ptrCast(render_pass_encoder), + @ptrCast(indirect_buffer), + indirect_offset, + ); + } + + pub inline fn renderPassEncoderDrawIndirect(render_pass_encoder: *gpu.RenderPassEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) void { + procs.renderPassEncoderDrawIndirect.?( + @ptrCast(render_pass_encoder), + @ptrCast(indirect_buffer), + indirect_offset, + ); + } + + pub inline fn renderPassEncoderEnd(render_pass_encoder: *gpu.RenderPassEncoder) void { + procs.renderPassEncoderEnd.?(@ptrCast(render_pass_encoder)); + } + + pub inline fn renderPassEncoderEndOcclusionQuery(render_pass_encoder: *gpu.RenderPassEncoder) void { + procs.renderPassEncoderEndOcclusionQuery.?(@ptrCast(render_pass_encoder)); + } + + pub inline fn renderPassEncoderExecuteBundles(render_pass_encoder: *gpu.RenderPassEncoder, bundles_count: usize, bundles: [*]const *const gpu.RenderBundle) void { + procs.renderPassEncoderExecuteBundles.?( + @ptrCast(render_pass_encoder), + bundles_count, + @ptrCast(bundles), + ); + } + + pub inline fn renderPassEncoderInsertDebugMarker(render_pass_encoder: *gpu.RenderPassEncoder, marker_label: [*:0]const u8) void { + procs.renderPassEncoderInsertDebugMarker.?(@ptrCast(render_pass_encoder), marker_label); + } + + pub inline fn renderPassEncoderPopDebugGroup(render_pass_encoder: *gpu.RenderPassEncoder) void { + procs.renderPassEncoderPopDebugGroup.?(@ptrCast(render_pass_encoder)); + } + + pub inline fn renderPassEncoderPushDebugGroup(render_pass_encoder: *gpu.RenderPassEncoder, group_label: [*:0]const u8) void { + procs.renderPassEncoderPushDebugGroup.?( + @ptrCast(render_pass_encoder), + group_label, + ); + } + + pub inline fn renderPassEncoderSetBindGroup(render_pass_encoder: *gpu.RenderPassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void { + procs.renderPassEncoderSetBindGroup.?( + @ptrCast(render_pass_encoder), + group_index, + @ptrCast(group), + dynamic_offset_count, + dynamic_offsets, + ); + } + + pub inline fn renderPassEncoderSetBlendConstant(render_pass_encoder: *gpu.RenderPassEncoder, color: *const gpu.Color) void { + procs.renderPassEncoderSetBlendConstant.?( + @ptrCast(render_pass_encoder), + @ptrCast(color), + ); + } + + pub inline fn renderPassEncoderSetIndexBuffer(render_pass_encoder: *gpu.RenderPassEncoder, buffer: *gpu.Buffer, format: 
gpu.IndexFormat, offset: u64, size: u64) void { + procs.renderPassEncoderSetIndexBuffer.?( + @ptrCast(render_pass_encoder), + @ptrCast(buffer), + @intFromEnum(format), + offset, + size, + ); + } + + pub inline fn renderPassEncoderSetLabel(render_pass_encoder: *gpu.RenderPassEncoder, label: [*:0]const u8) void { + procs.renderPassEncoderSetLabel.?(@ptrCast(render_pass_encoder), label); + } + + pub inline fn renderPassEncoderSetPipeline(render_pass_encoder: *gpu.RenderPassEncoder, pipeline: *gpu.RenderPipeline) void { + procs.renderPassEncoderSetPipeline.?( + @ptrCast(render_pass_encoder), + @ptrCast(pipeline), + ); + } + + pub inline fn renderPassEncoderSetScissorRect(render_pass_encoder: *gpu.RenderPassEncoder, x: u32, y: u32, width: u32, height: u32) void { + procs.renderPassEncoderSetScissorRect.?( + @ptrCast(render_pass_encoder), + x, + y, + width, + height, + ); + } + + pub inline fn renderPassEncoderSetStencilReference(render_pass_encoder: *gpu.RenderPassEncoder, reference: u32) void { + procs.renderPassEncoderSetStencilReference.?( + @ptrCast(render_pass_encoder), + reference, + ); + } + + pub inline fn renderPassEncoderSetVertexBuffer(render_pass_encoder: *gpu.RenderPassEncoder, slot: u32, buffer: *gpu.Buffer, offset: u64, size: u64) void { + procs.renderPassEncoderSetVertexBuffer.?( + @ptrCast(render_pass_encoder), + slot, + @ptrCast(buffer), + offset, + size, + ); + } + + pub inline fn renderPassEncoderSetViewport(render_pass_encoder: *gpu.RenderPassEncoder, x: f32, y: f32, width: f32, height: f32, min_depth: f32, max_depth: f32) void { + procs.renderPassEncoderSetViewport.?( + @ptrCast(render_pass_encoder), + x, + y, + width, + height, + min_depth, + max_depth, + ); + } + + pub inline fn renderPassEncoderWriteTimestamp(render_pass_encoder: *gpu.RenderPassEncoder, query_set: *gpu.QuerySet, query_index: u32) void { + procs.renderPassEncoderWriteTimestamp.?( + @ptrCast(render_pass_encoder), + @ptrCast(query_set), + query_index, + ); + } + + pub inline fn renderPassEncoderReference(render_pass_encoder: *gpu.RenderPassEncoder) void { + procs.renderPassEncoderReference.?(@ptrCast(render_pass_encoder)); + } + + pub inline fn renderPassEncoderRelease(render_pass_encoder: *gpu.RenderPassEncoder) void { + procs.renderPassEncoderRelease.?(@ptrCast(render_pass_encoder)); + } + + pub inline fn renderPipelineGetBindGroupLayout(render_pipeline: *gpu.RenderPipeline, group_index: u32) *gpu.BindGroupLayout { + return @ptrCast(procs.renderPipelineGetBindGroupLayout.?( + @ptrCast(render_pipeline), + group_index, + )); + } + + pub inline fn renderPipelineSetLabel(render_pipeline: *gpu.RenderPipeline, label: [*:0]const u8) void { + procs.renderPipelineSetLabel.?(@ptrCast(render_pipeline), label); + } + + pub inline fn renderPipelineReference(render_pipeline: *gpu.RenderPipeline) void { + procs.renderPipelineReference.?(@ptrCast(render_pipeline)); + } + + pub inline fn renderPipelineRelease(render_pipeline: *gpu.RenderPipeline) void { + procs.renderPipelineRelease.?(@ptrCast(render_pipeline)); + } + + pub inline fn samplerSetLabel(sampler: *gpu.Sampler, label: [*:0]const u8) void { + procs.samplerSetLabel.?(@ptrCast(sampler), label); + } + + pub inline fn samplerReference(sampler: *gpu.Sampler) void { + procs.samplerReference.?(@ptrCast(sampler)); + } + + pub inline fn samplerRelease(sampler: *gpu.Sampler) void { + procs.samplerRelease.?(@ptrCast(sampler)); + } + + pub inline fn shaderModuleGetCompilationInfo(shader_module: *gpu.ShaderModule, callback: gpu.CompilationInfoCallback, userdata: 
?*anyopaque) void { + procs.shaderModuleGetCompilationInfo.?( + @ptrCast(shader_module), + @ptrCast(callback), + userdata, + ); + } + + pub inline fn shaderModuleSetLabel(shader_module: *gpu.ShaderModule, label: [*:0]const u8) void { + procs.shaderModuleSetLabel.?(@ptrCast(shader_module), label); + } + + pub inline fn shaderModuleReference(shader_module: *gpu.ShaderModule) void { + procs.shaderModuleReference.?(@ptrCast(shader_module)); + } + + pub inline fn shaderModuleRelease(shader_module: *gpu.ShaderModule) void { + procs.shaderModuleRelease.?(@ptrCast(shader_module)); + } + + pub inline fn sharedFenceExportInfo(shared_fence: *gpu.SharedFence, info: *gpu.SharedFence.ExportInfo) void { + procs.sharedFenceExportInfo.?(@ptrCast(shared_fence), @ptrCast(info)); + } + + pub inline fn sharedFenceReference(shared_fence: *gpu.SharedFence) void { + procs.sharedFenceReference.?(@ptrCast(shared_fence)); + } + + pub inline fn sharedFenceRelease(shared_fence: *gpu.SharedFence) void { + procs.sharedFenceRelease.?(@ptrCast(shared_fence)); + } + + pub inline fn sharedTextureMemoryBeginAccess(shared_texture_memory: *gpu.SharedTextureMemory, texture: *gpu.Texture, descriptor: *const gpu.SharedTextureMemory.BeginAccessDescriptor) void { + procs.sharedTextureMemoryBeginAccess.?(@ptrCast(shared_texture_memory), @ptrCast(texture), @ptrCast(descriptor)); + } + + pub inline fn sharedTextureMemoryCreateTexture(shared_texture_memory: *gpu.SharedTextureMemory, descriptor: *const gpu.Texture.Descriptor) *gpu.Texture { + return @ptrCast(procs.sharedTextureMemoryCreateTexture.?(@ptrCast(shared_texture_memory), @ptrCast(descriptor))); + } + + pub inline fn sharedTextureMemoryEndAccess(shared_texture_memory: *gpu.SharedTextureMemory, texture: *gpu.Texture, descriptor: *gpu.SharedTextureMemory.EndAccessState) void { + procs.sharedTextureMemoryEndAccess.?(@ptrCast(shared_texture_memory), @ptrCast(texture), @ptrCast(descriptor)); + } + + pub inline fn sharedTextureMemoryEndAccessStateFreeMembers(value: gpu.SharedTextureMemory.EndAccessState) void { + procs.sharedTextureMemoryEndAccessStateFreeMembers.?(@bitCast(value)); + } + + pub inline fn sharedTextureMemoryGetProperties(shared_texture_memory: *gpu.SharedTextureMemory, properties: *gpu.SharedTextureMemory.Properties) void { + procs.sharedTextureMemoryGetProperties.?(@ptrCast(shared_texture_memory), @ptrCast(properties)); + } + + pub inline fn sharedTextureMemorySetLabel(shared_texture_memory: *gpu.SharedTextureMemory, label: [*:0]const u8) void { + procs.sharedTextureMemorySetLabel.?(@ptrCast(shared_texture_memory), label); + } + + pub inline fn sharedTextureMemoryReference(shared_texture_memory: *gpu.SharedTextureMemory) void { + procs.sharedTextureMemoryReference.?(@ptrCast(shared_texture_memory)); + } + + pub inline fn sharedTextureMemoryRelease(shared_texture_memory: *gpu.SharedTextureMemory) void { + procs.sharedTextureMemoryRelease.?(@ptrCast(shared_texture_memory)); + } + + pub inline fn surfaceReference(surface: *gpu.Surface) void { + procs.surfaceReference.?(@ptrCast(surface)); + } + + pub inline fn surfaceRelease(surface: *gpu.Surface) void { + procs.surfaceRelease.?(@ptrCast(surface)); + } + + pub inline fn swapChainGetCurrentTexture(swap_chain: *gpu.SwapChain) ?*gpu.Texture { + return @ptrCast(procs.swapChainGetCurrentTexture.?(@ptrCast(swap_chain))); + } + + pub inline fn swapChainGetCurrentTextureView(swap_chain: *gpu.SwapChain) ?*gpu.TextureView { + return @ptrCast(procs.swapChainGetCurrentTextureView.?(@ptrCast(swap_chain))); + } + + pub inline fn 
swapChainPresent(swap_chain: *gpu.SwapChain) void { + procs.swapChainPresent.?(@ptrCast(swap_chain)); + } + + pub inline fn swapChainReference(swap_chain: *gpu.SwapChain) void { + procs.swapChainReference.?(@ptrCast(swap_chain)); + } + + pub inline fn swapChainRelease(swap_chain: *gpu.SwapChain) void { + procs.swapChainRelease.?(@ptrCast(swap_chain)); + } + + pub inline fn textureCreateView(texture: *gpu.Texture, descriptor: ?*const gpu.TextureView.Descriptor) *gpu.TextureView { + return @ptrCast(procs.textureCreateView.?( + @ptrCast(texture), + @ptrCast(descriptor), + )); + } + + pub inline fn textureDestroy(texture: *gpu.Texture) void { + procs.textureDestroy.?(@ptrCast(texture)); + } + + pub inline fn textureGetDepthOrArrayLayers(texture: *gpu.Texture) u32 { + return procs.textureGetDepthOrArrayLayers.?(@ptrCast(texture)); + } + + pub inline fn textureGetDimension(texture: *gpu.Texture) gpu.Texture.Dimension { + return @enumFromInt(procs.textureGetDimension.?(@ptrCast(texture))); + } + + pub inline fn textureGetFormat(texture: *gpu.Texture) gpu.Texture.Format { + return @enumFromInt(procs.textureGetFormat.?(@ptrCast(texture))); + } + + pub inline fn textureGetHeight(texture: *gpu.Texture) u32 { + return procs.textureGetHeight.?(@ptrCast(texture)); + } + + pub inline fn textureGetMipLevelCount(texture: *gpu.Texture) u32 { + return procs.textureGetMipLevelCount.?(@ptrCast(texture)); + } + + pub inline fn textureGetSampleCount(texture: *gpu.Texture) u32 { + return procs.textureGetSampleCount.?(@ptrCast(texture)); + } + + pub inline fn textureGetUsage(texture: *gpu.Texture) gpu.Texture.UsageFlags { + return @bitCast(procs.textureGetUsage.?(@ptrCast(texture))); + } + + pub inline fn textureGetWidth(texture: *gpu.Texture) u32 { + return procs.textureGetWidth.?(@ptrCast(texture)); + } + + pub inline fn textureSetLabel(texture: *gpu.Texture, label: [*:0]const u8) void { + procs.textureSetLabel.?(@ptrCast(texture), label); + } + + pub inline fn textureReference(texture: *gpu.Texture) void { + procs.textureReference.?(@ptrCast(texture)); + } + + pub inline fn textureRelease(texture: *gpu.Texture) void { + procs.textureRelease.?(@ptrCast(texture)); + } + + pub inline fn textureViewSetLabel(texture_view: *gpu.TextureView, label: [*:0]const u8) void { + procs.textureViewSetLabel.?(@ptrCast(texture_view), label); + } + + pub inline fn textureViewReference(texture_view: *gpu.TextureView) void { + procs.textureViewReference.?(@ptrCast(texture_view)); + } + + pub inline fn textureViewRelease(texture_view: *gpu.TextureView) void { + procs.textureViewRelease.?(@ptrCast(texture_view)); + } +}; + +test "dawn_impl" { + _ = gpu.Export(Interface); +} diff --git a/src/gpu/device.zig b/src/gpu/device.zig new file mode 100644 index 00000000..efeb9ae5 --- /dev/null +++ b/src/gpu/device.zig @@ -0,0 +1,368 @@ +const std = @import("std"); +const Adapter = @import("adapter.zig").Adapter; +const Queue = @import("queue.zig").Queue; +const BindGroup = @import("bind_group.zig").BindGroup; +const BindGroupLayout = @import("bind_group_layout.zig").BindGroupLayout; +const Buffer = @import("buffer.zig").Buffer; +const CommandEncoder = @import("command_encoder.zig").CommandEncoder; +const ComputePipeline = @import("compute_pipeline.zig").ComputePipeline; +const ExternalTexture = @import("external_texture.zig").ExternalTexture; +const PipelineLayout = @import("pipeline_layout.zig").PipelineLayout; +const QuerySet = @import("query_set.zig").QuerySet; +const RenderBundleEncoder = 
@import("render_bundle_encoder.zig").RenderBundleEncoder; +const RenderPipeline = @import("render_pipeline.zig").RenderPipeline; +const Sampler = @import("sampler.zig").Sampler; +const ShaderModule = @import("shader_module.zig").ShaderModule; +const Surface = @import("surface.zig").Surface; +const SwapChain = @import("swap_chain.zig").SwapChain; +const Texture = @import("texture.zig").Texture; +const ChainedStruct = @import("main.zig").ChainedStruct; +const FeatureName = @import("main.zig").FeatureName; +const RequiredLimits = @import("main.zig").RequiredLimits; +const SupportedLimits = @import("main.zig").SupportedLimits; +const ErrorType = @import("main.zig").ErrorType; +const ErrorFilter = @import("main.zig").ErrorFilter; +const LoggingType = @import("main.zig").LoggingType; +const CreatePipelineAsyncStatus = @import("main.zig").CreatePipelineAsyncStatus; +const LoggingCallback = @import("main.zig").LoggingCallback; +const ErrorCallback = @import("main.zig").ErrorCallback; +const CreateComputePipelineAsyncCallback = @import("main.zig").CreateComputePipelineAsyncCallback; +const CreateRenderPipelineAsyncCallback = @import("main.zig").CreateRenderPipelineAsyncCallback; +const Impl = @import("interface.zig").Impl; +const dawn = @import("dawn.zig"); + +pub const Device = opaque { + pub const LostCallback = *const fn ( + reason: LostReason, + message: [*:0]const u8, + userdata: ?*anyopaque, + ) callconv(.C) void; + + pub const LostReason = enum(u32) { + undefined = 0x00000000, + destroyed = 0x00000001, + }; + + pub const Descriptor = extern struct { + pub const NextInChain = extern union { + generic: ?*const ChainedStruct, + dawn_toggles_descriptor: *const dawn.TogglesDescriptor, + dawn_cache_device_descriptor: *const dawn.CacheDeviceDescriptor, + }; + + next_in_chain: NextInChain = .{ .generic = null }, + label: ?[*:0]const u8 = null, + required_features_count: usize = 0, + required_features: ?[*]const FeatureName = null, + required_limits: ?*const RequiredLimits = null, + default_queue: Queue.Descriptor = Queue.Descriptor{}, + device_lost_callback: LostCallback, + device_lost_userdata: ?*anyopaque, + + /// Provides a slightly friendlier Zig API to initialize this structure. 
+
+        /// Provides a slightly friendlier Zig API to initialize this structure.
+        pub inline fn init(v: struct {
+            next_in_chain: NextInChain = .{ .generic = null },
+            label: ?[*:0]const u8 = null,
+            required_features: ?[]const FeatureName = null,
+            required_limits: ?*const RequiredLimits = null,
+            default_queue: Queue.Descriptor = Queue.Descriptor{},
+        }) Descriptor {
+            return .{
+                .next_in_chain = v.next_in_chain,
+                .label = v.label,
+                .required_features_count = if (v.required_features) |e| e.len else 0,
+                .required_features = if (v.required_features) |e| e.ptr else null,
+                .required_limits = v.required_limits,
+                .default_queue = v.default_queue,
+            };
+        }
+    };
+
+    pub inline fn createBindGroup(device: *Device, descriptor: *const BindGroup.Descriptor) *BindGroup {
+        return Impl.deviceCreateBindGroup(device, descriptor);
+    }
+
+    pub inline fn createBindGroupLayout(device: *Device, descriptor: *const BindGroupLayout.Descriptor) *BindGroupLayout {
+        return Impl.deviceCreateBindGroupLayout(device, descriptor);
+    }
+
+    pub inline fn createBuffer(device: *Device, descriptor: *const Buffer.Descriptor) *Buffer {
+        return Impl.deviceCreateBuffer(device, descriptor);
+    }
+
+    pub inline fn createCommandEncoder(device: *Device, descriptor: ?*const CommandEncoder.Descriptor) *CommandEncoder {
+        return Impl.deviceCreateCommandEncoder(device, descriptor);
+    }
+
+    pub inline fn createComputePipeline(device: *Device, descriptor: *const ComputePipeline.Descriptor) *ComputePipeline {
+        return Impl.deviceCreateComputePipeline(device, descriptor);
+    }
+
+    pub inline fn createComputePipelineAsync(
+        device: *Device,
+        descriptor: *const ComputePipeline.Descriptor,
+        context: anytype,
+        comptime callback: fn (
+            status: CreatePipelineAsyncStatus,
+            compute_pipeline: ?*ComputePipeline,
+            message: ?[*:0]const u8,
+            ctx: @TypeOf(context),
+        ) callconv(.Inline) void,
+    ) void {
+        const Context = @TypeOf(context);
+        const Helper = struct {
+            pub fn cCallback(
+                status: CreatePipelineAsyncStatus,
+                compute_pipeline: ?*ComputePipeline,
+                message: ?[*:0]const u8,
+                userdata: ?*anyopaque,
+            ) callconv(.C) void {
+                callback(
+                    status,
+                    compute_pipeline,
+                    message,
+                    if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))),
+                );
+            }
+        };
+        Impl.deviceCreateComputePipelineAsync(device, descriptor, Helper.cCallback, if (Context == void) null else context);
+    }
+
+    pub inline fn createErrorBuffer(device: *Device, descriptor: *const Buffer.Descriptor) *Buffer {
+        return Impl.deviceCreateErrorBuffer(device, descriptor);
+    }
+
+    pub inline fn createErrorExternalTexture(device: *Device) *ExternalTexture {
+        return Impl.deviceCreateErrorExternalTexture(device);
+    }
+
+    pub inline fn createErrorTexture(device: *Device, descriptor: *const Texture.Descriptor) *Texture {
+        return Impl.deviceCreateErrorTexture(device, descriptor);
+    }
+
+    pub inline fn createExternalTexture(device: *Device, external_texture_descriptor: *const ExternalTexture.Descriptor) *ExternalTexture {
+        return Impl.deviceCreateExternalTexture(device, external_texture_descriptor);
+    }
+
+    pub inline fn createPipelineLayout(device: *Device, pipeline_layout_descriptor: *const PipelineLayout.Descriptor) *PipelineLayout {
+        return Impl.deviceCreatePipelineLayout(device, pipeline_layout_descriptor);
+    }
+
+    pub inline fn createQuerySet(device: *Device, descriptor: *const QuerySet.Descriptor) *QuerySet {
+        return Impl.deviceCreateQuerySet(device, descriptor);
+    }
+
+    pub inline fn createRenderBundleEncoder(device: *Device, descriptor: *const RenderBundleEncoder.Descriptor) *RenderBundleEncoder {
+        return Impl.deviceCreateRenderBundleEncoder(device, descriptor);
+    }
+
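+    // A usage sketch of the comptime context/callback pattern used by
+    // createComputePipelineAsync above; createRenderPipelineAsync below follows the
+    // same shape, but passes the context as the first callback parameter.
+    // Hypothetical caller code; `MyContext` and `desc` are illustrative only:
+    //
+    //     var ctx: MyContext = .{};
+    //     device.createComputePipelineAsync(&desc, &ctx, (struct {
+    //         inline fn callback(
+    //             status: gpu.CreatePipelineAsyncStatus,
+    //             pipeline: ?*gpu.ComputePipeline,
+    //             message: ?[*:0]const u8,
+    //             c: *MyContext,
+    //         ) void {
+    //             _ = .{ status, pipeline, message, c };
+    //         }
+    //     }).callback);
+    //
+    // The helper wraps `callback` in a C-callconv function and passes `ctx` through
+    // the C `userdata` pointer, so callers never touch ?*anyopaque directly.
+
+    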
pub inline fn createRenderPipeline(device: *Device, descriptor: *const RenderPipeline.Descriptor) *RenderPipeline { + return Impl.deviceCreateRenderPipeline(device, descriptor); + } + + pub inline fn createRenderPipelineAsync( + device: *Device, + descriptor: *const RenderPipeline.Descriptor, + context: anytype, + comptime callback: fn ( + ctx: @TypeOf(context), + status: CreatePipelineAsyncStatus, + pipeline: ?*RenderPipeline, + message: ?[*:0]const u8, + ) callconv(.Inline) void, + ) void { + const Context = @TypeOf(context); + const Helper = struct { + pub fn cCallback( + status: CreatePipelineAsyncStatus, + pipeline: ?*RenderPipeline, + message: ?[*:0]const u8, + userdata: ?*anyopaque, + ) callconv(.C) void { + callback( + if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))), + status, + pipeline, + message, + ); + } + }; + Impl.deviceCreateRenderPipelineAsync(device, descriptor, Helper.cCallback, if (Context == void) null else context); + } + + pub inline fn createSampler(device: *Device, descriptor: ?*const Sampler.Descriptor) *Sampler { + return Impl.deviceCreateSampler(device, descriptor); + } + + pub inline fn createShaderModule(device: *Device, descriptor: *const ShaderModule.Descriptor) *ShaderModule { + return Impl.deviceCreateShaderModule(device, descriptor); + } + + /// Helper to make createShaderModule invocations slightly nicer. + pub inline fn createShaderModuleWGSL( + device: *Device, + label: ?[*:0]const u8, + wgsl_code: [*:0]const u8, + ) *ShaderModule { + return device.createShaderModule(&ShaderModule.Descriptor{ + .next_in_chain = .{ .wgsl_descriptor = &.{ + .code = wgsl_code, + } }, + .label = label, + }); + } + + pub inline fn createSwapChain(device: *Device, surface: ?*Surface, descriptor: *const SwapChain.Descriptor) *SwapChain { + return Impl.deviceCreateSwapChain(device, surface, descriptor); + } + + pub inline fn createTexture(device: *Device, descriptor: *const Texture.Descriptor) *Texture { + return Impl.deviceCreateTexture(device, descriptor); + } + + pub inline fn destroy(device: *Device) void { + Impl.deviceDestroy(device); + } + + /// Call once with null to determine the array length, and again to fetch the feature list. + /// + /// Consider using the enumerateFeaturesOwned helper. + pub inline fn enumerateFeatures(device: *Device, features: ?[*]FeatureName) usize { + return Impl.deviceEnumerateFeatures(device, features); + } + + /// Enumerates the adapter features, storing the result in an allocated slice which is owned by + /// the caller. 
+ pub inline fn enumerateFeaturesOwned(device: *Device, allocator: std.mem.Allocator) ![]FeatureName { + const count = device.enumerateFeatures(null); + const data = try allocator.alloc(FeatureName, count); + _ = device.enumerateFeatures(data.ptr); + return data; + } + + pub inline fn forceLoss(device: *Device, reason: LostReason, message: [*:0]const u8) void { + return Impl.deviceForceLoss(device, reason, message); + } + + pub inline fn getAdapter(device: *Device) *Adapter { + return Impl.deviceGetAdapter(device); + } + + pub inline fn getLimits(device: *Device, limits: *SupportedLimits) bool { + return Impl.deviceGetLimits(device, limits) != 0; + } + + pub inline fn getQueue(device: *Device) *Queue { + return Impl.deviceGetQueue(device); + } + + pub inline fn hasFeature(device: *Device, feature: FeatureName) bool { + return Impl.deviceHasFeature(device, feature) != 0; + } + + pub inline fn injectError(device: *Device, typ: ErrorType, message: [*:0]const u8) void { + Impl.deviceInjectError(device, typ, message); + } + + pub inline fn popErrorScope( + device: *Device, + context: anytype, + comptime callback: fn (ctx: @TypeOf(context), typ: ErrorType, message: [*:0]const u8) callconv(.Inline) void, + ) void { + const Context = @TypeOf(context); + const Helper = struct { + pub fn cCallback(typ: ErrorType, message: [*:0]const u8, userdata: ?*anyopaque) callconv(.C) void { + callback(if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))), typ, message); + } + }; + Impl.devicePopErrorScope(device, Helper.cCallback, if (Context == void) null else context); + } + + pub inline fn pushErrorScope(device: *Device, filter: ErrorFilter) void { + Impl.devicePushErrorScope(device, filter); + } + + pub inline fn setDeviceLostCallback( + device: *Device, + context: anytype, + comptime callback: ?fn (ctx: @TypeOf(context), reason: LostReason, message: [*:0]const u8) callconv(.Inline) void, + ) void { + if (callback) |cb| { + const Context = @TypeOf(context); + const Helper = struct { + pub fn cCallback(reason: LostReason, message: [*:0]const u8, userdata: ?*anyopaque) callconv(.C) void { + cb(if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))), reason, message); + } + }; + Impl.deviceSetDeviceLostCallback(device, Helper.cCallback, if (Context == void) null else context); + } else { + Impl.deviceSetDeviceLostCallback(device, null, null); + } + } + + pub inline fn setLabel(device: *Device, label: [*:0]const u8) void { + Impl.deviceSetLabel(device, label); + } + + pub inline fn setLoggingCallback( + device: *Device, + context: anytype, + comptime callback: ?fn (ctx: @TypeOf(context), typ: LoggingType, message: [*:0]const u8) callconv(.Inline) void, + ) void { + if (callback) |cb| { + const Context = @TypeOf(context); + const Helper = struct { + pub fn cCallback(typ: LoggingType, message: [*:0]const u8, userdata: ?*anyopaque) callconv(.C) void { + cb(if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))), typ, message); + } + }; + Impl.deviceSetLoggingCallback(device, Helper.cCallback, if (Context == void) null else context); + } else { + Impl.deviceSetLoggingCallback(device, null, null); + } + } + + pub inline fn setUncapturedErrorCallback( + device: *Device, + context: anytype, + comptime callback: ?fn (ctx: @TypeOf(context), typ: ErrorType, message: [*:0]const u8) callconv(.Inline) void, + ) void { + if (callback) |cb| { + const Context = @TypeOf(context); + const Helper = struct { + pub fn cCallback(typ: ErrorType, message: [*:0]const u8, userdata: 
?*anyopaque) callconv(.C) void {
+                    cb(if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))), typ, message);
+                }
+            };
+            Impl.deviceSetUncapturedErrorCallback(device, Helper.cCallback, if (Context == void) null else context);
+        } else {
+            Impl.deviceSetUncapturedErrorCallback(device, null, null);
+        }
+    }
+
+    pub inline fn tick(device: *Device) void {
+        Impl.deviceTick(device);
+    }
+
+    // Mach WebGPU extension. Supported with mach-gpu-dawn.
+    //
+    // When making Metal interop with other APIs, we need to be careful that QueueSubmit doesn't
+    // mean that the operations will be visible to other APIs/Metal devices right away. macOS
+    // does have a global queue of graphics operations, but the command buffers are inserted there
+    // when they are "scheduled". Submitting other operations before the command buffer is
+    // scheduled could lead to races in who gets scheduled first and incorrect rendering.
+    pub inline fn machWaitForCommandsToBeScheduled(device: *Device) void {
+        Impl.machDeviceWaitForCommandsToBeScheduled(device);
+    }
+
+    pub inline fn validateTextureDescriptor(device: *Device, descriptor: *const Texture.Descriptor) void {
+        Impl.deviceValidateTextureDescriptor(device, descriptor);
+    }
+
+    pub inline fn reference(device: *Device) void {
+        Impl.deviceReference(device);
+    }
+
+    pub inline fn release(device: *Device) void {
+        Impl.deviceRelease(device);
+    }
+};
diff --git a/src/gpu/example/main.zig b/src/gpu/example/main.zig
new file mode 100644
index 00000000..02dae02e
--- /dev/null
+++ b/src/gpu/example/main.zig
@@ -0,0 +1,245 @@
+const std = @import("std");
+const util = @import("util.zig");
+const glfw = @import("mach-glfw");
+const gpu = @import("mach").gpu;
+
+pub const GPUInterface = gpu.dawn.Interface;
+
+pub fn main() !void {
+    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
+    const allocator = gpa.allocator();
+
+    try gpu.Impl.init(allocator, .{});
+    const setup = try setupWindow(allocator);
+    const framebuffer_size = setup.window.getFramebufferSize();
+
+    const window_data = try allocator.create(WindowData);
+    window_data.* = .{
+        .surface = setup.surface,
+        .swap_chain = null,
+        .swap_chain_format = undefined,
+        .current_desc = undefined,
+        .target_desc = undefined,
+    };
+    setup.window.setUserPointer(window_data);
+
+    window_data.swap_chain_format = .bgra8_unorm;
+    const descriptor = gpu.SwapChain.Descriptor{
+        .label = "basic swap chain",
+        .usage = .{ .render_attachment = true },
+        .format = window_data.swap_chain_format,
+        .width = framebuffer_size.width,
+        .height = framebuffer_size.height,
+        .present_mode = .fifo,
+    };
+
+    window_data.current_desc = descriptor;
+    window_data.target_desc = descriptor;
+
+    const vs =
+        \\ @vertex fn main(
+        \\     @builtin(vertex_index) VertexIndex : u32
+        \\ ) -> @builtin(position) vec4<f32> {
+        \\     var pos = array<vec2<f32>, 3>(
+        \\         vec2<f32>( 0.0,  0.5),
+        \\         vec2<f32>(-0.5, -0.5),
+        \\         vec2<f32>( 0.5, -0.5)
+        \\     );
+        \\     return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+        \\ }
+    ;
+    const vs_module = setup.device.createShaderModuleWGSL("my vertex shader", vs);
+
+    const fs =
+        \\ @fragment fn main() -> @location(0) vec4<f32> {
+        \\     return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+        \\ }
+    ;
+    const fs_module = setup.device.createShaderModuleWGSL("my fragment shader", fs);
+
+    // Fragment state
+    const blend = gpu.BlendState{
+        .color = .{
+            .dst_factor = .one,
+        },
+        .alpha = .{
+            .dst_factor = .one,
+        },
+    };
+    const color_target = gpu.ColorTargetState{
+        .format = window_data.swap_chain_format,
+        .blend = &blend,
+        .write_mask = gpu.ColorWriteMaskFlags.all,
+    };
+    const fragment = gpu.FragmentState.init(.{
+        .module = fs_module,
+        .entry_point = "main",
+        .targets = &.{color_target},
+    });
+    const pipeline_descriptor = gpu.RenderPipeline.Descriptor{
+        .fragment = &fragment,
+        .layout = null,
+        .depth_stencil = null,
+        .vertex = gpu.VertexState{
+            .module = vs_module,
+            .entry_point = "main",
+        },
+        .multisample = .{},
+        .primitive = .{},
+    };
+    const pipeline = setup.device.createRenderPipeline(&pipeline_descriptor);
+
+    vs_module.release();
+    fs_module.release();
+
+    // Reconfigure the swap chain with the new framebuffer width/height, otherwise e.g. the Vulkan
+    // device would be lost after a resize.
+    setup.window.setFramebufferSizeCallback((struct {
+        fn callback(window: glfw.Window, width: u32, height: u32) void {
+            const pl = window.getUserPointer(WindowData);
+            pl.?.target_desc.width = width;
+            pl.?.target_desc.height = height;
+        }
+    }).callback);
+
+    const queue = setup.device.getQueue();
+    while (!setup.window.shouldClose()) {
+        try frame(.{
+            .window = setup.window,
+            .device = setup.device,
+            .pipeline = pipeline,
+            .queue = queue,
+        });
+        std.time.sleep(16 * std.time.ns_per_ms);
+    }
+}
+
+const WindowData = struct {
+    surface: ?*gpu.Surface,
+    swap_chain: ?*gpu.SwapChain,
+    swap_chain_format: gpu.Texture.Format,
+    current_desc: gpu.SwapChain.Descriptor,
+    target_desc: gpu.SwapChain.Descriptor,
+};
+
+const FrameParams = struct {
+    window: glfw.Window,
+    device: *gpu.Device,
+    pipeline: *gpu.RenderPipeline,
+    queue: *gpu.Queue,
+};
+
+fn frame(params: FrameParams) !void {
+    glfw.pollEvents();
+    params.device.tick();
+    const pl = params.window.getUserPointer(WindowData).?;
+    if (pl.swap_chain == null or !std.meta.eql(pl.current_desc, pl.target_desc)) {
+        pl.swap_chain = params.device.createSwapChain(pl.surface, &pl.target_desc);
+        pl.current_desc = pl.target_desc;
+    }
+
+    const back_buffer_view = pl.swap_chain.?.getCurrentTextureView().?;
+    const color_attachment = gpu.RenderPassColorAttachment{
+        .view = back_buffer_view,
+        .resolve_target = null,
+        .clear_value = std.mem.zeroes(gpu.Color),
+        .load_op = .clear,
+        .store_op = .store,
+    };
+
+    const encoder = params.device.createCommandEncoder(null);
+    const render_pass_info = gpu.RenderPassDescriptor.init(.{
+        .color_attachments = &.{color_attachment},
+    });
+    const pass = encoder.beginRenderPass(&render_pass_info);
+    pass.setPipeline(params.pipeline);
+    pass.draw(3, 1, 0, 0);
+    pass.end();
+    pass.release();
+
+    const command = encoder.finish(null);
+    encoder.release();
+
+    params.queue.submit(&[_]*gpu.CommandBuffer{command});
+    command.release();
+    pl.swap_chain.?.present();
+    back_buffer_view.release();
+}
+
+const Setup = struct {
+    instance: *gpu.Instance,
+    adapter: *gpu.Adapter,
+    device: *gpu.Device,
+    window: glfw.Window,
+    surface: *gpu.Surface,
+};
+
+/// Default GLFW error handling callback
+fn errorCallback(error_code: glfw.ErrorCode, description: [:0]const u8) void {
+    std.log.err("glfw: {}: {s}\n", .{ error_code, description });
+}
+
+pub fn setupWindow(allocator: std.mem.Allocator) !Setup {
+    const backend_type = try util.detectBackendType(allocator);
+
+    glfw.setErrorCallback(errorCallback);
+    if (!glfw.init(.{})) {
+        std.log.err("failed to initialize GLFW: {?s}", .{glfw.getErrorString()});
+        std.process.exit(1);
+    }
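+
+    // Note: detectBackendType (called above) honors the MACH_GPU_BACKEND environment
+    // variable (see util.zig), so the backend can be forced at run time; the binary
+    // name here is illustrative:
+    //
+    //     MACH_GPU_BACKEND=vulkan ./hello-triangle
+    //
+    // Accepted values: null, d3d11, d3d12, metal, vulkan, opengl, opengles.
+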
+    // Create the test window and discover adapters using it (esp. for OpenGL)
+    var hints = util.glfwWindowHintsForBackend(backend_type);
+    hints.cocoa_retina_framebuffer = true;
+    const window = glfw.Window.create(640, 480, "mach/gpu window", null, null, hints) orelse {
+        std.log.err("failed to create GLFW window: {?s}", .{glfw.getErrorString()});
+        std.process.exit(1);
+    };
+
+    if (backend_type == .opengl) glfw.makeContextCurrent(window);
+    if (backend_type == .opengles) glfw.makeContextCurrent(window);
+
+    const instance = gpu.createInstance(null);
+    if (instance == null) {
+        std.debug.print("failed to create GPU instance\n", .{});
+        std.process.exit(1);
+    }
+    const surface = try util.createSurfaceForWindow(instance.?, window, comptime util.detectGLFWOptions());
+
+    var response: util.RequestAdapterResponse = undefined;
+    instance.?.requestAdapter(&gpu.RequestAdapterOptions{
+        .compatible_surface = surface,
+        .power_preference = .undefined,
+        .force_fallback_adapter = .false,
+    }, &response, util.requestAdapterCallback);
+    if (response.status != .success) {
+        std.debug.print("failed to create GPU adapter: {s}\n", .{response.message.?});
+        std.process.exit(1);
+    }
+
+    // Print which adapter we are using.
+    var props = std.mem.zeroes(gpu.Adapter.Properties);
+    response.adapter.?.getProperties(&props);
+    std.debug.print("found {s} backend on {s} adapter: {s}, {s}\n", .{
+        props.backend_type.name(),
+        props.adapter_type.name(),
+        props.name,
+        props.driver_description,
+    });
+
+    // Create a device with default limits/features.
+    const device = response.adapter.?.createDevice(null);
+    if (device == null) {
+        std.debug.print("failed to create GPU device\n", .{});
+        std.process.exit(1);
+    }
+
+    device.?.setUncapturedErrorCallback({}, util.printUnhandledErrorCallback);
+    return Setup{
+        .instance = instance.?,
+        .adapter = response.adapter.?,
+        .device = device.?,
+        .window = window,
+        .surface = surface,
+    };
+}
diff --git a/src/gpu/example/objc_message.zig b/src/gpu/example/objc_message.zig
new file mode 100644
index 00000000..e039b4a9
--- /dev/null
+++ b/src/gpu/example/objc_message.zig
@@ -0,0 +1,7 @@
+// Extracted from `zig translate-c tmp.c` with `#include <objc/message.h>` in the file.
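+//
+// A minimal usage sketch (hypothetical; mirrors how example/util.zig drives these
+// declarations):
+//
+//     const cls = objc_getClass("NSAutoreleasePool");
+//     const sel = sel_getUid("alloc");
+//
+// objc_msgSend is declared without a fixed prototype; callers @ptrCast it to a
+// function pointer type matching the target method before calling it (see msgSend
+// in example/util.zig).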
+pub const SEL = opaque {};
+pub const Class = opaque {};
+
+pub extern fn sel_getUid(str: [*c]const u8) ?*SEL;
+pub extern fn objc_getClass(name: [*c]const u8) ?*Class;
+pub extern fn objc_msgSend() void;
diff --git a/src/gpu/example/util.zig b/src/gpu/example/util.zig
new file mode 100644
index 00000000..c46aea29
--- /dev/null
+++ b/src/gpu/example/util.zig
@@ -0,0 +1,201 @@
+const std = @import("std");
+
+const glfw = @import("mach-glfw");
+const gpu = @import("mach").gpu;
+const objc = @import("objc_message.zig");
+
+pub inline fn printUnhandledErrorCallback(_: void, typ: gpu.ErrorType, message: [*:0]const u8) void {
+    switch (typ) {
+        .validation => std.log.err("gpu: validation error: {s}\n", .{message}),
+        .out_of_memory => std.log.err("gpu: out of memory: {s}\n", .{message}),
+        .device_lost => std.log.err("gpu: device lost: {s}\n", .{message}),
+        .unknown => std.log.err("gpu: unknown error: {s}\n", .{message}),
+        else => unreachable,
+    }
+    std.process.exit(1);
+}
+
+fn getEnvVarOwned(allocator: std.mem.Allocator, key: []const u8) error{ OutOfMemory, InvalidUtf8, InvalidWtf8 }!?[]u8 {
+    return std.process.getEnvVarOwned(allocator, key) catch |err| switch (err) {
+        error.EnvironmentVariableNotFound => @as(?[]u8, null),
+        else => |e| e,
+    };
+}
+
+pub fn detectBackendType(allocator: std.mem.Allocator) !gpu.BackendType {
+    const MACH_GPU_BACKEND = try getEnvVarOwned(allocator, "MACH_GPU_BACKEND");
+    if (MACH_GPU_BACKEND) |backend| {
+        defer allocator.free(backend);
+        if (std.ascii.eqlIgnoreCase(backend, "null")) return .null;
+        if (std.ascii.eqlIgnoreCase(backend, "d3d11")) return .d3d11;
+        if (std.ascii.eqlIgnoreCase(backend, "d3d12")) return .d3d12;
+        if (std.ascii.eqlIgnoreCase(backend, "metal")) return .metal;
+        if (std.ascii.eqlIgnoreCase(backend, "vulkan")) return .vulkan;
+        if (std.ascii.eqlIgnoreCase(backend, "opengl")) return .opengl;
+        if (std.ascii.eqlIgnoreCase(backend, "opengles")) return .opengles;
+        @panic("unknown MACH_GPU_BACKEND type");
+    }
+
+    const target = @import("builtin").target;
+    if (target.isDarwin()) return .metal;
+    if (target.os.tag == .windows) return .d3d12;
+    return .vulkan;
+}
+
+pub const RequestAdapterResponse = struct {
+    status: gpu.RequestAdapterStatus,
+    adapter: ?*gpu.Adapter,
+    message: ?[*:0]const u8,
+};
+
+pub inline fn requestAdapterCallback(
+    context: *RequestAdapterResponse,
+    status: gpu.RequestAdapterStatus,
+    adapter: ?*gpu.Adapter,
+    message: ?[*:0]const u8,
+) void {
+    context.* = RequestAdapterResponse{
+        .status = status,
+        .adapter = adapter,
+        .message = message,
+    };
+}
+
+pub fn glfwWindowHintsForBackend(backend: gpu.BackendType) glfw.Window.Hints {
+    return switch (backend) {
+        .opengl => .{
+            // Ask for OpenGL 4.4 which is what the GL backend requires for compute shaders and
+            // texture views.
+            .context_version_major = 4,
+            .context_version_minor = 4,
+            .opengl_forward_compat = true,
+            .opengl_profile = .opengl_core_profile,
+        },
+        .opengles => .{
+            .context_version_major = 3,
+            .context_version_minor = 1,
+            .client_api = .opengl_es_api,
+            .context_creation_api = .egl_context_api,
+        },
+        else => .{
+            // Without this GLFW will initialize a GL context on the window, which prevents using
+            // the window with other APIs (by crashing in weird ways).
+ .client_api = .no_api, + }, + }; +} + +pub fn detectGLFWOptions() glfw.BackendOptions { + const target = @import("builtin").target; + if (target.isDarwin()) return .{ .cocoa = true }; + return switch (target.os.tag) { + .windows => .{ .win32 = true }, + .linux => .{ .x11 = true, .wayland = true }, + else => .{}, + }; +} + +pub fn createSurfaceForWindow( + instance: *gpu.Instance, + window: glfw.Window, + comptime glfw_options: glfw.BackendOptions, +) !*gpu.Surface { + const glfw_native = glfw.Native(glfw_options); + if (glfw_options.win32) { + return instance.createSurface(&gpu.Surface.Descriptor{ + .next_in_chain = .{ + .from_windows_hwnd = &.{ + .hinstance = std.os.windows.kernel32.GetModuleHandleW(null).?, + .hwnd = glfw_native.getWin32Window(window), + }, + }, + }); + } else if (glfw_options.x11) { + return instance.createSurface(&gpu.Surface.Descriptor{ + .next_in_chain = .{ + .from_xlib_window = &.{ + .display = glfw_native.getX11Display(), + .window = glfw_native.getX11Window(window), + }, + }, + }); + } else if (glfw_options.wayland) { + return instance.createSurface(&gpu.Surface.Descriptor{ + .next_in_chain = .{ + .from_wayland_surface = &.{ + .display = glfw_native.getWaylandDisplay(), + .surface = glfw_native.getWaylandWindow(window), + }, + }, + }); + } else if (glfw_options.cocoa) { + const pool = try AutoReleasePool.init(); + defer AutoReleasePool.release(pool); + + const ns_window = glfw_native.getCocoaWindow(window); + const ns_view = msgSend(ns_window, "contentView", .{}, *anyopaque); // [nsWindow contentView] + + // Create a CAMetalLayer that covers the whole window that will be passed to CreateSurface. + msgSend(ns_view, "setWantsLayer:", .{true}, void); // [view setWantsLayer:YES] + const layer = msgSend(objc.objc_getClass("CAMetalLayer"), "layer", .{}, ?*anyopaque); // [CAMetalLayer layer] + if (layer == null) @panic("failed to create Metal layer"); + msgSend(ns_view, "setLayer:", .{layer.?}, void); // [view setLayer:layer] + + // Use retina if the window was created with retina support. + const scale_factor = msgSend(ns_window, "backingScaleFactor", .{}, f64); // [ns_window backingScaleFactor] + msgSend(layer.?, "setContentsScale:", .{scale_factor}, void); // [layer setContentsScale:scale_factor] + + return instance.createSurface(&gpu.Surface.Descriptor{ + .next_in_chain = .{ + .from_metal_layer = &.{ .layer = layer.? 
}, + }, + }); + } else unreachable; +} + +pub const AutoReleasePool = if (!@import("builtin").target.isDarwin()) opaque { + pub fn init() error{OutOfMemory}!?*AutoReleasePool { + return null; + } + + pub fn release(pool: ?*AutoReleasePool) void { + _ = pool; + return; + } +} else opaque { + pub fn init() error{OutOfMemory}!?*AutoReleasePool { + // pool = [NSAutoreleasePool alloc]; + var pool = msgSend(objc.objc_getClass("NSAutoreleasePool"), "alloc", .{}, ?*AutoReleasePool); + if (pool == null) return error.OutOfMemory; + + // pool = [pool init]; + pool = msgSend(pool, "init", .{}, ?*AutoReleasePool); + if (pool == null) unreachable; + + return pool; + } + + pub fn release(pool: ?*AutoReleasePool) void { + // [pool release]; + msgSend(pool, "release", .{}, void); + } +}; + +// Borrowed from https://github.com/hazeycode/zig-objcrt +pub fn msgSend(obj: anytype, sel_name: [:0]const u8, args: anytype, comptime ReturnType: type) ReturnType { + const args_meta = @typeInfo(@TypeOf(args)).Struct.fields; + + const FnType = switch (args_meta.len) { + 0 => *const fn (@TypeOf(obj), ?*objc.SEL) callconv(.C) ReturnType, + 1 => *const fn (@TypeOf(obj), ?*objc.SEL, args_meta[0].type) callconv(.C) ReturnType, + 2 => *const fn (@TypeOf(obj), ?*objc.SEL, args_meta[0].type, args_meta[1].type) callconv(.C) ReturnType, + 3 => *const fn (@TypeOf(obj), ?*objc.SEL, args_meta[0].type, args_meta[1].type, args_meta[2].type) callconv(.C) ReturnType, + 4 => *const fn (@TypeOf(obj), ?*objc.SEL, args_meta[0].type, args_meta[1].type, args_meta[2].type, args_meta[3].type) callconv(.C) ReturnType, + else => @compileError("Unsupported number of args"), + }; + + const func = @as(FnType, @ptrCast(&objc.objc_msgSend)); + const sel = objc.sel_getUid(@as([*c]const u8, @ptrCast(sel_name))); + + return @call(.auto, func, .{ obj, sel } ++ args); +} diff --git a/src/gpu/external_texture.zig b/src/gpu/external_texture.zig new file mode 100644 index 00000000..16460d63 --- /dev/null +++ b/src/gpu/external_texture.zig @@ -0,0 +1,56 @@ +const Bool32 = @import("main.zig").Bool32; +const ChainedStruct = @import("main.zig").ChainedStruct; +const TextureView = @import("texture_view.zig").TextureView; +const Origin2D = @import("main.zig").Origin2D; +const Extent2D = @import("main.zig").Extent2D; +const Impl = @import("interface.zig").Impl; + +pub const ExternalTexture = opaque { + pub const BindingEntry = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .external_texture_binding_entry }, + external_texture: *ExternalTexture, + }; + + pub const BindingLayout = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .external_texture_binding_layout }, + }; + + const Rotation = enum(u32) { + rotate_0_degrees = 0x00000000, + rotate_90_degrees = 0x00000001, + rotate_180_degrees = 0x00000002, + rotate_270_degrees = 0x00000003, + }; + + pub const Descriptor = extern struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + plane0: *TextureView, + plane1: ?*TextureView = null, + visible_origin: Origin2D, + visible_size: Extent2D, + do_yuv_to_rgb_conversion_only: Bool32 = .false, + yuv_to_rgb_conversion_matrix: ?*const [12]f32 = null, + src_transform_function_parameters: *const [7]f32, + dst_transform_function_parameters: *const [7]f32, + gamut_conversion_matrix: *const [9]f32, + flip_y: Bool32, + rotation: Rotation, + }; + + pub inline fn destroy(external_texture: *ExternalTexture) void { + Impl.externalTextureDestroy(external_texture); + } + + pub inline fn setLabel(external_texture: 
*ExternalTexture, label: [*:0]const u8) void { + Impl.externalTextureSetLabel(external_texture, label); + } + + pub inline fn reference(external_texture: *ExternalTexture) void { + Impl.externalTextureReference(external_texture); + } + + pub inline fn release(external_texture: *ExternalTexture) void { + Impl.externalTextureRelease(external_texture); + } +}; diff --git a/src/gpu/instance.zig b/src/gpu/instance.zig new file mode 100644 index 00000000..0cd80d89 --- /dev/null +++ b/src/gpu/instance.zig @@ -0,0 +1,65 @@ +const ChainedStruct = @import("main.zig").ChainedStruct; +const RequestAdapterStatus = @import("main.zig").RequestAdapterStatus; +const Surface = @import("surface.zig").Surface; +const Adapter = @import("adapter.zig").Adapter; +const RequestAdapterOptions = @import("main.zig").RequestAdapterOptions; +const RequestAdapterCallback = @import("main.zig").RequestAdapterCallback; +const Impl = @import("interface.zig").Impl; +const dawn = @import("dawn.zig"); + +pub const Instance = opaque { + pub const Descriptor = extern struct { + pub const NextInChain = extern union { + generic: ?*const ChainedStruct, + dawn_toggles_descriptor: *const dawn.TogglesDescriptor, + }; + + next_in_chain: NextInChain = .{ .generic = null }, + }; + + pub inline fn createSurface(instance: *Instance, descriptor: *const Surface.Descriptor) *Surface { + return Impl.instanceCreateSurface(instance, descriptor); + } + + pub inline fn processEvents(instance: *Instance) void { + Impl.instanceProcessEvents(instance); + } + + pub inline fn requestAdapter( + instance: *Instance, + options: ?*const RequestAdapterOptions, + context: anytype, + comptime callback: fn ( + ctx: @TypeOf(context), + status: RequestAdapterStatus, + adapter: ?*Adapter, + message: ?[*:0]const u8, + ) callconv(.Inline) void, + ) void { + const Context = @TypeOf(context); + const Helper = struct { + pub fn cCallback( + status: RequestAdapterStatus, + adapter: ?*Adapter, + message: ?[*:0]const u8, + userdata: ?*anyopaque, + ) callconv(.C) void { + callback( + if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))), + status, + adapter, + message, + ); + } + }; + Impl.instanceRequestAdapter(instance, options, Helper.cCallback, if (Context == void) null else context); + } + + pub inline fn reference(instance: *Instance) void { + Impl.instanceReference(instance); + } + + pub inline fn release(instance: *Instance) void { + Impl.instanceRelease(instance); + } +}; diff --git a/src/gpu/interface.zig b/src/gpu/interface.zig new file mode 100644 index 00000000..81073dbd --- /dev/null +++ b/src/gpu/interface.zig @@ -0,0 +1,2702 @@ +const gpu = @import("main.zig"); + +/// The gpu.Interface implementation that is used by the entire program. Only one may exist, since +/// it is resolved fully at comptime with no vtable indirection, etc. +/// +/// Depending on the implementation, it may need to be `.init()`ialized before use. +pub const Impl = blk: { + if (@import("builtin").is_test) { + break :blk StubInterface; + } else { + const root = @import("root"); + + // Default to Dawn implementation of gpu.Interface if none was specified. + if (!@hasDecl(root, "GPUInterface")) break :blk gpu.Interface(@import("dawn_impl.zig").Interface); + + _ = gpu.Interface(root.GPUInterface); // verify the type + break :blk root.GPUInterface; + } +}; + +/// Verifies that a gpu.Interface implementation exposes the expected function declarations. 
+pub fn Interface(comptime T: type) type { + // gpu.Device + assertDecl(T, "deviceCreateRenderPipeline", fn (device: *gpu.Device, descriptor: *const gpu.RenderPipeline.Descriptor) callconv(.Inline) *gpu.RenderPipeline); + assertDecl(T, "deviceCreateRenderPipelineAsync", fn (device: *gpu.Device, descriptor: *const gpu.RenderPipeline.Descriptor, callback: gpu.CreateRenderPipelineAsyncCallback, userdata: ?*anyopaque) callconv(.Inline) void); + assertDecl(T, "deviceCreatePipelineLayout", fn (device: *gpu.Device, pipeline_layout_descriptor: *const gpu.PipelineLayout.Descriptor) callconv(.Inline) *gpu.PipelineLayout); + + // gpu.PipelineLayout + assertDecl(T, "pipelineLayoutSetLabel", fn (pipeline_layout: *gpu.PipelineLayout, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "pipelineLayoutReference", fn (pipeline_layout: *gpu.PipelineLayout) callconv(.Inline) void); + assertDecl(T, "pipelineLayoutRelease", fn (pipeline_layout: *gpu.PipelineLayout) callconv(.Inline) void); + + // gpu.RenderBundleEncoder + assertDecl(T, "renderBundleEncoderSetPipeline", fn (render_bundle_encoder: *gpu.RenderBundleEncoder, pipeline: *gpu.RenderPipeline) callconv(.Inline) void); + assertDecl(T, "renderBundleEncoderSetBindGroup", fn (render_bundle_encoder: *gpu.RenderBundleEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) callconv(.Inline) void); + + // gpu.RenderPassEncoder + assertDecl(T, "renderPassEncoderSetPipeline", fn (render_pass_encoder: *gpu.RenderPassEncoder, pipeline: *gpu.RenderPipeline) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderSetBindGroup", fn (render_pass_encoder: *gpu.RenderPassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) callconv(.Inline) void); + + // gpu.BindGroup + assertDecl(T, "bindGroupSetLabel", fn (bind_group: *gpu.BindGroup, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "bindGroupReference", fn (bind_group: *gpu.BindGroup) callconv(.Inline) void); + assertDecl(T, "bindGroupRelease", fn (bind_group: *gpu.BindGroup) callconv(.Inline) void); + + // gpu.BindGroupLayout + assertDecl(T, "bindGroupLayoutSetLabel", fn (bind_group_layout: *gpu.BindGroupLayout, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "bindGroupLayoutReference", fn (bind_group_layout: *gpu.BindGroupLayout) callconv(.Inline) void); + assertDecl(T, "bindGroupLayoutRelease", fn (bind_group_layout: *gpu.BindGroupLayout) callconv(.Inline) void); + + // gpu.RenderPipeline + assertDecl(T, "renderPipelineGetBindGroupLayout", fn (render_pipeline: *gpu.RenderPipeline, group_index: u32) callconv(.Inline) *gpu.BindGroupLayout); + assertDecl(T, "renderPipelineSetLabel", fn (render_pipeline: *gpu.RenderPipeline, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "renderPipelineReference", fn (render_pipeline: *gpu.RenderPipeline) callconv(.Inline) void); + assertDecl(T, "renderPipelineRelease", fn (render_pipeline: *gpu.RenderPipeline) callconv(.Inline) void); + + // gpu.Instance + assertDecl(T, "createInstance", fn (descriptor: ?*const gpu.Instance.Descriptor) callconv(.Inline) ?*gpu.Instance); + + // gpu.Adapter + assertDecl(T, "adapterCreateDevice", fn (adapter: *gpu.Adapter, descriptor: ?*const gpu.Device.Descriptor) callconv(.Inline) ?*gpu.Device); + assertDecl(T, "adapterEnumerateFeatures", fn (adapter: *gpu.Adapter, features: ?[*]gpu.FeatureName) callconv(.Inline) usize); + assertDecl(T, "adapterGetInstance", fn (adapter: 
*gpu.Adapter) callconv(.Inline) *gpu.Instance); + assertDecl(T, "adapterGetLimits", fn (adapter: *gpu.Adapter, limits: *gpu.SupportedLimits) callconv(.Inline) u32); + assertDecl(T, "adapterGetProperties", fn (adapter: *gpu.Adapter, properties: *gpu.Adapter.Properties) callconv(.Inline) void); + assertDecl(T, "adapterHasFeature", fn (adapter: *gpu.Adapter, feature: gpu.FeatureName) callconv(.Inline) u32); + assertDecl(T, "adapterPropertiesFreeMembers", fn (value: gpu.Adapter.Properties) callconv(.Inline) void); + assertDecl(T, "adapterRequestDevice", fn (adapter: *gpu.Adapter, descriptor: ?*const gpu.Device.Descriptor, callback: gpu.RequestDeviceCallback, userdata: ?*anyopaque) callconv(.Inline) void); + assertDecl(T, "adapterReference", fn (adapter: *gpu.Adapter) callconv(.Inline) void); + assertDecl(T, "adapterRelease", fn (adapter: *gpu.Adapter) callconv(.Inline) void); + + // gpu.Buffer + assertDecl(T, "bufferDestroy", fn (buffer: *gpu.Buffer) callconv(.Inline) void); + assertDecl(T, "bufferGetConstMappedRange", fn (buffer: *gpu.Buffer, offset: usize, size: usize) callconv(.Inline) ?*const anyopaque); + assertDecl(T, "bufferGetMappedRange", fn (buffer: *gpu.Buffer, offset: usize, size: usize) callconv(.Inline) ?*anyopaque); + assertDecl(T, "bufferGetSize", fn (buffer: *gpu.Buffer) callconv(.Inline) u64); + assertDecl(T, "bufferGetUsage", fn (buffer: *gpu.Buffer) callconv(.Inline) gpu.Buffer.UsageFlags); + assertDecl(T, "bufferMapAsync", fn (buffer: *gpu.Buffer, mode: gpu.MapModeFlags, offset: usize, size: usize, callback: gpu.Buffer.MapCallback, userdata: ?*anyopaque) callconv(.Inline) void); + assertDecl(T, "bufferSetLabel", fn (buffer: *gpu.Buffer, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "bufferUnmap", fn (buffer: *gpu.Buffer) callconv(.Inline) void); + assertDecl(T, "bufferReference", fn (buffer: *gpu.Buffer) callconv(.Inline) void); + assertDecl(T, "bufferRelease", fn (buffer: *gpu.Buffer) callconv(.Inline) void); + + // gpu.CommandBuffer + assertDecl(T, "commandBufferSetLabel", fn (command_buffer: *gpu.CommandBuffer, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "commandBufferReference", fn (command_buffer: *gpu.CommandBuffer) callconv(.Inline) void); + assertDecl(T, "commandBufferRelease", fn (command_buffer: *gpu.CommandBuffer) callconv(.Inline) void); + + // gpu.CommandEncoder + assertDecl(T, "commandEncoderBeginComputePass", fn (command_encoder: *gpu.CommandEncoder, descriptor: ?*const gpu.ComputePassDescriptor) callconv(.Inline) *gpu.ComputePassEncoder); + assertDecl(T, "commandEncoderBeginRenderPass", fn (command_encoder: *gpu.CommandEncoder, descriptor: *const gpu.RenderPassDescriptor) callconv(.Inline) *gpu.RenderPassEncoder); + assertDecl(T, "commandEncoderClearBuffer", fn (command_encoder: *gpu.CommandEncoder, buffer: *gpu.Buffer, offset: u64, size: u64) callconv(.Inline) void); + assertDecl(T, "commandEncoderCopyBufferToBuffer", fn (command_encoder: *gpu.CommandEncoder, source: *gpu.Buffer, source_offset: u64, destination: *gpu.Buffer, destination_offset: u64, size: u64) callconv(.Inline) void); + assertDecl(T, "commandEncoderCopyBufferToTexture", fn (command_encoder: *gpu.CommandEncoder, source: *const gpu.ImageCopyBuffer, destination: *const gpu.ImageCopyTexture, copy_size: *const gpu.Extent3D) callconv(.Inline) void); + assertDecl(T, "commandEncoderCopyTextureToBuffer", fn (command_encoder: *gpu.CommandEncoder, source: *const gpu.ImageCopyTexture, destination: *const gpu.ImageCopyBuffer, copy_size: *const gpu.Extent3D) 
callconv(.Inline) void); + assertDecl(T, "commandEncoderCopyTextureToTexture", fn (command_encoder: *gpu.CommandEncoder, source: *const gpu.ImageCopyTexture, destination: *const gpu.ImageCopyTexture, copy_size: *const gpu.Extent3D) callconv(.Inline) void); + assertDecl(T, "commandEncoderFinish", fn (command_encoder: *gpu.CommandEncoder, descriptor: ?*const gpu.CommandBuffer.Descriptor) callconv(.Inline) *gpu.CommandBuffer); + assertDecl(T, "commandEncoderInjectValidationError", fn (command_encoder: *gpu.CommandEncoder, message: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "commandEncoderInsertDebugMarker", fn (command_encoder: *gpu.CommandEncoder, marker_label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "commandEncoderPopDebugGroup", fn (command_encoder: *gpu.CommandEncoder) callconv(.Inline) void); + assertDecl(T, "commandEncoderPushDebugGroup", fn (command_encoder: *gpu.CommandEncoder, group_label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "commandEncoderResolveQuerySet", fn (command_encoder: *gpu.CommandEncoder, query_set: *gpu.QuerySet, first_query: u32, query_count: u32, destination: *gpu.Buffer, destination_offset: u64) callconv(.Inline) void); + assertDecl(T, "commandEncoderSetLabel", fn (command_encoder: *gpu.CommandEncoder, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "commandEncoderWriteBuffer", fn (command_encoder: *gpu.CommandEncoder, buffer: *gpu.Buffer, buffer_offset: u64, data: [*]const u8, size: u64) callconv(.Inline) void); + assertDecl(T, "commandEncoderWriteTimestamp", fn (command_encoder: *gpu.CommandEncoder, query_set: *gpu.QuerySet, query_index: u32) callconv(.Inline) void); + assertDecl(T, "commandEncoderReference", fn (command_encoder: *gpu.CommandEncoder) callconv(.Inline) void); + assertDecl(T, "commandEncoderRelease", fn (command_encoder: *gpu.CommandEncoder) callconv(.Inline) void); + + // gpu.ComputePassEncoder + assertDecl(T, "computePassEncoderDispatchWorkgroups", fn (compute_pass_encoder: *gpu.ComputePassEncoder, workgroup_count_x: u32, workgroup_count_y: u32, workgroup_count_z: u32) callconv(.Inline) void); + assertDecl(T, "computePassEncoderDispatchWorkgroupsIndirect", fn (compute_pass_encoder: *gpu.ComputePassEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) callconv(.Inline) void); + assertDecl(T, "computePassEncoderEnd", fn (compute_pass_encoder: *gpu.ComputePassEncoder) callconv(.Inline) void); + assertDecl(T, "computePassEncoderInsertDebugMarker", fn (compute_pass_encoder: *gpu.ComputePassEncoder, marker_label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "computePassEncoderPopDebugGroup", fn (compute_pass_encoder: *gpu.ComputePassEncoder) callconv(.Inline) void); + assertDecl(T, "computePassEncoderPushDebugGroup", fn (compute_pass_encoder: *gpu.ComputePassEncoder, group_label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "computePassEncoderSetBindGroup", fn (compute_pass_encoder: *gpu.ComputePassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) callconv(.Inline) void); + assertDecl(T, "computePassEncoderSetLabel", fn (compute_pass_encoder: *gpu.ComputePassEncoder, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "computePassEncoderSetPipeline", fn (compute_pass_encoder: *gpu.ComputePassEncoder, pipeline: *gpu.ComputePipeline) callconv(.Inline) void); + assertDecl(T, "computePassEncoderWriteTimestamp", fn (compute_pass_encoder: *gpu.ComputePassEncoder, query_set: *gpu.QuerySet, query_index: 
u32) callconv(.Inline) void); + assertDecl(T, "computePassEncoderReference", fn (compute_pass_encoder: *gpu.ComputePassEncoder) callconv(.Inline) void); + assertDecl(T, "computePassEncoderRelease", fn (compute_pass_encoder: *gpu.ComputePassEncoder) callconv(.Inline) void); + + // gpu.ComputePipeline + assertDecl(T, "computePipelineGetBindGroupLayout", fn (compute_pipeline: *gpu.ComputePipeline, group_index: u32) callconv(.Inline) *gpu.BindGroupLayout); + assertDecl(T, "computePipelineSetLabel", fn (compute_pipeline: *gpu.ComputePipeline, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "computePipelineReference", fn (compute_pipeline: *gpu.ComputePipeline) callconv(.Inline) void); + assertDecl(T, "computePipelineRelease", fn (compute_pipeline: *gpu.ComputePipeline) callconv(.Inline) void); + + // gpu.Device + assertDecl(T, "getProcAddress", fn (device: *gpu.Device, proc_name: [*:0]const u8) callconv(.Inline) ?gpu.Proc); + assertDecl(T, "deviceCreateBindGroup", fn (device: *gpu.Device, descriptor: *const gpu.BindGroup.Descriptor) callconv(.Inline) *gpu.BindGroup); + assertDecl(T, "deviceCreateBindGroupLayout", fn (device: *gpu.Device, descriptor: *const gpu.BindGroupLayout.Descriptor) callconv(.Inline) *gpu.BindGroupLayout); + assertDecl(T, "deviceCreateBuffer", fn (device: *gpu.Device, descriptor: *const gpu.Buffer.Descriptor) callconv(.Inline) *gpu.Buffer); + assertDecl(T, "deviceCreateCommandEncoder", fn (device: *gpu.Device, descriptor: ?*const gpu.CommandEncoder.Descriptor) callconv(.Inline) *gpu.CommandEncoder); + assertDecl(T, "deviceCreateComputePipeline", fn (device: *gpu.Device, descriptor: *const gpu.ComputePipeline.Descriptor) callconv(.Inline) *gpu.ComputePipeline); + assertDecl(T, "deviceCreateComputePipelineAsync", fn (device: *gpu.Device, descriptor: *const gpu.ComputePipeline.Descriptor, callback: gpu.CreateComputePipelineAsyncCallback, userdata: ?*anyopaque) callconv(.Inline) void); + assertDecl(T, "deviceCreateErrorBuffer", fn (device: *gpu.Device, descriptor: *const gpu.Buffer.Descriptor) callconv(.Inline) *gpu.Buffer); + assertDecl(T, "deviceCreateErrorExternalTexture", fn (device: *gpu.Device) callconv(.Inline) *gpu.ExternalTexture); + assertDecl(T, "deviceCreateErrorTexture", fn (device: *gpu.Device, descriptor: *const gpu.Texture.Descriptor) callconv(.Inline) *gpu.Texture); + assertDecl(T, "deviceCreateExternalTexture", fn (device: *gpu.Device, external_texture_descriptor: *const gpu.ExternalTexture.Descriptor) callconv(.Inline) *gpu.ExternalTexture); + assertDecl(T, "deviceCreateQuerySet", fn (device: *gpu.Device, descriptor: *const gpu.QuerySet.Descriptor) callconv(.Inline) *gpu.QuerySet); + assertDecl(T, "deviceCreateRenderBundleEncoder", fn (device: *gpu.Device, descriptor: *const gpu.RenderBundleEncoder.Descriptor) callconv(.Inline) *gpu.RenderBundleEncoder); + // TODO(self-hosted): this cannot be marked as inline for some reason: + // https://github.com/ziglang/zig/issues/12545 + assertDecl(T, "deviceCreateSampler", fn (device: *gpu.Device, descriptor: ?*const gpu.Sampler.Descriptor) *gpu.Sampler); + assertDecl(T, "deviceCreateShaderModule", fn (device: *gpu.Device, descriptor: *const gpu.ShaderModule.Descriptor) callconv(.Inline) *gpu.ShaderModule); + assertDecl(T, "deviceCreateSwapChain", fn (device: *gpu.Device, surface: ?*gpu.Surface, descriptor: *const gpu.SwapChain.Descriptor) callconv(.Inline) *gpu.SwapChain); + assertDecl(T, "deviceCreateTexture", fn (device: *gpu.Device, descriptor: *const gpu.Texture.Descriptor) callconv(.Inline) 
*gpu.Texture); + assertDecl(T, "deviceDestroy", fn (device: *gpu.Device) callconv(.Inline) void); + assertDecl(T, "deviceEnumerateFeatures", fn (device: *gpu.Device, features: ?[*]gpu.FeatureName) callconv(.Inline) usize); + assertDecl(T, "deviceGetLimits", fn (device: *gpu.Device, limits: *gpu.SupportedLimits) callconv(.Inline) u32); + assertDecl(T, "deviceGetQueue", fn (device: *gpu.Device) callconv(.Inline) *gpu.Queue); + assertDecl(T, "deviceHasFeature", fn (device: *gpu.Device, feature: gpu.FeatureName) callconv(.Inline) u32); + assertDecl(T, "deviceImportSharedFence", fn (device: *gpu.Device, descriptor: *const gpu.SharedFence.Descriptor) callconv(.Inline) *gpu.SharedFence); + assertDecl(T, "deviceImportSharedTextureMemory", fn (device: *gpu.Device, descriptor: *const gpu.SharedTextureMemory.Descriptor) callconv(.Inline) *gpu.SharedTextureMemory); + assertDecl(T, "deviceInjectError", fn (device: *gpu.Device, typ: gpu.ErrorType, message: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "devicePopErrorScope", fn (device: *gpu.Device, callback: gpu.ErrorCallback, userdata: ?*anyopaque) callconv(.Inline) void); + assertDecl(T, "devicePushErrorScope", fn (device: *gpu.Device, filter: gpu.ErrorFilter) callconv(.Inline) void); + assertDecl(T, "deviceSetDeviceLostCallback", fn (device: *gpu.Device, callback: ?gpu.Device.LostCallback, userdata: ?*anyopaque) callconv(.Inline) void); + assertDecl(T, "deviceSetLabel", fn (device: *gpu.Device, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "deviceSetLoggingCallback", fn (device: *gpu.Device, callback: ?gpu.LoggingCallback, userdata: ?*anyopaque) callconv(.Inline) void); + assertDecl(T, "deviceSetUncapturedErrorCallback", fn (device: *gpu.Device, callback: ?gpu.ErrorCallback, userdata: ?*anyopaque) callconv(.Inline) void); + assertDecl(T, "deviceTick", fn (device: *gpu.Device) callconv(.Inline) void); + assertDecl(T, "machDeviceWaitForCommandsToBeScheduled", fn (device: *gpu.Device) callconv(.Inline) void); + assertDecl(T, "deviceReference", fn (device: *gpu.Device) callconv(.Inline) void); + assertDecl(T, "deviceRelease", fn (device: *gpu.Device) callconv(.Inline) void); + + // gpu.ExternalTexture + assertDecl(T, "externalTextureDestroy", fn (external_texture: *gpu.ExternalTexture) callconv(.Inline) void); + assertDecl(T, "externalTextureSetLabel", fn (external_texture: *gpu.ExternalTexture, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "externalTextureReference", fn (external_texture: *gpu.ExternalTexture) callconv(.Inline) void); + assertDecl(T, "externalTextureRelease", fn (external_texture: *gpu.ExternalTexture) callconv(.Inline) void); + + // gpu.Instance + assertDecl(T, "instanceCreateSurface", fn (instance: *gpu.Instance, descriptor: *const gpu.Surface.Descriptor) callconv(.Inline) *gpu.Surface); + assertDecl(T, "instanceProcessEvents", fn (instance: *gpu.Instance) callconv(.Inline) void); + assertDecl(T, "instanceRequestAdapter", fn (instance: *gpu.Instance, options: ?*const gpu.RequestAdapterOptions, callback: gpu.RequestAdapterCallback, userdata: ?*anyopaque) callconv(.Inline) void); + assertDecl(T, "instanceReference", fn (instance: *gpu.Instance) callconv(.Inline) void); + assertDecl(T, "instanceRelease", fn (instance: *gpu.Instance) callconv(.Inline) void); + + // gpu.QuerySet + assertDecl(T, "querySetDestroy", fn (query_set: *gpu.QuerySet) callconv(.Inline) void); + assertDecl(T, "querySetGetCount", fn (query_set: *gpu.QuerySet) callconv(.Inline) u32); + assertDecl(T, "querySetGetType", fn 
(query_set: *gpu.QuerySet) callconv(.Inline) gpu.QueryType); + assertDecl(T, "querySetSetLabel", fn (query_set: *gpu.QuerySet, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "querySetReference", fn (query_set: *gpu.QuerySet) callconv(.Inline) void); + assertDecl(T, "querySetRelease", fn (query_set: *gpu.QuerySet) callconv(.Inline) void); + + // gpu.Queue + assertDecl(T, "queueCopyTextureForBrowser", fn (queue: *gpu.Queue, source: *const gpu.ImageCopyTexture, destination: *const gpu.ImageCopyTexture, copy_size: *const gpu.Extent3D, options: *const gpu.CopyTextureForBrowserOptions) callconv(.Inline) void); + assertDecl(T, "queueOnSubmittedWorkDone", fn (queue: *gpu.Queue, signal_value: u64, callback: gpu.Queue.WorkDoneCallback, userdata: ?*anyopaque) callconv(.Inline) void); + assertDecl(T, "queueSetLabel", fn (queue: *gpu.Queue, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "queueSubmit", fn (queue: *gpu.Queue, command_count: usize, commands: [*]const *const gpu.CommandBuffer) callconv(.Inline) void); + assertDecl(T, "queueWriteBuffer", fn (queue: *gpu.Queue, buffer: *gpu.Buffer, buffer_offset: u64, data: *const anyopaque, size: usize) callconv(.Inline) void); + assertDecl(T, "queueWriteTexture", fn (queue: *gpu.Queue, destination: *const gpu.ImageCopyTexture, data: *const anyopaque, data_size: usize, data_layout: *const gpu.Texture.DataLayout, write_size: *const gpu.Extent3D) callconv(.Inline) void); + assertDecl(T, "queueReference", fn (queue: *gpu.Queue) callconv(.Inline) void); + assertDecl(T, "queueRelease", fn (queue: *gpu.Queue) callconv(.Inline) void); + + // gpu.RenderBundle + assertDecl(T, "renderBundleSetLabel", fn (render_bundle: *gpu.RenderBundle, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "renderBundleReference", fn (render_bundle: *gpu.RenderBundle) callconv(.Inline) void); + assertDecl(T, "renderBundleRelease", fn (render_bundle: *gpu.RenderBundle) callconv(.Inline) void); + + // gpu.RenderBundleEncoder + assertDecl(T, "renderBundleEncoderDraw", fn (render_bundle_encoder: *gpu.RenderBundleEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) callconv(.Inline) void); + assertDecl(T, "renderBundleEncoderDrawIndexed", fn (render_bundle_encoder: *gpu.RenderBundleEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) callconv(.Inline) void); + assertDecl(T, "renderBundleEncoderDrawIndexedIndirect", fn (render_bundle_encoder: *gpu.RenderBundleEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) callconv(.Inline) void); + assertDecl(T, "renderBundleEncoderDrawIndirect", fn (render_bundle_encoder: *gpu.RenderBundleEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) callconv(.Inline) void); + assertDecl(T, "renderBundleEncoderFinish", fn (render_bundle_encoder: *gpu.RenderBundleEncoder, descriptor: ?*const gpu.RenderBundle.Descriptor) callconv(.Inline) *gpu.RenderBundle); + assertDecl(T, "renderBundleEncoderInsertDebugMarker", fn (render_bundle_encoder: *gpu.RenderBundleEncoder, marker_label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "renderBundleEncoderPopDebugGroup", fn (render_bundle_encoder: *gpu.RenderBundleEncoder) callconv(.Inline) void); + assertDecl(T, "renderBundleEncoderPushDebugGroup", fn (render_bundle_encoder: *gpu.RenderBundleEncoder, group_label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "renderBundleEncoderSetIndexBuffer", fn (render_bundle_encoder: *gpu.RenderBundleEncoder, buffer: 
*gpu.Buffer, format: gpu.IndexFormat, offset: u64, size: u64) callconv(.Inline) void); + assertDecl(T, "renderBundleEncoderSetLabel", fn (render_bundle_encoder: *gpu.RenderBundleEncoder, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "renderBundleEncoderSetVertexBuffer", fn (render_bundle_encoder: *gpu.RenderBundleEncoder, slot: u32, buffer: *gpu.Buffer, offset: u64, size: u64) callconv(.Inline) void); + assertDecl(T, "renderBundleEncoderReference", fn (render_bundle_encoder: *gpu.RenderBundleEncoder) callconv(.Inline) void); + assertDecl(T, "renderBundleEncoderRelease", fn (render_bundle_encoder: *gpu.RenderBundleEncoder) callconv(.Inline) void); + + // gpu.RenderPassEncoder + assertDecl(T, "renderPassEncoderBeginOcclusionQuery", fn (render_pass_encoder: *gpu.RenderPassEncoder, query_index: u32) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderDraw", fn (render_pass_encoder: *gpu.RenderPassEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderDrawIndexed", fn (render_pass_encoder: *gpu.RenderPassEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderDrawIndexedIndirect", fn (render_pass_encoder: *gpu.RenderPassEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderDrawIndirect", fn (render_pass_encoder: *gpu.RenderPassEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderEnd", fn (render_pass_encoder: *gpu.RenderPassEncoder) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderEndOcclusionQuery", fn (render_pass_encoder: *gpu.RenderPassEncoder) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderExecuteBundles", fn (render_pass_encoder: *gpu.RenderPassEncoder, bundles_count: usize, bundles: [*]const *const gpu.RenderBundle) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderInsertDebugMarker", fn (render_pass_encoder: *gpu.RenderPassEncoder, marker_label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderPopDebugGroup", fn (render_pass_encoder: *gpu.RenderPassEncoder) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderPushDebugGroup", fn (render_pass_encoder: *gpu.RenderPassEncoder, group_label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderSetBlendConstant", fn (render_pass_encoder: *gpu.RenderPassEncoder, color: *const gpu.Color) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderSetIndexBuffer", fn (render_pass_encoder: *gpu.RenderPassEncoder, buffer: *gpu.Buffer, format: gpu.IndexFormat, offset: u64, size: u64) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderSetLabel", fn (render_pass_encoder: *gpu.RenderPassEncoder, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderSetScissorRect", fn (render_pass_encoder: *gpu.RenderPassEncoder, x: u32, y: u32, width: u32, height: u32) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderSetStencilReference", fn (render_pass_encoder: *gpu.RenderPassEncoder, reference: u32) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderSetVertexBuffer", fn (render_pass_encoder: *gpu.RenderPassEncoder, slot: u32, buffer: *gpu.Buffer, offset: u64, size: u64) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderSetViewport", fn (render_pass_encoder: 
*gpu.RenderPassEncoder, x: f32, y: f32, width: f32, height: f32, min_depth: f32, max_depth: f32) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderWriteTimestamp", fn (render_pass_encoder: *gpu.RenderPassEncoder, query_set: *gpu.QuerySet, query_index: u32) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderReference", fn (render_pass_encoder: *gpu.RenderPassEncoder) callconv(.Inline) void); + assertDecl(T, "renderPassEncoderRelease", fn (render_pass_encoder: *gpu.RenderPassEncoder) callconv(.Inline) void); + + // gpu.Sampler + assertDecl(T, "samplerSetLabel", fn (sampler: *gpu.Sampler, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "samplerReference", fn (sampler: *gpu.Sampler) callconv(.Inline) void); + assertDecl(T, "samplerRelease", fn (sampler: *gpu.Sampler) callconv(.Inline) void); + + // gpu.ShaderModule + assertDecl(T, "shaderModuleGetCompilationInfo", fn (shader_module: *gpu.ShaderModule, callback: gpu.CompilationInfoCallback, userdata: ?*anyopaque) callconv(.Inline) void); + assertDecl(T, "shaderModuleSetLabel", fn (shader_module: *gpu.ShaderModule, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "shaderModuleReference", fn (shader_module: *gpu.ShaderModule) callconv(.Inline) void); + assertDecl(T, "shaderModuleRelease", fn (shader_module: *gpu.ShaderModule) callconv(.Inline) void); + + // gpu.SharedFence + assertDecl(T, "sharedFenceExportInfo", fn (shared_fence: *gpu.SharedFence, info: *gpu.SharedFence.ExportInfo) callconv(.Inline) void); + assertDecl(T, "sharedFenceReference", fn (shared_fence: *gpu.SharedFence) callconv(.Inline) void); + assertDecl(T, "sharedFenceRelease", fn (shared_fence: *gpu.SharedFence) callconv(.Inline) void); + + // gpu.SharedTextureMemory + assertDecl(T, "sharedTextureMemoryBeginAccess", fn (shared_texture_memory: *gpu.SharedTextureMemory, texture: *gpu.Texture, descriptor: *const gpu.SharedTextureMemory.BeginAccessDescriptor) callconv(.Inline) void); + assertDecl(T, "sharedTextureMemoryCreateTexture", fn (shared_texture_memory: *gpu.SharedTextureMemory, descriptor: *const gpu.Texture.Descriptor) callconv(.Inline) *gpu.Texture); + assertDecl(T, "sharedTextureMemoryEndAccess", fn (shared_texture_memory: *gpu.SharedTextureMemory, texture: *gpu.Texture, descriptor: *gpu.SharedTextureMemory.EndAccessState) callconv(.Inline) void); + assertDecl(T, "sharedTextureMemoryEndAccessStateFreeMembers", fn (value: gpu.SharedTextureMemory.EndAccessState) callconv(.Inline) void); + assertDecl(T, "sharedTextureMemoryGetProperties", fn (shared_texture_memory: *gpu.SharedTextureMemory, properties: *gpu.SharedTextureMemory.Properties) callconv(.Inline) void); + assertDecl(T, "sharedTextureMemorySetLabel", fn (shared_texture_memory: *gpu.SharedTextureMemory, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "sharedTextureMemoryReference", fn (shared_texture_memory: *gpu.SharedTextureMemory) callconv(.Inline) void); + assertDecl(T, "sharedTextureMemoryRelease", fn (shared_texture_memory: *gpu.SharedTextureMemory) callconv(.Inline) void); + + // gpu.Surface + assertDecl(T, "surfaceReference", fn (surface: *gpu.Surface) callconv(.Inline) void); + assertDecl(T, "surfaceRelease", fn (surface: *gpu.Surface) callconv(.Inline) void); + + // gpu.SwapChain + assertDecl(T, "swapChainGetCurrentTexture", fn (swap_chain: *gpu.SwapChain) callconv(.Inline) ?*gpu.Texture); + assertDecl(T, "swapChainGetCurrentTextureView", fn (swap_chain: *gpu.SwapChain) callconv(.Inline) ?*gpu.TextureView); + assertDecl(T, "swapChainPresent", fn 
(swap_chain: *gpu.SwapChain) callconv(.Inline) void); + assertDecl(T, "swapChainReference", fn (swap_chain: *gpu.SwapChain) callconv(.Inline) void); + assertDecl(T, "swapChainRelease", fn (swap_chain: *gpu.SwapChain) callconv(.Inline) void); + + // gpu.Texture + assertDecl(T, "textureCreateView", fn (texture: *gpu.Texture, descriptor: ?*const gpu.TextureView.Descriptor) callconv(.Inline) *gpu.TextureView); + assertDecl(T, "textureDestroy", fn (texture: *gpu.Texture) callconv(.Inline) void); + assertDecl(T, "textureGetDepthOrArrayLayers", fn (texture: *gpu.Texture) callconv(.Inline) u32); + assertDecl(T, "textureGetDimension", fn (texture: *gpu.Texture) callconv(.Inline) gpu.Texture.Dimension); + assertDecl(T, "textureGetFormat", fn (texture: *gpu.Texture) callconv(.Inline) gpu.Texture.Format); + assertDecl(T, "textureGetHeight", fn (texture: *gpu.Texture) callconv(.Inline) u32); + assertDecl(T, "textureGetMipLevelCount", fn (texture: *gpu.Texture) callconv(.Inline) u32); + assertDecl(T, "textureGetSampleCount", fn (texture: *gpu.Texture) callconv(.Inline) u32); + assertDecl(T, "textureGetUsage", fn (texture: *gpu.Texture) callconv(.Inline) gpu.Texture.UsageFlags); + assertDecl(T, "textureGetWidth", fn (texture: *gpu.Texture) callconv(.Inline) u32); + assertDecl(T, "textureSetLabel", fn (texture: *gpu.Texture, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "textureReference", fn (texture: *gpu.Texture) callconv(.Inline) void); + assertDecl(T, "textureRelease", fn (texture: *gpu.Texture) callconv(.Inline) void); + + // gpu.TextureView + assertDecl(T, "textureViewSetLabel", fn (texture_view: *gpu.TextureView, label: [*:0]const u8) callconv(.Inline) void); + assertDecl(T, "textureViewReference", fn (texture_view: *gpu.TextureView) callconv(.Inline) void); + assertDecl(T, "textureViewRelease", fn (texture_view: *gpu.TextureView) callconv(.Inline) void); + return T; +} + +fn assertDecl(comptime T: type, comptime name: []const u8, comptime Decl: type) void { + if (!@hasDecl(T, name)) @compileError("gpu.Interface missing declaration '" ++ name ++ "' of type: " ++ @typeName(Decl)); + const FoundDecl = @TypeOf(@field(T, name)); + if (FoundDecl != Decl) @compileError("gpu.Interface field '" ++ name ++ "'\n\texpected type: " ++ @typeName(Decl) ++ "\n\t found type: " ++ @typeName(FoundDecl)); +} + +/// Exports C ABI function declarations for the given gpu.Interface implementation.
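+///
+/// A minimal sketch of intended use, assuming a hypothetical implementation type
+/// `MyImpl` that passes the Interface verification above: referencing the
+/// returned struct from a comptime block forces it to be analyzed, which emits
+/// its `export fn` `wgpu*` symbols into the binary.
+///
+///     comptime {
+///         // `MyImpl` is hypothetical; any verified gpu.Interface works here.
+///         _ = Export(MyImpl);
+///     }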
+pub fn Export(comptime T: type) type { + _ = Interface(T); // verify implementation is a valid interface + return struct { + // WGPU_EXPORT WGPUInstance wgpuCreateInstance(WGPUInstanceDescriptor const * descriptor); + export fn wgpuCreateInstance(descriptor: ?*const gpu.Instance.Descriptor) ?*gpu.Instance { + return T.createInstance(descriptor); + } + + // WGPU_EXPORT WGPUProc wgpuGetProcAddress(WGPUDevice device, char const * procName); + export fn wgpuGetProcAddress(device: *gpu.Device, proc_name: [*:0]const u8) ?gpu.Proc { + return T.getProcAddress(device, proc_name); + } + + // WGPU_EXPORT WGPUDevice wgpuAdapterCreateDevice(WGPUAdapter adapter, WGPUDeviceDescriptor const * descriptor /* nullable */); + export fn wgpuAdapterCreateDevice(adapter: *gpu.Adapter, descriptor: ?*const gpu.Device.Descriptor) ?*gpu.Device { + return T.adapterCreateDevice(adapter, descriptor); + } + + // WGPU_EXPORT size_t wgpuAdapterEnumerateFeatures(WGPUAdapter adapter, WGPUFeatureName * features); + export fn wgpuAdapterEnumerateFeatures(adapter: *gpu.Adapter, features: ?[*]gpu.FeatureName) usize { + return T.adapterEnumerateFeatures(adapter, features); + } + + // WGPU_EXPORT WGPUInstance wgpuAdapterGetInstance(WGPUAdapter adapter); + export fn wgpuAdapterGetInstance(adapter: *gpu.Adapter) *gpu.Instance { + return T.adapterGetInstance(adapter); + } + + // WGPU_EXPORT WGPUBool wgpuAdapterGetLimits(WGPUAdapter adapter, WGPUSupportedLimits * limits); + export fn wgpuAdapterGetLimits(adapter: *gpu.Adapter, limits: *gpu.SupportedLimits) u32 { + return T.adapterGetLimits(adapter, limits); + } + + // WGPU_EXPORT void wgpuAdapterGetProperties(WGPUAdapter adapter, WGPUAdapterProperties * properties); + export fn wgpuAdapterGetProperties(adapter: *gpu.Adapter, properties: *gpu.Adapter.Properties) void { + return T.adapterGetProperties(adapter, properties); + } + + // WGPU_EXPORT WGPUBool wgpuAdapterHasFeature(WGPUAdapter adapter, WGPUFeatureName feature); + export fn wgpuAdapterHasFeature(adapter: *gpu.Adapter, feature: gpu.FeatureName) u32 { + return T.adapterHasFeature(adapter, feature); + } + + // WGPU_EXPORT void wgpuAdapterPropertiesFreeMembers(WGPUAdapterProperties value); + export fn wgpuAdapterPropertiesFreeMembers(value: gpu.Adapter.Properties) void { + T.adapterPropertiesFreeMembers(value); + } + + // WGPU_EXPORT void wgpuAdapterRequestDevice(WGPUAdapter adapter, WGPUDeviceDescriptor const * descriptor /* nullable */, WGPURequestDeviceCallback callback, void * userdata); + export fn wgpuAdapterRequestDevice(adapter: *gpu.Adapter, descriptor: ?*const gpu.Device.Descriptor, callback: gpu.RequestDeviceCallback, userdata: ?*anyopaque) void { + T.adapterRequestDevice(adapter, descriptor, callback, userdata); + } + + // WGPU_EXPORT void wgpuAdapterReference(WGPUAdapter adapter); + export fn wgpuAdapterReference(adapter: *gpu.Adapter) void { + T.adapterReference(adapter); + } + + // WGPU_EXPORT void wgpuAdapterRelease(WGPUAdapter adapter); + export fn wgpuAdapterRelease(adapter: *gpu.Adapter) void { + T.adapterRelease(adapter); + } + + // WGPU_EXPORT void wgpuBindGroupSetLabel(WGPUBindGroup bindGroup, char const * label); + export fn wgpuBindGroupSetLabel(bind_group: *gpu.BindGroup, label: [*:0]const u8) void { + T.bindGroupSetLabel(bind_group, label); + } + + // WGPU_EXPORT void wgpuBindGroupReference(WGPUBindGroup bindGroup); + export fn wgpuBindGroupReference(bind_group: *gpu.BindGroup) void { + T.bindGroupReference(bind_group); + } + + // WGPU_EXPORT void wgpuBindGroupRelease(WGPUBindGroup bindGroup); + export 
fn wgpuBindGroupRelease(bind_group: *gpu.BindGroup) void { + T.bindGroupRelease(bind_group); + } + + // WGPU_EXPORT void wgpuBindGroupLayoutSetLabel(WGPUBindGroupLayout bindGroupLayout, char const * label); + export fn wgpuBindGroupLayoutSetLabel(bind_group_layout: *gpu.BindGroupLayout, label: [*:0]const u8) void { + T.bindGroupLayoutSetLabel(bind_group_layout, label); + } + + // WGPU_EXPORT void wgpuBindGroupLayoutReference(WGPUBindGroupLayout bindGroupLayout); + export fn wgpuBindGroupLayoutReference(bind_group_layout: *gpu.BindGroupLayout) void { + T.bindGroupLayoutReference(bind_group_layout); + } + + // WGPU_EXPORT void wgpuBindGroupLayoutRelease(WGPUBindGroupLayout bindGroupLayout); + export fn wgpuBindGroupLayoutRelease(bind_group_layout: *gpu.BindGroupLayout) void { + T.bindGroupLayoutRelease(bind_group_layout); + } + + // WGPU_EXPORT void wgpuBufferDestroy(WGPUBuffer buffer); + export fn wgpuBufferDestroy(buffer: *gpu.Buffer) void { + T.bufferDestroy(buffer); + } + + // WGPU_EXPORT void const * wgpuBufferGetConstMappedRange(WGPUBuffer buffer, size_t offset, size_t size); + export fn wgpuBufferGetConstMappedRange(buffer: *gpu.Buffer, offset: usize, size: usize) ?*const anyopaque { + return T.bufferGetConstMappedRange(buffer, offset, size); + } + + // WGPU_EXPORT void * wgpuBufferGetMappedRange(WGPUBuffer buffer, size_t offset, size_t size); + export fn wgpuBufferGetMappedRange(buffer: *gpu.Buffer, offset: usize, size: usize) ?*anyopaque { + return T.bufferGetMappedRange(buffer, offset, size); + } + + // WGPU_EXPORT uint64_t wgpuBufferGetSize(WGPUBuffer buffer); + export fn wgpuBufferGetSize(buffer: *gpu.Buffer) u64 { + return T.bufferGetSize(buffer); + } + + // WGPU_EXPORT WGPUBufferUsage wgpuBufferGetUsage(WGPUBuffer buffer); + export fn wgpuBufferGetUsage(buffer: *gpu.Buffer) gpu.Buffer.UsageFlags { + return T.bufferGetUsage(buffer); + } + + // WGPU_EXPORT void wgpuBufferMapAsync(WGPUBuffer buffer, WGPUMapModeFlags mode, size_t offset, size_t size, WGPUBufferMapCallback callback, void * userdata); + export fn wgpuBufferMapAsync(buffer: *gpu.Buffer, mode: u32, offset: usize, size: usize, callback: gpu.Buffer.MapCallback, userdata: ?*anyopaque) void { + T.bufferMapAsync(buffer, @as(gpu.MapModeFlags, @bitCast(mode)), offset, size, callback, userdata); + } + + // WGPU_EXPORT void wgpuBufferSetLabel(WGPUBuffer buffer, char const * label); + export fn wgpuBufferSetLabel(buffer: *gpu.Buffer, label: [*:0]const u8) void { + T.bufferSetLabel(buffer, label); + } + + // WGPU_EXPORT void wgpuBufferUnmap(WGPUBuffer buffer); + export fn wgpuBufferUnmap(buffer: *gpu.Buffer) void { + T.bufferUnmap(buffer); + } + + // WGPU_EXPORT void wgpuBufferReference(WGPUBuffer buffer); + export fn wgpuBufferReference(buffer: *gpu.Buffer) void { + T.bufferReference(buffer); + } + + // WGPU_EXPORT void wgpuBufferRelease(WGPUBuffer buffer); + export fn wgpuBufferRelease(buffer: *gpu.Buffer) void { + T.bufferRelease(buffer); + } + + // WGPU_EXPORT void wgpuCommandBufferSetLabel(WGPUCommandBuffer commandBuffer, char const * label); + export fn wgpuCommandBufferSetLabel(command_buffer: *gpu.CommandBuffer, label: [*:0]const u8) void { + T.commandBufferSetLabel(command_buffer, label); + } + + // WGPU_EXPORT void wgpuCommandBufferReference(WGPUCommandBuffer commandBuffer); + export fn wgpuCommandBufferReference(command_buffer: *gpu.CommandBuffer) void { + T.commandBufferReference(command_buffer); + } + + // WGPU_EXPORT void wgpuCommandBufferRelease(WGPUCommandBuffer commandBuffer); + export fn 
wgpuCommandBufferRelease(command_buffer: *gpu.CommandBuffer) void { + T.commandBufferRelease(command_buffer); + } + + // WGPU_EXPORT WGPUComputePassEncoder wgpuCommandEncoderBeginComputePass(WGPUCommandEncoder commandEncoder, WGPUComputePassDescriptor const * descriptor /* nullable */); + export fn wgpuCommandEncoderBeginComputePass(command_encoder: *gpu.CommandEncoder, descriptor: ?*const gpu.ComputePassDescriptor) *gpu.ComputePassEncoder { + return T.commandEncoderBeginComputePass(command_encoder, descriptor); + } + + // WGPU_EXPORT WGPURenderPassEncoder wgpuCommandEncoderBeginRenderPass(WGPUCommandEncoder commandEncoder, WGPURenderPassDescriptor const * descriptor); + export fn wgpuCommandEncoderBeginRenderPass(command_encoder: *gpu.CommandEncoder, descriptor: *const gpu.RenderPassDescriptor) *gpu.RenderPassEncoder { + return T.commandEncoderBeginRenderPass(command_encoder, descriptor); + } + + // WGPU_EXPORT void wgpuCommandEncoderClearBuffer(WGPUCommandEncoder commandEncoder, WGPUBuffer buffer, uint64_t offset, uint64_t size); + export fn wgpuCommandEncoderClearBuffer(command_encoder: *gpu.CommandEncoder, buffer: *gpu.Buffer, offset: u64, size: u64) void { + T.commandEncoderClearBuffer(command_encoder, buffer, offset, size); + } + + // WGPU_EXPORT void wgpuCommandEncoderCopyBufferToBuffer(WGPUCommandEncoder commandEncoder, WGPUBuffer source, uint64_t sourceOffset, WGPUBuffer destination, uint64_t destinationOffset, uint64_t size); + export fn wgpuCommandEncoderCopyBufferToBuffer(command_encoder: *gpu.CommandEncoder, source: *gpu.Buffer, source_offset: u64, destination: *gpu.Buffer, destination_offset: u64, size: u64) void { + T.commandEncoderCopyBufferToBuffer(command_encoder, source, source_offset, destination, destination_offset, size); + } + + // WGPU_EXPORT void wgpuCommandEncoderCopyBufferToTexture(WGPUCommandEncoder commandEncoder, WGPUImageCopyBuffer const * source, WGPUImageCopyTexture const * destination, WGPUExtent3D const * copySize); + export fn wgpuCommandEncoderCopyBufferToTexture(command_encoder: *gpu.CommandEncoder, source: *const gpu.ImageCopyBuffer, destination: *const gpu.ImageCopyTexture, copy_size: *const gpu.Extent3D) void { + T.commandEncoderCopyBufferToTexture(command_encoder, source, destination, copy_size); + } + + // WGPU_EXPORT void wgpuCommandEncoderCopyTextureToBuffer(WGPUCommandEncoder commandEncoder, WGPUImageCopyTexture const * source, WGPUImageCopyBuffer const * destination, WGPUExtent3D const * copySize); + export fn wgpuCommandEncoderCopyTextureToBuffer(command_encoder: *gpu.CommandEncoder, source: *const gpu.ImageCopyTexture, destination: *const gpu.ImageCopyBuffer, copy_size: *const gpu.Extent3D) void { + T.commandEncoderCopyTextureToBuffer(command_encoder, source, destination, copy_size); + } + + // WGPU_EXPORT void wgpuCommandEncoderCopyTextureToTexture(WGPUCommandEncoder commandEncoder, WGPUImageCopyTexture const * source, WGPUImageCopyTexture const * destination, WGPUExtent3D const * copySize); + export fn wgpuCommandEncoderCopyTextureToTexture(command_encoder: *gpu.CommandEncoder, source: *const gpu.ImageCopyTexture, destination: *const gpu.ImageCopyTexture, copy_size: *const gpu.Extent3D) void { + T.commandEncoderCopyTextureToTexture(command_encoder, source, destination, copy_size); + } + + // WGPU_EXPORT WGPUCommandBuffer wgpuCommandEncoderFinish(WGPUCommandEncoder commandEncoder, WGPUCommandBufferDescriptor const * descriptor /* nullable */); + export fn wgpuCommandEncoderFinish(command_encoder: *gpu.CommandEncoder, descriptor: ?*const 
gpu.CommandBuffer.Descriptor) *gpu.CommandBuffer { + return T.commandEncoderFinish(command_encoder, descriptor); + } + + // WGPU_EXPORT void wgpuCommandEncoderInjectValidationError(WGPUCommandEncoder commandEncoder, char const * message); + export fn wgpuCommandEncoderInjectValidationError(command_encoder: *gpu.CommandEncoder, message: [*:0]const u8) void { + T.commandEncoderInjectValidationError(command_encoder, message); + } + + // WGPU_EXPORT void wgpuCommandEncoderInsertDebugMarker(WGPUCommandEncoder commandEncoder, char const * markerLabel); + export fn wgpuCommandEncoderInsertDebugMarker(command_encoder: *gpu.CommandEncoder, marker_label: [*:0]const u8) void { + T.commandEncoderInsertDebugMarker(command_encoder, marker_label); + } + + // WGPU_EXPORT void wgpuCommandEncoderPopDebugGroup(WGPUCommandEncoder commandEncoder); + export fn wgpuCommandEncoderPopDebugGroup(command_encoder: *gpu.CommandEncoder) void { + T.commandEncoderPopDebugGroup(command_encoder); + } + + // WGPU_EXPORT void wgpuCommandEncoderPushDebugGroup(WGPUCommandEncoder commandEncoder, char const * groupLabel); + export fn wgpuCommandEncoderPushDebugGroup(command_encoder: *gpu.CommandEncoder, group_label: [*:0]const u8) void { + T.commandEncoderPushDebugGroup(command_encoder, group_label); + } + + // WGPU_EXPORT void wgpuCommandEncoderResolveQuerySet(WGPUCommandEncoder commandEncoder, WGPUQuerySet querySet, uint32_t firstQuery, uint32_t queryCount, WGPUBuffer destination, uint64_t destinationOffset); + export fn wgpuCommandEncoderResolveQuerySet(command_encoder: *gpu.CommandEncoder, query_set: *gpu.QuerySet, first_query: u32, query_count: u32, destination: *gpu.Buffer, destination_offset: u64) void { + T.commandEncoderResolveQuerySet(command_encoder, query_set, first_query, query_count, destination, destination_offset); + } + + // WGPU_EXPORT void wgpuCommandEncoderSetLabel(WGPUCommandEncoder commandEncoder, char const * label); + export fn wgpuCommandEncoderSetLabel(command_encoder: *gpu.CommandEncoder, label: [*:0]const u8) void { + T.commandEncoderSetLabel(command_encoder, label); + } + + // WGPU_EXPORT void wgpuCommandEncoderWriteBuffer(WGPUCommandEncoder commandEncoder, WGPUBuffer buffer, uint64_t bufferOffset, uint8_t const * data, uint64_t size); + export fn wgpuCommandEncoderWriteBuffer(command_encoder: *gpu.CommandEncoder, buffer: *gpu.Buffer, buffer_offset: u64, data: [*]const u8, size: u64) void { + T.commandEncoderWriteBuffer(command_encoder, buffer, buffer_offset, data, size); + } + + // WGPU_EXPORT void wgpuCommandEncoderWriteTimestamp(WGPUCommandEncoder commandEncoder, WGPUQuerySet querySet, uint32_t queryIndex); + export fn wgpuCommandEncoderWriteTimestamp(command_encoder: *gpu.CommandEncoder, query_set: *gpu.QuerySet, query_index: u32) void { + T.commandEncoderWriteTimestamp(command_encoder, query_set, query_index); + } + + // WGPU_EXPORT void wgpuCommandEncoderReference(WGPUCommandEncoder commandEncoder); + export fn wgpuCommandEncoderReference(command_encoder: *gpu.CommandEncoder) void { + T.commandEncoderReference(command_encoder); + } + + // WGPU_EXPORT void wgpuCommandEncoderRelease(WGPUCommandEncoder commandEncoder); + export fn wgpuCommandEncoderRelease(command_encoder: *gpu.CommandEncoder) void { + T.commandEncoderRelease(command_encoder); + } + + // WGPU_EXPORT void wgpuComputePassEncoderDispatchWorkgroups(WGPUComputePassEncoder computePassEncoder, uint32_t workgroupCountX, uint32_t workgroupCountY, uint32_t workgroupCountZ); + export fn 
wgpuComputePassEncoderDispatchWorkgroups(compute_pass_encoder: *gpu.ComputePassEncoder, workgroup_count_x: u32, workgroup_count_y: u32, workgroup_count_z: u32) void { + T.computePassEncoderDispatchWorkgroups(compute_pass_encoder, workgroup_count_x, workgroup_count_y, workgroup_count_z); + } + + // WGPU_EXPORT void wgpuComputePassEncoderDispatchWorkgroupsIndirect(WGPUComputePassEncoder computePassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset); + export fn wgpuComputePassEncoderDispatchWorkgroupsIndirect(compute_pass_encoder: *gpu.ComputePassEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) void { + T.computePassEncoderDispatchWorkgroupsIndirect(compute_pass_encoder, indirect_buffer, indirect_offset); + } + + // WGPU_EXPORT void wgpuComputePassEncoderEnd(WGPUComputePassEncoder computePassEncoder); + export fn wgpuComputePassEncoderEnd(compute_pass_encoder: *gpu.ComputePassEncoder) void { + T.computePassEncoderEnd(compute_pass_encoder); + } + + // WGPU_EXPORT void wgpuComputePassEncoderInsertDebugMarker(WGPUComputePassEncoder computePassEncoder, char const * markerLabel); + export fn wgpuComputePassEncoderInsertDebugMarker(compute_pass_encoder: *gpu.ComputePassEncoder, marker_label: [*:0]const u8) void { + T.computePassEncoderInsertDebugMarker(compute_pass_encoder, marker_label); + } + + // WGPU_EXPORT void wgpuComputePassEncoderPopDebugGroup(WGPUComputePassEncoder computePassEncoder); + export fn wgpuComputePassEncoderPopDebugGroup(compute_pass_encoder: *gpu.ComputePassEncoder) void { + T.computePassEncoderPopDebugGroup(compute_pass_encoder); + } + + // WGPU_EXPORT void wgpuComputePassEncoderPushDebugGroup(WGPUComputePassEncoder computePassEncoder, char const * groupLabel); + export fn wgpuComputePassEncoderPushDebugGroup(compute_pass_encoder: *gpu.ComputePassEncoder, group_label: [*:0]const u8) void { + T.computePassEncoderPushDebugGroup(compute_pass_encoder, group_label); + } + + // WGPU_EXPORT void wgpuComputePassEncoderSetBindGroup(WGPUComputePassEncoder computePassEncoder, uint32_t groupIndex, WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets); + export fn wgpuComputePassEncoderSetBindGroup(compute_pass_encoder: *gpu.ComputePassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void { + T.computePassEncoderSetBindGroup(compute_pass_encoder, group_index, group, dynamic_offset_count, dynamic_offsets); + } + + // WGPU_EXPORT void wgpuComputePassEncoderSetLabel(WGPUComputePassEncoder computePassEncoder, char const * label); + export fn wgpuComputePassEncoderSetLabel(compute_pass_encoder: *gpu.ComputePassEncoder, label: [*:0]const u8) void { + T.computePassEncoderSetLabel(compute_pass_encoder, label); + } + + // WGPU_EXPORT void wgpuComputePassEncoderSetPipeline(WGPUComputePassEncoder computePassEncoder, WGPUComputePipeline pipeline); + export fn wgpuComputePassEncoderSetPipeline(compute_pass_encoder: *gpu.ComputePassEncoder, pipeline: *gpu.ComputePipeline) void { + T.computePassEncoderSetPipeline(compute_pass_encoder, pipeline); + } + + // WGPU_EXPORT void wgpuComputePassEncoderWriteTimestamp(WGPUComputePassEncoder computePassEncoder, WGPUQuerySet querySet, uint32_t queryIndex); + export fn wgpuComputePassEncoderWriteTimestamp(compute_pass_encoder: *gpu.ComputePassEncoder, query_set: *gpu.QuerySet, query_index: u32) void { + T.computePassEncoderWriteTimestamp(compute_pass_encoder, query_set, query_index); + } + + // WGPU_EXPORT void 
wgpuComputePassEncoderReference(WGPUComputePassEncoder computePassEncoder); + export fn wgpuComputePassEncoderReference(compute_pass_encoder: *gpu.ComputePassEncoder) void { + T.computePassEncoderReference(compute_pass_encoder); + } + + // WGPU_EXPORT void wgpuComputePassEncoderRelease(WGPUComputePassEncoder computePassEncoder); + export fn wgpuComputePassEncoderRelease(compute_pass_encoder: *gpu.ComputePassEncoder) void { + T.computePassEncoderRelease(compute_pass_encoder); + } + + // WGPU_EXPORT WGPUBindGroupLayout wgpuComputePipelineGetBindGroupLayout(WGPUComputePipeline computePipeline, uint32_t groupIndex); + export fn wgpuComputePipelineGetBindGroupLayout(compute_pipeline: *gpu.ComputePipeline, group_index: u32) *gpu.BindGroupLayout { + return T.computePipelineGetBindGroupLayout(compute_pipeline, group_index); + } + + // WGPU_EXPORT void wgpuComputePipelineSetLabel(WGPUComputePipeline computePipeline, char const * label); + export fn wgpuComputePipelineSetLabel(compute_pipeline: *gpu.ComputePipeline, label: [*:0]const u8) void { + T.computePipelineSetLabel(compute_pipeline, label); + } + + // WGPU_EXPORT void wgpuComputePipelineReference(WGPUComputePipeline computePipeline); + export fn wgpuComputePipelineReference(compute_pipeline: *gpu.ComputePipeline) void { + T.computePipelineReference(compute_pipeline); + } + + // WGPU_EXPORT void wgpuComputePipelineRelease(WGPUComputePipeline computePipeline); + export fn wgpuComputePipelineRelease(compute_pipeline: *gpu.ComputePipeline) void { + T.computePipelineRelease(compute_pipeline); + } + + // WGPU_EXPORT WGPUBindGroup wgpuDeviceCreateBindGroup(WGPUDevice device, WGPUBindGroupDescriptor const * descriptor); + export fn wgpuDeviceCreateBindGroup(device: *gpu.Device, descriptor: *const gpu.BindGroup.Descriptor) *gpu.BindGroup { + return T.deviceCreateBindGroup(device, descriptor); + } + + // WGPU_EXPORT WGPUBindGroupLayout wgpuDeviceCreateBindGroupLayout(WGPUDevice device, WGPUBindGroupLayoutDescriptor const * descriptor); + export fn wgpuDeviceCreateBindGroupLayout(device: *gpu.Device, descriptor: *const gpu.BindGroupLayout.Descriptor) *gpu.BindGroupLayout { + return T.deviceCreateBindGroupLayout(device, descriptor); + } + + // WGPU_EXPORT WGPUBuffer wgpuDeviceCreateBuffer(WGPUDevice device, WGPUBufferDescriptor const * descriptor); + export fn wgpuDeviceCreateBuffer(device: *gpu.Device, descriptor: *const gpu.Buffer.Descriptor) *gpu.Buffer { + return T.deviceCreateBuffer(device, descriptor); + } + + // WGPU_EXPORT WGPUCommandEncoder wgpuDeviceCreateCommandEncoder(WGPUDevice device, WGPUCommandEncoderDescriptor const * descriptor /* nullable */); + export fn wgpuDeviceCreateCommandEncoder(device: *gpu.Device, descriptor: ?*const gpu.CommandEncoder.Descriptor) *gpu.CommandEncoder { + return T.deviceCreateCommandEncoder(device, descriptor); + } + + // WGPU_EXPORT WGPUComputePipeline wgpuDeviceCreateComputePipeline(WGPUDevice device, WGPUComputePipelineDescriptor const * descriptor); + export fn wgpuDeviceCreateComputePipeline(device: *gpu.Device, descriptor: *const gpu.ComputePipeline.Descriptor) *gpu.ComputePipeline { + return T.deviceCreateComputePipeline(device, descriptor); + } + + // WGPU_EXPORT void wgpuDeviceCreateComputePipelineAsync(WGPUDevice device, WGPUComputePipelineDescriptor const * descriptor, WGPUCreateComputePipelineAsyncCallback callback, void * userdata); + export fn wgpuDeviceCreateComputePipelineAsync(device: *gpu.Device, descriptor: *const gpu.ComputePipeline.Descriptor, callback:
gpu.CreateComputePipelineAsyncCallback, userdata: ?*anyopaque) void { + T.deviceCreateComputePipelineAsync(device, descriptor, callback, userdata); + } + + // WGPU_EXPORT WGPUBuffer wgpuDeviceCreateErrorBuffer(WGPUDevice device, WGPUBufferDescriptor const * descriptor); + export fn wgpuDeviceCreateErrorBuffer(device: *gpu.Device, descriptor: *const gpu.Buffer.Descriptor) *gpu.Buffer { + return T.deviceCreateErrorBuffer(device, descriptor); + } + + // WGPU_EXPORT WGPUExternalTexture wgpuDeviceCreateErrorExternalTexture(WGPUDevice device); + export fn wgpuDeviceCreateErrorExternalTexture(device: *gpu.Device) *gpu.ExternalTexture { + return T.deviceCreateErrorExternalTexture(device); + } + + // WGPU_EXPORT WGPUTexture wgpuDeviceCreateErrorTexture(WGPUDevice device, WGPUTextureDescriptor const * descriptor); + export fn wgpuDeviceCreateErrorTexture(device: *gpu.Device, descriptor: *const gpu.Texture.Descriptor) *gpu.Texture { + return T.deviceCreateErrorTexture(device, descriptor); + } + + // WGPU_EXPORT WGPUExternalTexture wgpuDeviceCreateExternalTexture(WGPUDevice device, WGPUExternalTextureDescriptor const * externalTextureDescriptor); + export fn wgpuDeviceCreateExternalTexture(device: *gpu.Device, external_texture_descriptor: *const gpu.ExternalTexture.Descriptor) *gpu.ExternalTexture { + return T.deviceCreateExternalTexture(device, external_texture_descriptor); + } + + // WGPU_EXPORT WGPUPipelineLayout wgpuDeviceCreatePipelineLayout(WGPUDevice device, WGPUPipelineLayoutDescriptor const * descriptor); + export fn wgpuDeviceCreatePipelineLayout(device: *gpu.Device, pipeline_layout_descriptor: *const gpu.PipelineLayout.Descriptor) *gpu.PipelineLayout { + return T.deviceCreatePipelineLayout(device, pipeline_layout_descriptor); + } + + // WGPU_EXPORT WGPUQuerySet wgpuDeviceCreateQuerySet(WGPUDevice device, WGPUQuerySetDescriptor const * descriptor); + export fn wgpuDeviceCreateQuerySet(device: *gpu.Device, descriptor: *const gpu.QuerySet.Descriptor) *gpu.QuerySet { + return T.deviceCreateQuerySet(device, descriptor); + } + + // WGPU_EXPORT WGPURenderBundleEncoder wgpuDeviceCreateRenderBundleEncoder(WGPUDevice device, WGPURenderBundleEncoderDescriptor const * descriptor); + export fn wgpuDeviceCreateRenderBundleEncoder(device: *gpu.Device, descriptor: *const gpu.RenderBundleEncoder.Descriptor) *gpu.RenderBundleEncoder { + return T.deviceCreateRenderBundleEncoder(device, descriptor); + } + + // WGPU_EXPORT WGPURenderPipeline wgpuDeviceCreateRenderPipeline(WGPUDevice device, WGPURenderPipelineDescriptor const * descriptor); + export fn wgpuDeviceCreateRenderPipeline(device: *gpu.Device, descriptor: *const gpu.RenderPipeline.Descriptor) *gpu.RenderPipeline { + return T.deviceCreateRenderPipeline(device, descriptor); + } + + // WGPU_EXPORT void wgpuDeviceCreateRenderPipelineAsync(WGPUDevice device, WGPURenderPipelineDescriptor const * descriptor, WGPUCreateRenderPipelineAsyncCallback callback, void * userdata); + export fn wgpuDeviceCreateRenderPipelineAsync(device: *gpu.Device, descriptor: *const gpu.RenderPipeline.Descriptor, callback: gpu.CreateRenderPipelineAsyncCallback, userdata: ?*anyopaque) void { + T.deviceCreateRenderPipelineAsync(device, descriptor, callback, userdata); + } + + // WGPU_EXPORT WGPUSampler wgpuDeviceCreateSampler(WGPUDevice device, WGPUSamplerDescriptor const * descriptor /* nullable */); + export fn wgpuDeviceCreateSampler(device: *gpu.Device, descriptor: ?*const gpu.Sampler.Descriptor) *gpu.Sampler { + return T.deviceCreateSampler(device, descriptor); + } + + // 
WGPU_EXPORT WGPUShaderModule wgpuDeviceCreateShaderModule(WGPUDevice device, WGPUShaderModuleDescriptor const * descriptor); + export fn wgpuDeviceCreateShaderModule(device: *gpu.Device, descriptor: *const gpu.ShaderModule.Descriptor) *gpu.ShaderModule { + return T.deviceCreateShaderModule(device, descriptor); + } + + // WGPU_EXPORT WGPUSwapChain wgpuDeviceCreateSwapChain(WGPUDevice device, WGPUSurface surface /* nullable */, WGPUSwapChainDescriptor const * descriptor); + export fn wgpuDeviceCreateSwapChain(device: *gpu.Device, surface: ?*gpu.Surface, descriptor: *const gpu.SwapChain.Descriptor) *gpu.SwapChain { + return T.deviceCreateSwapChain(device, surface, descriptor); + } + + // WGPU_EXPORT WGPUTexture wgpuDeviceCreateTexture(WGPUDevice device, WGPUTextureDescriptor const * descriptor); + export fn wgpuDeviceCreateTexture(device: *gpu.Device, descriptor: *const gpu.Texture.Descriptor) *gpu.Texture { + return T.deviceCreateTexture(device, descriptor); + } + + // WGPU_EXPORT void wgpuDeviceDestroy(WGPUDevice device); + export fn wgpuDeviceDestroy(device: *gpu.Device) void { + T.deviceDestroy(device); + } + + // WGPU_EXPORT size_t wgpuDeviceEnumerateFeatures(WGPUDevice device, WGPUFeatureName * features); + export fn wgpuDeviceEnumerateFeatures(device: *gpu.Device, features: ?[*]gpu.FeatureName) usize { + return T.deviceEnumerateFeatures(device, features); + } + + // WGPU_EXPORT WGPUBool wgpuDeviceGetLimits(WGPUDevice device, WGPUSupportedLimits * limits); + export fn wgpuDeviceGetLimits(device: *gpu.Device, limits: *gpu.SupportedLimits) u32 { + return T.deviceGetLimits(device, limits); + } + + // WGPU_EXPORT WGPUSharedFence wgpuDeviceImportSharedFence(WGPUDevice device, WGPUSharedFenceDescriptor const * descriptor); + export fn wgpuDeviceImportSharedFence(device: *gpu.Device, descriptor: *const gpu.SharedFence.Descriptor) *gpu.SharedFence { + return T.deviceImportSharedFence(device, descriptor); + } + + // WGPU_EXPORT WGPUSharedTextureMemory wgpuDeviceImportSharedTextureMemory(WGPUDevice device, WGPUSharedTextureMemoryDescriptor const * descriptor); + export fn wgpuDeviceImportSharedTextureMemory(device: *gpu.Device, descriptor: *const gpu.SharedTextureMemory.Descriptor) *gpu.SharedTextureMemory { + return T.deviceImportSharedTextureMemory(device, descriptor); + } + + // WGPU_EXPORT WGPUQueue wgpuDeviceGetQueue(WGPUDevice device); + export fn wgpuDeviceGetQueue(device: *gpu.Device) *gpu.Queue { + return T.deviceGetQueue(device); + } + + // WGPU_EXPORT WGPUBool wgpuDeviceHasFeature(WGPUDevice device, WGPUFeatureName feature); + export fn wgpuDeviceHasFeature(device: *gpu.Device, feature: gpu.FeatureName) u32 { + return T.deviceHasFeature(device, feature); + } + + // WGPU_EXPORT void wgpuDeviceInjectError(WGPUDevice device, WGPUErrorType type, char const * message); + export fn wgpuDeviceInjectError(device: *gpu.Device, typ: gpu.ErrorType, message: [*:0]const u8) void { + T.deviceInjectError(device, typ, message); + } + + // WGPU_EXPORT void wgpuDevicePopErrorScope(WGPUDevice device, WGPUErrorCallback callback, void * userdata); + export fn wgpuDevicePopErrorScope(device: *gpu.Device, callback: gpu.ErrorCallback, userdata: ?*anyopaque) void { + T.devicePopErrorScope(device, callback, userdata); + } + + // WGPU_EXPORT void wgpuDevicePushErrorScope(WGPUDevice device, WGPUErrorFilter filter); + export fn wgpuDevicePushErrorScope(device: *gpu.Device, filter: gpu.ErrorFilter) void { + T.devicePushErrorScope(device, filter); + } + + // TODO: dawn: callback not marked as nullable in dawn.json but
in fact is. + // WGPU_EXPORT void wgpuDeviceSetDeviceLostCallback(WGPUDevice device, WGPUDeviceLostCallback callback, void * userdata); + export fn wgpuDeviceSetDeviceLostCallback(device: *gpu.Device, callback: ?gpu.Device.LostCallback, userdata: ?*anyopaque) void { + T.deviceSetDeviceLostCallback(device, callback, userdata); + } + + // WGPU_EXPORT void wgpuDeviceSetLabel(WGPUDevice device, char const * label); + export fn wgpuDeviceSetLabel(device: *gpu.Device, label: [*:0]const u8) void { + T.deviceSetLabel(device, label); + } + + // TODO: dawn: callback not marked as nullable in dawn.json but in fact is. + // WGPU_EXPORT void wgpuDeviceSetLoggingCallback(WGPUDevice device, WGPULoggingCallback callback, void * userdata); + export fn wgpuDeviceSetLoggingCallback(device: *gpu.Device, callback: ?gpu.LoggingCallback, userdata: ?*anyopaque) void { + T.deviceSetLoggingCallback(device, callback, userdata); + } + + // TODO: dawn: callback not marked as nullable in dawn.json but in fact is. + // WGPU_EXPORT void wgpuDeviceSetUncapturedErrorCallback(WGPUDevice device, WGPUErrorCallback callback, void * userdata); + export fn wgpuDeviceSetUncapturedErrorCallback(device: *gpu.Device, callback: ?gpu.ErrorCallback, userdata: ?*anyopaque) void { + T.deviceSetUncapturedErrorCallback(device, callback, userdata); + } + + // WGPU_EXPORT void wgpuDeviceTick(WGPUDevice device); + export fn wgpuDeviceTick(device: *gpu.Device) void { + T.deviceTick(device); + } + + // WGPU_EXPORT void wgpuMachDeviceWaitForCommandsToBeScheduled(WGPUDevice device); + export fn wgpuMachDeviceWaitForCommandsToBeScheduled(device: *gpu.Device) void { + T.machDeviceWaitForCommandsToBeScheduled(device); + } + + // WGPU_EXPORT void wgpuDeviceReference(WGPUDevice device); + export fn wgpuDeviceReference(device: *gpu.Device) void { + T.deviceReference(device); + } + + // WGPU_EXPORT void wgpuDeviceRelease(WGPUDevice device); + export fn wgpuDeviceRelease(device: *gpu.Device) void { + T.deviceRelease(device); + } + + // WGPU_EXPORT void wgpuExternalTextureDestroy(WGPUExternalTexture externalTexture); + export fn wgpuExternalTextureDestroy(external_texture: *gpu.ExternalTexture) void { + T.externalTextureDestroy(external_texture); + } + + // WGPU_EXPORT void wgpuExternalTextureSetLabel(WGPUExternalTexture externalTexture, char const * label); + export fn wgpuExternalTextureSetLabel(external_texture: *gpu.ExternalTexture, label: [*:0]const u8) void { + T.externalTextureSetLabel(external_texture, label); + } + + // WGPU_EXPORT void wgpuExternalTextureReference(WGPUExternalTexture externalTexture); + export fn wgpuExternalTextureReference(external_texture: *gpu.ExternalTexture) void { + T.externalTextureReference(external_texture); + } + + // WGPU_EXPORT void wgpuExternalTextureRelease(WGPUExternalTexture externalTexture); + export fn wgpuExternalTextureRelease(external_texture: *gpu.ExternalTexture) void { + T.externalTextureRelease(external_texture); + } + + // WGPU_EXPORT WGPUSurface wgpuInstanceCreateSurface(WGPUInstance instance, WGPUSurfaceDescriptor const * descriptor); + export fn wgpuInstanceCreateSurface(instance: *gpu.Instance, descriptor: *const gpu.Surface.Descriptor) *gpu.Surface { + return T.instanceCreateSurface(instance, descriptor); + } + + // WGPU_EXPORT void wgpuInstanceProcessEvents(WGPUInstance instance); + export fn wgpuInstanceProcessEvents(instance: *gpu.Instance) void { + T.instanceProcessEvents(instance); + } + + // WGPU_EXPORT void wgpuInstanceRequestAdapter(WGPUInstance instance, WGPURequestAdapterOptions const *
options /* nullable */, WGPURequestAdapterCallback callback, void * userdata); + export fn wgpuInstanceRequestAdapter(instance: *gpu.Instance, options: ?*const gpu.RequestAdapterOptions, callback: gpu.RequestAdapterCallback, userdata: ?*anyopaque) void { + T.instanceRequestAdapter(instance, options, callback, userdata); + } + + // WGPU_EXPORT void wgpuInstanceReference(WGPUInstance instance); + export fn wgpuInstanceReference(instance: *gpu.Instance) void { + T.instanceReference(instance); + } + + // WGPU_EXPORT void wgpuInstanceRelease(WGPUInstance instance); + export fn wgpuInstanceRelease(instance: *gpu.Instance) void { + T.instanceRelease(instance); + } + + // WGPU_EXPORT void wgpuPipelineLayoutSetLabel(WGPUPipelineLayout pipelineLayout, char const * label); + export fn wgpuPipelineLayoutSetLabel(pipeline_layout: *gpu.PipelineLayout, label: [*:0]const u8) void { + T.pipelineLayoutSetLabel(pipeline_layout, label); + } + + // WGPU_EXPORT void wgpuPipelineLayoutReference(WGPUPipelineLayout pipelineLayout); + export fn wgpuPipelineLayoutReference(pipeline_layout: *gpu.PipelineLayout) void { + T.pipelineLayoutReference(pipeline_layout); + } + + // WGPU_EXPORT void wgpuPipelineLayoutRelease(WGPUPipelineLayout pipelineLayout); + export fn wgpuPipelineLayoutRelease(pipeline_layout: *gpu.PipelineLayout) void { + T.pipelineLayoutRelease(pipeline_layout); + } + + // WGPU_EXPORT void wgpuQuerySetDestroy(WGPUQuerySet querySet); + export fn wgpuQuerySetDestroy(query_set: *gpu.QuerySet) void { + T.querySetDestroy(query_set); + } + + // WGPU_EXPORT uint32_t wgpuQuerySetGetCount(WGPUQuerySet querySet); + export fn wgpuQuerySetGetCount(query_set: *gpu.QuerySet) u32 { + return T.querySetGetCount(query_set); + } + + // WGPU_EXPORT WGPUQueryType wgpuQuerySetGetType(WGPUQuerySet querySet); + export fn wgpuQuerySetGetType(query_set: *gpu.QuerySet) gpu.QueryType { + return T.querySetGetType(query_set); + } + + // WGPU_EXPORT void wgpuQuerySetSetLabel(WGPUQuerySet querySet, char const * label); + export fn wgpuQuerySetSetLabel(query_set: *gpu.QuerySet, label: [*:0]const u8) void { + T.querySetSetLabel(query_set, label); + } + + // WGPU_EXPORT void wgpuQuerySetReference(WGPUQuerySet querySet); + export fn wgpuQuerySetReference(query_set: *gpu.QuerySet) void { + T.querySetReference(query_set); + } + + // WGPU_EXPORT void wgpuQuerySetRelease(WGPUQuerySet querySet); + export fn wgpuQuerySetRelease(query_set: *gpu.QuerySet) void { + T.querySetRelease(query_set); + } + + // WGPU_EXPORT void wgpuQueueCopyTextureForBrowser(WGPUQueue queue, WGPUImageCopyTexture const * source, WGPUImageCopyTexture const * destination, WGPUExtent3D const * copySize, WGPUCopyTextureForBrowserOptions const * options); + export fn wgpuQueueCopyTextureForBrowser(queue: *gpu.Queue, source: *const gpu.ImageCopyTexture, destination: *const gpu.ImageCopyTexture, copy_size: *const gpu.Extent3D, options: *const gpu.CopyTextureForBrowserOptions) void { + T.queueCopyTextureForBrowser(queue, source, destination, copy_size, options); + } + + // WGPU_EXPORT void wgpuQueueOnSubmittedWorkDone(WGPUQueue queue, uint64_t signalValue, WGPUQueueWorkDoneCallback callback, void * userdata); + export fn wgpuQueueOnSubmittedWorkDone(queue: *gpu.Queue, signal_value: u64, callback: gpu.Queue.WorkDoneCallback, userdata: ?*anyopaque) void { + T.queueOnSubmittedWorkDone(queue, signal_value, callback, userdata); + } + + // WGPU_EXPORT void wgpuQueueSetLabel(WGPUQueue queue, char const * label); + export fn wgpuQueueSetLabel(queue: *gpu.Queue, label: [*:0]const u8) 
void { + T.queueSetLabel(queue, label); + } + + // WGPU_EXPORT void wgpuQueueSubmit(WGPUQueue queue, size_t commandCount, WGPUCommandBuffer const * commands); + export fn wgpuQueueSubmit(queue: *gpu.Queue, command_count: usize, commands: [*]const *const gpu.CommandBuffer) void { + T.queueSubmit(queue, command_count, commands); + } + + // WGPU_EXPORT void wgpuQueueWriteBuffer(WGPUQueue queue, WGPUBuffer buffer, uint64_t bufferOffset, void const * data, size_t size); + export fn wgpuQueueWriteBuffer(queue: *gpu.Queue, buffer: *gpu.Buffer, buffer_offset: u64, data: *const anyopaque, size: usize) void { + T.queueWriteBuffer(queue, buffer, buffer_offset, data, size); + } + + // WGPU_EXPORT void wgpuQueueWriteTexture(WGPUQueue queue, WGPUImageCopyTexture const * destination, void const * data, size_t dataSize, WGPUTextureDataLayout const * dataLayout, WGPUExtent3D const * writeSize); + export fn wgpuQueueWriteTexture(queue: *gpu.Queue, destination: *const gpu.ImageCopyTexture, data: *const anyopaque, data_size: usize, data_layout: *const gpu.Texture.DataLayout, write_size: *const gpu.Extent3D) void { + T.queueWriteTexture(queue, destination, data, data_size, data_layout, write_size); + } + + // WGPU_EXPORT void wgpuQueueReference(WGPUQueue queue); + export fn wgpuQueueReference(queue: *gpu.Queue) void { + T.queueReference(queue); + } + + // WGPU_EXPORT void wgpuQueueRelease(WGPUQueue queue); + export fn wgpuQueueRelease(queue: *gpu.Queue) void { + T.queueRelease(queue); + } + + // WGPU_EXPORT void wgpuRenderBundleSetLabel(WGPURenderBundle renderBundle, char const * label); + export fn wgpuRenderBundleSetLabel(render_bundle: *gpu.RenderBundle, label: [*:0]const u8) void { + T.renderBundleSetLabel(render_bundle, label); + } + + // WGPU_EXPORT void wgpuRenderBundleReference(WGPURenderBundle renderBundle); + export fn wgpuRenderBundleReference(render_bundle: *gpu.RenderBundle) void { + T.renderBundleReference(render_bundle); + } + + // WGPU_EXPORT void wgpuRenderBundleRelease(WGPURenderBundle renderBundle); + export fn wgpuRenderBundleRelease(render_bundle: *gpu.RenderBundle) void { + T.renderBundleRelease(render_bundle); + } + + // WGPU_EXPORT void wgpuRenderBundleEncoderDraw(WGPURenderBundleEncoder renderBundleEncoder, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance); + export fn wgpuRenderBundleEncoderDraw(render_bundle_encoder: *gpu.RenderBundleEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) void { + T.renderBundleEncoderDraw(render_bundle_encoder, vertex_count, instance_count, first_vertex, first_instance); + } + + // WGPU_EXPORT void wgpuRenderBundleEncoderDrawIndexed(WGPURenderBundleEncoder renderBundleEncoder, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance); + export fn wgpuRenderBundleEncoderDrawIndexed(render_bundle_encoder: *gpu.RenderBundleEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) void { + T.renderBundleEncoderDrawIndexed(render_bundle_encoder, index_count, instance_count, first_index, base_vertex, first_instance); + } + + // WGPU_EXPORT void wgpuRenderBundleEncoderDrawIndexedIndirect(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset); + export fn wgpuRenderBundleEncoderDrawIndexedIndirect(render_bundle_encoder: *gpu.RenderBundleEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) void { + 
T.renderBundleEncoderDrawIndexedIndirect(render_bundle_encoder, indirect_buffer, indirect_offset); + } + + // WGPU_EXPORT void wgpuRenderBundleEncoderDrawIndirect(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset); + export fn wgpuRenderBundleEncoderDrawIndirect(render_bundle_encoder: *gpu.RenderBundleEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) void { + T.renderBundleEncoderDrawIndirect(render_bundle_encoder, indirect_buffer, indirect_offset); + } + + // WGPU_EXPORT WGPURenderBundle wgpuRenderBundleEncoderFinish(WGPURenderBundleEncoder renderBundleEncoder, WGPURenderBundleDescriptor const * descriptor /* nullable */); + export fn wgpuRenderBundleEncoderFinish(render_bundle_encoder: *gpu.RenderBundleEncoder, descriptor: ?*const gpu.RenderBundle.Descriptor) *gpu.RenderBundle { + return T.renderBundleEncoderFinish(render_bundle_encoder, descriptor); + } + + // WGPU_EXPORT void wgpuRenderBundleEncoderInsertDebugMarker(WGPURenderBundleEncoder renderBundleEncoder, char const * markerLabel); + export fn wgpuRenderBundleEncoderInsertDebugMarker(render_bundle_encoder: *gpu.RenderBundleEncoder, marker_label: [*:0]const u8) void { + T.renderBundleEncoderInsertDebugMarker(render_bundle_encoder, marker_label); + } + + // WGPU_EXPORT void wgpuRenderBundleEncoderPopDebugGroup(WGPURenderBundleEncoder renderBundleEncoder); + export fn wgpuRenderBundleEncoderPopDebugGroup(render_bundle_encoder: *gpu.RenderBundleEncoder) void { + T.renderBundleEncoderPopDebugGroup(render_bundle_encoder); + } + + // WGPU_EXPORT void wgpuRenderBundleEncoderPushDebugGroup(WGPURenderBundleEncoder renderBundleEncoder, char const * groupLabel); + export fn wgpuRenderBundleEncoderPushDebugGroup(render_bundle_encoder: *gpu.RenderBundleEncoder, group_label: [*:0]const u8) void { + T.renderBundleEncoderPushDebugGroup(render_bundle_encoder, group_label); + } + + // WGPU_EXPORT void wgpuRenderBundleEncoderSetBindGroup(WGPURenderBundleEncoder renderBundleEncoder, uint32_t groupIndex, WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets); + export fn wgpuRenderBundleEncoderSetBindGroup(render_bundle_encoder: *gpu.RenderBundleEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void { + T.renderBundleEncoderSetBindGroup(render_bundle_encoder, group_index, group, dynamic_offset_count, dynamic_offsets); + } + + // WGPU_EXPORT void wgpuRenderBundleEncoderSetIndexBuffer(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer buffer, WGPUIndexFormat format, uint64_t offset, uint64_t size); + export fn wgpuRenderBundleEncoderSetIndexBuffer(render_bundle_encoder: *gpu.RenderBundleEncoder, buffer: *gpu.Buffer, format: gpu.IndexFormat, offset: u64, size: u64) void { + T.renderBundleEncoderSetIndexBuffer(render_bundle_encoder, buffer, format, offset, size); + } + + // WGPU_EXPORT void wgpuRenderBundleEncoderSetLabel(WGPURenderBundleEncoder renderBundleEncoder, char const * label); + export fn wgpuRenderBundleEncoderSetLabel(render_bundle_encoder: *gpu.RenderBundleEncoder, label: [*:0]const u8) void { + T.renderBundleEncoderSetLabel(render_bundle_encoder, label); + } + + // WGPU_EXPORT void wgpuRenderBundleEncoderSetPipeline(WGPURenderBundleEncoder renderBundleEncoder, WGPURenderPipeline pipeline); + export fn wgpuRenderBundleEncoderSetPipeline(render_bundle_encoder: *gpu.RenderBundleEncoder, pipeline: *gpu.RenderPipeline) void { + T.renderBundleEncoderSetPipeline(render_bundle_encoder, pipeline); + } + + 
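+ // (Editor's note: illustrative sketch only, not part of the moved mach-gpu
+ // code.) `Interface(T)` comptime-generates each of these `export fn wgpu*`
+ // C-ABI symbols and forwards it to the same-named method on `T`, so a
+ // backend only supplies plain Zig functions. The `Backend` and `Impl` names
+ // below are hypothetical:
+ //
+ //     const Backend = struct {
+ //         pub inline fn renderBundleEncoderSetPipeline(
+ //             render_bundle_encoder: *gpu.RenderBundleEncoder,
+ //             pipeline: *gpu.RenderPipeline,
+ //         ) void {
+ //             // hand off to the native WebGPU implementation here
+ //         }
+ //         // ...one method per wgpu* export generated by this file...
+ //     };
+ //
+ //     // One comptime instantiation then exports the whole wgpu* C ABI:
+ //     pub const Impl = Interface(Backend);
+ //
+ // `StubInterface` at the end of this file follows the same pattern, passing
+ // a struct whose methods all hit `unreachable`, for builds with no real
+ // backend linked in.
+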
// WGPU_EXPORT void wgpuRenderBundleEncoderSetVertexBuffer(WGPURenderBundleEncoder renderBundleEncoder, uint32_t slot, WGPUBuffer buffer, uint64_t offset, uint64_t size); + export fn wgpuRenderBundleEncoderSetVertexBuffer(render_bundle_encoder: *gpu.RenderBundleEncoder, slot: u32, buffer: *gpu.Buffer, offset: u64, size: u64) void { + T.renderBundleEncoderSetVertexBuffer(render_bundle_encoder, slot, buffer, offset, size); + } + + // WGPU_EXPORT void wgpuRenderBundleEncoderReference(WGPURenderBundleEncoder renderBundleEncoder); + export fn wgpuRenderBundleEncoderReference(render_bundle_encoder: *gpu.RenderBundleEncoder) void { + T.renderBundleEncoderReference(render_bundle_encoder); + } + + // WGPU_EXPORT void wgpuRenderBundleEncoderRelease(WGPURenderBundleEncoder renderBundleEncoder); + export fn wgpuRenderBundleEncoderRelease(render_bundle_encoder: *gpu.RenderBundleEncoder) void { + T.renderBundleEncoderRelease(render_bundle_encoder); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderBeginOcclusionQuery(WGPURenderPassEncoder renderPassEncoder, uint32_t queryIndex); + export fn wgpuRenderPassEncoderBeginOcclusionQuery(render_pass_encoder: *gpu.RenderPassEncoder, query_index: u32) void { + T.renderPassEncoderBeginOcclusionQuery(render_pass_encoder, query_index); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderDraw(WGPURenderPassEncoder renderPassEncoder, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance); + export fn wgpuRenderPassEncoderDraw(render_pass_encoder: *gpu.RenderPassEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) void { + T.renderPassEncoderDraw(render_pass_encoder, vertex_count, instance_count, first_vertex, first_instance); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderDrawIndexed(WGPURenderPassEncoder renderPassEncoder, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance); + export fn wgpuRenderPassEncoderDrawIndexed(render_pass_encoder: *gpu.RenderPassEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) void { + T.renderPassEncoderDrawIndexed(render_pass_encoder, index_count, instance_count, first_index, base_vertex, first_instance); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderDrawIndexedIndirect(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset); + export fn wgpuRenderPassEncoderDrawIndexedIndirect(render_pass_encoder: *gpu.RenderPassEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) void { + T.renderPassEncoderDrawIndexedIndirect(render_pass_encoder, indirect_buffer, indirect_offset); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderDrawIndirect(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset); + export fn wgpuRenderPassEncoderDrawIndirect(render_pass_encoder: *gpu.RenderPassEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) void { + T.renderPassEncoderDrawIndirect(render_pass_encoder, indirect_buffer, indirect_offset); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderEnd(WGPURenderPassEncoder renderPassEncoder); + export fn wgpuRenderPassEncoderEnd(render_pass_encoder: *gpu.RenderPassEncoder) void { + T.renderPassEncoderEnd(render_pass_encoder); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderEndOcclusionQuery(WGPURenderPassEncoder renderPassEncoder); + export fn wgpuRenderPassEncoderEndOcclusionQuery(render_pass_encoder: *gpu.RenderPassEncoder) void { + 
T.renderPassEncoderEndOcclusionQuery(render_pass_encoder); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderExecuteBundles(WGPURenderPassEncoder renderPassEncoder, size_t bundleCount, WGPURenderBundle const * bundles); + export fn wgpuRenderPassEncoderExecuteBundles(render_pass_encoder: *gpu.RenderPassEncoder, bundles_count: usize, bundles: [*]const *const gpu.RenderBundle) void { + T.renderPassEncoderExecuteBundles(render_pass_encoder, bundles_count, bundles); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderInsertDebugMarker(WGPURenderPassEncoder renderPassEncoder, char const * markerLabel); + export fn wgpuRenderPassEncoderInsertDebugMarker(render_pass_encoder: *gpu.RenderPassEncoder, marker_label: [*:0]const u8) void { + T.renderPassEncoderInsertDebugMarker(render_pass_encoder, marker_label); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderPopDebugGroup(WGPURenderPassEncoder renderPassEncoder); + export fn wgpuRenderPassEncoderPopDebugGroup(render_pass_encoder: *gpu.RenderPassEncoder) void { + T.renderPassEncoderPopDebugGroup(render_pass_encoder); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderPushDebugGroup(WGPURenderPassEncoder renderPassEncoder, char const * groupLabel); + export fn wgpuRenderPassEncoderPushDebugGroup(render_pass_encoder: *gpu.RenderPassEncoder, group_label: [*:0]const u8) void { + T.renderPassEncoderPushDebugGroup(render_pass_encoder, group_label); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderSetBindGroup(WGPURenderPassEncoder renderPassEncoder, uint32_t groupIndex, WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets); + export fn wgpuRenderPassEncoderSetBindGroup(render_pass_encoder: *gpu.RenderPassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void { + T.renderPassEncoderSetBindGroup(render_pass_encoder, group_index, group, dynamic_offset_count, dynamic_offsets); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderSetBlendConstant(WGPURenderPassEncoder renderPassEncoder, WGPUColor const * color); + export fn wgpuRenderPassEncoderSetBlendConstant(render_pass_encoder: *gpu.RenderPassEncoder, color: *const gpu.Color) void { + T.renderPassEncoderSetBlendConstant(render_pass_encoder, color); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderSetIndexBuffer(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer buffer, WGPUIndexFormat format, uint64_t offset, uint64_t size); + export fn wgpuRenderPassEncoderSetIndexBuffer(render_pass_encoder: *gpu.RenderPassEncoder, buffer: *gpu.Buffer, format: gpu.IndexFormat, offset: u64, size: u64) void { + T.renderPassEncoderSetIndexBuffer(render_pass_encoder, buffer, format, offset, size); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderSetLabel(WGPURenderPassEncoder renderPassEncoder, char const * label); + export fn wgpuRenderPassEncoderSetLabel(render_pass_encoder: *gpu.RenderPassEncoder, label: [*:0]const u8) void { + T.renderPassEncoderSetLabel(render_pass_encoder, label); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderSetPipeline(WGPURenderPassEncoder renderPassEncoder, WGPURenderPipeline pipeline); + export fn wgpuRenderPassEncoderSetPipeline(render_pass_encoder: *gpu.RenderPassEncoder, pipeline: *gpu.RenderPipeline) void { + T.renderPassEncoderSetPipeline(render_pass_encoder, pipeline); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderSetScissorRect(WGPURenderPassEncoder renderPassEncoder, uint32_t x, uint32_t y, uint32_t width, uint32_t height); + export fn wgpuRenderPassEncoderSetScissorRect(render_pass_encoder: 
*gpu.RenderPassEncoder, x: u32, y: u32, width: u32, height: u32) void { + T.renderPassEncoderSetScissorRect(render_pass_encoder, x, y, width, height); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderSetStencilReference(WGPURenderPassEncoder renderPassEncoder, uint32_t reference); + export fn wgpuRenderPassEncoderSetStencilReference(render_pass_encoder: *gpu.RenderPassEncoder, reference: u32) void { + T.renderPassEncoderSetStencilReference(render_pass_encoder, reference); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderSetVertexBuffer(WGPURenderPassEncoder renderPassEncoder, uint32_t slot, WGPUBuffer buffer, uint64_t offset, uint64_t size); + export fn wgpuRenderPassEncoderSetVertexBuffer(render_pass_encoder: *gpu.RenderPassEncoder, slot: u32, buffer: *gpu.Buffer, offset: u64, size: u64) void { + T.renderPassEncoderSetVertexBuffer(render_pass_encoder, slot, buffer, offset, size); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderSetViewport(WGPURenderPassEncoder renderPassEncoder, float x, float y, float width, float height, float minDepth, float maxDepth); + export fn wgpuRenderPassEncoderSetViewport(render_pass_encoder: *gpu.RenderPassEncoder, x: f32, y: f32, width: f32, height: f32, min_depth: f32, max_depth: f32) void { + T.renderPassEncoderSetViewport(render_pass_encoder, x, y, width, height, min_depth, max_depth); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderWriteTimestamp(WGPURenderPassEncoder renderPassEncoder, WGPUQuerySet querySet, uint32_t queryIndex); + export fn wgpuRenderPassEncoderWriteTimestamp(render_pass_encoder: *gpu.RenderPassEncoder, query_set: *gpu.QuerySet, query_index: u32) void { + T.renderPassEncoderWriteTimestamp(render_pass_encoder, query_set, query_index); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderReference(WGPURenderPassEncoder renderPassEncoder); + export fn wgpuRenderPassEncoderReference(render_pass_encoder: *gpu.RenderPassEncoder) void { + T.renderPassEncoderReference(render_pass_encoder); + } + + // WGPU_EXPORT void wgpuRenderPassEncoderRelease(WGPURenderPassEncoder renderPassEncoder); + export fn wgpuRenderPassEncoderRelease(render_pass_encoder: *gpu.RenderPassEncoder) void { + T.renderPassEncoderRelease(render_pass_encoder); + } + + // WGPU_EXPORT WGPUBindGroupLayout wgpuRenderPipelineGetBindGroupLayout(WGPURenderPipeline renderPipeline, uint32_t groupIndex); + export fn wgpuRenderPipelineGetBindGroupLayout(render_pipeline: *gpu.RenderPipeline, group_index: u32) *gpu.BindGroupLayout { + return T.renderPipelineGetBindGroupLayout(render_pipeline, group_index); + } + + // WGPU_EXPORT void wgpuRenderPipelineSetLabel(WGPURenderPipeline renderPipeline, char const * label); + export fn wgpuRenderPipelineSetLabel(render_pipeline: *gpu.RenderPipeline, label: [*:0]const u8) void { + T.renderPipelineSetLabel(render_pipeline, label); + } + + // WGPU_EXPORT void wgpuRenderPipelineReference(WGPURenderPipeline renderPipeline); + export fn wgpuRenderPipelineReference(render_pipeline: *gpu.RenderPipeline) void { + T.renderPipelineReference(render_pipeline); + } + + // WGPU_EXPORT void wgpuRenderPipelineRelease(WGPURenderPipeline renderPipeline); + export fn wgpuRenderPipelineRelease(render_pipeline: *gpu.RenderPipeline) void { + T.renderPipelineRelease(render_pipeline); + } + + // WGPU_EXPORT void wgpuSamplerSetLabel(WGPUSampler sampler, char const * label); + export fn wgpuSamplerSetLabel(sampler: *gpu.Sampler, label: [*:0]const u8) void { + T.samplerSetLabel(sampler, label); + } + + // WGPU_EXPORT void wgpuSamplerReference(WGPUSampler sampler); + export fn 
wgpuSamplerReference(sampler: *gpu.Sampler) void { + T.samplerReference(sampler); + } + + // WGPU_EXPORT void wgpuSamplerRelease(WGPUSampler sampler); + export fn wgpuSamplerRelease(sampler: *gpu.Sampler) void { + T.samplerRelease(sampler); + } + + // WGPU_EXPORT void wgpuShaderModuleGetCompilationInfo(WGPUShaderModule shaderModule, WGPUCompilationInfoCallback callback, void * userdata); + export fn wgpuShaderModuleGetCompilationInfo(shader_module: *gpu.ShaderModule, callback: gpu.CompilationInfoCallback, userdata: ?*anyopaque) void { + T.shaderModuleGetCompilationInfo(shader_module, callback, userdata); + } + + // WGPU_EXPORT void wgpuShaderModuleSetLabel(WGPUShaderModule shaderModule, char const * label); + export fn wgpuShaderModuleSetLabel(shader_module: *gpu.ShaderModule, label: [*:0]const u8) void { + T.shaderModuleSetLabel(shader_module, label); + } + + // WGPU_EXPORT void wgpuShaderModuleReference(WGPUShaderModule shaderModule); + export fn wgpuShaderModuleReference(shader_module: *gpu.ShaderModule) void { + T.shaderModuleReference(shader_module); + } + + // WGPU_EXPORT void wgpuShaderModuleRelease(WGPUShaderModule shaderModule); + export fn wgpuShaderModuleRelease(shader_module: *gpu.ShaderModule) void { + T.shaderModuleRelease(shader_module); + } + + // WGPU_EXPORT void wgpuSharedFenceExportInfo(WGPUSharedFence sharedFence, WGPUSharedFenceExportInfo * info); + export fn wgpuSharedFenceExportInfo(shared_fence: *gpu.SharedFence, info: *gpu.SharedFence.ExportInfo) void { + T.sharedFenceExportInfo(shared_fence, info); + } + + // WGPU_EXPORT void wgpuSharedFenceReference(WGPUSharedFence sharedFence); + export fn wgpuSharedFenceReference(shared_fence: *gpu.SharedFence) void { + T.sharedFenceReference(shared_fence); + } + + // WGPU_EXPORT void wgpuSharedFenceRelease(WGPUSharedFence sharedFence); + export fn wgpuSharedFenceRelease(shared_fence: *gpu.SharedFence) void { + T.sharedFenceRelease(shared_fence); + } + + // WGPU_EXPORT void wgpuSharedTextureMemoryBeginAccess(WGPUSharedTextureMemory sharedTextureMemory, WGPUTexture texture, WGPUSharedTextureMemoryBeginAccessDescriptor const * descriptor); + export fn wgpuSharedTextureMemoryBeginAccess(shared_texture_memory: *gpu.SharedTextureMemory, texture: *gpu.Texture, descriptor: *const gpu.SharedTextureMemory.BeginAccessDescriptor) void { + T.sharedTextureMemoryBeginAccess(shared_texture_memory, texture, descriptor); + } + + // WGPU_EXPORT WGPUTexture wgpuSharedTextureMemoryCreateTexture(WGPUSharedTextureMemory sharedTextureMemory, WGPUTextureDescriptor const * descriptor); + export fn wgpuSharedTextureMemoryCreateTexture(shared_texture_memory: *gpu.SharedTextureMemory, descriptor: *const gpu.Texture.Descriptor) *gpu.Texture { + return T.sharedTextureMemoryCreateTexture(shared_texture_memory, descriptor); + } + + // WGPU_EXPORT void wgpuSharedTextureMemoryEndAccess(WGPUSharedTextureMemory sharedTextureMemory, WGPUTexture texture, WGPUSharedTextureMemoryEndAccessState * descriptor); + export fn wgpuSharedTextureMemoryEndAccess(shared_texture_memory: *gpu.SharedTextureMemory, texture: *gpu.Texture, descriptor: *gpu.SharedTextureMemory.EndAccessState) void { + T.sharedTextureMemoryEndAccess(shared_texture_memory, texture, descriptor); + } + + // WGPU_EXPORT void wgpuSharedTextureMemoryEndAccessStateFreeMembers(WGPUSharedTextureMemoryEndAccessState value); + export fn wgpuSharedTextureMemoryEndAccessStateFreeMembers(value: gpu.SharedTextureMemory.EndAccessState) void { + T.sharedTextureMemoryEndAccessStateFreeMembers(value); + } + + // 
WGPU_EXPORT void wgpuSharedTextureMemoryGetProperties(WGPUSharedTextureMemory sharedTextureMemory, WGPUSharedTextureMemoryProperties * properties); + export fn wgpuSharedTextureMemoryGetProperties(shared_texture_memory: *gpu.SharedTextureMemory, properties: *gpu.SharedTextureMemory.Properties) void { + T.sharedTextureMemoryGetProperties(shared_texture_memory, properties); + } + + // WGPU_EXPORT void wgpuSharedTextureMemorySetLabel(WGPUSharedTextureMemory sharedTextureMemory, char const * label); + export fn wgpuSharedTextureMemorySetLabel(shared_texture_memory: *gpu.SharedTextureMemory, label: [*:0]const u8) void { + T.sharedTextureMemorySetLabel(shared_texture_memory, label); + } + + // WGPU_EXPORT void wgpuSharedTextureMemoryReference(WGPUSharedTextureMemory sharedTextureMemory); + export fn wgpuSharedTextureMemoryReference(shared_texture_memory: *gpu.SharedTextureMemory) void { + T.sharedTextureMemoryReference(shared_texture_memory); + } + + // WGPU_EXPORT void wgpuSharedTextureMemoryRelease(WGPUSharedTextureMemory sharedTextureMemory); + export fn wgpuSharedTextureMemoryRelease(shared_texture_memory: *gpu.SharedTextureMemory) void { + T.sharedTextureMemoryRelease(shared_texture_memory); + } + + // WGPU_EXPORT void wgpuSurfaceReference(WGPUSurface surface); + export fn wgpuSurfaceReference(surface: *gpu.Surface) void { + T.surfaceReference(surface); + } + + // WGPU_EXPORT void wgpuSurfaceRelease(WGPUSurface surface); + export fn wgpuSurfaceRelease(surface: *gpu.Surface) void { + T.surfaceRelease(surface); + } + + // WGPU_EXPORT WGPUTexture wgpuSwapChainGetCurrentTexture(WGPUSwapChain swapChain); + export fn wgpuSwapChainGetCurrentTexture(swap_chain: *gpu.SwapChain) ?*gpu.Texture { + return T.swapChainGetCurrentTexture(swap_chain); + } + + // WGPU_EXPORT WGPUTextureView wgpuSwapChainGetCurrentTextureView(WGPUSwapChain swapChain); + export fn wgpuSwapChainGetCurrentTextureView(swap_chain: *gpu.SwapChain) ?*gpu.TextureView { + return T.swapChainGetCurrentTextureView(swap_chain); + } + + // WGPU_EXPORT void wgpuSwapChainPresent(WGPUSwapChain swapChain); + export fn wgpuSwapChainPresent(swap_chain: *gpu.SwapChain) void { + T.swapChainPresent(swap_chain); + } + + // WGPU_EXPORT void wgpuSwapChainReference(WGPUSwapChain swapChain); + export fn wgpuSwapChainReference(swap_chain: *gpu.SwapChain) void { + T.swapChainReference(swap_chain); + } + + // WGPU_EXPORT void wgpuSwapChainRelease(WGPUSwapChain swapChain); + export fn wgpuSwapChainRelease(swap_chain: *gpu.SwapChain) void { + T.swapChainRelease(swap_chain); + } + + // WGPU_EXPORT WGPUTextureView wgpuTextureCreateView(WGPUTexture texture, WGPUTextureViewDescriptor const * descriptor /* nullable */); + export fn wgpuTextureCreateView(texture: *gpu.Texture, descriptor: ?*const gpu.TextureView.Descriptor) *gpu.TextureView { + return T.textureCreateView(texture, descriptor); + } + + // WGPU_EXPORT void wgpuTextureDestroy(WGPUTexture texture); + export fn wgpuTextureDestroy(texture: *gpu.Texture) void { + T.textureDestroy(texture); + } + + // WGPU_EXPORT uint32_t wgpuTextureGetDepthOrArrayLayers(WGPUTexture texture); + export fn wgpuTextureGetDepthOrArrayLayers(texture: *gpu.Texture) u32 { + return T.textureGetDepthOrArrayLayers(texture); + } + + // WGPU_EXPORT WGPUTextureDimension wgpuTextureGetDimension(WGPUTexture texture); + export fn wgpuTextureGetDimension(texture: *gpu.Texture) gpu.Texture.Dimension { + return T.textureGetDimension(texture); + } + + // WGPU_EXPORT WGPUTextureFormat wgpuTextureGetFormat(WGPUTexture texture); + export fn 
wgpuTextureGetFormat(texture: *gpu.Texture) gpu.Texture.Format { + return T.textureGetFormat(texture); + } + + // WGPU_EXPORT uint32_t wgpuTextureGetHeight(WGPUTexture texture); + export fn wgpuTextureGetHeight(texture: *gpu.Texture) u32 { + return T.textureGetHeight(texture); + } + + // WGPU_EXPORT uint32_t wgpuTextureGetMipLevelCount(WGPUTexture texture); + export fn wgpuTextureGetMipLevelCount(texture: *gpu.Texture) u32 { + return T.textureGetMipLevelCount(texture); + } + + // WGPU_EXPORT uint32_t wgpuTextureGetSampleCount(WGPUTexture texture); + export fn wgpuTextureGetSampleCount(texture: *gpu.Texture) u32 { + return T.textureGetSampleCount(texture); + } + + // WGPU_EXPORT WGPUTextureUsage wgpuTextureGetUsage(WGPUTexture texture); + export fn wgpuTextureGetUsage(texture: *gpu.Texture) gpu.Texture.UsageFlags { + return T.textureGetUsage(texture); + } + + // WGPU_EXPORT uint32_t wgpuTextureGetWidth(WGPUTexture texture); + export fn wgpuTextureGetWidth(texture: *gpu.Texture) u32 { + return T.textureGetWidth(texture); + } + + // WGPU_EXPORT void wgpuTextureSetLabel(WGPUTexture texture, char const * label); + export fn wgpuTextureSetLabel(texture: *gpu.Texture, label: [*:0]const u8) void { + T.textureSetLabel(texture, label); + } + + // WGPU_EXPORT void wgpuTextureReference(WGPUTexture texture); + export fn wgpuTextureReference(texture: *gpu.Texture) void { + T.textureReference(texture); + } + + // WGPU_EXPORT void wgpuTextureRelease(WGPUTexture texture); + export fn wgpuTextureRelease(texture: *gpu.Texture) void { + T.textureRelease(texture); + } + + // WGPU_EXPORT void wgpuTextureViewSetLabel(WGPUTextureView textureView, char const * label); + export fn wgpuTextureViewSetLabel(texture_view: *gpu.TextureView, label: [*:0]const u8) void { + T.textureViewSetLabel(texture_view, label); + } + + // WGPU_EXPORT void wgpuTextureViewReference(WGPUTextureView textureView); + export fn wgpuTextureViewReference(texture_view: *gpu.TextureView) void { + T.textureViewReference(texture_view); + } + + // WGPU_EXPORT void wgpuTextureViewRelease(WGPUTextureView textureView); + export fn wgpuTextureViewRelease(texture_view: *gpu.TextureView) void { + T.textureViewRelease(texture_view); + } + }; +} + +/// A stub gpu.Interface in which every function is implemented by `unreachable;` +pub const StubInterface = Interface(struct { + pub inline fn createInstance(descriptor: ?*const gpu.Instance.Descriptor) ?*gpu.Instance { + _ = descriptor; + unreachable; + } + + pub inline fn getProcAddress(device: *gpu.Device, proc_name: [*:0]const u8) ?gpu.Proc { + _ = device; + _ = proc_name; + unreachable; + } + + pub inline fn adapterCreateDevice(adapter: *gpu.Adapter, descriptor: ?*const gpu.Device.Descriptor) ?*gpu.Device { + _ = adapter; + _ = descriptor; + unreachable; + } + + pub inline fn adapterEnumerateFeatures(adapter: *gpu.Adapter, features: ?[*]gpu.FeatureName) usize { + _ = adapter; + _ = features; + unreachable; + } + + pub inline fn adapterGetInstance(adapter: *gpu.Adapter) *gpu.Instance { + _ = adapter; + unreachable; + } + + pub inline fn adapterGetLimits(adapter: *gpu.Adapter, limits: *gpu.SupportedLimits) u32 { + _ = adapter; + _ = limits; + unreachable; + } + + pub inline fn adapterGetProperties(adapter: *gpu.Adapter, properties: *gpu.Adapter.Properties) void { + _ = adapter; + _ = properties; + unreachable; + } + + pub inline fn adapterHasFeature(adapter: *gpu.Adapter, feature: gpu.FeatureName) u32 { + _ = adapter; + _ = feature; + unreachable; + } + + pub inline fn adapterPropertiesFreeMembers(value: 
gpu.Adapter.Properties) void { + _ = value; + unreachable; + } + + pub inline fn adapterRequestDevice(adapter: *gpu.Adapter, descriptor: ?*const gpu.Device.Descriptor, callback: gpu.RequestDeviceCallback, userdata: ?*anyopaque) void { + _ = adapter; + _ = descriptor; + _ = callback; + _ = userdata; + unreachable; + } + + pub inline fn adapterReference(adapter: *gpu.Adapter) void { + _ = adapter; + unreachable; + } + + pub inline fn adapterRelease(adapter: *gpu.Adapter) void { + _ = adapter; + unreachable; + } + + pub inline fn bindGroupSetLabel(bind_group: *gpu.BindGroup, label: [*:0]const u8) void { + _ = bind_group; + _ = label; + unreachable; + } + + pub inline fn bindGroupReference(bind_group: *gpu.BindGroup) void { + _ = bind_group; + unreachable; + } + + pub inline fn bindGroupRelease(bind_group: *gpu.BindGroup) void { + _ = bind_group; + unreachable; + } + + pub inline fn bindGroupLayoutSetLabel(bind_group_layout: *gpu.BindGroupLayout, label: [*:0]const u8) void { + _ = bind_group_layout; + _ = label; + unreachable; + } + + pub inline fn bindGroupLayoutReference(bind_group_layout: *gpu.BindGroupLayout) void { + _ = bind_group_layout; + unreachable; + } + + pub inline fn bindGroupLayoutRelease(bind_group_layout: *gpu.BindGroupLayout) void { + _ = bind_group_layout; + unreachable; + } + + pub inline fn bufferDestroy(buffer: *gpu.Buffer) void { + _ = buffer; + unreachable; + } + + // TODO: dawn: return value not marked as nullable in dawn.json but in fact is. + pub inline fn bufferGetConstMappedRange(buffer: *gpu.Buffer, offset: usize, size: usize) ?*const anyopaque { + _ = buffer; + _ = offset; + _ = size; + unreachable; + } + + // TODO: dawn: return value not marked as nullable in dawn.json but in fact is. + pub inline fn bufferGetMappedRange(buffer: *gpu.Buffer, offset: usize, size: usize) ?*anyopaque { + _ = buffer; + _ = offset; + _ = size; + unreachable; + } + + pub inline fn bufferGetSize(buffer: *gpu.Buffer) u64 { + _ = buffer; + unreachable; + } + + pub inline fn bufferGetUsage(buffer: *gpu.Buffer) gpu.Buffer.UsageFlags { + _ = buffer; + unreachable; + } + + pub inline fn bufferMapAsync(buffer: *gpu.Buffer, mode: gpu.MapModeFlags, offset: usize, size: usize, callback: gpu.Buffer.MapCallback, userdata: ?*anyopaque) void { + _ = buffer; + _ = mode; + _ = offset; + _ = size; + _ = callback; + _ = userdata; + unreachable; + } + + pub inline fn bufferSetLabel(buffer: *gpu.Buffer, label: [*:0]const u8) void { + _ = buffer; + _ = label; + unreachable; + } + + pub inline fn bufferUnmap(buffer: *gpu.Buffer) void { + _ = buffer; + unreachable; + } + + pub inline fn bufferReference(buffer: *gpu.Buffer) void { + _ = buffer; + unreachable; + } + + pub inline fn bufferRelease(buffer: *gpu.Buffer) void { + _ = buffer; + unreachable; + } + + pub inline fn commandBufferSetLabel(command_buffer: *gpu.CommandBuffer, label: [*:0]const u8) void { + _ = command_buffer; + _ = label; + unreachable; + } + + pub inline fn commandBufferReference(command_buffer: *gpu.CommandBuffer) void { + _ = command_buffer; + unreachable; + } + + pub inline fn commandBufferRelease(command_buffer: *gpu.CommandBuffer) void { + _ = command_buffer; + unreachable; + } + + pub inline fn commandEncoderBeginComputePass(command_encoder: *gpu.CommandEncoder, descriptor: ?*const gpu.ComputePassDescriptor) *gpu.ComputePassEncoder { + _ = command_encoder; + _ = descriptor; + unreachable; + } + + pub inline fn commandEncoderBeginRenderPass(command_encoder: *gpu.CommandEncoder, descriptor: *const gpu.RenderPassDescriptor) 
*gpu.RenderPassEncoder { + _ = command_encoder; + _ = descriptor; + unreachable; + } + + pub inline fn commandEncoderClearBuffer(command_encoder: *gpu.CommandEncoder, buffer: *gpu.Buffer, offset: u64, size: u64) void { + _ = command_encoder; + _ = buffer; + _ = offset; + _ = size; + unreachable; + } + + pub inline fn commandEncoderCopyBufferToBuffer(command_encoder: *gpu.CommandEncoder, source: *gpu.Buffer, source_offset: u64, destination: *gpu.Buffer, destination_offset: u64, size: u64) void { + _ = command_encoder; + _ = source; + _ = source_offset; + _ = destination; + _ = destination_offset; + _ = size; + unreachable; + } + + pub inline fn commandEncoderCopyBufferToTexture(command_encoder: *gpu.CommandEncoder, source: *const gpu.ImageCopyBuffer, destination: *const gpu.ImageCopyTexture, copy_size: *const gpu.Extent3D) void { + _ = command_encoder; + _ = source; + _ = destination; + _ = copy_size; + unreachable; + } + + pub inline fn commandEncoderCopyTextureToBuffer(command_encoder: *gpu.CommandEncoder, source: *const gpu.ImageCopyTexture, destination: *const gpu.ImageCopyBuffer, copy_size: *const gpu.Extent3D) void { + _ = command_encoder; + _ = source; + _ = destination; + _ = copy_size; + unreachable; + } + + pub inline fn commandEncoderCopyTextureToTexture(command_encoder: *gpu.CommandEncoder, source: *const gpu.ImageCopyTexture, destination: *const gpu.ImageCopyTexture, copy_size: *const gpu.Extent3D) void { + _ = command_encoder; + _ = source; + _ = destination; + _ = copy_size; + unreachable; + } + + pub inline fn commandEncoderFinish(command_encoder: *gpu.CommandEncoder, descriptor: ?*const gpu.CommandBuffer.Descriptor) *gpu.CommandBuffer { + _ = command_encoder; + _ = descriptor; + unreachable; + } + + pub inline fn commandEncoderInjectValidationError(command_encoder: *gpu.CommandEncoder, message: [*:0]const u8) void { + _ = command_encoder; + _ = message; + unreachable; + } + + pub inline fn commandEncoderInsertDebugMarker(command_encoder: *gpu.CommandEncoder, marker_label: [*:0]const u8) void { + _ = command_encoder; + _ = marker_label; + unreachable; + } + + pub inline fn commandEncoderPopDebugGroup(command_encoder: *gpu.CommandEncoder) void { + _ = command_encoder; + unreachable; + } + + pub inline fn commandEncoderPushDebugGroup(command_encoder: *gpu.CommandEncoder, group_label: [*:0]const u8) void { + _ = command_encoder; + _ = group_label; + unreachable; + } + + pub inline fn commandEncoderResolveQuerySet(command_encoder: *gpu.CommandEncoder, query_set: *gpu.QuerySet, first_query: u32, query_count: u32, destination: *gpu.Buffer, destination_offset: u64) void { + _ = command_encoder; + _ = query_set; + _ = first_query; + _ = query_count; + _ = destination; + _ = destination_offset; + unreachable; + } + + pub inline fn commandEncoderSetLabel(command_encoder: *gpu.CommandEncoder, label: [*:0]const u8) void { + _ = command_encoder; + _ = label; + unreachable; + } + + pub inline fn commandEncoderWriteBuffer(command_encoder: *gpu.CommandEncoder, buffer: *gpu.Buffer, buffer_offset: u64, data: [*]const u8, size: u64) void { + _ = command_encoder; + _ = buffer; + _ = buffer_offset; + _ = data; + _ = size; + unreachable; + } + + pub inline fn commandEncoderWriteTimestamp(command_encoder: *gpu.CommandEncoder, query_set: *gpu.QuerySet, query_index: u32) void { + _ = command_encoder; + _ = query_set; + _ = query_index; + unreachable; + } + + pub inline fn commandEncoderReference(command_encoder: *gpu.CommandEncoder) void { + _ = command_encoder; + unreachable; + } + + pub inline fn 
commandEncoderRelease(command_encoder: *gpu.CommandEncoder) void { + _ = command_encoder; + unreachable; + } + + pub inline fn computePassEncoderDispatchWorkgroups(compute_pass_encoder: *gpu.ComputePassEncoder, workgroup_count_x: u32, workgroup_count_y: u32, workgroup_count_z: u32) void { + _ = compute_pass_encoder; + _ = workgroup_count_x; + _ = workgroup_count_y; + _ = workgroup_count_z; + unreachable; + } + + pub inline fn computePassEncoderDispatchWorkgroupsIndirect(compute_pass_encoder: *gpu.ComputePassEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) void { + _ = compute_pass_encoder; + _ = indirect_buffer; + _ = indirect_offset; + unreachable; + } + + pub inline fn computePassEncoderEnd(compute_pass_encoder: *gpu.ComputePassEncoder) void { + _ = compute_pass_encoder; + unreachable; + } + + pub inline fn computePassEncoderInsertDebugMarker(compute_pass_encoder: *gpu.ComputePassEncoder, marker_label: [*:0]const u8) void { + _ = compute_pass_encoder; + _ = marker_label; + unreachable; + } + + pub inline fn computePassEncoderPopDebugGroup(compute_pass_encoder: *gpu.ComputePassEncoder) void { + _ = compute_pass_encoder; + unreachable; + } + + pub inline fn computePassEncoderPushDebugGroup(compute_pass_encoder: *gpu.ComputePassEncoder, group_label: [*:0]const u8) void { + _ = compute_pass_encoder; + _ = group_label; + unreachable; + } + + pub inline fn computePassEncoderSetBindGroup(compute_pass_encoder: *gpu.ComputePassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void { + _ = compute_pass_encoder; + _ = group_index; + _ = group; + _ = dynamic_offset_count; + _ = dynamic_offsets; + unreachable; + } + + pub inline fn computePassEncoderSetLabel(compute_pass_encoder: *gpu.ComputePassEncoder, label: [*:0]const u8) void { + _ = compute_pass_encoder; + _ = label; + unreachable; + } + + pub inline fn computePassEncoderSetPipeline(compute_pass_encoder: *gpu.ComputePassEncoder, pipeline: *gpu.ComputePipeline) void { + _ = compute_pass_encoder; + _ = pipeline; + unreachable; + } + + pub inline fn computePassEncoderWriteTimestamp(compute_pass_encoder: *gpu.ComputePassEncoder, query_set: *gpu.QuerySet, query_index: u32) void { + _ = compute_pass_encoder; + _ = query_set; + _ = query_index; + unreachable; + } + + pub inline fn computePassEncoderReference(compute_pass_encoder: *gpu.ComputePassEncoder) void { + _ = compute_pass_encoder; + unreachable; + } + + pub inline fn computePassEncoderRelease(compute_pass_encoder: *gpu.ComputePassEncoder) void { + _ = compute_pass_encoder; + unreachable; + } + + pub inline fn computePipelineGetBindGroupLayout(compute_pipeline: *gpu.ComputePipeline, group_index: u32) *gpu.BindGroupLayout { + _ = compute_pipeline; + _ = group_index; + unreachable; + } + + pub inline fn computePipelineSetLabel(compute_pipeline: *gpu.ComputePipeline, label: [*:0]const u8) void { + _ = compute_pipeline; + _ = label; + unreachable; + } + + pub inline fn computePipelineReference(compute_pipeline: *gpu.ComputePipeline) void { + _ = compute_pipeline; + unreachable; + } + + pub inline fn computePipelineRelease(compute_pipeline: *gpu.ComputePipeline) void { + _ = compute_pipeline; + unreachable; + } + + pub inline fn deviceCreateBindGroup(device: *gpu.Device, descriptor: *const gpu.BindGroup.Descriptor) *gpu.BindGroup { + _ = device; + _ = descriptor; + unreachable; + } + + pub inline fn deviceCreateBindGroupLayout(device: *gpu.Device, descriptor: *const gpu.BindGroupLayout.Descriptor) *gpu.BindGroupLayout { + _ = 
device; + _ = descriptor; + unreachable; + } + + pub inline fn deviceCreateBuffer(device: *gpu.Device, descriptor: *const gpu.Buffer.Descriptor) *gpu.Buffer { + _ = device; + _ = descriptor; + unreachable; + } + + pub inline fn deviceCreateCommandEncoder(device: *gpu.Device, descriptor: ?*const gpu.CommandEncoder.Descriptor) *gpu.CommandEncoder { + _ = device; + _ = descriptor; + unreachable; + } + + pub inline fn deviceCreateComputePipeline(device: *gpu.Device, descriptor: *const gpu.ComputePipeline.Descriptor) *gpu.ComputePipeline { + _ = device; + _ = descriptor; + unreachable; + } + + pub inline fn deviceCreateComputePipelineAsync(device: *gpu.Device, descriptor: *const gpu.ComputePipeline.Descriptor, callback: gpu.CreateComputePipelineAsyncCallback, userdata: ?*anyopaque) void { + _ = device; + _ = descriptor; + _ = callback; + _ = userdata; + unreachable; + } + + pub inline fn deviceCreateErrorBuffer(device: *gpu.Device, descriptor: *const gpu.Buffer.Descriptor) *gpu.Buffer { + _ = device; + _ = descriptor; + unreachable; + } + + pub inline fn deviceCreateErrorExternalTexture(device: *gpu.Device) *gpu.ExternalTexture { + _ = device; + unreachable; + } + + pub inline fn deviceCreateErrorTexture(device: *gpu.Device, descriptor: *const gpu.Texture.Descriptor) *gpu.Texture { + _ = device; + _ = descriptor; + unreachable; + } + + pub inline fn deviceCreateExternalTexture(device: *gpu.Device, external_texture_descriptor: *const gpu.ExternalTexture.Descriptor) *gpu.ExternalTexture { + _ = device; + _ = external_texture_descriptor; + unreachable; + } + + pub inline fn deviceCreatePipelineLayout(device: *gpu.Device, pipeline_layout_descriptor: *const gpu.PipelineLayout.Descriptor) *gpu.PipelineLayout { + _ = device; + _ = pipeline_layout_descriptor; + unreachable; + } + + pub inline fn deviceCreateQuerySet(device: *gpu.Device, descriptor: *const gpu.QuerySet.Descriptor) *gpu.QuerySet { + _ = device; + _ = descriptor; + unreachable; + } + + pub inline fn deviceCreateRenderBundleEncoder(device: *gpu.Device, descriptor: *const gpu.RenderBundleEncoder.Descriptor) *gpu.RenderBundleEncoder { + _ = device; + _ = descriptor; + unreachable; + } + + pub inline fn deviceCreateRenderPipeline(device: *gpu.Device, descriptor: *const gpu.RenderPipeline.Descriptor) *gpu.RenderPipeline { + _ = device; + _ = descriptor; + unreachable; + } + + pub inline fn deviceCreateRenderPipelineAsync(device: *gpu.Device, descriptor: *const gpu.RenderPipeline.Descriptor, callback: gpu.CreateRenderPipelineAsyncCallback, userdata: ?*anyopaque) void { + _ = device; + _ = descriptor; + _ = callback; + _ = userdata; + unreachable; + } + + // TODO(self-hosted): this cannot be marked as inline for some reason. 
+ // https://github.com/ziglang/zig/issues/12545 + pub fn deviceCreateSampler(device: *gpu.Device, descriptor: ?*const gpu.Sampler.Descriptor) *gpu.Sampler { + _ = device; + _ = descriptor; + unreachable; + } + + pub inline fn deviceCreateShaderModule(device: *gpu.Device, descriptor: *const gpu.ShaderModule.Descriptor) *gpu.ShaderModule { + _ = device; + _ = descriptor; + unreachable; + } + + pub inline fn deviceCreateSwapChain(device: *gpu.Device, surface: ?*gpu.Surface, descriptor: *const gpu.SwapChain.Descriptor) *gpu.SwapChain { + _ = device; + _ = surface; + _ = descriptor; + unreachable; + } + + pub inline fn deviceCreateTexture(device: *gpu.Device, descriptor: *const gpu.Texture.Descriptor) *gpu.Texture { + _ = device; + _ = descriptor; + unreachable; + } + + pub inline fn deviceDestroy(device: *gpu.Device) void { + _ = device; + unreachable; + } + + pub inline fn deviceEnumerateFeatures(device: *gpu.Device, features: ?[*]gpu.FeatureName) usize { + _ = device; + _ = features; + unreachable; + } + + pub inline fn deviceGetLimits(device: *gpu.Device, limits: *gpu.SupportedLimits) u32 { + _ = device; + _ = limits; + unreachable; + } + + pub inline fn deviceGetQueue(device: *gpu.Device) *gpu.Queue { + _ = device; + unreachable; + } + + pub inline fn deviceHasFeature(device: *gpu.Device, feature: gpu.FeatureName) u32 { + _ = device; + _ = feature; + unreachable; + } + + pub inline fn deviceImportSharedFence(device: *gpu.Device, descriptor: *const gpu.SharedFence.Descriptor) *gpu.SharedFence { + _ = device; + _ = descriptor; + unreachable; + } + + pub inline fn deviceImportSharedTextureMemory(device: *gpu.Device, descriptor: *const gpu.SharedTextureMemory.Descriptor) *gpu.SharedTextureMemory { + _ = device; + _ = descriptor; + unreachable; + } + + pub inline fn deviceInjectError(device: *gpu.Device, typ: gpu.ErrorType, message: [*:0]const u8) void { + _ = device; + _ = typ; + _ = message; + unreachable; + } + + pub inline fn deviceLoseForTesting(device: *gpu.Device) void { + _ = device; + unreachable; + } + + pub inline fn devicePopErrorScope(device: *gpu.Device, callback: gpu.ErrorCallback, userdata: ?*anyopaque) void { + _ = device; + _ = callback; + _ = userdata; + unreachable; + } + + pub inline fn devicePushErrorScope(device: *gpu.Device, filter: gpu.ErrorFilter) void { + _ = device; + _ = filter; + unreachable; + } + + pub inline fn deviceSetDeviceLostCallback(device: *gpu.Device, callback: ?gpu.Device.LostCallback, userdata: ?*anyopaque) void { + _ = device; + _ = callback; + _ = userdata; + unreachable; + } + + pub inline fn deviceSetLabel(device: *gpu.Device, label: [*:0]const u8) void { + _ = device; + _ = label; + unreachable; + } + + pub inline fn deviceSetLoggingCallback(device: *gpu.Device, callback: ?gpu.LoggingCallback, userdata: ?*anyopaque) void { + _ = device; + _ = callback; + _ = userdata; + unreachable; + } + + pub inline fn deviceSetUncapturedErrorCallback(device: *gpu.Device, callback: ?gpu.ErrorCallback, userdata: ?*anyopaque) void { + _ = device; + _ = callback; + _ = userdata; + unreachable; + } + + pub inline fn deviceTick(device: *gpu.Device) void { + _ = device; + unreachable; + } + + pub inline fn machDeviceWaitForCommandsToBeScheduled(device: *gpu.Device) void { + _ = device; + unreachable; + } + + pub inline fn deviceReference(device: *gpu.Device) void { + _ = device; + unreachable; + } + + pub inline fn deviceRelease(device: *gpu.Device) void { + _ = device; + unreachable; + } + + pub inline fn externalTextureDestroy(external_texture: 
*gpu.ExternalTexture) void { + _ = external_texture; + unreachable; + } + + pub inline fn externalTextureSetLabel(external_texture: *gpu.ExternalTexture, label: [*:0]const u8) void { + _ = external_texture; + _ = label; + unreachable; + } + + pub inline fn externalTextureReference(external_texture: *gpu.ExternalTexture) void { + _ = external_texture; + unreachable; + } + + pub inline fn externalTextureRelease(external_texture: *gpu.ExternalTexture) void { + _ = external_texture; + unreachable; + } + + pub inline fn instanceCreateSurface(instance: *gpu.Instance, descriptor: *const gpu.Surface.Descriptor) *gpu.Surface { + _ = instance; + _ = descriptor; + unreachable; + } + + pub inline fn instanceProcessEvents(instance: *gpu.Instance) void { + _ = instance; + unreachable; + } + + pub inline fn instanceRequestAdapter(instance: *gpu.Instance, options: ?*const gpu.RequestAdapterOptions, callback: gpu.RequestAdapterCallback, userdata: ?*anyopaque) void { + _ = instance; + _ = options; + _ = callback; + _ = userdata; + unreachable; + } + + pub inline fn instanceReference(instance: *gpu.Instance) void { + _ = instance; + unreachable; + } + + pub inline fn instanceRelease(instance: *gpu.Instance) void { + _ = instance; + unreachable; + } + + pub inline fn pipelineLayoutSetLabel(pipeline_layout: *gpu.PipelineLayout, label: [*:0]const u8) void { + _ = pipeline_layout; + _ = label; + unreachable; + } + + pub inline fn pipelineLayoutReference(pipeline_layout: *gpu.PipelineLayout) void { + _ = pipeline_layout; + unreachable; + } + + pub inline fn pipelineLayoutRelease(pipeline_layout: *gpu.PipelineLayout) void { + _ = pipeline_layout; + unreachable; + } + + pub inline fn querySetDestroy(query_set: *gpu.QuerySet) void { + _ = query_set; + unreachable; + } + + pub inline fn querySetGetCount(query_set: *gpu.QuerySet) u32 { + _ = query_set; + unreachable; + } + + pub inline fn querySetGetType(query_set: *gpu.QuerySet) gpu.QueryType { + _ = query_set; + unreachable; + } + + pub inline fn querySetSetLabel(query_set: *gpu.QuerySet, label: [*:0]const u8) void { + _ = query_set; + _ = label; + unreachable; + } + + pub inline fn querySetReference(query_set: *gpu.QuerySet) void { + _ = query_set; + unreachable; + } + + pub inline fn querySetRelease(query_set: *gpu.QuerySet) void { + _ = query_set; + unreachable; + } + + pub inline fn queueCopyTextureForBrowser(queue: *gpu.Queue, source: *const gpu.ImageCopyTexture, destination: *const gpu.ImageCopyTexture, copy_size: *const gpu.Extent3D, options: *const gpu.CopyTextureForBrowserOptions) void { + _ = queue; + _ = source; + _ = destination; + _ = copy_size; + _ = options; + unreachable; + } + + pub inline fn queueOnSubmittedWorkDone(queue: *gpu.Queue, signal_value: u64, callback: gpu.Queue.WorkDoneCallback, userdata: ?*anyopaque) void { + _ = queue; + _ = signal_value; + _ = callback; + _ = userdata; + unreachable; + } + + pub inline fn queueSetLabel(queue: *gpu.Queue, label: [*:0]const u8) void { + _ = queue; + _ = label; + unreachable; + } + + pub inline fn queueSubmit(queue: *gpu.Queue, command_count: usize, commands: [*]const *const gpu.CommandBuffer) void { + _ = queue; + _ = command_count; + _ = commands; + unreachable; + } + + pub inline fn queueWriteBuffer(queue: *gpu.Queue, buffer: *gpu.Buffer, buffer_offset: u64, data: *const anyopaque, size: usize) void { + _ = queue; + _ = buffer; + _ = buffer_offset; + _ = data; + _ = size; + unreachable; + } + + pub inline fn queueWriteTexture(queue: *gpu.Queue, destination: *const gpu.ImageCopyTexture, data: *const 
anyopaque, data_size: usize, data_layout: *const gpu.Texture.DataLayout, write_size: *const gpu.Extent3D) void { + _ = queue; + _ = destination; + _ = data; + _ = data_size; + _ = data_layout; + _ = write_size; + unreachable; + } + + pub inline fn queueReference(queue: *gpu.Queue) void { + _ = queue; + unreachable; + } + + pub inline fn queueRelease(queue: *gpu.Queue) void { + _ = queue; + unreachable; + } + + pub inline fn renderBundleSetLabel(render_bundle: *gpu.RenderBundle, label: [*:0]const u8) void { + _ = render_bundle; + _ = label; + unreachable; + } + + pub inline fn renderBundleReference(render_bundle: *gpu.RenderBundle) void { + _ = render_bundle; + unreachable; + } + + pub inline fn renderBundleRelease(render_bundle: *gpu.RenderBundle) void { + _ = render_bundle; + unreachable; + } + + pub inline fn renderBundleEncoderDraw(render_bundle_encoder: *gpu.RenderBundleEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) void { + _ = render_bundle_encoder; + _ = vertex_count; + _ = instance_count; + _ = first_vertex; + _ = first_instance; + unreachable; + } + + pub inline fn renderBundleEncoderDrawIndexed(render_bundle_encoder: *gpu.RenderBundleEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) void { + _ = render_bundle_encoder; + _ = index_count; + _ = instance_count; + _ = first_index; + _ = base_vertex; + _ = first_instance; + unreachable; + } + + pub inline fn renderBundleEncoderDrawIndexedIndirect(render_bundle_encoder: *gpu.RenderBundleEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) void { + _ = render_bundle_encoder; + _ = indirect_buffer; + _ = indirect_offset; + unreachable; + } + + pub inline fn renderBundleEncoderDrawIndirect(render_bundle_encoder: *gpu.RenderBundleEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) void { + _ = render_bundle_encoder; + _ = indirect_buffer; + _ = indirect_offset; + unreachable; + } + + pub inline fn renderBundleEncoderFinish(render_bundle_encoder: *gpu.RenderBundleEncoder, descriptor: ?*const gpu.RenderBundle.Descriptor) *gpu.RenderBundle { + _ = render_bundle_encoder; + _ = descriptor; + unreachable; + } + + pub inline fn renderBundleEncoderInsertDebugMarker(render_bundle_encoder: *gpu.RenderBundleEncoder, marker_label: [*:0]const u8) void { + _ = render_bundle_encoder; + _ = marker_label; + unreachable; + } + + pub inline fn renderBundleEncoderPopDebugGroup(render_bundle_encoder: *gpu.RenderBundleEncoder) void { + _ = render_bundle_encoder; + unreachable; + } + + pub inline fn renderBundleEncoderPushDebugGroup(render_bundle_encoder: *gpu.RenderBundleEncoder, group_label: [*:0]const u8) void { + _ = render_bundle_encoder; + _ = group_label; + unreachable; + } + + pub inline fn renderBundleEncoderSetBindGroup(render_bundle_encoder: *gpu.RenderBundleEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void { + _ = render_bundle_encoder; + _ = group_index; + _ = group; + _ = dynamic_offset_count; + _ = dynamic_offsets; + unreachable; + } + + pub inline fn renderBundleEncoderSetIndexBuffer(render_bundle_encoder: *gpu.RenderBundleEncoder, buffer: *gpu.Buffer, format: gpu.IndexFormat, offset: u64, size: u64) void { + _ = render_bundle_encoder; + _ = buffer; + _ = format; + _ = offset; + _ = size; + unreachable; + } + + pub inline fn renderBundleEncoderSetLabel(render_bundle_encoder: *gpu.RenderBundleEncoder, label: [*:0]const u8) void { + _ = render_bundle_encoder; + _ = label; 
+ unreachable; + } + + pub inline fn renderBundleEncoderSetPipeline(render_bundle_encoder: *gpu.RenderBundleEncoder, pipeline: *gpu.RenderPipeline) void { + _ = render_bundle_encoder; + _ = pipeline; + unreachable; + } + + pub inline fn renderBundleEncoderSetVertexBuffer(render_bundle_encoder: *gpu.RenderBundleEncoder, slot: u32, buffer: *gpu.Buffer, offset: u64, size: u64) void { + _ = render_bundle_encoder; + _ = slot; + _ = buffer; + _ = offset; + _ = size; + unreachable; + } + + pub inline fn renderBundleEncoderReference(render_bundle_encoder: *gpu.RenderBundleEncoder) void { + _ = render_bundle_encoder; + unreachable; + } + + pub inline fn renderBundleEncoderRelease(render_bundle_encoder: *gpu.RenderBundleEncoder) void { + _ = render_bundle_encoder; + unreachable; + } + + pub inline fn renderPassEncoderBeginOcclusionQuery(render_pass_encoder: *gpu.RenderPassEncoder, query_index: u32) void { + _ = render_pass_encoder; + _ = query_index; + unreachable; + } + + pub inline fn renderPassEncoderDraw(render_pass_encoder: *gpu.RenderPassEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) void { + _ = render_pass_encoder; + _ = vertex_count; + _ = instance_count; + _ = first_vertex; + _ = first_instance; + unreachable; + } + + pub inline fn renderPassEncoderDrawIndexed(render_pass_encoder: *gpu.RenderPassEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) void { + _ = render_pass_encoder; + _ = index_count; + _ = instance_count; + _ = first_index; + _ = base_vertex; + _ = first_instance; + unreachable; + } + + pub inline fn renderPassEncoderDrawIndexedIndirect(render_pass_encoder: *gpu.RenderPassEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) void { + _ = render_pass_encoder; + _ = indirect_buffer; + _ = indirect_offset; + unreachable; + } + + pub inline fn renderPassEncoderDrawIndirect(render_pass_encoder: *gpu.RenderPassEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) void { + _ = render_pass_encoder; + _ = indirect_buffer; + _ = indirect_offset; + unreachable; + } + + pub inline fn renderPassEncoderEnd(render_pass_encoder: *gpu.RenderPassEncoder) void { + _ = render_pass_encoder; + unreachable; + } + + pub inline fn renderPassEncoderEndOcclusionQuery(render_pass_encoder: *gpu.RenderPassEncoder) void { + _ = render_pass_encoder; + unreachable; + } + + pub inline fn renderPassEncoderExecuteBundles(render_pass_encoder: *gpu.RenderPassEncoder, bundles_count: usize, bundles: [*]const *const gpu.RenderBundle) void { + _ = render_pass_encoder; + _ = bundles_count; + _ = bundles; + unreachable; + } + + pub inline fn renderPassEncoderInsertDebugMarker(render_pass_encoder: *gpu.RenderPassEncoder, marker_label: [*:0]const u8) void { + _ = render_pass_encoder; + _ = marker_label; + unreachable; + } + + pub inline fn renderPassEncoderPopDebugGroup(render_pass_encoder: *gpu.RenderPassEncoder) void { + _ = render_pass_encoder; + unreachable; + } + + pub inline fn renderPassEncoderPushDebugGroup(render_pass_encoder: *gpu.RenderPassEncoder, group_label: [*:0]const u8) void { + _ = render_pass_encoder; + _ = group_label; + unreachable; + } + + pub inline fn renderPassEncoderSetBindGroup(render_pass_encoder: *gpu.RenderPassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void { + _ = render_pass_encoder; + _ = group_index; + _ = group; + _ = dynamic_offset_count; + _ = dynamic_offsets; + unreachable; + } + + pub inline fn 
renderPassEncoderSetBlendConstant(render_pass_encoder: *gpu.RenderPassEncoder, color: *const gpu.Color) void { + _ = render_pass_encoder; + _ = color; + unreachable; + } + + pub inline fn renderPassEncoderSetIndexBuffer(render_pass_encoder: *gpu.RenderPassEncoder, buffer: *gpu.Buffer, format: gpu.IndexFormat, offset: u64, size: u64) void { + _ = render_pass_encoder; + _ = buffer; + _ = format; + _ = offset; + _ = size; + unreachable; + } + + pub inline fn renderPassEncoderSetLabel(render_pass_encoder: *gpu.RenderPassEncoder, label: [*:0]const u8) void { + _ = render_pass_encoder; + _ = label; + unreachable; + } + + pub inline fn renderPassEncoderSetPipeline(render_pass_encoder: *gpu.RenderPassEncoder, pipeline: *gpu.RenderPipeline) void { + _ = render_pass_encoder; + _ = pipeline; + unreachable; + } + + pub inline fn renderPassEncoderSetScissorRect(render_pass_encoder: *gpu.RenderPassEncoder, x: u32, y: u32, width: u32, height: u32) void { + _ = render_pass_encoder; + _ = x; + _ = y; + _ = width; + _ = height; + unreachable; + } + + pub inline fn renderPassEncoderSetStencilReference(render_pass_encoder: *gpu.RenderPassEncoder, reference: u32) void { + _ = render_pass_encoder; + _ = reference; + unreachable; + } + + pub inline fn renderPassEncoderSetVertexBuffer(render_pass_encoder: *gpu.RenderPassEncoder, slot: u32, buffer: *gpu.Buffer, offset: u64, size: u64) void { + _ = render_pass_encoder; + _ = slot; + _ = buffer; + _ = offset; + _ = size; + unreachable; + } + + pub inline fn renderPassEncoderSetViewport(render_pass_encoder: *gpu.RenderPassEncoder, x: f32, y: f32, width: f32, height: f32, min_depth: f32, max_depth: f32) void { + _ = render_pass_encoder; + _ = x; + _ = y; + _ = width; + _ = height; + _ = min_depth; + _ = max_depth; + unreachable; + } + + pub inline fn renderPassEncoderWriteTimestamp(render_pass_encoder: *gpu.RenderPassEncoder, query_set: *gpu.QuerySet, query_index: u32) void { + _ = render_pass_encoder; + _ = query_set; + _ = query_index; + unreachable; + } + + pub inline fn renderPassEncoderReference(render_pass_encoder: *gpu.RenderPassEncoder) void { + _ = render_pass_encoder; + unreachable; + } + + pub inline fn renderPassEncoderRelease(render_pass_encoder: *gpu.RenderPassEncoder) void { + _ = render_pass_encoder; + unreachable; + } + + pub inline fn renderPipelineGetBindGroupLayout(render_pipeline: *gpu.RenderPipeline, group_index: u32) *gpu.BindGroupLayout { + _ = render_pipeline; + _ = group_index; + unreachable; + } + + pub inline fn renderPipelineSetLabel(render_pipeline: *gpu.RenderPipeline, label: [*:0]const u8) void { + _ = render_pipeline; + _ = label; + unreachable; + } + + pub inline fn renderPipelineReference(render_pipeline: *gpu.RenderPipeline) void { + _ = render_pipeline; + unreachable; + } + + pub inline fn renderPipelineRelease(render_pipeline: *gpu.RenderPipeline) void { + _ = render_pipeline; + unreachable; + } + + pub inline fn samplerSetLabel(sampler: *gpu.Sampler, label: [*:0]const u8) void { + _ = sampler; + _ = label; + unreachable; + } + + pub inline fn samplerReference(sampler: *gpu.Sampler) void { + _ = sampler; + unreachable; + } + + pub inline fn samplerRelease(sampler: *gpu.Sampler) void { + _ = sampler; + unreachable; + } + + pub inline fn shaderModuleGetCompilationInfo(shader_module: *gpu.ShaderModule, callback: gpu.CompilationInfoCallback, userdata: ?*anyopaque) void { + _ = shader_module; + _ = callback; + _ = userdata; + unreachable; + } + + pub inline fn shaderModuleSetLabel(shader_module: *gpu.ShaderModule, label: [*:0]const u8) 
void { + _ = shader_module; + _ = label; + unreachable; + } + + pub inline fn shaderModuleReference(shader_module: *gpu.ShaderModule) void { + _ = shader_module; + unreachable; + } + + pub inline fn shaderModuleRelease(shader_module: *gpu.ShaderModule) void { + _ = shader_module; + unreachable; + } + + pub inline fn sharedFenceExportInfo(shared_fence: *gpu.SharedFence, info: *gpu.SharedFence.ExportInfo) void { + _ = shared_fence; + _ = info; + unreachable; + } + + pub inline fn sharedFenceReference(shared_fence: *gpu.SharedFence) void { + _ = shared_fence; + unreachable; + } + + pub inline fn sharedFenceRelease(shared_fence: *gpu.SharedFence) void { + _ = shared_fence; + unreachable; + } + + pub inline fn sharedTextureMemoryBeginAccess(shared_texture_memory: *gpu.SharedTextureMemory, texture: *gpu.Texture, descriptor: *const gpu.SharedTextureMemory.BeginAccessDescriptor) void { + _ = shared_texture_memory; + _ = texture; + _ = descriptor; + unreachable; + } + + pub inline fn sharedTextureMemoryCreateTexture(shared_texture_memory: *gpu.SharedTextureMemory, descriptor: *const gpu.Texture.Descriptor) *gpu.Texture { + _ = shared_texture_memory; + _ = descriptor; + unreachable; + } + + pub inline fn sharedTextureMemoryEndAccess(shared_texture_memory: *gpu.SharedTextureMemory, texture: *gpu.Texture, descriptor: *gpu.SharedTextureMemory.EndAccessState) void { + _ = shared_texture_memory; + _ = texture; + _ = descriptor; + unreachable; + } + + pub inline fn sharedTextureMemoryEndAccessStateFreeMembers(value: gpu.SharedTextureMemory.EndAccessState) void { + _ = value; + unreachable; + } + + pub inline fn sharedTextureMemoryGetProperties(shared_texture_memory: *gpu.SharedTextureMemory, properties: *gpu.SharedTextureMemory.Properties) void { + _ = shared_texture_memory; + _ = properties; + unreachable; + } + + pub inline fn sharedTextureMemorySetLabel(shared_texture_memory: *gpu.SharedTextureMemory, label: [*:0]const u8) void { + _ = shared_texture_memory; + _ = label; + unreachable; + } + + pub inline fn sharedTextureMemoryReference(shared_texture_memory: *gpu.SharedTextureMemory) void { + _ = shared_texture_memory; + unreachable; + } + + pub inline fn sharedTextureMemoryRelease(shared_texture_memory: *gpu.SharedTextureMemory) void { + _ = shared_texture_memory; + unreachable; + } + + pub inline fn surfaceReference(surface: *gpu.Surface) void { + _ = surface; + unreachable; + } + + pub inline fn surfaceRelease(surface: *gpu.Surface) void { + _ = surface; + unreachable; + } + + pub inline fn swapChainGetCurrentTexture(swap_chain: *gpu.SwapChain) ?*gpu.Texture { + _ = swap_chain; + unreachable; + } + + pub inline fn swapChainGetCurrentTextureView(swap_chain: *gpu.SwapChain) ?*gpu.TextureView { + _ = swap_chain; + unreachable; + } + + pub inline fn swapChainPresent(swap_chain: *gpu.SwapChain) void { + _ = swap_chain; + unreachable; + } + + pub inline fn swapChainReference(swap_chain: *gpu.SwapChain) void { + _ = swap_chain; + unreachable; + } + + pub inline fn swapChainRelease(swap_chain: *gpu.SwapChain) void { + _ = swap_chain; + unreachable; + } + + pub inline fn textureCreateView(texture: *gpu.Texture, descriptor: ?*const gpu.TextureView.Descriptor) *gpu.TextureView { + _ = texture; + _ = descriptor; + unreachable; + } + + pub inline fn textureDestroy(texture: *gpu.Texture) void { + _ = texture; + unreachable; + } + + pub inline fn textureGetDepthOrArrayLayers(texture: *gpu.Texture) u32 { + _ = texture; + unreachable; + } + + pub inline fn textureGetDimension(texture: *gpu.Texture) 
gpu.Texture.Dimension {
+        _ = texture;
+        unreachable;
+    }
+
+    pub inline fn textureGetFormat(texture: *gpu.Texture) gpu.Texture.Format {
+        _ = texture;
+        unreachable;
+    }
+
+    pub inline fn textureGetHeight(texture: *gpu.Texture) u32 {
+        _ = texture;
+        unreachable;
+    }
+
+    pub inline fn textureGetMipLevelCount(texture: *gpu.Texture) u32 {
+        _ = texture;
+        unreachable;
+    }
+
+    pub inline fn textureGetSampleCount(texture: *gpu.Texture) u32 {
+        _ = texture;
+        unreachable;
+    }
+
+    pub inline fn textureGetUsage(texture: *gpu.Texture) gpu.Texture.UsageFlags {
+        _ = texture;
+        unreachable;
+    }
+
+    pub inline fn textureGetWidth(texture: *gpu.Texture) u32 {
+        _ = texture;
+        unreachable;
+    }
+
+    pub inline fn textureSetLabel(texture: *gpu.Texture, label: [*:0]const u8) void {
+        _ = texture;
+        _ = label;
+        unreachable;
+    }
+
+    pub inline fn textureReference(texture: *gpu.Texture) void {
+        _ = texture;
+        unreachable;
+    }
+
+    pub inline fn textureRelease(texture: *gpu.Texture) void {
+        _ = texture;
+        unreachable;
+    }
+
+    pub inline fn textureViewSetLabel(texture_view: *gpu.TextureView, label: [*:0]const u8) void {
+        _ = texture_view;
+        _ = label;
+        unreachable;
+    }
+
+    pub inline fn textureViewReference(texture_view: *gpu.TextureView) void {
+        _ = texture_view;
+        unreachable;
+    }
+
+    pub inline fn textureViewRelease(texture_view: *gpu.TextureView) void {
+        _ = texture_view;
+        unreachable;
+    }
+});
+
+test "stub" {
+    _ = StubInterface;
+}
diff --git a/src/gpu/mach_dawn.cpp b/src/gpu/mach_dawn.cpp
new file mode 100644
index 00000000..4d637874
--- /dev/null
+++ b/src/gpu/mach_dawn.cpp
@@ -0,0 +1,28 @@
+#include <dawn/native/DawnNative.h>
+#include "mach_dawn.h"
+
+#if defined(__APPLE__)
+    namespace dawn::native::metal {
+        DAWN_NATIVE_EXPORT void WaitForCommandsToBeScheduled(WGPUDevice device);
+    } // namespace dawn::native::metal
+#endif // defined(__APPLE__)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+MACH_EXPORT const DawnProcTable machDawnGetProcTable() {
+    return dawn::native::GetProcs();
+}
+
+MACH_EXPORT void machDawnDeviceWaitForCommandsToBeScheduled(WGPUDevice device) {
+    #if defined(__APPLE__)
+    return dawn::native::metal::WaitForCommandsToBeScheduled(device);
+    #else
+    return;
+    #endif // defined(__APPLE__)
+}
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
\ No newline at end of file
diff --git a/src/gpu/mach_dawn.h b/src/gpu/mach_dawn.h
new file mode 100644
index 00000000..d6b56097
--- /dev/null
+++ b/src/gpu/mach_dawn.h
@@ -0,0 +1,36 @@
+#ifndef MACH_DAWN_C_H_
+#define MACH_DAWN_C_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(MACH_DAWN_C_SHARED_LIBRARY)
+#    if defined(_WIN32)
+#        if defined(MACH_DAWN_C_IMPLEMENTATION)
+#            define MACH_EXPORT __declspec(dllexport)
+#        else
+#            define MACH_EXPORT __declspec(dllimport)
+#        endif
+#    else // defined(_WIN32)
+#        if defined(MACH_DAWN_C_IMPLEMENTATION)
+#            define MACH_EXPORT __attribute__((visibility("default")))
+#        else
+#            define MACH_EXPORT
+#        endif
+#    endif // defined(_WIN32)
+#else // defined(MACH_DAWN_C_SHARED_LIBRARY)
+#    define MACH_EXPORT
+#endif // defined(MACH_DAWN_C_SHARED_LIBRARY)
+
+#include <dawn/dawn_proc_table.h>
+#include <dawn/webgpu.h>
+
+MACH_EXPORT const DawnProcTable machDawnGetProcTable();
+MACH_EXPORT void machDawnDeviceWaitForCommandsToBeScheduled(WGPUDevice device);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // MACH_DAWN_C_H_
diff --git a/src/gpu/main.zig b/src/gpu/main.zig
new file mode 100644
index 00000000..16e8f6af
--- /dev/null
+++ b/src/gpu/main.zig
@@ -0,0 +1,1025 @@
+const std = @import("std");
+const testing = std.testing;
+
+pub const Adapter = 
@import("adapter.zig").Adapter; +pub const BindGroup = @import("bind_group.zig").BindGroup; +pub const BindGroupLayout = @import("bind_group_layout.zig").BindGroupLayout; +pub const Buffer = @import("buffer.zig").Buffer; +pub const CommandBuffer = @import("command_buffer.zig").CommandBuffer; +pub const CommandEncoder = @import("command_encoder.zig").CommandEncoder; +pub const ComputePassEncoder = @import("compute_pass_encoder.zig").ComputePassEncoder; +pub const ComputePipeline = @import("compute_pipeline.zig").ComputePipeline; +pub const Device = @import("device.zig").Device; +pub const ExternalTexture = @import("external_texture.zig").ExternalTexture; +pub const Instance = @import("instance.zig").Instance; +pub const PipelineLayout = @import("pipeline_layout.zig").PipelineLayout; +pub const QuerySet = @import("query_set.zig").QuerySet; +pub const Queue = @import("queue.zig").Queue; +pub const RenderBundle = @import("render_bundle.zig").RenderBundle; +pub const RenderBundleEncoder = @import("render_bundle_encoder.zig").RenderBundleEncoder; +pub const RenderPassEncoder = @import("render_pass_encoder.zig").RenderPassEncoder; +pub const RenderPipeline = @import("render_pipeline.zig").RenderPipeline; +pub const Sampler = @import("sampler.zig").Sampler; +pub const ShaderModule = @import("shader_module.zig").ShaderModule; +pub const SharedTextureMemory = @import("shared_texture_memory.zig").SharedTextureMemory; +pub const SharedFence = @import("shared_fence.zig").SharedFence; +pub const Surface = @import("surface.zig").Surface; +pub const SwapChain = @import("swap_chain.zig").SwapChain; +pub const Texture = @import("texture.zig").Texture; +pub const TextureView = @import("texture_view.zig").TextureView; + +pub const dawn = @import("dawn.zig"); +const instance = @import("instance.zig"); +const device = @import("device.zig"); +const interface = @import("interface.zig"); + +pub const Impl = interface.Impl; +pub const StubInterface = interface.StubInterface; +pub const Export = interface.Export; +pub const Interface = interface.Interface; + +pub inline fn createInstance(descriptor: ?*const instance.Instance.Descriptor) ?*instance.Instance { + return Impl.createInstance(descriptor); +} + +pub inline fn getProcAddress(_device: *device.Device, proc_name: [*:0]const u8) ?Proc { + return Impl.getProcAddress(_device, proc_name); +} + +pub const array_layer_count_undefined = 0xffffffff; +pub const copy_stride_undefined = 0xffffffff; +pub const limit_u32_undefined = 0xffffffff; +pub const limit_u64_undefined = 0xffffffffffffffff; +pub const mip_level_count_undefined = 0xffffffff; +pub const whole_map_size = std.math.maxInt(usize); +pub const whole_size = 0xffffffffffffffff; + +/// Generic function pointer type, used for returning API function pointers. Must be +/// cast to the right `fn (...) callconv(.C) T` type before use. 
+pub const Proc = *const fn () callconv(.C) void; + +/// 32-bit unsigned boolean type, as used in webgpu.h +pub const Bool32 = enum(u32) { + false, + true, + + pub inline fn from(v: bool) @This() { + return if (v) .true else .false; + } +}; + +pub const ComputePassTimestampWrite = extern struct { + query_set: *QuerySet, + query_index: u32, + location: ComputePassTimestampLocation, +}; + +pub const RenderPassDepthStencilAttachment = extern struct { + view: *TextureView, + depth_load_op: LoadOp = .undefined, + depth_store_op: StoreOp = .undefined, + depth_clear_value: f32 = 0, + depth_read_only: Bool32 = .false, + stencil_load_op: LoadOp = .undefined, + stencil_store_op: StoreOp = .undefined, + stencil_clear_value: u32 = 0, + stencil_read_only: Bool32 = .false, +}; + +pub const RenderPassTimestampWrite = extern struct { + query_set: *QuerySet, + query_index: u32, + location: RenderPassTimestampLocation, +}; + +pub const RequestAdapterOptions = extern struct { + pub const NextInChain = extern union { + generic: ?*const ChainedStruct, + dawn_toggles_descriptor: *const dawn.TogglesDescriptor, + }; + + next_in_chain: NextInChain = .{ .generic = null }, + compatible_surface: ?*Surface = null, + power_preference: PowerPreference = .undefined, + backend_type: BackendType = .undefined, + force_fallback_adapter: Bool32 = .false, + compatibility_mode: Bool32 = .false, +}; + +pub const ComputePassDescriptor = extern struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + timestamp_write_count: usize = 0, + timestamp_writes: ?[*]const ComputePassTimestampWrite = null, + + /// Provides a slightly friendlier Zig API to initialize this structure. + pub inline fn init(v: struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + timestamp_writes: ?[]const ComputePassTimestampWrite = null, + }) ComputePassDescriptor { + return .{ + .next_in_chain = v.next_in_chain, + .label = v.label, + .timestamp_write_count = if (v.timestamp_writes) |e| e.len else 0, + .timestamp_writes = if (v.timestamp_writes) |e| e.ptr else null, + }; + } +}; + +pub const RenderPassDescriptor = extern struct { + pub const NextInChain = extern union { + generic: ?*const ChainedStruct, + max_draw_count: *const RenderPassDescriptorMaxDrawCount, + }; + + next_in_chain: NextInChain = .{ .generic = null }, + label: ?[*:0]const u8 = null, + color_attachment_count: usize = 0, + color_attachments: ?[*]const RenderPassColorAttachment = null, + depth_stencil_attachment: ?*const RenderPassDepthStencilAttachment = null, + occlusion_query_set: ?*QuerySet = null, + timestamp_write_count: usize = 0, + timestamp_writes: ?[*]const RenderPassTimestampWrite = null, + + /// Provides a slightly friendlier Zig API to initialize this structure. 
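+    /// A minimal sketch (assumes a `color_attachment: gpu.RenderPassColorAttachment`
+    /// built by the caller):
+    ///
+    /// ```
+    /// const render_pass_info = gpu.RenderPassDescriptor.init(.{
+    ///     .label = "main render pass",
+    ///     .color_attachments = &.{color_attachment},
+    /// });
+    /// ```
+    ///
+    /// The slice length is copied into `color_attachment_count` automatically.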
+ pub inline fn init(v: struct { + next_in_chain: NextInChain = .{ .generic = null }, + label: ?[*:0]const u8 = null, + color_attachments: ?[]const RenderPassColorAttachment = null, + depth_stencil_attachment: ?*const RenderPassDepthStencilAttachment = null, + occlusion_query_set: ?*QuerySet = null, + timestamp_writes: ?[]const RenderPassTimestampWrite = null, + }) RenderPassDescriptor { + return .{ + .next_in_chain = v.next_in_chain, + .label = v.label, + .color_attachment_count = if (v.color_attachments) |e| e.len else 0, + .color_attachments = if (v.color_attachments) |e| e.ptr else null, + .depth_stencil_attachment = v.depth_stencil_attachment, + .occlusion_query_set = v.occlusion_query_set, + .timestamp_write_count = if (v.timestamp_writes) |e| e.len else 0, + .timestamp_writes = if (v.timestamp_writes) |e| e.ptr else null, + }; + } +}; + +pub const AlphaMode = enum(u32) { premultiplied = 0x00000000, unpremultiplied = 0x00000001, opaq = 0x00000002 }; + +pub const BackendType = enum(u32) { + undefined, + null, + webgpu, + d3d11, + d3d12, + metal, + vulkan, + opengl, + opengles, + + pub fn name(t: BackendType) []const u8 { + return switch (t) { + .undefined => "Undefined", + .null => "Null", + .webgpu => "WebGPU", + .d3d11 => "D3D11", + .d3d12 => "D3D12", + .metal => "Metal", + .vulkan => "Vulkan", + .opengl => "OpenGL", + .opengles => "OpenGLES", + }; + } +}; + +pub const BlendFactor = enum(u32) { + zero = 0x00000000, + one = 0x00000001, + src = 0x00000002, + one_minus_src = 0x00000003, + src_alpha = 0x00000004, + one_minus_src_alpha = 0x00000005, + dst = 0x00000006, + one_minus_dst = 0x00000007, + dst_alpha = 0x00000008, + one_minus_dst_alpha = 0x00000009, + src_alpha_saturated = 0x0000000A, + constant = 0x0000000B, + one_minus_constant = 0x0000000C, + src1 = 0x0000000D, + one_minus_src1 = 0x0000000E, + src1_alpha = 0x0000000F, + one_minus_src1_alpha = 0x00000010, +}; + +pub const BlendOperation = enum(u32) { + add = 0x00000000, + subtract = 0x00000001, + reverse_subtract = 0x00000002, + min = 0x00000003, + max = 0x00000004, +}; + +pub const CompareFunction = enum(u32) { + undefined = 0x00000000, + never = 0x00000001, + less = 0x00000002, + less_equal = 0x00000003, + greater = 0x00000004, + greater_equal = 0x00000005, + equal = 0x00000006, + not_equal = 0x00000007, + always = 0x00000008, +}; + +pub const CompilationInfoRequestStatus = enum(u32) { + success = 0x00000000, + err = 0x00000001, + device_lost = 0x00000002, + unknown = 0x00000003, +}; + +pub const CompilationMessageType = enum(u32) { + err = 0x00000000, + warning = 0x00000001, + info = 0x00000002, +}; + +pub const ComputePassTimestampLocation = enum(u32) { + beginning = 0x00000000, + end = 0x00000001, +}; + +pub const CreatePipelineAsyncStatus = enum(u32) { + success = 0x00000000, + validation_error = 0x00000001, + internal_error = 0x00000002, + device_lost = 0x00000003, + device_destroyed = 0x00000004, + unknown = 0x00000005, +}; + +pub const CullMode = enum(u32) { + none = 0x00000000, + front = 0x00000001, + back = 0x00000002, +}; + +pub const ErrorFilter = enum(u32) { + validation = 0x00000000, + out_of_memory = 0x00000001, + internal = 0x00000002, +}; + +pub const ErrorType = enum(u32) { + no_error = 0x00000000, + validation = 0x00000001, + out_of_memory = 0x00000002, + internal = 0x00000003, + unknown = 0x00000004, + device_lost = 0x00000005, +}; + +pub const FeatureName = enum(u32) { + undefined = 0x00000000, + depth_clip_control = 0x00000001, + depth32_float_stencil8 = 0x00000002, + timestamp_query = 0x00000003, + 
pipeline_statistics_query = 0x00000004,
+    texture_compression_bc = 0x00000005,
+    texture_compression_etc2 = 0x00000006,
+    texture_compression_astc = 0x00000007,
+    indirect_first_instance = 0x00000008,
+    shader_f16 = 0x00000009,
+    rg11_b10_ufloat_renderable = 0x0000000A,
+    bgra8_unorm_storage = 0x0000000B,
+    float32_filterable = 0x0000000C,
+    dawn_internal_usages = 0x000003ea,
+    dawn_multi_planar_formats = 0x000003eb,
+    dawn_native = 0x000003ec,
+    chromium_experimental_dp4a = 0x000003ed,
+    timestamp_query_inside_passes = 0x000003EE,
+    implicit_device_synchronization = 0x000003EF,
+    surface_capabilities = 0x000003F0,
+    transient_attachments = 0x000003F1,
+    msaa_render_to_single_sampled = 0x000003F2,
+    dual_source_blending = 0x000003F3,
+    d3d11_multithread_protected = 0x000003F4,
+    angle_texture_sharing = 0x000003F5,
+    shared_texture_memory_vk_image_descriptor = 0x0000044C,
+    shared_texture_memory_vk_dedicated_allocation_descriptor = 0x0000044D,
+    shared_texture_memory_a_hardware_buffer_descriptor = 0x0000044E,
+    shared_texture_memory_dma_buf_descriptor = 0x0000044F,
+    shared_texture_memory_opaque_fd_descriptor = 0x00000450,
+    shared_texture_memory_zircon_handle_descriptor = 0x00000451,
+    shared_texture_memory_dxgi_shared_handle_descriptor = 0x00000452,
+    shared_texture_memory_d3_d11_texture2_d_descriptor = 0x00000453,
+    shared_texture_memory_io_surface_descriptor = 0x00000454,
+    shared_texture_memory_egl_image_descriptor = 0x00000455,
+    shared_texture_memory_initialized_begin_state = 0x000004B0,
+    shared_texture_memory_initialized_end_state = 0x000004B1,
+    shared_texture_memory_vk_image_layout_begin_state = 0x000004B2,
+    shared_texture_memory_vk_image_layout_end_state = 0x000004B3,
+    shared_fence_vk_semaphore_opaque_fd_descriptor = 0x000004B4,
+    shared_fence_vk_semaphore_opaque_fd_export_info = 0x000004B5,
+    shared_fence_vk_semaphore_sync_fd_descriptor = 0x000004B6,
+    shared_fence_vk_semaphore_sync_fd_export_info = 0x000004B7,
+    shared_fence_vk_semaphore_zircon_handle_descriptor = 0x000004B8,
+    shared_fence_vk_semaphore_zircon_handle_export_info = 0x000004B9,
+    shared_fence_dxgi_shared_handle_descriptor = 0x000004BA,
+    shared_fence_dxgi_shared_handle_export_info = 0x000004BB,
+    shared_fence_mtl_shared_event_descriptor = 0x000004BC,
+    shared_fence_mtl_shared_event_export_info = 0x000004BD,
+};
+
+pub const FilterMode = enum(u32) {
+    nearest = 0x00000000,
+    linear = 0x00000001,
+};
+
+pub const MipmapFilterMode = enum(u32) {
+    nearest = 0x00000000,
+    linear = 0x00000001,
+};
+
+pub const FrontFace = enum(u32) {
+    ccw = 0x00000000,
+    cw = 0x00000001,
+};
+
+pub const IndexFormat = enum(u32) {
+    undefined = 0x00000000,
+    uint16 = 0x00000001,
+    uint32 = 0x00000002,
+};
+
+pub const LoadOp = enum(u32) {
+    undefined = 0x00000000,
+    clear = 0x00000001,
+    load = 0x00000002,
+};
+
+pub const LoggingType = enum(u32) {
+    verbose = 0x00000000,
+    info = 0x00000001,
+    warning = 0x00000002,
+    err = 0x00000003,
+};
+
+pub const PipelineStatisticName = enum(u32) {
+    vertex_shader_invocations = 0x00000000,
+    clipper_invocations = 0x00000001,
+    clipper_primitives_out = 0x00000002,
+    fragment_shader_invocations = 0x00000003,
+    compute_shader_invocations = 0x00000004,
+};
+
+pub const PowerPreference = enum(u32) {
+    undefined = 0x00000000,
+    low_power = 0x00000001,
+    high_performance = 0x00000002,
+};
+
+pub const PresentMode = enum(u32) {
+    immediate = 0x00000000,
+    mailbox = 0x00000001,
+    fifo = 0x00000002,
+};
+
+pub const PrimitiveTopology = enum(u32) {
+    point_list = 0x00000000,
+    line_list = 0x00000001,
+
line_strip = 0x00000002, + triangle_list = 0x00000003, + triangle_strip = 0x00000004, +}; + +pub const QueryType = enum(u32) { + occlusion = 0x00000000, + pipeline_statistics = 0x00000001, + timestamp = 0x00000002, +}; + +pub const RenderPassTimestampLocation = enum(u32) { + beginning = 0x00000000, + end = 0x00000001, +}; + +pub const RequestAdapterStatus = enum(u32) { + success = 0x00000000, + unavailable = 0x00000001, + err = 0x00000002, + unknown = 0x00000003, +}; + +pub const RequestDeviceStatus = enum(u32) { + success = 0x00000000, + err = 0x00000001, + unknown = 0x00000002, +}; + +pub const SType = enum(u32) { + invalid = 0x00000000, + surface_descriptor_from_metal_layer = 0x00000001, + surface_descriptor_from_windows_hwnd = 0x00000002, + surface_descriptor_from_xlib_window = 0x00000003, + surface_descriptor_from_canvas_html_selector = 0x00000004, + shader_module_spirv_descriptor = 0x00000005, + shader_module_wgsl_descriptor = 0x00000006, + primitive_depth_clip_control = 0x00000007, + surface_descriptor_from_wayland_surface = 0x00000008, + surface_descriptor_from_android_native_window = 0x00000009, + surface_descriptor_from_windows_core_window = 0x0000000B, + external_texture_binding_entry = 0x0000000C, + external_texture_binding_layout = 0x0000000D, + surface_descriptor_from_windows_swap_chain_panel = 0x0000000E, + render_pass_descriptor_max_draw_count = 0x0000000F, + dawn_texture_internal_usage_descriptor = 0x000003E8, + dawn_encoder_internal_usage_descriptor = 0x000003EB, + dawn_instance_descriptor = 0x000003EC, + dawn_cache_device_descriptor = 0x000003ED, + dawn_adapter_properties_power_preference = 0x000003EE, + dawn_buffer_descriptor_error_info_from_wire_client = 0x000003EF, + dawn_toggles_descriptor = 0x000003F0, + dawn_shader_module_spirv_options_descriptor = 0x000003F1, + request_adapter_options_luid = 0x000003F2, + request_adapter_options_get_gl_proc = 0x000003F3, + dawn_multisample_state_render_to_single_sampled = 0x000003F4, + dawn_render_pass_color_attachment_render_to_single_sampled = 0x000003F5, + shared_texture_memory_vk_image_descriptor = 0x0000044C, + shared_texture_memory_vk_dedicated_allocation_descriptor = 0x0000044D, + shared_texture_memory_a_hardware_buffer_descriptor = 0x0000044E, + shared_texture_memory_dma_buf_descriptor = 0x0000044F, + shared_texture_memory_opaque_fd_descriptor = 0x00000450, + shared_texture_memory_zircon_handle_descriptor = 0x00000451, + shared_texture_memory_dxgi_shared_handle_descriptor = 0x00000452, + shared_texture_memory_d3d11_texture_2d_descriptor = 0x00000453, + shared_texture_memory_io_surface_descriptor = 0x00000454, + shared_texture_memory_egl_image_descriptor = 0x00000455, + shared_texture_memory_initialized_begin_state = 0x000004B0, + shared_texture_memory_initialized_end_state = 0x000004B1, + shared_texture_memory_vk_image_layout_begin_state = 0x000004B2, + shared_texture_memory_vk_image_layout_end_state = 0x000004B3, + shared_fence_vk_semaphore_opaque_fd_descriptor = 0x000004B4, + shared_fence_vk_semaphore_opaque_fd_export_info = 0x000004B5, + shared_fence_vk_semaphore_syncfd_descriptor = 0x000004B6, + shared_fence_vk_semaphore_sync_fd_export_info = 0x000004B7, + shared_fence_vk_semaphore_zircon_handle_descriptor = 0x000004B8, + shared_fence_vk_semaphore_zircon_handle_export_info = 0x000004B9, + shared_fence_dxgi_shared_handle_descriptor = 0x000004BA, + shared_fence_dxgi_shared_handle_export_info = 0x000004BB, + shared_fence_mtl_shared_event_descriptor = 0x000004BC, + shared_fence_mtl_shared_event_export_info = 0x000004BD, 
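+    // Each SType value identifies which extension struct a ChainedStruct node
+    // actually points at; see ChainedStruct below.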
+}; + +pub const StencilOperation = enum(u32) { + keep = 0x00000000, + zero = 0x00000001, + replace = 0x00000002, + invert = 0x00000003, + increment_clamp = 0x00000004, + decrement_clamp = 0x00000005, + increment_wrap = 0x00000006, + decrement_wrap = 0x00000007, +}; + +pub const StorageTextureAccess = enum(u32) { + undefined = 0x00000000, + write_only = 0x00000001, +}; + +pub const StoreOp = enum(u32) { + undefined = 0x00000000, + store = 0x00000001, + discard = 0x00000002, +}; + +pub const VertexFormat = enum(u32) { + undefined = 0x00000000, + uint8x2 = 0x00000001, + uint8x4 = 0x00000002, + sint8x2 = 0x00000003, + sint8x4 = 0x00000004, + unorm8x2 = 0x00000005, + unorm8x4 = 0x00000006, + snorm8x2 = 0x00000007, + snorm8x4 = 0x00000008, + uint16x2 = 0x00000009, + uint16x4 = 0x0000000a, + sint16x2 = 0x0000000b, + sint16x4 = 0x0000000c, + unorm16x2 = 0x0000000d, + unorm16x4 = 0x0000000e, + snorm16x2 = 0x0000000f, + snorm16x4 = 0x00000010, + float16x2 = 0x00000011, + float16x4 = 0x00000012, + float32 = 0x00000013, + float32x2 = 0x00000014, + float32x3 = 0x00000015, + float32x4 = 0x00000016, + uint32 = 0x00000017, + uint32x2 = 0x00000018, + uint32x3 = 0x00000019, + uint32x4 = 0x0000001a, + sint32 = 0x0000001b, + sint32x2 = 0x0000001c, + sint32x3 = 0x0000001d, + sint32x4 = 0x0000001e, +}; + +pub const VertexStepMode = enum(u32) { + vertex = 0x00000000, + instance = 0x00000001, + vertex_buffer_not_used = 0x00000002, +}; + +pub const ColorWriteMaskFlags = packed struct(u32) { + red: bool = false, + green: bool = false, + blue: bool = false, + alpha: bool = false, + + _padding: u28 = 0, + + comptime { + std.debug.assert( + @sizeOf(@This()) == @sizeOf(u32) and + @bitSizeOf(@This()) == @bitSizeOf(u32), + ); + } + + pub const all = ColorWriteMaskFlags{ + .red = true, + .green = true, + .blue = true, + .alpha = true, + }; + + pub fn equal(a: ColorWriteMaskFlags, b: ColorWriteMaskFlags) bool { + return @as(u4, @truncate(@as(u32, @bitCast(a)))) == @as(u4, @truncate(@as(u32, @bitCast(b)))); + } +}; + +pub const MapModeFlags = packed struct(u32) { + read: bool = false, + write: bool = false, + + _padding: u30 = 0, + + comptime { + std.debug.assert( + @sizeOf(@This()) == @sizeOf(u32) and + @bitSizeOf(@This()) == @bitSizeOf(u32), + ); + } + + pub const undef = MapModeFlags{}; + + pub fn equal(a: MapModeFlags, b: MapModeFlags) bool { + return @as(u2, @truncate(@as(u32, @bitCast(a)))) == @as(u2, @truncate(@as(u32, @bitCast(b)))); + } +}; + +pub const ShaderStageFlags = packed struct(u32) { + vertex: bool = false, + fragment: bool = false, + compute: bool = false, + + _padding: u29 = 0, + + comptime { + std.debug.assert( + @sizeOf(@This()) == @sizeOf(u32) and + @bitSizeOf(@This()) == @bitSizeOf(u32), + ); + } + + pub const none = ShaderStageFlags{}; + + pub fn equal(a: ShaderStageFlags, b: ShaderStageFlags) bool { + return @as(u3, @truncate(@as(u32, @bitCast(a)))) == @as(u3, @truncate(@as(u32, @bitCast(b)))); + } +}; + +pub const ChainedStruct = extern struct { + // TODO: dawn: not marked as nullable in dawn.json but in fact is. + next: ?*const ChainedStruct = null, + s_type: SType, +}; + +pub const ChainedStructOut = extern struct { + // TODO: dawn: not marked as nullable in dawn.json but in fact is. 
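+    // Output-direction analogue of ChainedStruct: the implementation writes
+    // through this chain (e.g. `SupportedLimits.next_in_chain`) instead of
+    // reading extension structs from it.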
+ next: ?*ChainedStructOut = null, + s_type: SType, +}; + +pub const BlendComponent = extern struct { + operation: BlendOperation = .add, + src_factor: BlendFactor = .one, + dst_factor: BlendFactor = .zero, +}; + +pub const Color = extern struct { + r: f64, + g: f64, + b: f64, + a: f64, +}; + +pub const Extent2D = extern struct { + width: u32, + height: u32, +}; + +pub const Extent3D = extern struct { + width: u32, + height: u32 = 1, + depth_or_array_layers: u32 = 1, +}; + +pub const Limits = extern struct { + max_texture_dimension_1d: u32 = limit_u32_undefined, + max_texture_dimension_2d: u32 = limit_u32_undefined, + max_texture_dimension_3d: u32 = limit_u32_undefined, + max_texture_array_layers: u32 = limit_u32_undefined, + max_bind_groups: u32 = limit_u32_undefined, + max_bind_groups_plus_vertex_buffers: u32 = limit_u32_undefined, + max_bindings_per_bind_group: u32 = limit_u32_undefined, + max_dynamic_uniform_buffers_per_pipeline_layout: u32 = limit_u32_undefined, + max_dynamic_storage_buffers_per_pipeline_layout: u32 = limit_u32_undefined, + max_sampled_textures_per_shader_stage: u32 = limit_u32_undefined, + max_samplers_per_shader_stage: u32 = limit_u32_undefined, + max_storage_buffers_per_shader_stage: u32 = limit_u32_undefined, + max_storage_textures_per_shader_stage: u32 = limit_u32_undefined, + max_uniform_buffers_per_shader_stage: u32 = limit_u32_undefined, + max_uniform_buffer_binding_size: u64 = limit_u64_undefined, + max_storage_buffer_binding_size: u64 = limit_u64_undefined, + min_uniform_buffer_offset_alignment: u32 = limit_u32_undefined, + min_storage_buffer_offset_alignment: u32 = limit_u32_undefined, + max_vertex_buffers: u32 = limit_u32_undefined, + max_buffer_size: u64 = limit_u64_undefined, + max_vertex_attributes: u32 = limit_u32_undefined, + max_vertex_buffer_array_stride: u32 = limit_u32_undefined, + max_inter_stage_shader_components: u32 = limit_u32_undefined, + max_inter_stage_shader_variables: u32 = limit_u32_undefined, + max_color_attachments: u32 = limit_u32_undefined, + max_color_attachment_bytes_per_sample: u32 = limit_u32_undefined, + max_compute_workgroup_storage_size: u32 = limit_u32_undefined, + max_compute_invocations_per_workgroup: u32 = limit_u32_undefined, + max_compute_workgroup_size_x: u32 = limit_u32_undefined, + max_compute_workgroup_size_y: u32 = limit_u32_undefined, + max_compute_workgroup_size_z: u32 = limit_u32_undefined, + max_compute_workgroups_per_dimension: u32 = limit_u32_undefined, +}; + +pub const Origin2D = extern struct { + x: u32 = 0, + y: u32 = 0, +}; + +pub const Origin3D = extern struct { + x: u32 = 0, + y: u32 = 0, + z: u32 = 0, +}; + +pub const CompilationMessage = extern struct { + next_in_chain: ?*const ChainedStruct = null, + message: ?[*:0]const u8 = null, + type: CompilationMessageType, + line_num: u64, + line_pos: u64, + offset: u64, + length: u64, + utf16_line_pos: u64, + utf16_offset: u64, + utf16_length: u64, +}; + +pub const ConstantEntry = extern struct { + next_in_chain: ?*const ChainedStruct = null, + key: [*:0]const u8, + value: f64, +}; + +pub const CopyTextureForBrowserOptions = extern struct { + next_in_chain: ?*const ChainedStruct = null, + flip_y: Bool32 = .false, + needs_color_space_conversion: Bool32 = .false, + src_alpha_mode: AlphaMode = .unpremultiplied, + src_transfer_function_parameters: ?*const [7]f32 = null, + conversion_matrix: ?*const [9]f32 = null, + dst_transfer_function_parameters: ?*const [7]f32 = null, + dst_alpha_mode: AlphaMode = .unpremultiplied, + internal_usage: Bool32 = .false, +}; + +pub 
const MultisampleState = extern struct { + pub const NextInChain = extern union { + generic: ?*const ChainedStruct, + dawn_multisample_state_render_to_single_sampled: *const dawn.MultisampleStateRenderToSingleSampled, + }; + + next_in_chain: NextInChain = .{ .generic = null }, + count: u32 = 1, + mask: u32 = 0xFFFFFFFF, + alpha_to_coverage_enabled: Bool32 = .false, +}; + +pub const PrimitiveDepthClipControl = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .primitive_depth_clip_control }, + unclipped_depth: Bool32 = .false, +}; + +pub const PrimitiveState = extern struct { + pub const NextInChain = extern union { + generic: ?*const ChainedStruct, + primitive_depth_clip_control: *const PrimitiveDepthClipControl, + }; + + next_in_chain: NextInChain = .{ .generic = null }, + topology: PrimitiveTopology = .triangle_list, + strip_index_format: IndexFormat = .undefined, + front_face: FrontFace = .ccw, + cull_mode: CullMode = .none, +}; + +pub const RenderPassDescriptorMaxDrawCount = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .render_pass_descriptor_max_draw_count }, + max_draw_count: u64 = 50000000, +}; + +pub const StencilFaceState = extern struct { + compare: CompareFunction = .always, + fail_op: StencilOperation = .keep, + depth_fail_op: StencilOperation = .keep, + pass_op: StencilOperation = .keep, +}; + +pub const StorageTextureBindingLayout = extern struct { + next_in_chain: ?*const ChainedStruct = null, + access: StorageTextureAccess = .undefined, + format: Texture.Format = .undefined, + view_dimension: TextureView.Dimension = .dimension_undefined, +}; + +pub const VertexAttribute = extern struct { + format: VertexFormat, + offset: u64, + shader_location: u32, +}; + +pub const BlendState = extern struct { + color: BlendComponent = .{}, + alpha: BlendComponent = .{}, +}; + +pub const CompilationInfo = extern struct { + next_in_chain: ?*const ChainedStruct = null, + message_count: usize, + messages: ?[*]const CompilationMessage = null, + + /// Helper to get messages as a slice. + pub fn getMessages(info: CompilationInfo) ?[]const CompilationMessage { + if (info.messages) |messages| { + return messages[0..info.message_count]; + } + return null; + } +}; + +pub const DepthStencilState = extern struct { + next_in_chain: ?*const ChainedStruct = null, + format: Texture.Format, + depth_write_enabled: Bool32 = .false, + depth_compare: CompareFunction = .always, + stencil_front: StencilFaceState = .{}, + stencil_back: StencilFaceState = .{}, + stencil_read_mask: u32 = 0xFFFFFFFF, + stencil_write_mask: u32 = 0xFFFFFFFF, + depth_bias: i32 = 0, + depth_bias_slope_scale: f32 = 0.0, + depth_bias_clamp: f32 = 0.0, +}; + +pub const ImageCopyBuffer = extern struct { + next_in_chain: ?*const ChainedStruct = null, + layout: Texture.DataLayout, + buffer: *Buffer, +}; + +pub const ImageCopyExternalTexture = extern struct { + next_in_chain: ?*const ChainedStruct = null, + external_texture: *ExternalTexture, + origin: Origin3D, + natural_size: Extent2D, +}; + +pub const ImageCopyTexture = extern struct { + next_in_chain: ?*const ChainedStruct = null, + texture: *Texture, + mip_level: u32 = 0, + origin: Origin3D = .{}, + aspect: Texture.Aspect = .all, +}; + +pub const ProgrammableStageDescriptor = extern struct { + next_in_chain: ?*const ChainedStruct = null, + module: *ShaderModule, + entry_point: [*:0]const u8, + constant_count: usize = 0, + constants: ?[*]const ConstantEntry = null, + + /// Provides a slightly friendlier Zig API to initialize this structure. 
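+    /// A minimal sketch (assumes a `shader_module: *gpu.ShaderModule` created earlier):
+    ///
+    /// ```
+    /// const stage = gpu.ProgrammableStageDescriptor.init(.{
+    ///     .module = shader_module,
+    ///     .entry_point = "main",
+    /// });
+    /// ```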
+ pub inline fn init(v: struct { + next_in_chain: ?*const ChainedStruct = null, + module: *ShaderModule, + entry_point: [*:0]const u8, + constants: ?[]const ConstantEntry = null, + }) ProgrammableStageDescriptor { + return .{ + .next_in_chain = v.next_in_chain, + .module = v.module, + .entry_point = v.entry_point, + .constant_count = if (v.constants) |e| e.len else 0, + .constants = if (v.constants) |e| e.ptr else null, + }; + } +}; + +pub const RenderPassColorAttachment = extern struct { + pub const NextInChain = extern union { + generic: ?*const ChainedStruct, + dawn_render_pass_color_attachment_render_to_single_sampled: *const dawn.RenderPassColorAttachmentRenderToSingleSampled, + }; + + next_in_chain: NextInChain = .{ .generic = null }, + view: ?*TextureView = null, + resolve_target: ?*TextureView = null, + load_op: LoadOp, + store_op: StoreOp, + clear_value: Color, +}; + +pub const RequiredLimits = extern struct { + next_in_chain: ?*const ChainedStruct = null, + limits: Limits, +}; + +/// Used to query limits from a Device or Adapter. Can be used as follows: +/// +/// ``` +/// var supported: gpu.SupportedLimits = .{}; +/// if (!adapter.getLimits(&supported)) @panic("unsupported options"); +/// ``` +/// +/// Note that `getLimits` can only fail if `next_in_chain` options are invalid. +pub const SupportedLimits = extern struct { + next_in_chain: ?*ChainedStructOut = null, + limits: Limits = undefined, +}; + +pub const VertexBufferLayout = extern struct { + array_stride: u64, + step_mode: VertexStepMode = .vertex, + attribute_count: usize, + attributes: ?[*]const VertexAttribute = null, + + /// Provides a slightly friendlier Zig API to initialize this structure. + pub inline fn init(v: struct { + array_stride: u64, + step_mode: VertexStepMode = .vertex, + attributes: ?[]const VertexAttribute = null, + }) VertexBufferLayout { + return .{ + .array_stride = v.array_stride, + .step_mode = v.step_mode, + .attribute_count = if (v.attributes) |e| e.len else 0, + .attributes = if (v.attributes) |e| e.ptr else null, + }; + } +}; + +pub const ColorTargetState = extern struct { + next_in_chain: ?*const ChainedStruct = null, + format: Texture.Format, + blend: ?*const BlendState = null, + write_mask: ColorWriteMaskFlags = ColorWriteMaskFlags.all, +}; + +pub const VertexState = extern struct { + next_in_chain: ?*const ChainedStruct = null, + module: *ShaderModule, + entry_point: [*:0]const u8, + constant_count: usize = 0, + constants: ?[*]const ConstantEntry = null, + buffer_count: usize = 0, + buffers: ?[*]const VertexBufferLayout = null, + + /// Provides a slightly friendlier Zig API to initialize this structure. 
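+    /// A minimal sketch (assumes `shader_module: *gpu.ShaderModule` and a
+    /// `vertex_buffer_layout: gpu.VertexBufferLayout` built by the caller):
+    ///
+    /// ```
+    /// const vertex = gpu.VertexState.init(.{
+    ///     .module = shader_module,
+    ///     .entry_point = "vertexMain",
+    ///     .buffers = &.{vertex_buffer_layout},
+    /// });
+    /// ```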
+ pub inline fn init(v: struct { + next_in_chain: ?*const ChainedStruct = null, + module: *ShaderModule, + entry_point: [*:0]const u8, + constants: ?[]const ConstantEntry = null, + buffers: ?[]const VertexBufferLayout = null, + }) VertexState { + return .{ + .next_in_chain = v.next_in_chain, + .module = v.module, + .entry_point = v.entry_point, + .constant_count = if (v.constants) |e| e.len else 0, + .constants = if (v.constants) |e| e.ptr else null, + .buffer_count = if (v.buffers) |e| e.len else 0, + .buffers = if (v.buffers) |e| e.ptr else null, + }; + } +}; + +pub const FragmentState = extern struct { + next_in_chain: ?*const ChainedStruct = null, + module: *ShaderModule, + entry_point: [*:0]const u8, + constant_count: usize = 0, + constants: ?[*]const ConstantEntry = null, + target_count: usize, + targets: ?[*]const ColorTargetState = null, + + /// Provides a slightly friendlier Zig API to initialize this structure. + pub inline fn init(v: struct { + next_in_chain: ?*const ChainedStruct = null, + module: *ShaderModule, + entry_point: [*:0]const u8, + constants: ?[]const ConstantEntry = null, + targets: ?[]const ColorTargetState = null, + }) FragmentState { + return .{ + .next_in_chain = v.next_in_chain, + .module = v.module, + .entry_point = v.entry_point, + .constant_count = if (v.constants) |e| e.len else 0, + .constants = if (v.constants) |e| e.ptr else null, + .target_count = if (v.targets) |e| e.len else 0, + .targets = if (v.targets) |e| e.ptr else null, + }; + } +}; + +test "BackendType name" { + try testing.expectEqualStrings("Vulkan", BackendType.vulkan.name()); +} + +test "enum name" { + try testing.expectEqualStrings("front", @tagName(CullMode.front)); +} + +pub const CompilationInfoCallback = *const fn ( + status: CompilationInfoRequestStatus, + compilation_info: *const CompilationInfo, + userdata: ?*anyopaque, +) callconv(.C) void; + +pub const ErrorCallback = *const fn ( + typ: ErrorType, + message: [*:0]const u8, + userdata: ?*anyopaque, +) callconv(.C) void; + +pub const LoggingCallback = *const fn ( + typ: LoggingType, + message: [*:0]const u8, + userdata: ?*anyopaque, +) callconv(.C) void; + +pub const RequestDeviceCallback = *const fn ( + status: RequestDeviceStatus, + device: *Device, + message: ?[*:0]const u8, + userdata: ?*anyopaque, +) callconv(.C) void; + +pub const RequestAdapterCallback = *const fn ( + status: RequestAdapterStatus, + adapter: ?*Adapter, + message: ?[*:0]const u8, + userdata: ?*anyopaque, +) callconv(.C) void; + +pub const CreateComputePipelineAsyncCallback = *const fn ( + status: CreatePipelineAsyncStatus, + compute_pipeline: ?*ComputePipeline, + message: ?[*:0]const u8, + userdata: ?*anyopaque, +) callconv(.C) void; + +pub const CreateRenderPipelineAsyncCallback = *const fn ( + status: CreatePipelineAsyncStatus, + pipeline: ?*RenderPipeline, + message: ?[*:0]const u8, + userdata: ?*anyopaque, +) callconv(.C) void; + +test { + std.testing.refAllDeclsRecursive(@This()); +} diff --git a/src/gpu/pipeline_layout.zig b/src/gpu/pipeline_layout.zig new file mode 100644 index 00000000..3b4185a5 --- /dev/null +++ b/src/gpu/pipeline_layout.zig @@ -0,0 +1,38 @@ +const ChainedStruct = @import("main.zig").ChainedStruct; +const BindGroupLayout = @import("bind_group_layout.zig").BindGroupLayout; +const Impl = @import("interface.zig").Impl; + +pub const PipelineLayout = opaque { + pub const Descriptor = extern struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + bind_group_layout_count: usize = 0, + bind_group_layouts: 
?[*]const *BindGroupLayout = null, + + /// Provides a slightly friendlier Zig API to initialize this structure. + pub inline fn init(v: struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + bind_group_layouts: ?[]const *BindGroupLayout = null, + }) Descriptor { + return .{ + .next_in_chain = v.next_in_chain, + .label = v.label, + .bind_group_layout_count = if (v.bind_group_layouts) |e| e.len else 0, + .bind_group_layouts = if (v.bind_group_layouts) |e| e.ptr else null, + }; + } + }; + + pub inline fn setLabel(pipeline_layout: *PipelineLayout, label: [*:0]const u8) void { + Impl.pipelineLayoutSetLabel(pipeline_layout, label); + } + + pub inline fn reference(pipeline_layout: *PipelineLayout) void { + Impl.pipelineLayoutReference(pipeline_layout); + } + + pub inline fn release(pipeline_layout: *PipelineLayout) void { + Impl.pipelineLayoutRelease(pipeline_layout); + } +}; diff --git a/src/gpu/query_set.zig b/src/gpu/query_set.zig new file mode 100644 index 00000000..e0fa58b5 --- /dev/null +++ b/src/gpu/query_set.zig @@ -0,0 +1,57 @@ +const ChainedStruct = @import("main.zig").ChainedStruct; +const PipelineStatisticName = @import("main.zig").PipelineStatisticName; +const QueryType = @import("main.zig").QueryType; +const Impl = @import("interface.zig").Impl; + +pub const QuerySet = opaque { + pub const Descriptor = extern struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + type: QueryType, + count: u32, + pipeline_statistics: ?[*]const PipelineStatisticName = null, + pipeline_statistics_count: usize = 0, + + /// Provides a slightly friendlier Zig API to initialize this structure. + pub inline fn init(v: struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + type: QueryType, + count: u32, + pipeline_statistics: ?[]const PipelineStatisticName = null, + }) Descriptor { + return .{ + .next_in_chain = v.next_in_chain, + .label = v.label, + .type = v.type, + .count = v.count, + .pipeline_statistics_count = if (v.pipeline_statistics) |e| e.len else 0, + .pipeline_statistics = if (v.pipeline_statistics) |e| e.ptr else null, + }; + } + }; + + pub inline fn destroy(query_set: *QuerySet) void { + Impl.querySetDestroy(query_set); + } + + pub inline fn getCount(query_set: *QuerySet) u32 { + return Impl.querySetGetCount(query_set); + } + + pub inline fn getType(query_set: *QuerySet) QueryType { + return Impl.querySetGetType(query_set); + } + + pub inline fn setLabel(query_set: *QuerySet, label: [*:0]const u8) void { + Impl.querySetSetLabel(query_set, label); + } + + pub inline fn reference(query_set: *QuerySet) void { + Impl.querySetReference(query_set); + } + + pub inline fn release(query_set: *QuerySet) void { + Impl.querySetRelease(query_set); + } +}; diff --git a/src/gpu/queue.zig b/src/gpu/queue.zig new file mode 100644 index 00000000..18f2921c --- /dev/null +++ b/src/gpu/queue.zig @@ -0,0 +1,101 @@ +const std = @import("std"); +const CommandBuffer = @import("command_buffer.zig").CommandBuffer; +const Buffer = @import("buffer.zig").Buffer; +const Texture = @import("texture.zig").Texture; +const ImageCopyTexture = @import("main.zig").ImageCopyTexture; +const ImageCopyExternalTexture = @import("main.zig").ImageCopyExternalTexture; +const ChainedStruct = @import("main.zig").ChainedStruct; +const Extent3D = @import("main.zig").Extent3D; +const CopyTextureForBrowserOptions = @import("main.zig").CopyTextureForBrowserOptions; +const Impl = @import("interface.zig").Impl; + +pub const Queue = opaque { + pub 
const WorkDoneCallback = *const fn ( + status: WorkDoneStatus, + userdata: ?*anyopaque, + ) callconv(.C) void; + + pub const WorkDoneStatus = enum(u32) { + success = 0x00000000, + err = 0x00000001, + unknown = 0x00000002, + device_lost = 0x00000003, + }; + + pub const Descriptor = extern struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + }; + + pub inline fn copyExternalTextureForBrowser(queue: *Queue, source: *const ImageCopyExternalTexture, destination: *const ImageCopyTexture, copy_size: *const Extent3D, options: *const CopyTextureForBrowserOptions) void { + Impl.queueCopyExternalTextureForBrowser(queue, source, destination, copy_size, options); + } + + pub inline fn copyTextureForBrowser(queue: *Queue, source: *const ImageCopyTexture, destination: *const ImageCopyTexture, copy_size: *const Extent3D, options: *const CopyTextureForBrowserOptions) void { + Impl.queueCopyTextureForBrowser(queue, source, destination, copy_size, options); + } + + // TODO: dawn: does not allow unsetting this callback to null + pub inline fn onSubmittedWorkDone( + queue: *Queue, + signal_value: u64, + context: anytype, + comptime callback: fn (ctx: @TypeOf(context), status: WorkDoneStatus) callconv(.Inline) void, + ) void { + const Context = @TypeOf(context); + const Helper = struct { + pub fn cCallback(status: WorkDoneStatus, userdata: ?*anyopaque) callconv(.C) void { + callback(if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))), status); + } + }; + Impl.queueOnSubmittedWorkDone(queue, signal_value, Helper.cCallback, if (Context == void) null else context); + } + + pub inline fn setLabel(queue: *Queue, label: [*:0]const u8) void { + Impl.queueSetLabel(queue, label); + } + + pub inline fn submit(queue: *Queue, commands: []const *const CommandBuffer) void { + Impl.queueSubmit(queue, commands.len, commands.ptr); + } + + pub inline fn writeBuffer( + queue: *Queue, + buffer: *Buffer, + buffer_offset_bytes: u64, + data_slice: anytype, + ) void { + Impl.queueWriteBuffer( + queue, + buffer, + buffer_offset_bytes, + @as(*const anyopaque, @ptrCast(std.mem.sliceAsBytes(data_slice).ptr)), + data_slice.len * @sizeOf(std.meta.Elem(@TypeOf(data_slice))), + ); + } + + pub inline fn writeTexture( + queue: *Queue, + destination: *const ImageCopyTexture, + data_layout: *const Texture.DataLayout, + write_size: *const Extent3D, + data_slice: anytype, + ) void { + Impl.queueWriteTexture( + queue, + destination, + @as(*const anyopaque, @ptrCast(std.mem.sliceAsBytes(data_slice).ptr)), + @as(usize, @intCast(data_slice.len)) * @sizeOf(std.meta.Elem(@TypeOf(data_slice))), + data_layout, + write_size, + ); + } + + pub inline fn reference(queue: *Queue) void { + Impl.queueReference(queue); + } + + pub inline fn release(queue: *Queue) void { + Impl.queueRelease(queue); + } +}; diff --git a/src/gpu/render_bundle.zig b/src/gpu/render_bundle.zig new file mode 100644 index 00000000..36914e71 --- /dev/null +++ b/src/gpu/render_bundle.zig @@ -0,0 +1,21 @@ +const ChainedStruct = @import("main.zig").ChainedStruct; +const Impl = @import("interface.zig").Impl; + +pub const RenderBundle = opaque { + pub const Descriptor = extern struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + }; + + pub inline fn setLabel(render_bundle: *RenderBundle, label: [*:0]const u8) void { + Impl.renderBundleSetLabel(render_bundle, label); + } + + pub inline fn reference(render_bundle: *RenderBundle) void { + Impl.renderBundleReference(render_bundle); + } + + pub inline fn 
release(render_bundle: *RenderBundle) void { + Impl.renderBundleRelease(render_bundle); + } +}; diff --git a/src/gpu/render_bundle_encoder.zig b/src/gpu/render_bundle_encoder.zig new file mode 100644 index 00000000..1d7b0399 --- /dev/null +++ b/src/gpu/render_bundle_encoder.zig @@ -0,0 +1,122 @@ +const Texture = @import("texture.zig").Texture; +const Buffer = @import("buffer.zig").Buffer; +const BindGroup = @import("bind_group.zig").BindGroup; +const RenderPipeline = @import("render_pipeline.zig").RenderPipeline; +const RenderBundle = @import("render_bundle.zig").RenderBundle; +const Bool32 = @import("main.zig").Bool32; +const ChainedStruct = @import("main.zig").ChainedStruct; +const IndexFormat = @import("main.zig").IndexFormat; +const Impl = @import("interface.zig").Impl; + +pub const RenderBundleEncoder = opaque { + pub const Descriptor = extern struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + color_formats_count: usize = 0, + color_formats: ?[*]const Texture.Format = null, + depth_stencil_format: Texture.Format = .undefined, + sample_count: u32 = 1, + depth_read_only: Bool32 = .false, + stencil_read_only: Bool32 = .false, + + /// Provides a slightly friendlier Zig API to initialize this structure. + pub inline fn init(v: struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + color_formats: ?[]const Texture.Format = null, + depth_stencil_format: Texture.Format = .undefined, + sample_count: u32 = 1, + depth_read_only: bool = false, + stencil_read_only: bool = false, + }) Descriptor { + return .{ + .next_in_chain = v.next_in_chain, + .label = v.label, + .color_formats_count = if (v.color_formats) |e| e.len else 0, + .color_formats = if (v.color_formats) |e| e.ptr else null, + .depth_stencil_format = v.depth_stencil_format, + .sample_count = v.sample_count, + .depth_read_only = Bool32.from(v.depth_read_only), + .stencil_read_only = Bool32.from(v.stencil_read_only), + }; + } + }; + + /// Default `instance_count`: 1 + /// Default `first_vertex`: 0 + /// Default `first_instance`: 0 + pub inline fn draw(render_bundle_encoder: *RenderBundleEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) void { + Impl.renderBundleEncoderDraw(render_bundle_encoder, vertex_count, instance_count, first_vertex, first_instance); + } + + /// Default `instance_count`: 1 + /// Default `first_index`: 0 + /// Default `base_vertex`: 0 + /// Default `first_instance`: 0 + pub inline fn drawIndexed(render_bundle_encoder: *RenderBundleEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) void { + Impl.renderBundleEncoderDrawIndexed(render_bundle_encoder, index_count, instance_count, first_index, base_vertex, first_instance); + } + + pub inline fn drawIndexedIndirect(render_bundle_encoder: *RenderBundleEncoder, indirect_buffer: *Buffer, indirect_offset: u64) void { + Impl.renderBundleEncoderDrawIndexedIndirect(render_bundle_encoder, indirect_buffer, indirect_offset); + } + + pub inline fn drawIndirect(render_bundle_encoder: *RenderBundleEncoder, indirect_buffer: *Buffer, indirect_offset: u64) void { + Impl.renderBundleEncoderDrawIndirect(render_bundle_encoder, indirect_buffer, indirect_offset); + } + + pub inline fn finish(render_bundle_encoder: *RenderBundleEncoder, descriptor: ?*const RenderBundle.Descriptor) *RenderBundle { + return Impl.renderBundleEncoderFinish(render_bundle_encoder, descriptor); + } + + pub inline fn insertDebugMarker(render_bundle_encoder: 
*RenderBundleEncoder, marker_label: [*:0]const u8) void { + Impl.renderBundleEncoderInsertDebugMarker(render_bundle_encoder, marker_label); + } + + pub inline fn popDebugGroup(render_bundle_encoder: *RenderBundleEncoder) void { + Impl.renderBundleEncoderPopDebugGroup(render_bundle_encoder); + } + + pub inline fn pushDebugGroup(render_bundle_encoder: *RenderBundleEncoder, group_label: [*:0]const u8) void { + Impl.renderBundleEncoderPushDebugGroup(render_bundle_encoder, group_label); + } + + /// Default `dynamic_offsets`: `null` + pub inline fn setBindGroup(render_bundle_encoder: *RenderBundleEncoder, group_index: u32, group: *BindGroup, dynamic_offsets: ?[]const u32) void { + Impl.renderBundleEncoderSetBindGroup( + render_bundle_encoder, + group_index, + group, + if (dynamic_offsets) |v| v.len else 0, + if (dynamic_offsets) |v| v.ptr else null, + ); + } + + /// Default `offset`: 0 + /// Default `size`: `gpu.whole_size` + pub inline fn setIndexBuffer(render_bundle_encoder: *RenderBundleEncoder, buffer: *Buffer, format: IndexFormat, offset: u64, size: u64) void { + Impl.renderBundleEncoderSetIndexBuffer(render_bundle_encoder, buffer, format, offset, size); + } + + pub inline fn setLabel(render_bundle_encoder: *RenderBundleEncoder, label: [*:0]const u8) void { + Impl.renderBundleEncoderSetLabel(render_bundle_encoder, label); + } + + pub inline fn setPipeline(render_bundle_encoder: *RenderBundleEncoder, pipeline: *RenderPipeline) void { + Impl.renderBundleEncoderSetPipeline(render_bundle_encoder, pipeline); + } + + /// Default `offset`: 0 + /// Default `size`: `gpu.whole_size` + pub inline fn setVertexBuffer(render_bundle_encoder: *RenderBundleEncoder, slot: u32, buffer: *Buffer, offset: u64, size: u64) void { + Impl.renderBundleEncoderSetVertexBuffer(render_bundle_encoder, slot, buffer, offset, size); + } + + pub inline fn reference(render_bundle_encoder: *RenderBundleEncoder) void { + Impl.renderBundleEncoderReference(render_bundle_encoder); + } + + pub inline fn release(render_bundle_encoder: *RenderBundleEncoder) void { + Impl.renderBundleEncoderRelease(render_bundle_encoder); + } +}; diff --git a/src/gpu/render_pass_encoder.zig b/src/gpu/render_pass_encoder.zig new file mode 100644 index 00000000..ff8a3a4c --- /dev/null +++ b/src/gpu/render_pass_encoder.zig @@ -0,0 +1,128 @@ +const Buffer = @import("buffer.zig").Buffer; +const RenderBundle = @import("render_bundle.zig").RenderBundle; +const BindGroup = @import("bind_group.zig").BindGroup; +const RenderPipeline = @import("render_pipeline.zig").RenderPipeline; +const QuerySet = @import("query_set.zig").QuerySet; +const Color = @import("main.zig").Color; +const IndexFormat = @import("main.zig").IndexFormat; +const Impl = @import("interface.zig").Impl; + +pub const RenderPassEncoder = opaque { + pub inline fn beginOcclusionQuery(render_pass_encoder: *RenderPassEncoder, query_index: u32) void { + Impl.renderPassEncoderBeginOcclusionQuery(render_pass_encoder, query_index); + } + + /// Default `instance_count`: 1 + /// Default `first_vertex`: 0 + /// Default `first_instance`: 0 + pub inline fn draw(render_pass_encoder: *RenderPassEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) void { + Impl.renderPassEncoderDraw(render_pass_encoder, vertex_count, instance_count, first_vertex, first_instance); + } + + /// Default `instance_count`: 1 + /// Default `first_index`: 0 + /// Default `base_vertex`: 0 + /// Default `first_instance`: 0 + pub inline fn drawIndexed(render_pass_encoder: *RenderPassEncoder, index_count: 
u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) void { + Impl.renderPassEncoderDrawIndexed(render_pass_encoder, index_count, instance_count, first_index, base_vertex, first_instance); + } + + pub inline fn drawIndexedIndirect(render_pass_encoder: *RenderPassEncoder, indirect_buffer: *Buffer, indirect_offset: u64) void { + Impl.renderPassEncoderDrawIndexedIndirect(render_pass_encoder, indirect_buffer, indirect_offset); + } + + pub inline fn drawIndirect(render_pass_encoder: *RenderPassEncoder, indirect_buffer: *Buffer, indirect_offset: u64) void { + Impl.renderPassEncoderDrawIndirect(render_pass_encoder, indirect_buffer, indirect_offset); + } + + pub inline fn end(render_pass_encoder: *RenderPassEncoder) void { + Impl.renderPassEncoderEnd(render_pass_encoder); + } + + pub inline fn endOcclusionQuery(render_pass_encoder: *RenderPassEncoder) void { + Impl.renderPassEncoderEndOcclusionQuery(render_pass_encoder); + } + + pub inline fn executeBundles( + render_pass_encoder: *RenderPassEncoder, + bundles: []*const RenderBundle, + ) void { + Impl.renderPassEncoderExecuteBundles( + render_pass_encoder, + bundles.len, + bundles.ptr, + ); + } + + pub inline fn insertDebugMarker(render_pass_encoder: *RenderPassEncoder, marker_label: [*:0]const u8) void { + Impl.renderPassEncoderInsertDebugMarker(render_pass_encoder, marker_label); + } + + pub inline fn popDebugGroup(render_pass_encoder: *RenderPassEncoder) void { + Impl.renderPassEncoderPopDebugGroup(render_pass_encoder); + } + + pub inline fn pushDebugGroup(render_pass_encoder: *RenderPassEncoder, group_label: [*:0]const u8) void { + Impl.renderPassEncoderPushDebugGroup(render_pass_encoder, group_label); + } + + /// Default `dynamic_offsets_count`: 0 + /// Default `dynamic_offsets`: `null` + pub inline fn setBindGroup(render_pass_encoder: *RenderPassEncoder, group_index: u32, group: *BindGroup, dynamic_offsets: ?[]const u32) void { + Impl.renderPassEncoderSetBindGroup( + render_pass_encoder, + group_index, + group, + if (dynamic_offsets) |v| v.len else 0, + if (dynamic_offsets) |v| v.ptr else null, + ); + } + + pub inline fn setBlendConstant(render_pass_encoder: *RenderPassEncoder, color: *const Color) void { + Impl.renderPassEncoderSetBlendConstant(render_pass_encoder, color); + } + + /// Default `offset`: 0 + /// Default `size`: `gpu.whole_size` + pub inline fn setIndexBuffer(render_pass_encoder: *RenderPassEncoder, buffer: *Buffer, format: IndexFormat, offset: u64, size: u64) void { + Impl.renderPassEncoderSetIndexBuffer(render_pass_encoder, buffer, format, offset, size); + } + + pub inline fn setLabel(render_pass_encoder: *RenderPassEncoder, label: [*:0]const u8) void { + Impl.renderPassEncoderSetLabel(render_pass_encoder, label); + } + + pub inline fn setPipeline(render_pass_encoder: *RenderPassEncoder, pipeline: *RenderPipeline) void { + Impl.renderPassEncoderSetPipeline(render_pass_encoder, pipeline); + } + + pub inline fn setScissorRect(render_pass_encoder: *RenderPassEncoder, x: u32, y: u32, width: u32, height: u32) void { + Impl.renderPassEncoderSetScissorRect(render_pass_encoder, x, y, width, height); + } + + pub inline fn setStencilReference(render_pass_encoder: *RenderPassEncoder, _reference: u32) void { + Impl.renderPassEncoderSetStencilReference(render_pass_encoder, _reference); + } + + /// Default `offset`: 0 + /// Default `size`: `gpu.whole_size` + pub inline fn setVertexBuffer(render_pass_encoder: *RenderPassEncoder, slot: u32, buffer: *Buffer, offset: u64, size: u64) void { + 
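// Passing gpu.whole_size as `size` binds from `offset` through the end of the buffer. +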
Impl.renderPassEncoderSetVertexBuffer(render_pass_encoder, slot, buffer, offset, size); + } + + pub inline fn setViewport(render_pass_encoder: *RenderPassEncoder, x: f32, y: f32, width: f32, height: f32, min_depth: f32, max_depth: f32) void { + Impl.renderPassEncoderSetViewport(render_pass_encoder, x, y, width, height, min_depth, max_depth); + } + + pub inline fn writeTimestamp(render_pass_encoder: *RenderPassEncoder, query_set: *QuerySet, query_index: u32) void { + Impl.renderPassEncoderWriteTimestamp(render_pass_encoder, query_set, query_index); + } + + pub inline fn reference(render_pass_encoder: *RenderPassEncoder) void { + Impl.renderPassEncoderReference(render_pass_encoder); + } + + pub inline fn release(render_pass_encoder: *RenderPassEncoder) void { + Impl.renderPassEncoderRelease(render_pass_encoder); + } +}; diff --git a/src/gpu/render_pipeline.zig b/src/gpu/render_pipeline.zig new file mode 100644 index 00000000..25221299 --- /dev/null +++ b/src/gpu/render_pipeline.zig @@ -0,0 +1,38 @@ +const ChainedStruct = @import("main.zig").ChainedStruct; +const DepthStencilState = @import("main.zig").DepthStencilState; +const MultisampleState = @import("main.zig").MultisampleState; +const VertexState = @import("main.zig").VertexState; +const PrimitiveState = @import("main.zig").PrimitiveState; +const FragmentState = @import("main.zig").FragmentState; +const PipelineLayout = @import("pipeline_layout.zig").PipelineLayout; +const BindGroupLayout = @import("bind_group_layout.zig").BindGroupLayout; +const Impl = @import("interface.zig").Impl; + +pub const RenderPipeline = opaque { + pub const Descriptor = extern struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + layout: ?*PipelineLayout = null, + vertex: VertexState, + primitive: PrimitiveState = .{}, + depth_stencil: ?*const DepthStencilState = null, + multisample: MultisampleState = .{}, + fragment: ?*const FragmentState = null, + }; + + pub inline fn getBindGroupLayout(render_pipeline: *RenderPipeline, group_index: u32) *BindGroupLayout { + return Impl.renderPipelineGetBindGroupLayout(render_pipeline, group_index); + } + + pub inline fn setLabel(render_pipeline: *RenderPipeline, label: [*:0]const u8) void { + Impl.renderPipelineSetLabel(render_pipeline, label); + } + + pub inline fn reference(render_pipeline: *RenderPipeline) void { + Impl.renderPipelineReference(render_pipeline); + } + + pub inline fn release(render_pipeline: *RenderPipeline) void { + Impl.renderPipelineRelease(render_pipeline); + } +}; diff --git a/src/gpu/sampler.zig b/src/gpu/sampler.zig new file mode 100644 index 00000000..7f9f5b57 --- /dev/null +++ b/src/gpu/sampler.zig @@ -0,0 +1,52 @@ +const ChainedStruct = @import("main.zig").ChainedStruct; +const FilterMode = @import("main.zig").FilterMode; +const MipmapFilterMode = @import("main.zig").MipmapFilterMode; +const CompareFunction = @import("main.zig").CompareFunction; +const Impl = @import("interface.zig").Impl; + +pub const Sampler = opaque { + pub const AddressMode = enum(u32) { + repeat = 0x00000000, + mirror_repeat = 0x00000001, + clamp_to_edge = 0x00000002, + }; + + pub const BindingType = enum(u32) { + undefined = 0x00000000, + filtering = 0x00000001, + non_filtering = 0x00000002, + comparison = 0x00000003, + }; + + pub const BindingLayout = extern struct { + next_in_chain: ?*const ChainedStruct = null, + type: BindingType = .undefined, + }; + + pub const Descriptor = extern struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + 
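// The defaults below match the WebGPU spec: out-of-range coordinates clamp to the + // edge texel, all filtering is nearest-neighbor, and the LOD range is [0, 32]. +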
address_mode_u: AddressMode = .clamp_to_edge, + address_mode_v: AddressMode = .clamp_to_edge, + address_mode_w: AddressMode = .clamp_to_edge, + mag_filter: FilterMode = .nearest, + min_filter: FilterMode = .nearest, + mipmap_filter: MipmapFilterMode = .nearest, + lod_min_clamp: f32 = 0.0, + lod_max_clamp: f32 = 32.0, + compare: CompareFunction = .undefined, + max_anisotropy: u16 = 1, + }; + + pub inline fn setLabel(sampler: *Sampler, label: [*:0]const u8) void { + Impl.samplerSetLabel(sampler, label); + } + + pub inline fn reference(sampler: *Sampler) void { + Impl.samplerReference(sampler); + } + + pub inline fn release(sampler: *Sampler) void { + Impl.samplerRelease(sampler); + } +}; diff --git a/src/gpu/shader_module.zig b/src/gpu/shader_module.zig new file mode 100644 index 00000000..41b41797 --- /dev/null +++ b/src/gpu/shader_module.zig @@ -0,0 +1,69 @@ +const ChainedStruct = @import("main.zig").ChainedStruct; +const CompilationInfoCallback = @import("main.zig").CompilationInfoCallback; +const CompilationInfoRequestStatus = @import("main.zig").CompilationInfoRequestStatus; +const CompilationInfo = @import("main.zig").CompilationInfo; +const Impl = @import("interface.zig").Impl; +const dawn = @import("dawn.zig"); + +pub const ShaderModule = opaque { + pub const Descriptor = extern struct { + pub const NextInChain = extern union { + generic: ?*const ChainedStruct, + spirv_descriptor: ?*const SPIRVDescriptor, + wgsl_descriptor: ?*const WGSLDescriptor, + dawn_shader_module_spirv_options_descriptor: ?*const dawn.ShaderModuleSPIRVOptionsDescriptor, + }; + + next_in_chain: NextInChain = .{ .generic = null }, + label: ?[*:0]const u8 = null, + }; + + pub const SPIRVDescriptor = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .shader_module_spirv_descriptor }, + code_size: u32, + code: [*]const u32, + }; + + pub const WGSLDescriptor = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .shader_module_wgsl_descriptor }, + code: [*:0]const u8, + }; + + pub inline fn getCompilationInfo( + shader_module: *ShaderModule, + context: anytype, + comptime callback: fn ( + ctx: @TypeOf(context), + status: CompilationInfoRequestStatus, + compilation_info: *const CompilationInfo, + ) callconv(.Inline) void, + ) void { + const Context = @TypeOf(context); + const Helper = struct { + pub fn cCallback( + status: CompilationInfoRequestStatus, + compilation_info: *const CompilationInfo, + userdata: ?*anyopaque, + ) callconv(.C) void { + callback( + if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))), + status, + compilation_info, + ); + } + }; + Impl.shaderModuleGetCompilationInfo(shader_module, Helper.cCallback, if (Context == void) null else context); + } + + pub inline fn setLabel(shader_module: *ShaderModule, label: [*:0]const u8) void { + Impl.shaderModuleSetLabel(shader_module, label); + } + + pub inline fn reference(shader_module: *ShaderModule) void { + Impl.shaderModuleReference(shader_module); + } + + pub inline fn release(shader_module: *ShaderModule) void { + Impl.shaderModuleRelease(shader_module); + } +}; diff --git a/src/gpu/shared_fence.zig b/src/gpu/shared_fence.zig new file mode 100644 index 00000000..08d42e9c --- /dev/null +++ b/src/gpu/shared_fence.zig @@ -0,0 +1,91 @@ +const ChainedStruct = @import("main.zig").ChainedStruct; +const ChainedStructOut = @import("main.zig").ChainedStructOut; + +pub const SharedFence = opaque { + pub const Type = enum(u32) { + shared_fence_type_undefined = 0x00000000, + 
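// Each concrete type wraps one platform synchronization primitive: a Vulkan + // semaphore handle (opaque FD, sync FD, or Zircon handle), a DXGI shared handle, or an MTLSharedEvent. +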
shared_fence_type_vk_semaphore_opaque_fd = 0x00000001, + shared_fence_type_vk_semaphore_sync_fd = 0x00000002, + shared_fence_type_vk_semaphore_zircon_handle = 0x00000003, + shared_fence_type_dxgi_shared_handle = 0x00000004, + shared_fence_type_mtl_shared_event = 0x00000005, + }; + + pub const Descriptor = extern struct { + pub const NextInChain = extern union { + generic: ?*const ChainedStruct, + vk_semaphore_opaque_fd_descriptor: *const VkSemaphoreOpaqueFDDescriptor, + vk_semaphore_sync_fd_descriptor: *const VkSemaphoreSyncFDDescriptor, + vk_semaphore_zircon_handle_descriptor: *const VkSemaphoreZirconHandleDescriptor, + dxgi_shared_handle_descriptor: *const DXGISharedHandleDescriptor, + mtl_shared_event_descriptor: *const MTLSharedEventDescriptor, + }; + + next_in_chain: NextInChain = .{ .generic = null }, + label: ?[*:0]const u8 = null, + }; + + pub const DXGISharedHandleDescriptor = extern struct { + chain: ChainedStruct, + handle: *anyopaque, + }; + + pub const DXGISharedHandleExportInfo = extern struct { + chain: ChainedStructOut, + handle: *anyopaque, + }; + + pub const ExportInfo = extern struct { + pub const NextInChain = extern union { + generic: ?*const ChainedStructOut, + dxgi_shared_handle_export_info: *const DXGISharedHandleExportInfo, + mtl_shared_event_export_info: *const MTLSharedEventExportInfo, + vk_semaphore_opaque_fd_export_info: *const VkSemaphoreOpaqueFDExportInfo, + vk_semaphore_sync_fd_export_info: *const VkSemaphoreSyncFDExportInfo, + vk_semaphore_zircon_handle_export_info: *const VkSemaphoreZirconHandleExportInfo, + }; + + next_in_chain: NextInChain = .{ .generic = null }, + type: Type, + }; + + pub const MTLSharedEventDescriptor = extern struct { + chain: ChainedStruct, + shared_event: *anyopaque, + }; + + pub const MTLSharedEventExportInfo = extern struct { + chain: ChainedStructOut, + shared_event: *anyopaque, + }; + + pub const VkSemaphoreOpaqueFDDescriptor = extern struct { + chain: ChainedStruct, + handle: c_int, + }; + + pub const VkSemaphoreOpaqueFDExportInfo = extern struct { + chain: ChainedStructOut, + handle: c_int, + }; + + pub const VkSemaphoreSyncFDDescriptor = extern struct { + chain: ChainedStruct, + handle: c_int, + }; + + pub const VkSemaphoreSyncFDExportInfo = extern struct { + chain: ChainedStructOut, + handle: c_int, + }; + + pub const VkSemaphoreZirconHandleDescriptor = extern struct { + chain: ChainedStruct, + handle: u32, + }; + + pub const VkSemaphoreZirconHandleExportInfo = extern struct { + chain: ChainedStructOut, + handle: u32, + }; +}; diff --git a/src/gpu/shared_texture_memory.zig b/src/gpu/shared_texture_memory.zig new file mode 100644 index 00000000..89e59c7b --- /dev/null +++ b/src/gpu/shared_texture_memory.zig @@ -0,0 +1,124 @@ +const Texture = @import("texture.zig").Texture; +const Bool32 = @import("main.zig").Bool32; +const Extent3D = @import("main.zig").Extent3D; +const SharedFence = @import("shared_fence.zig").SharedFence; +const ChainedStruct = @import("main.zig").ChainedStruct; +const ChainedStructOut = @import("main.zig").ChainedStructOut; + +pub const SharedTextureMemory = opaque { + pub const Properties = extern struct { + next_in_chain: *const ChainedStruct, + usage: Texture.UsageFlags, + size: Extent3D, + format: Texture.Format, + }; + + pub const VkImageDescriptor = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_vk_image_descriptor }, + vk_format: i32, + vk_usage_flags: Texture.UsageFlags, + vk_extent3D: Extent3D, + }; + + pub const AHardwareBufferDescriptor = extern struct { +
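// `handle` is an AHardwareBuffer pointer imported as shareable texture memory (Android). +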
chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_a_hardware_buffer_descriptor }, + handle: *anyopaque, + }; + + pub const BeginAccessDescriptor = extern struct { + pub const NextInChain = extern union { + generic: ?*const ChainedStruct, + vk_image_layout_begin_state: *const VkImageLayoutBeginState, + }; + + next_in_chain: NextInChain = .{ .generic = null }, + initialized: Bool32, + fence_count: usize, + fences: [*]const *SharedFence, + signaled_values: [*]const u64, + }; + + pub const Descriptor = extern struct { + pub const NextInChain = extern union { + generic: ?*const ChainedStruct, + a_hardware_buffer_descriptor: *const AHardwareBufferDescriptor, + dma_buf_descriptor: *const DmaBufDescriptor, + dxgi_shared_handle_descriptor: *const DXGISharedHandleDescriptor, + egl_image_descriptor: *const EGLImageDescriptor, + io_surface_descriptor: *const IOSurfaceDescriptor, + opaque_fd_descriptor: *const OpaqueFDDescriptor, + vk_dedicated_allocation_descriptor: *const VkDedicatedAllocationDescriptor, + zircon_handle_descriptor: *const ZirconHandleDescriptor, + }; + + next_in_chain: NextInChain = .{ .generic = null }, + label: ?[*:0]const u8 = null, + }; + + pub const DmaBufDescriptor = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_dma_buf_descriptor }, + memory_fd: c_int, + allocation_size: u64, + drm_modifier: u64, + plane_count: usize, + plane_offsets: [*]const u64, + plane_strides: [*]const u32, + }; + + pub const DXGISharedHandleDescriptor = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_dxgi_shared_handle_descriptor }, + handle: *anyopaque, + }; + + pub const EGLImageDescriptor = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_egl_image_descriptor }, + image: *anyopaque, + }; + + pub const EndAccessState = extern struct { + pub const NextInChain = extern union { + generic: ?*const ChainedStruct, + vk_image_layout_end_state: *const VkImageLayoutEndState, + }; + + next_in_chain: NextInChain = .{ .generic = null }, + initialized: Bool32, + fence_count: usize, + fences: [*]const *SharedFence, + signaled_values: [*]const u64, + }; + + pub const IOSurfaceDescriptor = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_io_surface_descriptor }, + ioSurface: *anyopaque, + }; + + pub const OpaqueFDDescriptor = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_opaque_fd_descriptor }, + memory_fd: c_int, + allocation_size: u64, + }; + + pub const VkDedicatedAllocationDescriptor = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_vk_dedicated_allocation_descriptor }, + dedicated_allocation: Bool32, + }; + + pub const VkImageLayoutBeginState = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_vk_image_layout_begin_state }, + old_layout: i32, + new_layout: i32, + }; + + pub const VkImageLayoutEndState = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_vk_image_layout_end_state }, + old_layout: i32, + new_layout: i32, + }; + + pub const ZirconHandleDescriptor = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_zircon_handle_descriptor }, + memory_fd: u32, + allocation_size: u64, + }; +}; diff --git a/src/gpu/surface.zig b/src/gpu/surface.zig new file mode 100644 index 00000000..93655a18 --- /dev/null +++ b/src/gpu/surface.zig @@ -0,0
+1,72 @@ +const ChainedStruct = @import("main.zig").ChainedStruct; +const Impl = @import("interface.zig").Impl; + +pub const Surface = opaque { + pub const Descriptor = extern struct { + pub const NextInChain = extern union { + generic: ?*const ChainedStruct, + from_android_native_window: *const DescriptorFromAndroidNativeWindow, + from_canvas_html_selector: *const DescriptorFromCanvasHTMLSelector, + from_metal_layer: *const DescriptorFromMetalLayer, + from_wayland_surface: *const DescriptorFromWaylandSurface, + from_windows_core_window: *const DescriptorFromWindowsCoreWindow, + from_windows_hwnd: *const DescriptorFromWindowsHWND, + from_windows_swap_chain_panel: *const DescriptorFromWindowsSwapChainPanel, + from_xlib_window: *const DescriptorFromXlibWindow, + }; + + next_in_chain: NextInChain = .{ .generic = null }, + label: ?[*:0]const u8 = null, + }; + + pub const DescriptorFromAndroidNativeWindow = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .surface_descriptor_from_android_native_window }, + window: *anyopaque, + }; + + pub const DescriptorFromCanvasHTMLSelector = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .surface_descriptor_from_canvas_html_selector }, + selector: [*:0]const u8, + }; + + pub const DescriptorFromMetalLayer = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .surface_descriptor_from_metal_layer }, + layer: *anyopaque, + }; + + pub const DescriptorFromWaylandSurface = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .surface_descriptor_from_wayland_surface }, + display: *anyopaque, + surface: *anyopaque, + }; + + pub const DescriptorFromWindowsCoreWindow = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .surface_descriptor_from_windows_core_window }, + core_window: *anyopaque, + }; + + pub const DescriptorFromWindowsHWND = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .surface_descriptor_from_windows_hwnd }, + hinstance: *anyopaque, + hwnd: *anyopaque, + }; + + pub const DescriptorFromWindowsSwapChainPanel = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .surface_descriptor_from_windows_swap_chain_panel }, + swap_chain_panel: *anyopaque, + }; + + pub const DescriptorFromXlibWindow = extern struct { + chain: ChainedStruct = .{ .next = null, .s_type = .surface_descriptor_from_xlib_window }, + display: *anyopaque, + window: u32, + }; + + pub inline fn reference(surface: *Surface) void { + Impl.surfaceReference(surface); + } + + pub inline fn release(surface: *Surface) void { + Impl.surfaceRelease(surface); + } +}; diff --git a/src/gpu/swap_chain.zig b/src/gpu/swap_chain.zig new file mode 100644 index 00000000..e846a662 --- /dev/null +++ b/src/gpu/swap_chain.zig @@ -0,0 +1,37 @@ +const ChainedStruct = @import("main.zig").ChainedStruct; +const PresentMode = @import("main.zig").PresentMode; +const Texture = @import("texture.zig").Texture; +const TextureView = @import("texture_view.zig").TextureView; +const Impl = @import("interface.zig").Impl; + +pub const SwapChain = opaque { + pub const Descriptor = extern struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + usage: Texture.UsageFlags, + format: Texture.Format, + width: u32, + height: u32, + present_mode: PresentMode, + }; + + pub inline fn getCurrentTexture(swap_chain: *SwapChain) ?*Texture { + return Impl.swapChainGetCurrentTexture(swap_chain); + } + + pub inline fn getCurrentTextureView(swap_chain: *SwapChain) ?*TextureView { + return 
Impl.swapChainGetCurrentTextureView(swap_chain); + } + + pub inline fn present(swap_chain: *SwapChain) void { + Impl.swapChainPresent(swap_chain); + } + + pub inline fn reference(swap_chain: *SwapChain) void { + Impl.swapChainReference(swap_chain); + } + + pub inline fn release(swap_chain: *SwapChain) void { + Impl.swapChainRelease(swap_chain); + } +}; diff --git a/src/gpu/texture.zig b/src/gpu/texture.zig new file mode 100644 index 00000000..fa8a744e --- /dev/null +++ b/src/gpu/texture.zig @@ -0,0 +1,266 @@ +const std = @import("std"); +const Bool32 = @import("main.zig").Bool32; +const ChainedStruct = @import("main.zig").ChainedStruct; +const TextureView = @import("texture_view.zig").TextureView; +const Extent3D = @import("main.zig").Extent3D; +const Impl = @import("interface.zig").Impl; +const types = @import("main.zig"); +const dawn = @import("dawn.zig"); + +pub const Texture = opaque { + pub const Aspect = enum(u32) { + all = 0x00000000, + stencil_only = 0x00000001, + depth_only = 0x00000002, + plane0_only = 0x00000003, + plane1_only = 0x00000004, + }; + + pub const Dimension = enum(u32) { + dimension_1d = 0x00000000, + dimension_2d = 0x00000001, + dimension_3d = 0x00000002, + }; + + pub const Format = enum(u32) { + undefined = 0x00000000, + r8_unorm = 0x00000001, + r8_snorm = 0x00000002, + r8_uint = 0x00000003, + r8_sint = 0x00000004, + r16_uint = 0x00000005, + r16_sint = 0x00000006, + r16_float = 0x00000007, + rg8_unorm = 0x00000008, + rg8_snorm = 0x00000009, + rg8_uint = 0x0000000a, + rg8_sint = 0x0000000b, + r32_float = 0x0000000c, + r32_uint = 0x0000000d, + r32_sint = 0x0000000e, + rg16_uint = 0x0000000f, + rg16_sint = 0x00000010, + rg16_float = 0x00000011, + rgba8_unorm = 0x00000012, + rgba8_unorm_srgb = 0x00000013, + rgba8_snorm = 0x00000014, + rgba8_uint = 0x00000015, + rgba8_sint = 0x00000016, + bgra8_unorm = 0x00000017, + bgra8_unorm_srgb = 0x00000018, + rgb10_a2_unorm = 0x00000019, + rg11_b10_ufloat = 0x0000001a, + rgb9_e5_ufloat = 0x0000001b, + rg32_float = 0x0000001c, + rg32_uint = 0x0000001d, + rg32_sint = 0x0000001e, + rgba16_uint = 0x0000001f, + rgba16_sint = 0x00000020, + rgba16_float = 0x00000021, + rgba32_float = 0x00000022, + rgba32_uint = 0x00000023, + rgba32_sint = 0x00000024, + stencil8 = 0x00000025, + depth16_unorm = 0x00000026, + depth24_plus = 0x00000027, + depth24_plus_stencil8 = 0x00000028, + depth32_float = 0x00000029, + depth32_float_stencil8 = 0x0000002a, + bc1_rgba_unorm = 0x0000002b, + bc1_rgba_unorm_srgb = 0x0000002c, + bc2_rgba_unorm = 0x0000002d, + bc2_rgba_unorm_srgb = 0x0000002e, + bc3_rgba_unorm = 0x0000002f, + bc3_rgba_unorm_srgb = 0x00000030, + bc4_runorm = 0x00000031, + bc4_rsnorm = 0x00000032, + bc5_rg_unorm = 0x00000033, + bc5_rg_snorm = 0x00000034, + bc6_hrgb_ufloat = 0x00000035, + bc6_hrgb_float = 0x00000036, + bc7_rgba_unorm = 0x00000037, + bc7_rgba_unorm_srgb = 0x00000038, + etc2_rgb8_unorm = 0x00000039, + etc2_rgb8_unorm_srgb = 0x0000003a, + etc2_rgb8_a1_unorm = 0x0000003b, + etc2_rgb8_a1_unorm_srgb = 0x0000003c, + etc2_rgba8_unorm = 0x0000003d, + etc2_rgba8_unorm_srgb = 0x0000003e, + eacr11_unorm = 0x0000003f, + eacr11_snorm = 0x00000040, + eacrg11_unorm = 0x00000041, + eacrg11_snorm = 0x00000042, + astc4x4_unorm = 0x00000043, + astc4x4_unorm_srgb = 0x00000044, + astc5x4_unorm = 0x00000045, + astc5x4_unorm_srgb = 0x00000046, + astc5x5_unorm = 0x00000047, + astc5x5_unorm_srgb = 0x00000048, + astc6x5_unorm = 0x00000049, + astc6x5_unorm_srgb = 0x0000004a, + astc6x6_unorm = 0x0000004b, + astc6x6_unorm_srgb = 0x0000004c, + astc8x5_unorm = 
0x0000004d, + astc8x5_unorm_srgb = 0x0000004e, + astc8x6_unorm = 0x0000004f, + astc8x6_unorm_srgb = 0x00000050, + astc8x8_unorm = 0x00000051, + astc8x8_unorm_srgb = 0x00000052, + astc10x5_unorm = 0x00000053, + astc10x5_unorm_srgb = 0x00000054, + astc10x6_unorm = 0x00000055, + astc10x6_unorm_srgb = 0x00000056, + astc10x8_unorm = 0x00000057, + astc10x8_unorm_srgb = 0x00000058, + astc10x10_unorm = 0x00000059, + astc10x10_unorm_srgb = 0x0000005a, + astc12x10_unorm = 0x0000005b, + astc12x10_unorm_srgb = 0x0000005c, + astc12x12_unorm = 0x0000005d, + astc12x12_unorm_srgb = 0x0000005e, + r8_bg8_biplanar420_unorm = 0x0000005f, + }; + + pub const SampleType = enum(u32) { + undefined = 0x00000000, + float = 0x00000001, + unfilterable_float = 0x00000002, + depth = 0x00000003, + sint = 0x00000004, + uint = 0x00000005, + }; + + pub const UsageFlags = packed struct(u32) { + copy_src: bool = false, + copy_dst: bool = false, + texture_binding: bool = false, + storage_binding: bool = false, + render_attachment: bool = false, + transient_attachment: bool = false, + + _padding: u26 = 0, + + comptime { + std.debug.assert( + @sizeOf(@This()) == @sizeOf(u32) and + @bitSizeOf(@This()) == @bitSizeOf(u32), + ); + } + + pub const none = UsageFlags{}; + + pub fn equal(a: UsageFlags, b: UsageFlags) bool { + return @as(u6, @truncate(@as(u32, @bitCast(a)))) == @as(u6, @truncate(@as(u32, @bitCast(b)))); + } + }; + + pub const BindingLayout = extern struct { + next_in_chain: ?*const ChainedStruct = null, + sample_type: SampleType = .undefined, + view_dimension: TextureView.Dimension = .dimension_undefined, + multisampled: Bool32 = .false, + }; + + pub const DataLayout = extern struct { + next_in_chain: ?*const ChainedStruct = null, + offset: u64 = 0, + bytes_per_row: u32 = types.copy_stride_undefined, + rows_per_image: u32 = types.copy_stride_undefined, + }; + + pub const Descriptor = extern struct { + pub const NextInChain = extern union { + generic: ?*const ChainedStruct, + dawn_texture_internal_usage_descriptor: *const dawn.TextureInternalUsageDescriptor, + }; + + next_in_chain: NextInChain = .{ .generic = null }, + label: ?[*:0]const u8 = null, + usage: UsageFlags, + dimension: Dimension = .dimension_2d, + size: Extent3D, + format: Format, + mip_level_count: u32 = 1, + sample_count: u32 = 1, + view_format_count: usize = 0, + view_formats: ?[*]const Format = null, + + /// Provides a slightly friendlier Zig API to initialize this structure. 
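+ /// For example, a hypothetical 2D color texture (the sizes and format here are illustrative): + /// const desc = gpu.Texture.Descriptor.init(.{ .usage = .{ .texture_binding = true, .copy_dst = true }, .size = .{ .width = 256, .height = 256 }, .format = .rgba8_unorm });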
+ pub inline fn init(v: struct { + next_in_chain: NextInChain = .{ .generic = null }, + label: ?[*:0]const u8 = null, + usage: UsageFlags, + dimension: Dimension = .dimension_2d, + size: Extent3D, + format: Format, + mip_level_count: u32 = 1, + sample_count: u32 = 1, + view_formats: ?[]const Format = null, + }) Descriptor { + return .{ + .next_in_chain = v.next_in_chain, + .label = v.label, + .usage = v.usage, + .dimension = v.dimension, + .size = v.size, + .format = v.format, + .mip_level_count = v.mip_level_count, + .sample_count = v.sample_count, + .view_format_count = if (v.view_formats) |e| e.len else 0, + .view_formats = if (v.view_formats) |e| e.ptr else null, + }; + } + }; + + pub inline fn createView(texture: *Texture, descriptor: ?*const TextureView.Descriptor) *TextureView { + return Impl.textureCreateView(texture, descriptor); + } + + pub inline fn destroy(texture: *Texture) void { + Impl.textureDestroy(texture); + } + + pub inline fn getDepthOrArrayLayers(texture: *Texture) u32 { + return Impl.textureGetDepthOrArrayLayers(texture); + } + + pub inline fn getDimension(texture: *Texture) Dimension { + return Impl.textureGetDimension(texture); + } + + pub inline fn getFormat(texture: *Texture) Format { + return Impl.textureGetFormat(texture); + } + + pub inline fn getHeight(texture: *Texture) u32 { + return Impl.textureGetHeight(texture); + } + + pub inline fn getMipLevelCount(texture: *Texture) u32 { + return Impl.textureGetMipLevelCount(texture); + } + + pub inline fn getSampleCount(texture: *Texture) u32 { + return Impl.textureGetSampleCount(texture); + } + + pub inline fn getUsage(texture: *Texture) UsageFlags { + return Impl.textureGetUsage(texture); + } + + pub inline fn getWidth(texture: *Texture) u32 { + return Impl.textureGetWidth(texture); + } + + pub inline fn setLabel(texture: *Texture, label: [*:0]const u8) void { + Impl.textureSetLabel(texture, label); + } + + pub inline fn reference(texture: *Texture) void { + Impl.textureReference(texture); + } + + pub inline fn release(texture: *Texture) void { + Impl.textureRelease(texture); + } +}; diff --git a/src/gpu/texture_view.zig b/src/gpu/texture_view.zig new file mode 100644 index 00000000..15033275 --- /dev/null +++ b/src/gpu/texture_view.zig @@ -0,0 +1,40 @@ +const ChainedStruct = @import("main.zig").ChainedStruct; +const Texture = @import("texture.zig").Texture; +const Impl = @import("interface.zig").Impl; +const types = @import("main.zig"); + +pub const TextureView = opaque { + pub const Dimension = enum(u32) { + dimension_undefined = 0x00000000, + dimension_1d = 0x00000001, + dimension_2d = 0x00000002, + dimension_2d_array = 0x00000003, + dimension_cube = 0x00000004, + dimension_cube_array = 0x00000005, + dimension_3d = 0x00000006, + }; + + pub const Descriptor = extern struct { + next_in_chain: ?*const ChainedStruct = null, + label: ?[*:0]const u8 = null, + format: Texture.Format = .undefined, + dimension: Dimension = .dimension_undefined, + base_mip_level: u32 = 0, + mip_level_count: u32 = types.mip_level_count_undefined, + base_array_layer: u32 = 0, + array_layer_count: u32 = types.array_layer_count_undefined, + aspect: Texture.Aspect = .all, + }; + + pub inline fn setLabel(texture_view: *TextureView, label: [*:0]const u8) void { + Impl.textureViewSetLabel(texture_view, label); + } + + pub inline fn reference(texture_view: *TextureView) void { + Impl.textureViewReference(texture_view); + } + + pub inline fn release(texture_view: *TextureView) void { + Impl.textureViewRelease(texture_view); + } +};
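
Taken together, the encoder, pipeline, and texture types above compose a frame roughly as in the following minimal sketch. It assumes `device`, `back_buffer_view` (obtained from a swap chain), and `pipeline` were created elsewhere via the Device/SwapChain APIs added earlier in this patch; `renderFrame` itself is an illustrative helper, not part of the patch:

const gpu = @import("mach").gpu;

fn renderFrame(device: *gpu.Device, back_buffer_view: *gpu.TextureView, pipeline: *gpu.RenderPipeline) void {
    // Record a single render pass that clears the back buffer and draws one triangle.
    const encoder = device.createCommandEncoder(null);
    const pass = encoder.beginRenderPass(&gpu.RenderPassDescriptor.init(.{
        .color_attachments = &.{.{
            .view = back_buffer_view,
            .load_op = .clear,
            .store_op = .store,
            .clear_value = .{ .r = 0.0, .g = 0.0, .b = 0.0, .a = 1.0 },
        }},
    }));
    pass.setPipeline(pipeline);
    pass.draw(3, 1, 0, 0); // 3 vertices, 1 instance, no offsets
    pass.end();
    pass.release();

    // Submit the recorded commands, then release the transient objects.
    const commands = encoder.finish(null);
    encoder.release();
    device.getQueue().submit(&.{commands});
    commands.release();
}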