gpu: update to latest webgpu.h API
Signed-off-by: Stephen Gutekanst <stephen@hexops.com>
parent 74067bebed
commit fdd3270a0f
22 changed files with 201 additions and 161 deletions
@@ -127,8 +127,7 @@ And, to initialize data structures with slices in them, the following helpers are

 * `BindGroupLayout.Descriptor.init`
 * `BindGroup.Descriptor.init`
-* `InstanceDescriptor.init`
-* `TogglesDeviceDescriptor.init`
+* `dawn.TogglesDescriptor.init`
 * `Device.Descriptor.init`
 * `PipelineLayout.Descriptor.init`
 * `QuerySet.Descriptor.init`
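A minimal sketch of how one of these slice-taking helpers is used, assuming the package is imported as `gpu`, that `device`, `layout`, and `buffer` handles already exist, and that the `Entry.buffer` convenience constructor is available:

```zig
const gpu = @import("gpu");

// Illustrative only: init() takes a Zig slice and fills in the
// entry_count/entries pair of the extern struct for you.
fn makeBindGroup(device: *gpu.Device, layout: *gpu.BindGroupLayout, buf: *gpu.Buffer) *gpu.BindGroup {
    return device.createBindGroup(&gpu.BindGroup.Descriptor.init(.{
        .layout = layout,
        .entries = &.{
            gpu.BindGroup.Entry.buffer(0, buf, 0, 256),
        },
    }));
}
```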
@@ -3,6 +3,7 @@ const testing = std.testing;
 const dawn = @import("dawn.zig");
 const ChainedStructOut = @import("main.zig").ChainedStructOut;
 const Device = @import("device.zig").Device;
+const Instance = @import("instance.zig").Instance;
 const FeatureName = @import("main.zig").FeatureName;
 const SupportedLimits = @import("main.zig").SupportedLimits;
 const RequestDeviceStatus = @import("main.zig").RequestDeviceStatus;

@@ -42,6 +43,7 @@ pub const Adapter = opaque {
 driver_description: [*:0]const u8,
 adapter_type: Type,
 backend_type: BackendType,
+compatibility_mode: bool = false,
 };

 pub inline fn createDevice(adapter: *Adapter, descriptor: ?*const Device.Descriptor) ?*Device {

@@ -64,6 +66,10 @@ pub const Adapter = opaque {
 return data;
 }

+pub inline fn getInstance(adapter: *Adapter) *Instance {
+return Impl.adapterGetInstance(adapter);
+}
+
 pub inline fn getLimits(adapter: *Adapter, limits: *SupportedLimits) bool {
 return Impl.adapterGetLimits(adapter, limits);
 }
@@ -54,7 +54,7 @@ pub const BindGroup = opaque {
 next_in_chain: ?*const ChainedStruct = null,
 label: ?[*:0]const u8 = null,
 layout: *BindGroupLayout,
-entry_count: u32 = 0,
+entry_count: usize = 0,
 entries: ?[*]const Entry = null,

 /// Provides a slightly friendlier Zig API to initialize this structure.

@@ -68,7 +68,7 @@ pub const BindGroup = opaque {
 .next_in_chain = v.next_in_chain,
 .label = v.label,
 .layout = v.layout,
-.entry_count = if (v.entries) |e| @intCast(u32, e.len) else 0,
+.entry_count = if (v.entries) |e| e.len else 0,
 .entries = if (v.entries) |e| e.ptr else null,
 };
 }

@@ -98,7 +98,7 @@ pub const BindGroupLayout = opaque {
 pub const Descriptor = extern struct {
 next_in_chain: ?*const ChainedStruct = null,
 label: ?[*:0]const u8 = null,
-entry_count: u32 = 0,
+entry_count: usize = 0,
 entries: ?[*]const Entry = null,

 /// Provides a slightly friendlier Zig API to initialize this structure.

@@ -110,7 +110,7 @@ pub const BindGroupLayout = opaque {
 return .{
 .next_in_chain = v.next_in_chain,
 .label = v.label,
-.entry_count = if (v.entries) |e| @intCast(u32, e.len) else 0,
+.entry_count = if (v.entries) |e| e.len else 0,
 .entries = if (v.entries) |e| e.ptr else null,
 };
 }
@@ -22,11 +22,14 @@ pub const Buffer = opaque {

 pub const MapAsyncStatus = enum(u32) {
 success = 0x00000000,
-err = 0x00000001,
+validation_error = 0x00000001,
 unknown = 0x00000002,
 device_lost = 0x00000003,
 destroyed_before_callback = 0x00000004,
 unmapped_before_callback = 0x00000005,
+mapping_already_pending = 0x00000006,
+offset_out_of_range = 0x00000007,
+size_out_of_range = 0x00000008,
 };

 pub const UsageFlags = packed struct(u32) {
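A sketch of a `mapAsync` callback that accounts for the renamed and newly added status values; the surrounding callback plumbing is illustrative:

```zig
const std = @import("std");
const gpu = @import("gpu");

// Illustrative handler: only .success means the mapped range may be read.
fn onBufferMapped(status: gpu.Buffer.MapAsyncStatus) void {
    switch (status) {
        .success => {},
        .validation_error => std.log.err("mapAsync: validation error", .{}),
        .mapping_already_pending,
        .offset_out_of_range,
        .size_out_of_range,
        => std.log.err("mapAsync: bad map request ({s})", .{@tagName(status)}),
        else => std.log.err("mapAsync failed ({s})", .{@tagName(status)}),
    }
}
```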
@@ -37,7 +37,7 @@ pub const ComputePassEncoder = opaque {
 compute_pass_encoder,
 group_index,
 group,
-if (dynamic_offsets) |v| @intCast(u32, v.len) else 0,
+if (dynamic_offsets) |v| v.len else 0,
 if (dynamic_offsets) |v| v.ptr else null,
 );
 }
@@ -14,52 +14,39 @@ pub const EncoderInternalUsageDescriptor = extern struct {
 use_internal_usages: bool = false,
 };

-pub const InstanceDescriptor = extern struct {
-chain: ChainedStruct = .{ .next = null, .s_type = .dawn_instance_descriptor },
-additional_runtime_search_paths_count: u32 = 0,
-additional_runtime_search_paths: ?[*]const [*:0]const u8 = null,
-
-/// Provides a slightly friendlier Zig API to initialize this structure.
-pub inline fn init(v: struct {
-chain: ChainedStruct = .{ .next = null, .s_type = .dawn_instance_descriptor },
-additional_runtime_search_paths: ?[]const [*:0]const u8 = null,
-}) InstanceDescriptor {
-return .{
-.chain = v.chain,
-.additional_runtime_search_paths_count = if (v.additional_runtime_search_paths) |e| @intCast(u32, e.len) else 0,
-.additional_runtime_search_paths = if (v.additional_runtime_search_paths) |e| e.ptr else null,
-};
-}
-};
-
 pub const TextureInternalUsageDescriptor = extern struct {
 chain: ChainedStruct = .{ .next = null, .s_type = .dawn_texture_internal_usage_descriptor },
 internal_usage: Texture.UsageFlags = Texture.UsageFlags.none,
 };

-pub const TogglesDeviceDescriptor = extern struct {
-chain: ChainedStruct = .{ .next = null, .s_type = .dawn_toggles_device_descriptor },
-force_enabled_toggles_count: u32 = 0,
-force_enabled_toggles: ?[*]const [*:0]const u8 = null,
-force_disabled_toggles_count: u32 = 0,
-force_disabled_toggles: ?[*]const [*:0]const u8 = null,
+pub const TogglesDescriptor = extern struct {
+chain: ChainedStruct = .{ .next = null, .s_type = .dawn_toggles_descriptor },
+enabled_toggles_count: usize = 0,
+enabled_toggles: ?[*]const [*:0]const u8 = null,
+disabled_toggles_count: usize = 0,
+disabled_toggles: ?[*]const [*:0]const u8 = null,

 /// Provides a slightly friendlier Zig API to initialize this structure.
 pub inline fn init(v: struct {
-chain: ChainedStruct = .{ .next = null, .s_type = .dawn_toggles_device_descriptor },
-force_enabled_toggles: ?[]const [*:0]const u8 = null,
-force_disabled_toggles: ?[]const [*:0]const u8 = null,
-}) TogglesDeviceDescriptor {
+chain: ChainedStruct = .{ .next = null, .s_type = .dawn_toggles_descriptor },
+enabled_toggles: ?[]const [*:0]const u8 = null,
+disabled_toggles: ?[]const [*:0]const u8 = null,
+}) TogglesDescriptor {
 return .{
 .chain = v.chain,
-.force_enabled_toggles_count = if (v.force_enabled_toggles) |e| @intCast(u32, e.len) else 0,
-.force_enabled_toggles = if (v.force_enabled_toggles) |e| e.ptr else null,
-.force_disabled_toggles_count = if (v.force_disabled_toggles) |e| @intCast(u32, e.len) else 0,
-.force_disabled_toggles = if (v.force_disabled_toggles) |e| e.ptr else null,
+.enabled_toggles_count = if (v.enabled_toggles) |e| e.len else 0,
+.enabled_toggles = if (v.enabled_toggles) |e| e.ptr else null,
+.disabled_toggles_count = if (v.disabled_toggles) |e| e.len else 0,
+.disabled_toggles = if (v.disabled_toggles) |e| e.ptr else null,
 };
 }
 };

+pub const ShaderModuleSPIRVOptionsDescriptor = extern struct {
+chain: ChainedStruct = .{ .next = null, .s_type = .dawn_shader_module_spirv_options_descriptor },
+allow_non_uniform_derivatives: bool = false,
+};
+
 pub const AdapterPropertiesPowerPreference = extern struct {
 chain: ChainedStructOut = .{
 .next = null,
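A sketch of chaining the renamed struct when creating an instance; it assumes the package is imported as `gpu`, re-exports `dawn`, and provides `createInstance`, and the toggle name is only an example:

```zig
const gpu = @import("gpu");

// Illustrative only: TogglesDescriptor.init fills enabled_toggles_count /
// disabled_toggles_count from the slices you pass.
fn createInstanceWithToggles() ?*gpu.Instance {
    const toggles = gpu.dawn.TogglesDescriptor.init(.{
        .enabled_toggles = &.{"allow_unsafe_apis"},
    });
    return gpu.createInstance(&gpu.Instance.Descriptor{
        .next_in_chain = .{ .dawn_toggles_descriptor = &toggles },
    });
}
```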
@@ -46,6 +46,12 @@ pub const Interface = struct {
 );
 }

+pub inline fn adapterGetInstance(adapter: *gpu.Adapter) *gpu.Instance {
+return @ptrCast(*gpu.Instance, procs.adapterGetInstance.?(
+@ptrCast(c.WGPUAdapter, adapter),
+));
+}
+
 pub inline fn adapterGetLimits(adapter: *gpu.Adapter, limits: *gpu.SupportedLimits) bool {
 return procs.adapterGetLimits.?(
 @ptrCast(c.WGPUAdapter, adapter),

@@ -363,7 +369,7 @@ pub const Interface = struct {
 );
 }

-pub inline fn computePassEncoderSetBindGroup(compute_pass_encoder: *gpu.ComputePassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: u32, dynamic_offsets: ?[*]const u32) void {
+pub inline fn computePassEncoderSetBindGroup(compute_pass_encoder: *gpu.ComputePassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void {
 procs.computePassEncoderSetBindGroup.?(
 @ptrCast(c.WGPUComputePassEncoder, compute_pass_encoder),
 group_index,

@@ -605,8 +611,8 @@ pub const Interface = struct {
 );
 }

-pub inline fn devicePopErrorScope(device: *gpu.Device, callback: gpu.ErrorCallback, userdata: ?*anyopaque) bool {
-return procs.devicePopErrorScope.?(
+pub inline fn devicePopErrorScope(device: *gpu.Device, callback: gpu.ErrorCallback, userdata: ?*anyopaque) void {
+procs.devicePopErrorScope.?(
 @ptrCast(c.WGPUDevice, device),
 @ptrCast(c.WGPUErrorCallback, callback),
 userdata,

@@ -687,6 +693,12 @@ pub const Interface = struct {
 ));
 }

+pub inline fn instanceProcessEvents(instance: *gpu.Instance) void {
+procs.instanceProcessEvents.?(
+@ptrCast(c.WGPUInstance, instance),
+);
+}
+
 pub inline fn instanceRequestAdapter(instance: *gpu.Instance, options: ?*const gpu.RequestAdapterOptions, callback: gpu.RequestAdapterCallback, userdata: ?*anyopaque) void {
 procs.instanceRequestAdapter.?(
 @ptrCast(c.WGPUInstance, instance),

@@ -773,7 +785,7 @@ pub const Interface = struct {
 procs.queueSetLabel.?(@ptrCast(c.WGPUQueue, queue), label);
 }

-pub inline fn queueSubmit(queue: *gpu.Queue, command_count: u32, commands: [*]const *const gpu.CommandBuffer) void {
+pub inline fn queueSubmit(queue: *gpu.Queue, command_count: usize, commands: [*]const *const gpu.CommandBuffer) void {
 procs.queueSubmit.?(
 @ptrCast(c.WGPUQueue, queue),
 command_count,

@@ -810,6 +822,10 @@ pub const Interface = struct {
 procs.queueRelease.?(@ptrCast(c.WGPUQueue, queue));
 }

+pub inline fn renderBundleSetLabel(render_bundle: *gpu.RenderBundle, label: [*:0]const u8) void {
+procs.renderBundleSetLabel.?(@ptrCast(c.WGPURenderBundle, render_bundle), label);
+}
+
 pub inline fn renderBundleReference(render_bundle: *gpu.RenderBundle) void {
 procs.renderBundleReference.?(@ptrCast(c.WGPURenderBundle, render_bundle));
 }

@@ -871,7 +887,7 @@ pub const Interface = struct {
 procs.renderBundleEncoderPushDebugGroup.?(@ptrCast(c.WGPURenderBundleEncoder, render_bundle_encoder), group_label);
 }

-pub inline fn renderBundleEncoderSetBindGroup(render_bundle_encoder: *gpu.RenderBundleEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: u32, dynamic_offsets: ?[*]const u32) void {
+pub inline fn renderBundleEncoderSetBindGroup(render_bundle_encoder: *gpu.RenderBundleEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void {
 procs.renderBundleEncoderSetBindGroup.?(
 @ptrCast(c.WGPURenderBundleEncoder, render_bundle_encoder),
 group_index,

@@ -972,7 +988,7 @@ pub const Interface = struct {
 procs.renderPassEncoderEndOcclusionQuery.?(@ptrCast(c.WGPURenderPassEncoder, render_pass_encoder));
 }

-pub inline fn renderPassEncoderExecuteBundles(render_pass_encoder: *gpu.RenderPassEncoder, bundles_count: u32, bundles: [*]const *const gpu.RenderBundle) void {
+pub inline fn renderPassEncoderExecuteBundles(render_pass_encoder: *gpu.RenderPassEncoder, bundles_count: usize, bundles: [*]const *const gpu.RenderBundle) void {
 procs.renderPassEncoderExecuteBundles.?(
 @ptrCast(c.WGPURenderPassEncoder, render_pass_encoder),
 bundles_count,

@@ -995,7 +1011,7 @@ pub const Interface = struct {
 );
 }

-pub inline fn renderPassEncoderSetBindGroup(render_pass_encoder: *gpu.RenderPassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: u32, dynamic_offsets: ?[*]const u32) void {
+pub inline fn renderPassEncoderSetBindGroup(render_pass_encoder: *gpu.RenderPassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void {
 procs.renderPassEncoderSetBindGroup.?(
 @ptrCast(c.WGPURenderPassEncoder, render_pass_encoder),
 group_index,

@@ -1147,14 +1163,8 @@ pub const Interface = struct {
 procs.surfaceRelease.?(@ptrCast(c.WGPUSurface, surface));
 }

-pub inline fn swapChainConfigure(swap_chain: *gpu.SwapChain, format: gpu.Texture.Format, allowed_usage: gpu.Texture.UsageFlags, width: u32, height: u32) void {
-procs.swapChainConfigure.?(
-@ptrCast(c.WGPUSwapChain, swap_chain),
-@enumToInt(format),
-@bitCast(c.WGPUTextureUsageFlags, allowed_usage),
-width,
-height,
-);
+pub inline fn swapChainGetCurrentTexture(swap_chain: *gpu.SwapChain) ?*gpu.Texture {
+return @ptrCast(?*gpu.Texture, procs.swapChainGetCurrentTexture.?(@ptrCast(c.WGPUSwapChain, swap_chain)));
 }

 pub inline fn swapChainGetCurrentTextureView(swap_chain: *gpu.SwapChain) ?*gpu.TextureView {
@@ -46,16 +46,18 @@ pub const Device = opaque {
 pub const Descriptor = extern struct {
 pub const NextInChain = extern union {
 generic: ?*const ChainedStruct,
-dawn_toggles_device_descriptor: *const dawn.TogglesDeviceDescriptor,
+dawn_toggles_descriptor: *const dawn.TogglesDescriptor,
 dawn_cache_device_descriptor: *const dawn.CacheDeviceDescriptor,
 };

 next_in_chain: NextInChain = .{ .generic = null },
 label: ?[*:0]const u8 = null,
-required_features_count: u32 = 0,
+required_features_count: usize = 0,
 required_features: ?[*]const FeatureName = null,
 required_limits: ?*const RequiredLimits = null,
 default_queue: Queue.Descriptor = Queue.Descriptor{},
+device_lost_callback: LostCallback,
+device_lost_userdata: ?*anyopaque,

 /// Provides a slightly friendlier Zig API to initialize this structure.
 pub inline fn init(v: struct {

@@ -68,7 +70,7 @@ pub const Device = opaque {
 return .{
 .next_in_chain = v.next_in_chain,
 .label = v.label,
-.required_features_count = if (v.required_features) |e| @intCast(u32, e.len) else 0,
+.required_features_count = if (v.required_features) |e| e.len else 0,
 .required_features = if (v.required_features) |e| e.ptr else null,
 .default_queue = v.default_queue,
 };

@@ -199,11 +201,11 @@ pub const Device = opaque {
 pub inline fn createShaderModuleWGSL(
 device: *Device,
 label: ?[*:0]const u8,
-wgsl_source: [*:0]const u8,
+wgsl_code: [*:0]const u8,
 ) *ShaderModule {
 return device.createShaderModule(&ShaderModule.Descriptor{
 .next_in_chain = .{ .wgsl_descriptor = &.{
-.source = wgsl_source,
+.code = wgsl_code,
 } },
 .label = label,
 });
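Call sites are unchanged by the `wgsl_source` to `wgsl_code` rename; a minimal usage sketch, with an illustrative label and embedded file name:

```zig
const gpu = @import("gpu");

fn loadShader(device: *gpu.Device) *gpu.ShaderModule {
    // Embeds the WGSL text at compile time and hands it to Dawn as `code`.
    return device.createShaderModuleWGSL("triangle shader", @embedFile("shader.wgsl"));
}
```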
@@ -265,14 +267,14 @@ pub const Device = opaque {
 device: *Device,
 context: anytype,
 comptime callback: fn (ctx: @TypeOf(context), typ: ErrorType, message: [*:0]const u8) callconv(.Inline) void,
-) bool {
+) void {
 const Context = @TypeOf(context);
 const Helper = struct {
 pub fn cCallback(typ: ErrorType, message: [*:0]const u8, userdata: ?*anyopaque) callconv(.C) void {
 callback(if (Context == void) {} else @ptrCast(Context, @alignCast(@alignOf(std.meta.Child(Context)), userdata)), typ, message);
 }
 };
-return Impl.devicePopErrorScope(device, Helper.cCallback, if (Context == void) null else context);
+Impl.devicePopErrorScope(device, Helper.cCallback, if (Context == void) null else context);
 }

 pub inline fn pushErrorScope(device: *Device, filter: ErrorFilter) void {
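Because the result now arrives only via the callback, a caller sketch looks like the following; the logging and the void context are illustrative:

```zig
const std = @import("std");
const gpu = @import("gpu");

fn checkValidation(device: *gpu.Device) void {
    device.pushErrorScope(.validation);
    // ... record work that might trigger validation errors ...
    device.popErrorScope({}, struct {
        pub inline fn callback(_: void, typ: gpu.ErrorType, message: [*:0]const u8) void {
            if (typ != .no_error) std.log.err("validation error: {s}", .{message});
        }
    }.callback);
}
```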
@@ -11,7 +11,7 @@ pub const Instance = opaque {
 pub const Descriptor = extern struct {
 pub const NextInChain = extern union {
 generic: ?*const ChainedStruct,
-dawn_instance_descriptor: *const dawn.InstanceDescriptor,
+dawn_toggles_descriptor: *const dawn.TogglesDescriptor,
 };

 next_in_chain: NextInChain = .{ .generic = null },

@@ -21,6 +21,10 @@ pub const Instance = opaque {
 return Impl.instanceCreateSurface(instance, descriptor);
 }

+pub inline fn processEvents(instance: *Instance) void {
+Impl.instanceProcessEvents(instance);
+}
+
 pub inline fn requestAdapter(
 instance: *Instance,
 options: ?*const RequestAdapterOptions,
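A sketch of the polling loop this hook enables while waiting for asynchronous callbacks (such as `requestAdapter` or `mapAsync`); the `done` flag is illustrative:

```zig
const gpu = @import("gpu");

// Pump Dawn's pending callbacks until some async operation flips `done`.
fn waitForCallback(instance: *gpu.Instance, done: *const bool) void {
    while (!done.*) {
        instance.processEvents();
    }
}
```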
@@ -21,6 +21,7 @@ pub fn Interface(comptime T: type) type {
 assertDecl(T, "getProcAddress", fn (device: *gpu.Device, proc_name: [*:0]const u8) callconv(.Inline) ?gpu.Proc);
 assertDecl(T, "adapterCreateDevice", fn (adapter: *gpu.Adapter, descriptor: ?*const gpu.Device.Descriptor) callconv(.Inline) ?*gpu.Device);
 assertDecl(T, "adapterEnumerateFeatures", fn (adapter: *gpu.Adapter, features: ?[*]gpu.FeatureName) callconv(.Inline) usize);
+assertDecl(T, "adapterGetInstance", fn (adapter: *gpu.Adapter) callconv(.Inline) *gpu.Instance);
 assertDecl(T, "adapterGetLimits", fn (adapter: *gpu.Adapter, limits: *gpu.SupportedLimits) callconv(.Inline) bool);
 assertDecl(T, "adapterGetProperties", fn (adapter: *gpu.Adapter, properties: *gpu.Adapter.Properties) callconv(.Inline) void);
 assertDecl(T, "adapterHasFeature", fn (adapter: *gpu.Adapter, feature: gpu.FeatureName) callconv(.Inline) bool);

@@ -71,7 +72,7 @@ pub fn Interface(comptime T: type) type {
 assertDecl(T, "computePassEncoderInsertDebugMarker", fn (compute_pass_encoder: *gpu.ComputePassEncoder, marker_label: [*:0]const u8) callconv(.Inline) void);
 assertDecl(T, "computePassEncoderPopDebugGroup", fn (compute_pass_encoder: *gpu.ComputePassEncoder) callconv(.Inline) void);
 assertDecl(T, "computePassEncoderPushDebugGroup", fn (compute_pass_encoder: *gpu.ComputePassEncoder, group_label: [*:0]const u8) callconv(.Inline) void);
-assertDecl(T, "computePassEncoderSetBindGroup", fn (compute_pass_encoder: *gpu.ComputePassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: u32, dynamic_offsets: ?[*]const u32) callconv(.Inline) void);
+assertDecl(T, "computePassEncoderSetBindGroup", fn (compute_pass_encoder: *gpu.ComputePassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) callconv(.Inline) void);
 assertDecl(T, "computePassEncoderSetLabel", fn (compute_pass_encoder: *gpu.ComputePassEncoder, label: [*:0]const u8) callconv(.Inline) void);
 assertDecl(T, "computePassEncoderSetPipeline", fn (compute_pass_encoder: *gpu.ComputePassEncoder, pipeline: *gpu.ComputePipeline) callconv(.Inline) void);
 assertDecl(T, "computePassEncoderWriteTimestamp", fn (compute_pass_encoder: *gpu.ComputePassEncoder, query_set: *gpu.QuerySet, query_index: u32) callconv(.Inline) void);

@@ -108,7 +109,7 @@ pub fn Interface(comptime T: type) type {
 assertDecl(T, "deviceGetQueue", fn (device: *gpu.Device) callconv(.Inline) *gpu.Queue);
 assertDecl(T, "deviceHasFeature", fn (device: *gpu.Device, feature: gpu.FeatureName) callconv(.Inline) bool);
 assertDecl(T, "deviceInjectError", fn (device: *gpu.Device, typ: gpu.ErrorType, message: [*:0]const u8) callconv(.Inline) void);
-assertDecl(T, "devicePopErrorScope", fn (device: *gpu.Device, callback: gpu.ErrorCallback, userdata: ?*anyopaque) callconv(.Inline) bool);
+assertDecl(T, "devicePopErrorScope", fn (device: *gpu.Device, callback: gpu.ErrorCallback, userdata: ?*anyopaque) callconv(.Inline) void);
 assertDecl(T, "devicePushErrorScope", fn (device: *gpu.Device, filter: gpu.ErrorFilter) callconv(.Inline) void);
 assertDecl(T, "deviceSetDeviceLostCallback", fn (device: *gpu.Device, callback: ?gpu.Device.LostCallback, userdata: ?*anyopaque) callconv(.Inline) void);
 assertDecl(T, "deviceSetLabel", fn (device: *gpu.Device, label: [*:0]const u8) callconv(.Inline) void);

@@ -122,6 +123,7 @@ pub fn Interface(comptime T: type) type {
 assertDecl(T, "externalTextureReference", fn (external_texture: *gpu.ExternalTexture) callconv(.Inline) void);
 assertDecl(T, "externalTextureRelease", fn (external_texture: *gpu.ExternalTexture) callconv(.Inline) void);
 assertDecl(T, "instanceCreateSurface", fn (instance: *gpu.Instance, descriptor: *const gpu.Surface.Descriptor) callconv(.Inline) *gpu.Surface);
+assertDecl(T, "instanceProcessEvents", fn (instance: *gpu.Instance) callconv(.Inline) void);
 assertDecl(T, "instanceRequestAdapter", fn (instance: *gpu.Instance, options: ?*const gpu.RequestAdapterOptions, callback: gpu.RequestAdapterCallback, userdata: ?*anyopaque) callconv(.Inline) void);
 assertDecl(T, "instanceReference", fn (instance: *gpu.Instance) callconv(.Inline) void);
 assertDecl(T, "instanceRelease", fn (instance: *gpu.Instance) callconv(.Inline) void);

@@ -137,11 +139,12 @@ pub fn Interface(comptime T: type) type {
 assertDecl(T, "queueCopyTextureForBrowser", fn (queue: *gpu.Queue, source: *const gpu.ImageCopyTexture, destination: *const gpu.ImageCopyTexture, copy_size: *const gpu.Extent3D, options: *const gpu.CopyTextureForBrowserOptions) callconv(.Inline) void);
 assertDecl(T, "queueOnSubmittedWorkDone", fn (queue: *gpu.Queue, signal_value: u64, callback: gpu.Queue.WorkDoneCallback, userdata: ?*anyopaque) callconv(.Inline) void);
 assertDecl(T, "queueSetLabel", fn (queue: *gpu.Queue, label: [*:0]const u8) callconv(.Inline) void);
-assertDecl(T, "queueSubmit", fn (queue: *gpu.Queue, command_count: u32, commands: [*]const *const gpu.CommandBuffer) callconv(.Inline) void);
+assertDecl(T, "queueSubmit", fn (queue: *gpu.Queue, command_count: usize, commands: [*]const *const gpu.CommandBuffer) callconv(.Inline) void);
 assertDecl(T, "queueWriteBuffer", fn (queue: *gpu.Queue, buffer: *gpu.Buffer, buffer_offset: u64, data: *const anyopaque, size: usize) callconv(.Inline) void);
 assertDecl(T, "queueWriteTexture", fn (queue: *gpu.Queue, destination: *const gpu.ImageCopyTexture, data: *const anyopaque, data_size: usize, data_layout: *const gpu.Texture.DataLayout, write_size: *const gpu.Extent3D) callconv(.Inline) void);
 assertDecl(T, "queueReference", fn (queue: *gpu.Queue) callconv(.Inline) void);
 assertDecl(T, "queueRelease", fn (queue: *gpu.Queue) callconv(.Inline) void);
+assertDecl(T, "renderBundleSetLabel", fn (render_bundle: *gpu.RenderBundle, label: [*:0]const u8) callconv(.Inline) void);
 assertDecl(T, "renderBundleReference", fn (render_bundle: *gpu.RenderBundle) callconv(.Inline) void);
 assertDecl(T, "renderBundleRelease", fn (render_bundle: *gpu.RenderBundle) callconv(.Inline) void);
 assertDecl(T, "renderBundleEncoderDraw", fn (render_bundle_encoder: *gpu.RenderBundleEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) callconv(.Inline) void);

@@ -152,7 +155,7 @@ pub fn Interface(comptime T: type) type {
 assertDecl(T, "renderBundleEncoderInsertDebugMarker", fn (render_bundle_encoder: *gpu.RenderBundleEncoder, marker_label: [*:0]const u8) callconv(.Inline) void);
 assertDecl(T, "renderBundleEncoderPopDebugGroup", fn (render_bundle_encoder: *gpu.RenderBundleEncoder) callconv(.Inline) void);
 assertDecl(T, "renderBundleEncoderPushDebugGroup", fn (render_bundle_encoder: *gpu.RenderBundleEncoder, group_label: [*:0]const u8) callconv(.Inline) void);
-assertDecl(T, "renderBundleEncoderSetBindGroup", fn (render_bundle_encoder: *gpu.RenderBundleEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: u32, dynamic_offsets: ?[*]const u32) callconv(.Inline) void);
+assertDecl(T, "renderBundleEncoderSetBindGroup", fn (render_bundle_encoder: *gpu.RenderBundleEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) callconv(.Inline) void);
 assertDecl(T, "renderBundleEncoderSetIndexBuffer", fn (render_bundle_encoder: *gpu.RenderBundleEncoder, buffer: *gpu.Buffer, format: gpu.IndexFormat, offset: u64, size: u64) callconv(.Inline) void);
 assertDecl(T, "renderBundleEncoderSetLabel", fn (render_bundle_encoder: *gpu.RenderBundleEncoder, label: [*:0]const u8) callconv(.Inline) void);
 assertDecl(T, "renderBundleEncoderSetPipeline", fn (render_bundle_encoder: *gpu.RenderBundleEncoder, pipeline: *gpu.RenderPipeline) callconv(.Inline) void);

@@ -166,11 +169,11 @@ pub fn Interface(comptime T: type) type {
 assertDecl(T, "renderPassEncoderDrawIndirect", fn (render_pass_encoder: *gpu.RenderPassEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) callconv(.Inline) void);
 assertDecl(T, "renderPassEncoderEnd", fn (render_pass_encoder: *gpu.RenderPassEncoder) callconv(.Inline) void);
 assertDecl(T, "renderPassEncoderEndOcclusionQuery", fn (render_pass_encoder: *gpu.RenderPassEncoder) callconv(.Inline) void);
-assertDecl(T, "renderPassEncoderExecuteBundles", fn (render_pass_encoder: *gpu.RenderPassEncoder, bundles_count: u32, bundles: [*]const *const gpu.RenderBundle) callconv(.Inline) void);
+assertDecl(T, "renderPassEncoderExecuteBundles", fn (render_pass_encoder: *gpu.RenderPassEncoder, bundles_count: usize, bundles: [*]const *const gpu.RenderBundle) callconv(.Inline) void);
 assertDecl(T, "renderPassEncoderInsertDebugMarker", fn (render_pass_encoder: *gpu.RenderPassEncoder, marker_label: [*:0]const u8) callconv(.Inline) void);
 assertDecl(T, "renderPassEncoderPopDebugGroup", fn (render_pass_encoder: *gpu.RenderPassEncoder) callconv(.Inline) void);
 assertDecl(T, "renderPassEncoderPushDebugGroup", fn (render_pass_encoder: *gpu.RenderPassEncoder, group_label: [*:0]const u8) callconv(.Inline) void);
-assertDecl(T, "renderPassEncoderSetBindGroup", fn (render_pass_encoder: *gpu.RenderPassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: u32, dynamic_offsets: ?[*]const u32) callconv(.Inline) void);
+assertDecl(T, "renderPassEncoderSetBindGroup", fn (render_pass_encoder: *gpu.RenderPassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) callconv(.Inline) void);
 assertDecl(T, "renderPassEncoderSetBlendConstant", fn (render_pass_encoder: *gpu.RenderPassEncoder, color: *const gpu.Color) callconv(.Inline) void);
 assertDecl(T, "renderPassEncoderSetIndexBuffer", fn (render_pass_encoder: *gpu.RenderPassEncoder, buffer: *gpu.Buffer, format: gpu.IndexFormat, offset: u64, size: u64) callconv(.Inline) void);
 assertDecl(T, "renderPassEncoderSetLabel", fn (render_pass_encoder: *gpu.RenderPassEncoder, label: [*:0]const u8) callconv(.Inline) void);

@@ -195,7 +198,7 @@ pub fn Interface(comptime T: type) type {
 assertDecl(T, "shaderModuleRelease", fn (shader_module: *gpu.ShaderModule) callconv(.Inline) void);
 assertDecl(T, "surfaceReference", fn (surface: *gpu.Surface) callconv(.Inline) void);
 assertDecl(T, "surfaceRelease", fn (surface: *gpu.Surface) callconv(.Inline) void);
-assertDecl(T, "swapChainConfigure", fn (swap_chain: *gpu.SwapChain, format: gpu.Texture.Format, allowed_usage: gpu.Texture.UsageFlags, width: u32, height: u32) callconv(.Inline) void);
+assertDecl(T, "swapChainGetCurrentTexture", fn (swap_chain: *gpu.SwapChain) callconv(.Inline) ?*gpu.Texture);
 assertDecl(T, "swapChainGetCurrentTextureView", fn (swap_chain: *gpu.SwapChain) callconv(.Inline) ?*gpu.TextureView);
 assertDecl(T, "swapChainPresent", fn (swap_chain: *gpu.SwapChain) callconv(.Inline) void);
 assertDecl(T, "swapChainReference", fn (swap_chain: *gpu.SwapChain) callconv(.Inline) void);
@@ -249,6 +252,11 @@ pub fn Export(comptime T: type) type {
 return T.adapterEnumerateFeatures(adapter, features);
 }

+// WGPU_EXPORT WGPUInstance wgpuAdapterGetInstance(WGPUAdapter adapter) WGPU_FUNCTION_ATTRIBUTE;
+export fn wgpuAdapterGetInstance(adapter: *gpu.Adapter) *gpu.Instance {
+return T.adapterGetInstance(adapter);
+}
+
 // WGPU_EXPORT bool wgpuAdapterGetLimits(WGPUAdapter adapter, WGPUSupportedLimits * limits);
 export fn wgpuAdapterGetLimits(adapter: *gpu.Adapter, limits: *gpu.SupportedLimits) bool {
 return T.adapterGetLimits(adapter, limits);

@@ -499,8 +507,8 @@ pub fn Export(comptime T: type) type {
 T.computePassEncoderPushDebugGroup(compute_pass_encoder, group_label);
 }

-// WGPU_EXPORT void wgpuComputePassEncoderSetBindGroup(WGPUComputePassEncoder computePassEncoder, uint32_t groupIndex, WGPUBindGroup group, uint32_t dynamicOffsetCount, uint32_t const * dynamicOffsets);
-export fn wgpuComputePassEncoderSetBindGroup(compute_pass_encoder: *gpu.ComputePassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: u32, dynamic_offsets: ?[*]const u32) void {
+// WGPU_EXPORT void wgpuComputePassEncoderSetBindGroup(WGPUComputePassEncoder computePassEncoder, uint32_t groupIndex, WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets);
+export fn wgpuComputePassEncoderSetBindGroup(compute_pass_encoder: *gpu.ComputePassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void {
 T.computePassEncoderSetBindGroup(compute_pass_encoder, group_index, group, dynamic_offset_count, dynamic_offsets);
 }

@@ -674,9 +682,9 @@ pub fn Export(comptime T: type) type {
 T.deviceInjectError(device, typ, message);
 }

-// WGPU_EXPORT bool wgpuDevicePopErrorScope(WGPUDevice device, WGPUErrorCallback callback, void * userdata);
-export fn wgpuDevicePopErrorScope(device: *gpu.Device, callback: gpu.ErrorCallback, userdata: ?*anyopaque) bool {
-return T.devicePopErrorScope(device, callback, userdata);
+// WGPU_EXPORT void wgpuDevicePopErrorScope(WGPUDevice device, WGPUErrorCallback callback, void * userdata);
+export fn wgpuDevicePopErrorScope(device: *gpu.Device, callback: gpu.ErrorCallback, userdata: ?*anyopaque) void {
+T.devicePopErrorScope(device, callback, userdata);
 }

 // WGPU_EXPORT void wgpuDevicePushErrorScope(WGPUDevice device, WGPUErrorFilter filter);

@@ -747,6 +755,11 @@ pub fn Export(comptime T: type) type {
 return T.instanceCreateSurface(instance, descriptor);
 }

+// WGPU_EXPORT void instanceProcessEvents(WGPUInstance instance);
+export fn wgpuInstanceProcessEvents(instance: *gpu.Instance) void {
+T.instanceProcessEvents(instance);
+}
+
 // WGPU_EXPORT void wgpuInstanceRequestAdapter(WGPUInstance instance, WGPURequestAdapterOptions const * options /* nullable */, WGPURequestAdapterCallback callback, void * userdata);
 export fn wgpuInstanceRequestAdapter(instance: *gpu.Instance, options: ?*const gpu.RequestAdapterOptions, callback: gpu.RequestAdapterCallback, userdata: ?*anyopaque) void {
 T.instanceRequestAdapter(instance, options, callback, userdata);

@@ -822,8 +835,8 @@ pub fn Export(comptime T: type) type {
 T.queueSetLabel(queue, label);
 }

-// WGPU_EXPORT void wgpuQueueSubmit(WGPUQueue queue, uint32_t commandCount, WGPUCommandBuffer const * commands);
-export fn wgpuQueueSubmit(queue: *gpu.Queue, command_count: u32, commands: [*]const *const gpu.CommandBuffer) void {
+// WGPU_EXPORT void wgpuQueueSubmit(WGPUQueue queue, size_t commandCount, WGPUCommandBuffer const * commands);
+export fn wgpuQueueSubmit(queue: *gpu.Queue, command_count: usize, commands: [*]const *const gpu.CommandBuffer) void {
 T.queueSubmit(queue, command_count, commands);
 }

@@ -847,6 +860,11 @@ pub fn Export(comptime T: type) type {
 T.queueRelease(queue);
 }

+// WGPU_EXPORT void wgpuRenderBundleSetLabel(WGPURenderBundle renderBundle, char const * label);
+export fn wgpuRenderBundleSetLabel(render_bundle: *gpu.RenderBundle, label: [*:0]const u8) void {
+T.renderBundleSetLabel(render_bundle, label);
+}
+
 // WGPU_EXPORT void wgpuRenderBundleReference(WGPURenderBundle renderBundle);
 export fn wgpuRenderBundleReference(render_bundle: *gpu.RenderBundle) void {
 T.renderBundleReference(render_bundle);

@@ -897,8 +915,8 @@ pub fn Export(comptime T: type) type {
 T.renderBundleEncoderPushDebugGroup(render_bundle_encoder, group_label);
 }

-// WGPU_EXPORT void wgpuRenderBundleEncoderSetBindGroup(WGPURenderBundleEncoder renderBundleEncoder, uint32_t groupIndex, WGPUBindGroup group, uint32_t dynamicOffsetCount, uint32_t const * dynamicOffsets);
-export fn wgpuRenderBundleEncoderSetBindGroup(render_bundle_encoder: *gpu.RenderBundleEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: u32, dynamic_offsets: ?[*]const u32) void {
+// WGPU_EXPORT void wgpuRenderBundleEncoderSetBindGroup(WGPURenderBundleEncoder renderBundleEncoder, uint32_t groupIndex, WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets);
+export fn wgpuRenderBundleEncoderSetBindGroup(render_bundle_encoder: *gpu.RenderBundleEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void {
 T.renderBundleEncoderSetBindGroup(render_bundle_encoder, group_index, group, dynamic_offset_count, dynamic_offsets);
 }

@@ -967,8 +985,8 @@ pub fn Export(comptime T: type) type {
 T.renderPassEncoderEndOcclusionQuery(render_pass_encoder);
 }

-// WGPU_EXPORT void wgpuRenderPassEncoderExecuteBundles(WGPURenderPassEncoder renderPassEncoder, uint32_t bundlesCount, WGPURenderBundle const * bundles);
-export fn wgpuRenderPassEncoderExecuteBundles(render_pass_encoder: *gpu.RenderPassEncoder, bundles_count: u32, bundles: [*]const *const gpu.RenderBundle) void {
+// WGPU_EXPORT void wgpuRenderPassEncoderExecuteBundles(WGPURenderPassEncoder renderPassEncoder, size_t bundleCount, WGPURenderBundle const * bundles);
+export fn wgpuRenderPassEncoderExecuteBundles(render_pass_encoder: *gpu.RenderPassEncoder, bundles_count: usize, bundles: [*]const *const gpu.RenderBundle) void {
 T.renderPassEncoderExecuteBundles(render_pass_encoder, bundles_count, bundles);
 }

@@ -987,8 +1005,8 @@ pub fn Export(comptime T: type) type {
 T.renderPassEncoderPushDebugGroup(render_pass_encoder, group_label);
 }

-// WGPU_EXPORT void wgpuRenderPassEncoderSetBindGroup(WGPURenderPassEncoder renderPassEncoder, uint32_t groupIndex, WGPUBindGroup group, uint32_t dynamicOffsetCount, uint32_t const * dynamicOffsets);
-export fn wgpuRenderPassEncoderSetBindGroup(render_pass_encoder: *gpu.RenderPassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: u32, dynamic_offsets: ?[*]const u32) void {
+// WGPU_EXPORT void wgpuRenderPassEncoderSetBindGroup(WGPURenderPassEncoder renderPassEncoder, uint32_t groupIndex, WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets);
+export fn wgpuRenderPassEncoderSetBindGroup(render_pass_encoder: *gpu.RenderPassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void {
 T.renderPassEncoderSetBindGroup(render_pass_encoder, group_index, group, dynamic_offset_count, dynamic_offsets);
 }

@@ -1112,9 +1130,9 @@ pub fn Export(comptime T: type) type {
 T.surfaceRelease(surface);
 }

-// WGPU_EXPORT void wgpuSwapChainConfigure(WGPUSwapChain swapChain, WGPUTextureFormat format, WGPUTextureUsageFlags allowedUsage, uint32_t width, uint32_t height);
-export fn wgpuSwapChainConfigure(swap_chain: *gpu.SwapChain, format: gpu.Texture.Format, allowed_usage: u32, width: u32, height: u32) void {
-T.swapChainConfigure(swap_chain, format, @bitCast(gpu.Texture.UsageFlags, allowed_usage), width, height);
+// WGPU_EXPORT WGPUTexture wgpuSwapChainGetCurrentTexture(WGPUSwapChain swapChain);
+export fn wgpuSwapChainGetCurrentTexture(swap_chain: *gpu.SwapChain) ?*gpu.Texture {
+return T.swapChainGetCurrentTexture(swap_chain);
 }

 // WGPU_EXPORT WGPUTextureView wgpuSwapChainGetCurrentTextureView(WGPUSwapChain swapChain);
@ -1244,6 +1262,11 @@ pub const StubInterface = Interface(struct {
|
||||||
unreachable;
|
unreachable;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub inline fn adapterGetInstance(adapter: *gpu.Adapter) *gpu.Instance {
|
||||||
|
_ = adapter;
|
||||||
|
unreachable;
|
||||||
|
}
|
||||||
|
|
||||||
pub inline fn adapterGetLimits(adapter: *gpu.Adapter, limits: *gpu.SupportedLimits) bool {
|
pub inline fn adapterGetLimits(adapter: *gpu.Adapter, limits: *gpu.SupportedLimits) bool {
|
||||||
_ = adapter;
|
_ = adapter;
|
||||||
_ = limits;
|
_ = limits;
|
||||||
|
|
@ -1560,7 +1583,7 @@ pub const StubInterface = Interface(struct {
|
||||||
unreachable;
|
unreachable;
|
||||||
}
|
}
|
||||||
|
|
||||||
pub inline fn computePassEncoderSetBindGroup(compute_pass_encoder: *gpu.ComputePassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: u32, dynamic_offsets: ?[*]const u32) void {
|
pub inline fn computePassEncoderSetBindGroup(compute_pass_encoder: *gpu.ComputePassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void {
|
||||||
_ = compute_pass_encoder;
|
_ = compute_pass_encoder;
|
||||||
_ = group_index;
|
_ = group_index;
|
||||||
_ = group;
|
_ = group;
|
||||||
|
|
@ -1780,7 +1803,7 @@ pub const StubInterface = Interface(struct {
|
||||||
unreachable;
|
unreachable;
|
||||||
}
|
}
|
||||||
|
|
||||||
pub inline fn devicePopErrorScope(device: *gpu.Device, callback: gpu.ErrorCallback, userdata: ?*anyopaque) bool {
|
pub inline fn devicePopErrorScope(device: *gpu.Device, callback: gpu.ErrorCallback, userdata: ?*anyopaque) void {
|
||||||
_ = device;
|
_ = device;
|
||||||
_ = callback;
|
_ = callback;
|
||||||
_ = userdata;
|
_ = userdata;
|
||||||
|
|
@ -1862,6 +1885,11 @@ pub const StubInterface = Interface(struct {
|
||||||
unreachable;
|
unreachable;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub inline fn instanceProcessEvents(instance: *gpu.Instance) void {
|
||||||
|
_ = instance;
|
||||||
|
unreachable;
|
||||||
|
}
|
||||||
|
|
||||||
pub inline fn instanceRequestAdapter(instance: *gpu.Instance, options: ?*const gpu.RequestAdapterOptions, callback: gpu.RequestAdapterCallback, userdata: ?*anyopaque) void {
|
pub inline fn instanceRequestAdapter(instance: *gpu.Instance, options: ?*const gpu.RequestAdapterOptions, callback: gpu.RequestAdapterCallback, userdata: ?*anyopaque) void {
|
||||||
_ = instance;
|
_ = instance;
|
||||||
_ = options;
|
_ = options;
|
||||||
|
|
@@ -1950,7 +1978,7 @@ pub const StubInterface = Interface(struct {
         unreachable;
     }

-    pub inline fn queueSubmit(queue: *gpu.Queue, command_count: u32, commands: [*]const *const gpu.CommandBuffer) void {
+    pub inline fn queueSubmit(queue: *gpu.Queue, command_count: usize, commands: [*]const *const gpu.CommandBuffer) void {
         _ = queue;
         _ = command_count;
         _ = commands;

@@ -1986,6 +2014,12 @@ pub const StubInterface = Interface(struct {
         unreachable;
     }

+    pub inline fn renderBundleSetLabel(render_bundle: *gpu.RenderBundle, label: [*:0]const u8) void {
+        _ = render_bundle;
+        _ = label;
+        unreachable;
+    }
+
     pub inline fn renderBundleReference(render_bundle: *gpu.RenderBundle) void {
         _ = render_bundle;
         unreachable;
@@ -2052,7 +2086,7 @@ pub const StubInterface = Interface(struct {
         unreachable;
     }

-    pub inline fn renderBundleEncoderSetBindGroup(render_bundle_encoder: *gpu.RenderBundleEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: u32, dynamic_offsets: ?[*]const u32) void {
+    pub inline fn renderBundleEncoderSetBindGroup(render_bundle_encoder: *gpu.RenderBundleEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void {
         _ = render_bundle_encoder;
         _ = group_index;
         _ = group;

@@ -2150,7 +2184,7 @@ pub const StubInterface = Interface(struct {
         unreachable;
     }

-    pub inline fn renderPassEncoderExecuteBundles(render_pass_encoder: *gpu.RenderPassEncoder, bundles_count: u32, bundles: [*]const *const gpu.RenderBundle) void {
+    pub inline fn renderPassEncoderExecuteBundles(render_pass_encoder: *gpu.RenderPassEncoder, bundles_count: usize, bundles: [*]const *const gpu.RenderBundle) void {
         _ = render_pass_encoder;
         _ = bundles_count;
         _ = bundles;

@@ -2174,7 +2208,7 @@ pub const StubInterface = Interface(struct {
         unreachable;
     }

-    pub inline fn renderPassEncoderSetBindGroup(render_pass_encoder: *gpu.RenderPassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: u32, dynamic_offsets: ?[*]const u32) void {
+    pub inline fn renderPassEncoderSetBindGroup(render_pass_encoder: *gpu.RenderPassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void {
         _ = render_pass_encoder;
         _ = group_index;
         _ = group;
@@ -2333,12 +2367,8 @@ pub const StubInterface = Interface(struct {
         unreachable;
     }

-    pub inline fn swapChainConfigure(swap_chain: *gpu.SwapChain, format: gpu.Texture.Format, allowed_usage: gpu.Texture.UsageFlags, width: u32, height: u32) void {
+    pub inline fn swapChainGetCurrentTexture(swap_chain: *gpu.SwapChain) ?*gpu.Texture {
         _ = swap_chain;
-        _ = format;
-        _ = allowed_usage;
-        _ = width;
-        _ = height;
         unreachable;
     }
@@ -44,7 +44,6 @@ pub inline fn getProcAddress(_device: *device.Device, proc_name: [*:0]const u8)
     return Impl.getProcAddress(_device, proc_name);
 }

-
 pub const array_layer_count_undef = 0xffffffff;
 pub const copy_stride_undef = 0xffffffff;
 pub const limit_u32_undef = 0xffffffff;
@@ -67,14 +66,10 @@ pub const RenderPassDepthStencilAttachment = extern struct {
     view: *TextureView,
     depth_load_op: LoadOp = .undefined,
     depth_store_op: StoreOp = .undefined,
-    /// deprecated
-    clear_depth: f32 = std.math.nan(f32),
     depth_clear_value: f32 = 0,
     depth_read_only: bool = false,
     stencil_load_op: LoadOp = .undefined,
     stencil_store_op: StoreOp = .undefined,
-    /// deprecated
-    clear_stencil: u32 = 0,
     stencil_clear_value: u32 = 0,
     stencil_read_only: bool = false,
 };
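With the deprecated `clear_depth`/`clear_stencil` mirrors gone, depth-stencil clears are expressed only through `depth_clear_value` and `stencil_clear_value`. A minimal sketch, assuming the root module is importable as `gpu` and `depth_view` is a depth `*gpu.TextureView` created elsewhere:

```zig
const gpu = @import("gpu"); // module path is an assumption of this sketch

// Build a depth attachment using the surviving clear fields; everything not
// set here keeps the defaults shown in the struct above.
fn depthAttachment(depth_view: *gpu.TextureView) gpu.RenderPassDepthStencilAttachment {
    return .{
        .view = depth_view,
        .depth_load_op = .clear,
        .depth_store_op = .store,
        .depth_clear_value = 1.0,
    };
}
```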
@@ -90,12 +85,13 @@ pub const RequestAdapterOptions = extern struct {
     compatible_surface: ?*Surface = null,
     power_preference: PowerPreference = .undefined,
     force_fallback_adapter: bool = false,
+    compatibility_mode: bool = false,
 };

 pub const ComputePassDescriptor = extern struct {
     next_in_chain: ?*const ChainedStruct = null,
     label: ?[*:0]const u8 = null,
-    timestamp_write_count: u32 = 0,
+    timestamp_write_count: usize = 0,
     timestamp_writes: ?[*]const ComputePassTimestampWrite = null,

     /// Provides a slightly friendlier Zig API to initialize this structure.

@@ -107,7 +103,7 @@ pub const ComputePassDescriptor = extern struct {
         return .{
             .next_in_chain = v.next_in_chain,
             .label = v.label,
-            .timestamp_write_count = if (v.timestamp_writes) |e| @intCast(u32, e.len) else 0,
+            .timestamp_write_count = if (v.timestamp_writes) |e| e.len else 0,
             .timestamp_writes = if (v.timestamp_writes) |e| e.ptr else null,
         };
     }
@@ -121,11 +117,11 @@ pub const RenderPassDescriptor = extern struct {

     next_in_chain: NextInChain = .{ .generic = null },
     label: ?[*:0]const u8 = null,
-    color_attachment_count: u32 = 0,
+    color_attachment_count: usize = 0,
     color_attachments: ?[*]const RenderPassColorAttachment = null,
     depth_stencil_attachment: ?*const RenderPassDepthStencilAttachment = null,
     occlusion_query_set: ?*QuerySet = null,
-    timestamp_write_count: u32 = 0,
+    timestamp_write_count: usize = 0,
     timestamp_writes: ?[*]const RenderPassTimestampWrite = null,

     /// Provides a slightly friendlier Zig API to initialize this structure.

@@ -140,11 +136,11 @@ pub const RenderPassDescriptor = extern struct {
         return .{
             .next_in_chain = v.next_in_chain,
             .label = v.label,
-            .color_attachment_count = if (v.color_attachments) |e| @intCast(u32, e.len) else 0,
+            .color_attachment_count = if (v.color_attachments) |e| e.len else 0,
             .color_attachments = if (v.color_attachments) |e| e.ptr else null,
             .depth_stencil_attachment = v.depth_stencil_attachment,
             .occlusion_query_set = v.occlusion_query_set,
-            .timestamp_write_count = if (v.timestamp_writes) |e| @intCast(u32, e.len) else 0,
+            .timestamp_write_count = if (v.timestamp_writes) |e| e.len else 0,
             .timestamp_writes = if (v.timestamp_writes) |e| e.ptr else null,
         };
     }
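The slice-taking `init` helpers now forward `slice.len` straight into these `usize` counts, so callers no longer need an `@intCast` either. A minimal usage sketch, assuming the root module is importable as `gpu` and that `encoder` and `back_buffer_view` come from the usual per-frame setup:

```zig
const gpu = @import("gpu"); // module path is an assumption of this sketch

// Begin a render pass whose single color attachment clears to dark grey.
// The init helper fills color_attachment_count (usize) from the slice length.
fn beginMainPass(encoder: *gpu.CommandEncoder, back_buffer_view: *gpu.TextureView) *gpu.RenderPassEncoder {
    const color_attachments = [_]gpu.RenderPassColorAttachment{.{
        .view = back_buffer_view,
        .load_op = .clear,
        .store_op = .store,
        .clear_value = .{ .r = 0.1, .g = 0.1, .b = 0.1, .a = 1.0 },
    }};
    const pass_info = gpu.RenderPassDescriptor.init(.{
        .label = "main pass",
        .color_attachments = &color_attachments,
    });
    return encoder.beginRenderPass(&pass_info);
}
```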
@@ -232,10 +228,11 @@ pub const ComputePassTimestampLocation = enum(u32) {

 pub const CreatePipelineAsyncStatus = enum(u32) {
     success = 0x00000000,
-    err = 0x00000001,
-    device_lost = 0x00000002,
-    device_destroyed = 0x00000003,
-    unknown = 0x00000004,
+    validation_error = 0x00000001,
+    internal_error = 0x00000002,
+    device_lost = 0x00000003,
+    device_destroyed = 0x00000004,
+    unknown = 0x00000005,
 };

 pub const CullMode = enum(u32) {
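The generic `err` value is split into `validation_error` and `internal_error`, and the remaining values are renumbered to match webgpu.h. A hedged sketch of handling the new set (the `gpu` import path is an assumption; the status would normally arrive in a create*PipelineAsync callback):

```zig
const std = @import("std");
const gpu = @import("gpu"); // module path is an assumption of this sketch

// Exhaustive switch over the renumbered status values.
fn reportPipelineStatus(status: gpu.CreatePipelineAsyncStatus) void {
    switch (status) {
        .success => std.log.info("pipeline ready", .{}),
        .validation_error => std.log.err("pipeline failed validation", .{}),
        .internal_error => std.log.err("internal error while creating pipeline", .{}),
        .device_lost, .device_destroyed => std.log.warn("device went away before pipeline creation finished", .{}),
        .unknown => std.log.err("unknown pipeline creation failure", .{}),
    }
}
```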
@@ -271,12 +268,17 @@ pub const FeatureName = enum(u32) {
     indirect_first_instance = 0x00000008,
     shader_f16 = 0x00000009,
     rg11_b10_ufloat_renderable = 0x0000000A,
+    bgra8_unorm_storage = 0x0000000B,
+    float32_filterable = 0x0000000C,
     dawn_shader_float16 = 0x000003e9,
     dawn_internal_usages = 0x000003ea,
     dawn_multi_planar_formats = 0x000003eb,
     dawn_native = 0x000003ec,
     chromium_experimental_dp4a = 0x000003ed,
     timestamp_query_inside_passes = 0x000003EE,
+    implicit_device_synchronization = 0x000003EF,
+    surface_capabilities = 0x000003F0,
+    transient_attachments = 0x000003F1,
 };

 pub const FilterMode = enum(u32) {

@@ -284,6 +286,11 @@ pub const FilterMode = enum(u32) {
     linear = 0x00000001,
 };

+pub const MipmapFilterMode = enum(u32) {
+    nearest = 0x00000000,
+    linear = 0x00000001,
+};
+
 pub const FrontFace = enum(u32) {
     ccw = 0x00000000,
     cw = 0x00000001,
@@ -377,12 +384,13 @@ pub const SType = enum(u32) {
     surface_descriptor_from_windows_swap_chain_panel = 0x0000000E,
     render_pass_descriptor_max_draw_count = 0x0000000F,
     dawn_texture_internal_usage_descriptor = 0x000003E8,
-    dawn_toggles_device_descriptor = 0x000003EA,
     dawn_encoder_internal_usage_descriptor = 0x000003EB,
     dawn_instance_descriptor = 0x000003EC,
     dawn_cache_device_descriptor = 0x000003ED,
     dawn_adapter_properties_power_preference = 0x000003EE,
     dawn_buffer_descriptor_error_info_from_wire_client = 0x000003EF,
+    dawn_toggles_descriptor = 0x000003F0,
+    dawn_shader_module_spirv_options_descriptor = 0x000003F1,
 };

 pub const StencilOperation = enum(u32) {

@@ -684,7 +692,7 @@ pub const BlendState = extern struct {

 pub const CompilationInfo = extern struct {
     next_in_chain: ?*const ChainedStruct = null,
-    message_count: u32,
+    message_count: usize,
     messages: ?[*]const CompilationMessage = null,

     /// Helper to get messages as a slice.
@@ -720,6 +728,7 @@ pub const ImageCopyExternalTexture = extern struct {
     next_in_chain: ?*const ChainedStruct = null,
     external_texture: *ExternalTexture,
     origin: Origin3D,
+    natural_size: Extent2D,
 };

 pub const ImageCopyTexture = extern struct {

@@ -734,7 +743,7 @@ pub const ProgrammableStageDescriptor = extern struct {
     next_in_chain: ?*const ChainedStruct = null,
     module: *ShaderModule,
     entry_point: [*:0]const u8,
-    constant_count: u32 = 0,
+    constant_count: usize = 0,
     constants: ?[*]const ConstantEntry = null,

     /// Provides a slightly friendlier Zig API to initialize this structure.

@@ -748,7 +757,7 @@ pub const ProgrammableStageDescriptor = extern struct {
             .next_in_chain = v.next_in_chain,
             .module = v.module,
             .entry_point = v.entry_point,
-            .constant_count = if (v.constants) |e| @intCast(u32, e.len) else 0,
+            .constant_count = if (v.constants) |e| e.len else 0,
             .constants = if (v.constants) |e| e.ptr else null,
         };
     }
@@ -759,13 +768,6 @@ pub const RenderPassColorAttachment = extern struct {
     resolve_target: ?*TextureView = null,
     load_op: LoadOp,
     store_op: StoreOp,
-    /// deprecated
-    clear_color: Color = .{
-        .r = std.math.nan(f64),
-        .g = std.math.nan(f64),
-        .b = std.math.nan(f64),
-        .a = std.math.nan(f64),
-    },
     clear_value: Color,
 };

@@ -790,7 +792,7 @@ pub const SupportedLimits = extern struct {
 pub const VertexBufferLayout = extern struct {
     array_stride: u64,
     step_mode: VertexStepMode = .vertex,
-    attribute_count: u32,
+    attribute_count: usize,
     attributes: ?[*]const VertexAttribute = null,

     /// Provides a slightly friendlier Zig API to initialize this structure.
@@ -802,7 +804,7 @@ pub const VertexBufferLayout = extern struct {
         return .{
             .array_stride = v.array_stride,
             .step_mode = v.step_mode,
-            .attribute_count = if (v.attributes) |e| @intCast(u32, e.len) else 0,
+            .attribute_count = if (v.attributes) |e| e.len else 0,
             .attributes = if (v.attributes) |e| e.ptr else null,
         };
     }

@@ -819,9 +821,9 @@ pub const VertexState = extern struct {
     next_in_chain: ?*const ChainedStruct = null,
     module: *ShaderModule,
     entry_point: [*:0]const u8,
-    constant_count: u32 = 0,
+    constant_count: usize = 0,
     constants: ?[*]const ConstantEntry = null,
-    buffer_count: u32 = 0,
+    buffer_count: usize = 0,
     buffers: ?[*]const VertexBufferLayout = null,

     /// Provides a slightly friendlier Zig API to initialize this structure.
@@ -836,9 +838,9 @@ pub const VertexState = extern struct {
             .next_in_chain = v.next_in_chain,
             .module = v.module,
             .entry_point = v.entry_point,
-            .constant_count = if (v.constants) |e| @intCast(u32, e.len) else 0,
+            .constant_count = if (v.constants) |e| e.len else 0,
             .constants = if (v.constants) |e| e.ptr else null,
-            .buffer_count = if (v.buffers) |e| @intCast(u32, e.len) else 0,
+            .buffer_count = if (v.buffers) |e| e.len else 0,
             .buffers = if (v.buffers) |e| e.ptr else null,
         };
     }

@@ -848,9 +850,9 @@ pub const FragmentState = extern struct {
     next_in_chain: ?*const ChainedStruct = null,
     module: *ShaderModule,
     entry_point: [*:0]const u8,
-    constant_count: u32 = 0,
+    constant_count: usize = 0,
     constants: ?[*]const ConstantEntry = null,
-    target_count: u32,
+    target_count: usize,
     targets: ?[*]const ColorTargetState = null,

     /// Provides a slightly friendlier Zig API to initialize this structure.

@@ -865,9 +867,9 @@ pub const FragmentState = extern struct {
             .next_in_chain = v.next_in_chain,
             .module = v.module,
             .entry_point = v.entry_point,
-            .constant_count = if (v.constants) |e| @intCast(u32, e.len) else 0,
+            .constant_count = if (v.constants) |e| e.len else 0,
             .constants = if (v.constants) |e| e.ptr else null,
-            .target_count = if (v.targets) |e| @intCast(u32, e.len) else 0,
+            .target_count = if (v.targets) |e| e.len else 0,
             .targets = if (v.targets) |e| e.ptr else null,
         };
     }
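`VertexState.init` and `VertexBufferLayout.init` now hand slice lengths through as `usize` as well. A minimal sketch of building a vertex stage with them (the `gpu` import path and the defaults for unset init parameters are assumptions; the single position attribute is purely illustrative):

```zig
const gpu = @import("gpu"); // module path is an assumption of this sketch

// One float32x3 position attribute in a single interleaved vertex buffer.
const vertex_attributes = [_]gpu.VertexAttribute{
    .{ .format = .float32x3, .offset = 0, .shader_location = 0 },
};
const vertex_buffers = [_]gpu.VertexBufferLayout{
    gpu.VertexBufferLayout.init(.{
        .array_stride = @sizeOf([3]f32),
        .step_mode = .vertex,
        .attributes = &vertex_attributes,
    }),
};

// `shader_module` is assumed to be created elsewhere (see the WGSL sketch further below).
fn vertexState(shader_module: *gpu.ShaderModule) gpu.VertexState {
    return gpu.VertexState.init(.{
        .module = shader_module,
        .entry_point = "vertex_main",
        .buffers = &vertex_buffers,
    });
}
```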
@@ -927,7 +929,6 @@ pub const CreateRenderPipelineAsyncCallback = *const fn (
     userdata: ?*anyopaque,
 ) callconv(.C) void;

-
 test {
     std.testing.refAllDeclsRecursive(@This());
 }
@@ -6,7 +6,7 @@ pub const PipelineLayout = opaque {
     pub const Descriptor = extern struct {
         next_in_chain: ?*const ChainedStruct = null,
         label: ?[*:0]const u8 = null,
-        bind_group_layout_count: u32 = 0,
+        bind_group_layout_count: usize = 0,
         bind_group_layouts: ?[*]const *BindGroupLayout = null,

         /// Provides a slightly friendlier Zig API to initialize this structure.

@@ -18,7 +18,7 @@ pub const PipelineLayout = opaque {
             return .{
                 .next_in_chain = v.next_in_chain,
                 .label = v.label,
-                .bind_group_layout_count = if (v.bind_group_layouts) |e| @intCast(u32, e.len) else 0,
+                .bind_group_layout_count = if (v.bind_group_layouts) |e| e.len else 0,
                 .bind_group_layouts = if (v.bind_group_layouts) |e| e.ptr else null,
             };
         }
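A hedged usage sketch of the updated helper (assuming the root module is importable as `gpu`, that `createPipelineLayout` follows the usual webgpu.h shape, and that `bgl` is a `*gpu.BindGroupLayout` created elsewhere):

```zig
const gpu = @import("gpu"); // module path is an assumption of this sketch

// The init helper fills bind_group_layout_count (now usize) from the slice.
fn makePipelineLayout(device: *gpu.Device, bgl: *gpu.BindGroupLayout) *gpu.PipelineLayout {
    const layouts = [_]*gpu.BindGroupLayout{bgl};
    return device.createPipelineLayout(&gpu.PipelineLayout.Descriptor.init(.{
        .label = "pipeline layout",
        .bind_group_layouts = &layouts,
    }));
}
```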
@@ -10,7 +10,7 @@ pub const QuerySet = opaque {
         type: QueryType,
         count: u32,
         pipeline_statistics: ?[*]const PipelineStatisticName = null,
-        pipeline_statistics_count: u32 = 0,
+        pipeline_statistics_count: usize = 0,

         /// Provides a slightly friendlier Zig API to initialize this structure.
         pub inline fn init(v: struct {

@@ -25,7 +25,7 @@ pub const QuerySet = opaque {
                 .label = v.label,
                 .type = v.type,
                 .count = v.count,
-                .pipeline_statistics_count = if (v.pipeline_statistics) |e| @intCast(u32, e.len) else 0,
+                .pipeline_statistics_count = if (v.pipeline_statistics) |e| e.len else 0,
                 .pipeline_statistics = if (v.pipeline_statistics) |e| e.ptr else null,
             };
         }
@@ -56,7 +56,7 @@ pub const Queue = opaque {
     }

     pub inline fn submit(queue: *Queue, commands: []const *const CommandBuffer) void {
-        Impl.queueSubmit(queue, @intCast(u32, commands.len), commands.ptr);
+        Impl.queueSubmit(queue, commands.len, commands.ptr);
     }

     pub inline fn writeBuffer(
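The public `submit` still takes a Zig slice; only the internal count forwarding changed, so callers are unaffected. A minimal per-frame sketch (the `gpu` import path and the `finish`/`release` call shapes follow the usual webgpu.h pattern and are assumptions of this sketch):

```zig
const gpu = @import("gpu"); // module path is an assumption of this sketch

// Finish the encoder into a command buffer and submit it as a one-element slice.
fn submitFrame(queue: *gpu.Queue, encoder: *gpu.CommandEncoder) void {
    const command = encoder.finish(null);
    defer command.release();
    queue.submit(&[_]*const gpu.CommandBuffer{command});
}
```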
@@ -7,6 +7,10 @@ pub const RenderBundle = opaque {
         label: ?[*:0]const u8 = null,
     };

+    pub inline fn setLabel(render_bundle: *RenderBundle, label: [*:0]const u8) void {
+        Impl.renderBundleSetLabel(render_bundle, label);
+    }
+
     pub inline fn reference(render_bundle: *RenderBundle) void {
         Impl.renderBundleReference(render_bundle);
     }
@@ -11,7 +11,7 @@ pub const RenderBundleEncoder = opaque {
     pub const Descriptor = extern struct {
         next_in_chain: ?*const ChainedStruct = null,
         label: ?[*:0]const u8 = null,
-        color_formats_count: u32 = 0,
+        color_formats_count: usize = 0,
         color_formats: ?[*]const Texture.Format = null,
         depth_stencil_format: Texture.Format = .undefined,
         sample_count: u32 = 1,

@@ -31,7 +31,7 @@ pub const RenderBundleEncoder = opaque {
             return .{
                 .next_in_chain = v.next_in_chain,
                 .label = v.label,
-                .color_formats_count = if (v.color_formats) |e| @intCast(u32, e.len) else 0,
+                .color_formats_count = if (v.color_formats) |e| e.len else 0,
                 .color_formats = if (v.color_formats) |e| e.ptr else null,
                 .depth_stencil_format = v.depth_stencil_format,
                 .sample_count = v.sample_count,

@@ -86,7 +86,7 @@ pub const RenderBundleEncoder = opaque {
             render_bundle_encoder,
             group_index,
             group,
-            if (dynamic_offsets) |v| @intCast(u32, v.len) else 0,
+            if (dynamic_offsets) |v| v.len else 0,
             if (dynamic_offsets) |v| v.ptr else null,
         );
     }
@@ -49,7 +49,7 @@ pub const RenderPassEncoder = opaque {
     ) void {
         Impl.renderPassEncoderExecuteBundles(
             render_pass_encoder,
-            @intCast(u32, bundles.len),
+            bundles.len,
             bundles.ptr,
         );
     }

@@ -73,7 +73,7 @@ pub const RenderPassEncoder = opaque {
             render_pass_encoder,
             group_index,
             group,
-            if (dynamic_offsets) |v| @intCast(u32, v.len) else 0,
+            if (dynamic_offsets) |v| v.len else 0,
             if (dynamic_offsets) |v| v.ptr else null,
         );
     }
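As before, `setBindGroup` accepts an optional slice of dynamic offsets; its length is now passed through as `usize` untouched. A hedged sketch (the `gpu` import path is an assumption; pass `null` when the layout has no dynamic bindings):

```zig
const gpu = @import("gpu"); // module path is an assumption of this sketch

// Bind group 0 with no dynamic offsets.
fn bindResources(pass: *gpu.RenderPassEncoder, bind_group: *gpu.BindGroup) void {
    pass.setBindGroup(0, bind_group, null);
}
```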
@@ -1,5 +1,6 @@
 const ChainedStruct = @import("main.zig").ChainedStruct;
 const FilterMode = @import("main.zig").FilterMode;
+const MipmapFilterMode = @import("main.zig").MipmapFilterMode;
 const CompareFunction = @import("main.zig").CompareFunction;
 const Impl = @import("interface.zig").Impl;

@@ -30,9 +31,9 @@ pub const Sampler = opaque {
         address_mode_w: AddressMode = .clamp_to_edge,
         mag_filter: FilterMode = .nearest,
         min_filter: FilterMode = .nearest,
-        mipmap_filter: FilterMode = .nearest,
+        mipmap_filter: MipmapFilterMode = .nearest,
         lod_min_clamp: f32 = 0.0,
-        lod_max_clamp: f32 = 1000.0,
+        lod_max_clamp: f32 = 32.0,
         compare: CompareFunction = .undefined,
         max_anisotropy: u16 = 1,
     };
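`mipmap_filter` now uses the dedicated `MipmapFilterMode` enum and the default `lod_max_clamp` drops from 1000 to 32, matching webgpu.h. A minimal sketch (the `gpu` import path and the `createSampler` call shape are assumptions of this sketch):

```zig
const gpu = @import("gpu"); // module path is an assumption of this sketch

// Trilinear sampler; everything not set keeps the defaults above,
// including the new lod_max_clamp of 32.
fn trilinearSampler(device: *gpu.Device) *gpu.Sampler {
    const desc = gpu.Sampler.Descriptor{
        .mag_filter = .linear,
        .min_filter = .linear,
        .mipmap_filter = .linear,
    };
    return device.createSampler(&desc);
}
```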
@@ -3,6 +3,7 @@ const CompilationInfoCallback = @import("main.zig").CompilationInfoCallback;
 const CompilationInfoRequestStatus = @import("main.zig").CompilationInfoRequestStatus;
 const CompilationInfo = @import("main.zig").CompilationInfo;
 const Impl = @import("interface.zig").Impl;
+const dawn = @import("dawn.zig");

 pub const ShaderModule = opaque {
     pub const Descriptor = extern struct {

@@ -10,6 +11,7 @@ pub const ShaderModule = opaque {
             generic: ?*const ChainedStruct,
             spirv_descriptor: ?*const SPIRVDescriptor,
             wgsl_descriptor: ?*const WGSLDescriptor,
+            dawn_shader_module_spirv_options_descriptor: ?*const dawn.ShaderModuleSPIRVOptionsDescriptor,
         };

         next_in_chain: NextInChain = .{ .generic = null },

@@ -24,7 +26,7 @@ pub const ShaderModule = opaque {

     pub const WGSLDescriptor = extern struct {
         chain: ChainedStruct = .{ .next = null, .s_type = .shader_module_wgsl_descriptor },
-        source: [*:0]const u8,
+        code: [*:0]const u8,
     };

     pub inline fn getCompilationInfo(
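The WGSL chained struct's field is renamed from `source` to `code` to track webgpu.h. A hedged sketch of creating a shader module with it (the `gpu` import path and the `createShaderModule` call shape are assumptions; the WGSL itself is a placeholder):

```zig
const gpu = @import("gpu"); // module path is an assumption of this sketch

// Chain a WGSLDescriptor into the shader module descriptor; note `.code`, not `.source`.
fn makeShader(device: *gpu.Device) *gpu.ShaderModule {
    const wgsl = gpu.ShaderModule.WGSLDescriptor{
        .code =
        \\@vertex fn vertex_main() -> @builtin(position) vec4<f32> {
        \\    return vec4<f32>(0.0, 0.0, 0.0, 1.0);
        \\}
        ,
    };
    const desc = gpu.ShaderModule.Descriptor{
        .next_in_chain = .{ .wgsl_descriptor = &wgsl },
    };
    return device.createShaderModule(&desc);
}
```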
@@ -13,12 +13,10 @@ pub const SwapChain = opaque {
         width: u32,
         height: u32,
         present_mode: PresentMode,
-        /// deprecated
-        implementation: u64 = 0,
     };

-    pub inline fn configure(swap_chain: *SwapChain, format: Texture.Format, allowed_usage: Texture.UsageFlags, width: u32, height: u32) void {
-        Impl.swapChainConfigure(swap_chain, format, allowed_usage, width, height);
+    pub inline fn getCurrentTexture(swap_chain: *SwapChain) ?*Texture {
+        return Impl.swapChainGetCurrentTexture(swap_chain);
     }

     pub inline fn getCurrentTextureView(swap_chain: *SwapChain) ?*TextureView {
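`configure` is gone from the swap chain; per webgpu.h a frame simply asks the swap chain for its current texture (or texture view), and size changes are handled by recreating the swap chain. A minimal sketch (the `gpu` import path is an assumption):

```zig
const gpu = @import("gpu"); // module path is an assumption of this sketch

// Fetch the view to render into this frame; getCurrentTexture() is the new
// alternative when the raw *gpu.Texture is needed instead.
fn currentBackBuffer(swap_chain: *gpu.SwapChain) ?*gpu.TextureView {
    return swap_chain.getCurrentTextureView();
}
```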
@@ -15,13 +15,6 @@ pub const Texture = opaque {
         plane1_only = 0x00000004,
     };

-    pub const ComponentType = enum(u32) {
-        float = 0x00000000,
-        sint = 0x00000001,
-        uint = 0x00000002,
-        depth_comparison = 0x00000003,
-    };
-
     pub const Dimension = enum(u32) {
         dimension_1d = 0x00000000,
         dimension_2d = 0x00000001,

@@ -142,7 +135,7 @@ pub const Texture = opaque {
         texture_binding: bool = false,
         storage_binding: bool = false,
         render_attachment: bool = false,
-        present: bool = false,
+        transient_attachment: bool = false,

         _padding: u26 = 0,
@@ -188,7 +181,7 @@ pub const Texture = opaque {
         format: Format,
         mip_level_count: u32 = 1,
         sample_count: u32 = 1,
-        view_format_count: u32 = 0,
+        view_format_count: usize = 0,
         view_formats: ?[*]const Format = null,

         /// Provides a slightly friendlier Zig API to initialize this structure.

@@ -212,7 +205,7 @@ pub const Texture = opaque {
                 .format = v.format,
                 .mip_level_count = v.mip_level_count,
                 .sample_count = v.sample_count,
-                .view_format_count = if (v.view_formats) |e| @intCast(u32, e.len) else 0,
+                .view_format_count = if (v.view_formats) |e| e.len else 0,
                 .view_formats = if (v.view_formats) |e| e.ptr else null,
             };
         }
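As elsewhere, `view_format_count` is now a `usize` filled from the optional slice by the init helper. A hedged sketch of creating a texture with an extra sRGB view format (the `gpu` import path, the `createTexture` call shape, and the unset descriptor defaults are assumptions of this sketch):

```zig
const gpu = @import("gpu"); // module path is an assumption of this sketch

// A render target that may also be viewed as sRGB.
fn makeRenderTarget(device: *gpu.Device, width: u32, height: u32) *gpu.Texture {
    const view_formats = [_]gpu.Texture.Format{.bgra8_unorm_srgb};
    return device.createTexture(&gpu.Texture.Descriptor.init(.{
        .size = .{ .width = width, .height = height },
        .format = .bgra8_unorm,
        .usage = .{ .render_attachment = true, .texture_binding = true },
        .view_formats = &view_formats,
    }));
}
```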