gpu: internalize Buffer types

commit 9ba109f659
parent e10432834b
Author: Stephen Gutekanst <stephen@hexops.com>
Committed: Stephen Gutekanst, 2022-07-29 23:42:50 -07:00
Signed-off-by: Stephen Gutekanst <stephen@hexops.com>
4 changed files with 76 additions and 77 deletions
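
In caller terms, the top-level Buffer* declarations move under the Buffer opaque: BufferDescriptor becomes Buffer.Descriptor, BufferUsage becomes Buffer.Usage, BufferBindingLayout becomes Buffer.BindingLayout, BufferBindingType becomes Buffer.BindingType, BufferMapCallback becomes Buffer.MapCallback, and BufferMapAsyncStatus becomes Buffer.MapAsyncStatus. A minimal caller-side sketch of the rename (the gpu import path, the helper name, and the incoming device pointer are illustrative assumptions, not part of this commit):

const gpu = @import("gpu");

// Before this commit: gpu.BufferDescriptor / gpu.BufferUsage.
// After this commit, the same types are reached through the Buffer opaque.
fn createVertexBuffer(device: *gpu.Device) *gpu.Buffer {
    return device.createBuffer(&gpu.Buffer.Descriptor{
        .usage = gpu.Buffer.Usage{ .vertex = true, .copy_dst = true },
        .size = 1024,
        .mapped_at_creation = false,
    });
}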

View file

@@ -1,7 +1,6 @@
const ChainedStruct = @import("types.zig").ChainedStruct;
const ShaderStageFlags = @import("types.zig").ShaderStageFlags;
const Buffer = @import("buffer.zig").Buffer;
const BufferBindingLayout = @import("buffer.zig").BufferBindingLayout;
const Sampler = @import("sampler.zig").Sampler;
const SamplerBindingLayout = @import("sampler.zig").SamplerBindingLayout;
const Texture = @import("texture.zig").Texture;
@@ -14,7 +13,7 @@ pub const BindGroupLayout = opaque {
next_in_chain: ?*const ChainedStruct = null,
binding: u32,
visibility: ShaderStageFlags,
buffer: BufferBindingLayout,
buffer: Buffer.BindingLayout,
sampler: SamplerBindingLayout,
texture: TextureBindingLayout,
storage_texture: StorageTextureBindingLayout,
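
For illustration only (not part of the diff): the buffer field of a layout entry is now typed through the nested declaration. The field names below come from the new Buffer.BindingLayout introduced later in this commit; the gpu import path is an assumption.

const gpu = @import("gpu");

// A uniform-buffer binding described with the relocated type
// (previously gpu.BufferBindingLayout, now gpu.Buffer.BindingLayout).
const uniform_binding = gpu.Buffer.BindingLayout{
    .type = .uniform,
    .has_dynamic_offset = false,
    .min_binding_size = 0,
};

The other binding kinds in the entry (sampler, texture, storage_texture) are left as-is in this hunk.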

View file

@@ -4,6 +4,68 @@ const MapModeFlags = @import("types.zig").MapModeFlags;
const Impl = @import("interface.zig").Impl;
pub const Buffer = opaque {
pub const MapCallback = fn (status: MapAsyncStatus, userdata: *anyopaque) callconv(.C) void;
pub const BindingType = enum(u32) {
undef = 0x00000000,
uniform = 0x00000001,
storage = 0x00000002,
read_only_storage = 0x00000003,
};
pub const MapAsyncStatus = enum(u32) {
success = 0x00000000,
err = 0x00000001,
unknown = 0x00000002,
device_lost = 0x00000003,
destroyed_before_callback = 0x00000004,
unmapped_before_callback = 0x00000005,
};
// TODO: should be UsageFlags
pub const Usage = packed struct {
map_read: bool = false,
map_write: bool = false,
copy_src: bool = false,
copy_dst: bool = false,
index: bool = false,
vertex: bool = false,
uniform: bool = false,
storage: bool = false,
indirect: bool = false,
query_resolve: bool = false,
_padding: u22 = 0,
comptime {
std.debug.assert(
@sizeOf(@This()) == @sizeOf(u32) and
@bitSizeOf(@This()) == @bitSizeOf(u32),
);
}
pub const none = Usage{};
pub fn equal(a: Usage, b: Usage) bool {
return @truncate(u10, @bitCast(u32, a)) == @truncate(u10, @bitCast(u32, b));
}
};
pub const BindingLayout = extern struct {
next_in_chain: ?*const ChainedStruct = null,
type: BindingType = .undef,
has_dynamic_offset: bool = false,
min_binding_size: u64 = 0,
};
pub const Descriptor = extern struct {
next_in_chain: ?*const ChainedStruct = null,
label: ?[*:0]const u8 = null,
usage: Usage,
size: u64,
mapped_at_creation: bool = true,
};
pub inline fn destroy(buffer: *Buffer) void {
Impl.bufferDestroy(buffer);
}
@@ -24,11 +86,11 @@ pub const Buffer = opaque {
return Impl.bufferGetSize(buffer);
}
pub inline fn bufferGetUsage(buffer: *Buffer) BufferUsage {
pub inline fn bufferGetUsage(buffer: *Buffer) Buffer.Usage {
return Impl.bufferGetUsage(buffer);
}
pub inline fn bufferMapAsync(buffer: *Buffer, mode: MapModeFlags, offset: usize, size: usize, callback: BufferMapCallback, userdata: *anyopaque) void {
pub inline fn bufferMapAsync(buffer: *Buffer, mode: MapModeFlags, offset: usize, size: usize, callback: MapCallback, userdata: *anyopaque) void {
Impl.bufferMapAsync(buffer, mode, offset, size, callback, userdata);
}
@@ -48,64 +110,3 @@ pub const Buffer = opaque {
Impl.bufferRelease(buffer);
}
};
pub const BufferMapCallback = fn (status: BufferMapAsyncStatus, userdata: *anyopaque) callconv(.C) void;
pub const BufferBindingType = enum(u32) {
undef = 0x00000000,
uniform = 0x00000001,
storage = 0x00000002,
read_only_storage = 0x00000003,
};
pub const BufferMapAsyncStatus = enum(u32) {
success = 0x00000000,
err = 0x00000001,
unknown = 0x00000002,
device_lost = 0x00000003,
destroyed_before_callback = 0x00000004,
unmapped_before_callback = 0x00000005,
};
pub const BufferUsage = packed struct {
map_read: bool = false,
map_write: bool = false,
copy_src: bool = false,
copy_dst: bool = false,
index: bool = false,
vertex: bool = false,
uniform: bool = false,
storage: bool = false,
indirect: bool = false,
query_resolve: bool = false,
_padding: u22 = 0,
comptime {
std.debug.assert(
@sizeOf(@This()) == @sizeOf(u32) and
@bitSizeOf(@This()) == @bitSizeOf(u32),
);
}
pub const none = BufferUsage{};
pub fn equal(a: BufferUsage, b: BufferUsage) bool {
return @truncate(u10, @bitCast(u32, a)) == @truncate(u10, @bitCast(u32, b));
}
};
pub const BufferBindingLayout = extern struct {
next_in_chain: ?*const ChainedStruct = null,
type: BufferBindingType = .undef,
has_dynamic_offset: bool = false,
min_binding_size: u64 = 0,
};
pub const BufferDescriptor = extern struct {
next_in_chain: ?*const ChainedStruct = null,
label: ?[*:0]const u8 = null,
usage: BufferUsage,
size: u64,
mapped_at_creation: bool = true,
};
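
As a usage note on the buffer.zig change above (an illustrative sketch, not part of the commit): the relocated Usage packed struct still occupies a single u32, and equal compares only the ten defined flag bits, ignoring the padding. Assuming the module is imported as gpu:

const std = @import("std");
const gpu = @import("gpu");

test "Buffer.Usage flags compare by their defined bits" {
    const usage = gpu.Buffer.Usage{ .map_write = true, .copy_src = true };
    try std.testing.expect(!usage.equal(gpu.Buffer.Usage.none));
    try std.testing.expect(usage.map_write and !usage.uniform);
}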

View file

@@ -3,7 +3,6 @@ const QueueDescriptor = @import("queue.zig").QueueDescriptor;
const BindGroup = @import("bind_group.zig").BindGroup;
const BindGroupLayout = @import("bind_group_layout.zig").BindGroupLayout;
const Buffer = @import("buffer.zig").Buffer;
const BufferDescriptor = @import("buffer.zig").BufferDescriptor;
const CommandEncoder = @import("command_encoder.zig").CommandEncoder;
const CommandEncoderDescriptor = @import("command_encoder.zig").CommandEncoderDescriptor;
const ComputePipeline = @import("compute_pipeline.zig").ComputePipeline;
@@ -48,7 +47,7 @@ pub const Device = opaque {
return Impl.deviceCreateBindGroupLayout(device, descriptor);
}
pub inline fn createBuffer(device: *Device, descriptor: *const BufferDescriptor) *Buffer {
pub inline fn createBuffer(device: *Device, descriptor: *const Buffer.Descriptor) *Buffer {
return Impl.deviceCreateBuffer(device, descriptor);
}

View file

@@ -35,8 +35,8 @@ pub fn Interface(comptime T: type) type {
assertDecl(T, "bufferGetConstMappedRange", fn (buffer: *gpu.Buffer, offset: usize, size: usize) callconv(.Inline) ?*const anyopaque);
assertDecl(T, "bufferGetMappedRange", fn (buffer: *gpu.Buffer, offset: usize, size: usize) callconv(.Inline) ?*anyopaque);
assertDecl(T, "bufferGetSize", fn (buffer: *gpu.Buffer) callconv(.Inline) u64);
assertDecl(T, "bufferGetUsage", fn (buffer: *gpu.Buffer) callconv(.Inline) gpu.BufferUsage);
assertDecl(T, "bufferMapAsync", fn (buffer: *gpu.Buffer, mode: gpu.MapModeFlags, offset: usize, size: usize, callback: gpu.BufferMapCallback, userdata: *anyopaque) callconv(.Inline) void);
assertDecl(T, "bufferGetUsage", fn (buffer: *gpu.Buffer) callconv(.Inline) gpu.Buffer.Usage);
assertDecl(T, "bufferMapAsync", fn (buffer: *gpu.Buffer, mode: gpu.MapModeFlags, offset: usize, size: usize, callback: gpu.Buffer.MapCallback, userdata: *anyopaque) callconv(.Inline) void);
assertDecl(T, "bufferSetLabel", fn (buffer: *gpu.Buffer, label: [*:0]const u8) callconv(.Inline) void);
assertDecl(T, "bufferUnmap", fn (buffer: *gpu.Buffer) callconv(.Inline) void);
assertDecl(T, "bufferReference", fn (buffer: *gpu.Buffer) callconv(.Inline) void);
@@ -81,7 +81,7 @@ pub fn Interface(comptime T: type) type {
assertDecl(T, "computePipelineRelease", fn (compute_pipeline: *gpu.ComputePipeline) callconv(.Inline) void);
assertDecl(T, "deviceCreateBindGroup", fn (device: *gpu.Device, descriptor: *const gpu.BindGroup.Descriptor) callconv(.Inline) *gpu.BindGroup);
assertDecl(T, "deviceCreateBindGroupLayout", fn (device: *gpu.Device, descriptor: *const gpu.BindGroupLayout.Descriptor) callconv(.Inline) *gpu.BindGroupLayout);
assertDecl(T, "deviceCreateBuffer", fn (device: *gpu.Device, descriptor: *const gpu.BufferDescriptor) callconv(.Inline) *gpu.Buffer);
assertDecl(T, "deviceCreateBuffer", fn (device: *gpu.Device, descriptor: *const gpu.Buffer.Descriptor) callconv(.Inline) *gpu.Buffer);
assertDecl(T, "deviceCreateCommandEncoder", fn (device: *gpu.Device, descriptor: ?*const gpu.CommandEncoderDescriptor) callconv(.Inline) *gpu.CommandEncoder);
assertDecl(T, "deviceCreateComputePipeline", fn (device: *gpu.Device, descriptor: *const gpu.ComputePipelineDescriptor) callconv(.Inline) *gpu.ComputePipeline);
assertDecl(T, "deviceCreateComputePipelineAsync", fn (device: *gpu.Device, descriptor: *const gpu.ComputePipelineDescriptor, callback: gpu.CreateComputePipelineAsyncCallback, userdata: *anyopaque) callconv(.Inline) void);
@@ -327,14 +327,14 @@ pub fn Export(comptime T: type) type {
}
// WGPU_EXPORT WGPUBufferUsage wgpuBufferGetUsage(WGPUBuffer buffer);
export fn wgpuBufferGetUsage(buffer: *gpu.Buffer) gpu.BufferUsage {
export fn wgpuBufferGetUsage(buffer: *gpu.Buffer) gpu.Buffer.Usage {
return T.bufferGetUsage(buffer);
}
// TODO: Zig cannot currently export a packed struct gpu.MapModeFlags, so we use a u32 for
// now.
// WGPU_EXPORT void wgpuBufferMapAsync(WGPUBuffer buffer, WGPUMapModeFlags mode, size_t offset, size_t size, WGPUBufferMapCallback callback, void * userdata);
export fn wgpuBufferMapAsync(buffer: *gpu.Buffer, mode: u32, offset: usize, size: usize, callback: gpu.BufferMapCallback, userdata: *anyopaque) void {
export fn wgpuBufferMapAsync(buffer: *gpu.Buffer, mode: u32, offset: usize, size: usize, callback: gpu.Buffer.MapCallback, userdata: *anyopaque) void {
T.bufferMapAsync(buffer, @bitCast(gpu.MapModeFlags, mode), offset, size, callback, userdata);
}
@@ -558,8 +558,8 @@ pub fn Export(comptime T: type) type {
return T.deviceCreateBindGroupLayout(device, descriptor);
}
// WGPU_EXPORT WGPUBuffer wgpuDeviceCreateBuffer(WGPUDevice device, WGPUBufferDescriptor const * descriptor);
export fn wgpuDeviceCreateBuffer(device: *gpu.Device, descriptor: *const gpu.BufferDescriptor) *gpu.Buffer {
// WGPU_EXPORT WGPUBuffer wgpuDeviceCreateBuffer(WGPUDevice device, WGPUBuffer.Descriptor const * descriptor);
export fn wgpuDeviceCreateBuffer(device: *gpu.Device, descriptor: *const gpu.Buffer.Descriptor) *gpu.Buffer {
return T.deviceCreateBuffer(device, descriptor);
}
@@ -1336,12 +1336,12 @@ pub const StubInterface = Interface(struct {
unreachable;
}
pub inline fn bufferGetUsage(buffer: *gpu.Buffer) gpu.BufferUsage {
pub inline fn bufferGetUsage(buffer: *gpu.Buffer) gpu.Buffer.Usage {
_ = buffer;
unreachable;
}
pub inline fn bufferMapAsync(buffer: *gpu.Buffer, mode: gpu.MapModeFlags, offset: usize, size: usize, callback: gpu.BufferMapCallback, userdata: *anyopaque) void {
pub inline fn bufferMapAsync(buffer: *gpu.Buffer, mode: gpu.MapModeFlags, offset: usize, size: usize, callback: gpu.Buffer.MapCallback, userdata: *anyopaque) void {
_ = buffer;
_ = mode;
_ = offset;
@@ -1629,7 +1629,7 @@ pub const StubInterface = Interface(struct {
unreachable;
}
pub inline fn deviceCreateBuffer(device: *gpu.Device, descriptor: *const gpu.BufferDescriptor) *gpu.Buffer {
pub inline fn deviceCreateBuffer(device: *gpu.Device, descriptor: *const gpu.Buffer.Descriptor) *gpu.Buffer {
_ = device;
_ = descriptor;
unreachable;