gpu: replace &slice[0] with slice.ptr
This avoids UB if the slice is empty
parent f4c8a1908d
commit c62b5ba52f
4 changed files with 29 additions and 29 deletions
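Why the change matters, as a minimal standalone sketch (not part of this commit, and written against the same pre-0.11 Zig syntax the codebase uses; the helper names headPtr/headPtrOld are made up for illustration): &slice[0] indexes element 0, so on an empty slice it trips the bounds check, a panic in Debug/ReleaseSafe and undefined behavior in ReleaseFast/ReleaseSmall, whereas slice.ptr only reads the slice's pointer field and is valid for any length as long as no element is dereferenced.

const std = @import("std");

// New pattern: .ptr never touches an element, so it is valid even when data.len == 0.
fn headPtr(data: []const u8) [*]const u8 {
    return data.ptr;
}

// Old pattern: &data[0] asserts 0 < data.len. For an empty slice this panics in
// safe builds and is undefined behavior in ReleaseFast/ReleaseSmall.
fn headPtrOld(data: []const u8) [*]const u8 {
    return @ptrCast([*]const u8, &data[0]);
}

pub fn main() void {
    const buf = [_]u8{ 1, 2, 3 };
    const full: []const u8 = buf[0..];
    const empty: []const u8 = buf[0..0];

    _ = headPtr(full);
    _ = headPtr(empty); // fine: nothing is dereferenced
    _ = headPtrOld(full);
    // _ = headPtrOld(empty); // would panic (or be UB without safety checks)

    std.debug.print("full.len={d} empty.len={d}\n", .{ full.len, empty.len });
}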
@@ -38,12 +38,12 @@ pub inline fn destroy(buf: Buffer) void {
 pub inline fn getConstMappedRange(buf: Buffer, comptime T: type, offset: usize, len: usize) []const T {
     const data = buf.vtable.getConstMappedRange(buf.ptr, offset, @sizeOf(T) * len);
-    return @ptrCast(*const T, &data[0])[0..len];
+    return @ptrCast([*]const T, data.ptr)[0..len];
 }
 
 pub inline fn getMappedRange(buf: Buffer, comptime T: type, offset: usize, len: usize) []T {
     const data = buf.vtable.getMappedRange(buf.ptr, offset, @sizeOf(T) * len);
-    return @ptrCast(*T, &data[0])[0..len];
+    return @ptrCast([*]T, data.ptr)[0..len];
 }
 
 pub inline fn setLabel(buf: Buffer, label: [:0]const u8) void {
@@ -33,7 +33,7 @@ pub const VTable = struct {
     pushDebugGroup: fn (ptr: *anyopaque, group_label: [*:0]const u8) void,
     resolveQuerySet: fn (ptr: *anyopaque, query_set: QuerySet, first_query: u32, query_count: u32, destination: Buffer, destination_offset: u64) void,
     setLabel: fn (ptr: *anyopaque, label: [:0]const u8) void,
-    writeBuffer: fn (ptr: *anyopaque, buffer: Buffer, buffer_offset: u64, data: *const u8, size: u64) void,
+    writeBuffer: fn (ptr: *anyopaque, buffer: Buffer, buffer_offset: u64, data: [*]const u8, size: u64) void,
     writeTimestamp: fn (ptr: *anyopaque, query_set: QuerySet, query_index: u32) void,
 };
 
@@ -135,7 +135,7 @@ pub inline fn writeBuffer(pass: RenderPassEncoder, buffer: Buffer, buffer_offset
         pass.ptr,
         buffer,
         buffer_offset,
-        @ptrCast(*const u8, &data[0]),
+        @ptrCast([*]const u8, data.ptr),
         @intCast(u64, data.len) * @sizeOf(std.meta.Elem(@TypeOf(data))),
     );
 }
@@ -245,7 +245,7 @@ pub fn wrapAdapter(adapter: c.WGPUAdapter) Adapter {
         .vtable = &adapter_vtable,
     };
 
-    const features_len = c.wgpuAdapterEnumerateFeatures(adapter.?, @ptrCast(*c.WGPUFeatureName, &wrapped._features[0]));
+    const features_len = c.wgpuAdapterEnumerateFeatures(adapter.?, @ptrCast([*]c.WGPUFeatureName, &wrapped._features));
     wrapped.features = wrapped._features[0..features_len];
     return wrapped;
 }
@@ -278,7 +278,7 @@ const adapter_vtable = Adapter.VTable{
            .nextInChain = null,
            .label = if (descriptor.label) |l| l else null,
            .requiredFeaturesCount = if (descriptor.required_features) |f| @intCast(u32, f.len) else 0,
-            .requiredFeatures = if (descriptor.required_features) |f| @ptrCast([*c]const c_uint, &f[0]) else null,
+            .requiredFeatures = if (descriptor.required_features) |f| @ptrCast([*]const c_uint, f.ptr) else null,
            .requiredLimits = if (required_limits) |*l| l else null,
        };
 
@@ -320,7 +320,7 @@ fn wrapDevice(device: c.WGPUDevice) Device {
         .vtable = &device_vtable,
     };
 
-    const features_len = c.wgpuDeviceEnumerateFeatures(device.?, @ptrCast(*c.WGPUFeatureName, &wrapped._features[0]));
+    const features_len = c.wgpuDeviceEnumerateFeatures(device.?, @ptrCast([*]c.WGPUFeatureName, &wrapped._features));
     wrapped.features = wrapped._features[0..features_len];
     return wrapped;
 }
@@ -410,7 +410,7 @@ const device_vtable = Device.VTable{
            .label = if (descriptor.label) |l| l else null,
            .layout = @ptrCast(c.WGPUBindGroupLayout, descriptor.layout.ptr),
            .entryCount = @intCast(u32, entries.len),
-            .entries = &entries[0],
+            .entries = entries.ptr,
        };
 
        return wrapBindGroup(c.wgpuDeviceCreateBindGroup(@ptrCast(c.WGPUDevice, ptr), &desc));
@@ -451,7 +451,7 @@ const device_vtable = Device.VTable{
            .nextInChain = null,
            .label = if (descriptor.label) |l| l else null,
            .entryCount = @intCast(u32, descriptor.entries.len),
-            .entries = @ptrCast(*const c.WGPUBindGroupLayoutEntry, &descriptor.entries[0]),
+            .entries = @ptrCast([*]const c.WGPUBindGroupLayoutEntry, descriptor.entries.ptr),
        };
        return wrapBindGroupLayout(c.wgpuDeviceCreateBindGroupLayout(@ptrCast(c.WGPUDevice, ptr), &desc));
    }
@@ -487,7 +487,7 @@ const device_vtable = Device.VTable{
                .next = null,
                .sType = c.WGPUSType_ShaderModuleSPIRVDescriptor,
            },
-            .code = @ptrCast([*c]const u32, &spirv[0]),
+            .code = spirv.ptr,
            .codeSize = @intCast(u32, spirv.len),
        };
        const desc = c.WGPUShaderModuleDescriptor{
@@ -634,7 +634,7 @@ const device_vtable = Device.VTable{
            .nextInChain = null,
            .label = if (descriptor.label) |l| l else null,
            .bindGroupLayoutCount = @intCast(u32, bind_group_layouts.len),
-            .bindGroupLayouts = &bind_group_layouts[0],
+            .bindGroupLayouts = bind_group_layouts.ptr,
        };
        return wrapPipelineLayout(c.wgpuDeviceCreatePipelineLayout(@ptrCast(c.WGPUDevice, ptr), &desc));
    }
@@ -646,7 +646,7 @@ const device_vtable = Device.VTable{
            .label = if (descriptor.label) |l| l else null,
            .type = @enumToInt(descriptor.type),
            .count = descriptor.count,
-            .pipelineStatistics = @ptrCast(*const c.WGPUPipelineStatisticName, &descriptor.pipeline_statistics[0]),
+            .pipelineStatistics = @ptrCast([*]const c.WGPUPipelineStatisticName, descriptor.pipeline_statistics.ptr),
            .pipelineStatisticsCount = @intCast(u32, descriptor.pipeline_statistics.len),
        };
        return wrapQuerySet(c.wgpuDeviceCreateQuerySet(@ptrCast(c.WGPUDevice, ptr), &desc));
@@ -658,7 +658,7 @@ const device_vtable = Device.VTable{
            .nextInChain = null,
            .label = if (descriptor.label) |l| l else null,
            .colorFormatsCount = @intCast(u32, descriptor.color_formats.len),
-            .colorFormats = @ptrCast(*const c.WGPUTextureFormat, &descriptor.color_formats[0]),
+            .colorFormats = @ptrCast([*]const c.WGPUTextureFormat, descriptor.color_formats.ptr),
            .depthStencilFormat = @enumToInt(descriptor.depth_stencil_format),
            .sampleCount = descriptor.sample_count,
            .depthReadOnly = descriptor.depth_read_only,
@@ -781,7 +781,7 @@ inline fn convertComputePipelineDescriptor(descriptor: *const ComputePipeline.De
            .module = @ptrCast(c.WGPUShaderModule, descriptor.compute.module.ptr),
            .entryPoint = descriptor.compute.entry_point,
            .constantCount = if (descriptor.compute.constants) |v| @intCast(u32, v.len) else 0,
-            .constants = if (descriptor.compute.constants) |v| @ptrCast(*const c.WGPUConstantEntry, &v[0]) else null,
+            .constants = if (descriptor.compute.constants) |v| @ptrCast([*]const c.WGPUConstantEntry, v.ptr) else null,
        },
    };
 }
@@ -812,9 +812,9 @@ inline fn convertRenderPipelineDescriptor(
        .module = @ptrCast(c.WGPUShaderModule, d.fragment.module.ptr),
        .entryPoint = d.vertex.entry_point,
        .constantCount = if (d.fragment.constants) |v| @intCast(u32, v.len) else 0,
-        .constants = if (d.fragment.constants) |v| @ptrCast(*const c.WGPUConstantEntry, &v[0]) else null,
+        .constants = if (d.fragment.constants) |v| @ptrCast([*]const c.WGPUConstantEntry, v.ptr) else null,
        .targetCount = if (d.fragment.targets) |v| @intCast(u32, v.len) else 0,
-        .targets = if (d.fragment.targets) |v| @ptrCast(*const c.WGPUColorTargetState, &v[0]) else null,
+        .targets = if (d.fragment.targets) |v| @ptrCast([*]const c.WGPUColorTargetState, v.ptr) else null,
    };
 
    return c.WGPURenderPipelineDescriptor{
@@ -826,9 +826,9 @@ inline fn convertRenderPipelineDescriptor(
            .module = @ptrCast(c.WGPUShaderModule, d.vertex.module.ptr),
            .entryPoint = d.vertex.entry_point,
            .constantCount = if (d.vertex.constants) |v| @intCast(u32, v.len) else 0,
-            .constants = if (d.vertex.constants) |v| @ptrCast(*const c.WGPUConstantEntry, &v[0]) else null,
+            .constants = if (d.vertex.constants) |v| @ptrCast([*]const c.WGPUConstantEntry, v.ptr) else null,
            .bufferCount = if (d.vertex.buffers) |v| @intCast(u32, v.len) else 0,
-            .buffers = if (d.vertex.buffers) |v| @ptrCast(*const c.WGPUVertexBufferLayout, &v[0]) else null,
+            .buffers = if (d.vertex.buffers) |v| @ptrCast([*]const c.WGPUVertexBufferLayout, v.ptr) else null,
        },
        .primitive = c.WGPUPrimitiveState{
            .nextInChain = null,
@@ -908,7 +908,7 @@ const queue_vtable = Queue.VTable{
            c.wgpuQueueSubmit(
                wgpu_queue,
                @intCast(u32, commands.len),
-                @ptrCast(*c.WGPUCommandBuffer, &commands[0]),
+                @ptrCast([*]c.WGPUCommandBuffer, commands.ptr),
            );
        }
    }).submit,
@@ -1272,7 +1272,7 @@ const render_pass_encoder_vtable = RenderPassEncoder.VTable{
            c.wgpuRenderPassEncoderExecuteBundles(
                @ptrCast(c.WGPURenderPassEncoder, ptr),
                @intCast(u32, c_bundles.len),
-                &c_bundles[0],
+                c_bundles.ptr,
            );
        }
    }).executeBundles,
@@ -1303,7 +1303,7 @@ const render_pass_encoder_vtable = RenderPassEncoder.VTable{
                group_index,
                @ptrCast(c.WGPUBindGroup, group.ptr),
                @intCast(u32, dynamic_offsets.len),
-                &dynamic_offsets[0],
+                dynamic_offsets.ptr,
            );
        }
    }).setBindGroup,
@@ -1500,7 +1500,7 @@ const render_bundle_encoder_vtable = RenderBundleEncoder.VTable{
                group_index,
                @ptrCast(c.WGPUBindGroup, group.ptr),
                @intCast(u32, dynamic_offsets.len),
-                &dynamic_offsets[0],
+                dynamic_offsets.ptr,
            );
        }
    }).setBindGroup,
@@ -1868,7 +1868,7 @@ const command_encoder_vtable = CommandEncoder.VTable{
            .nextInChain = null,
            .label = if (d.label) |l| l else null,
            .timestampWriteCount = @intCast(u32, timestamp_writes.len),
-            .timestampWrites = @ptrCast(*const c.WGPUComputePassTimestampWrite, &timestamp_writes[0]),
+            .timestampWrites = @ptrCast([*]const c.WGPUComputePassTimestampWrite, timestamp_writes.ptr),
        };
        return wrapComputePassEncoder(c.wgpuCommandEncoderBeginComputePass(@ptrCast(c.WGPUCommandEncoder, ptr), &desc));
    }
@@ -1945,7 +1945,7 @@ const command_encoder_vtable = CommandEncoder.VTable{
            .nextInChain = null,
            .label = if (d.label) |l| l else null,
            .colorAttachmentCount = @intCast(u32, color_attachments.len),
-            .colorAttachments = &color_attachments[0],
+            .colorAttachments = color_attachments.ptr,
            .depthStencilAttachment = if (d.depth_stencil_attachment) |v| &c.WGPURenderPassDepthStencilAttachment{
                .view = @ptrCast(c.WGPUTextureView, v.view.ptr),
                .depthLoadOp = @enumToInt(v.depth_load_op),
@@ -1961,7 +1961,7 @@ const command_encoder_vtable = CommandEncoder.VTable{
            } else null,
            .occlusionQuerySet = if (d.occlusion_query_set) |v| @ptrCast(c.WGPUQuerySet, v.ptr) else null,
            .timestampWriteCount = if (timestamp_writes) |v| @intCast(u32, v.len) else 0,
-            .timestampWrites = if (timestamp_writes) |v| @ptrCast(*const c.WGPURenderPassTimestampWrite, &v[0]) else null,
+            .timestampWrites = if (timestamp_writes) |v| @ptrCast([*]const c.WGPURenderPassTimestampWrite, v.ptr) else null,
        };
        return wrapRenderPassEncoder(c.wgpuCommandEncoderBeginRenderPass(@ptrCast(c.WGPUCommandEncoder, ptr), &desc));
    }
@@ -2051,7 +2051,7 @@ const command_encoder_vtable = CommandEncoder.VTable{
        }
    }).pushDebugGroup,
    .writeBuffer = (struct {
-        pub fn writeBuffer(ptr: *anyopaque, buffer: Buffer, buffer_offset: u64, data: *const u8, size: u64) void {
+        pub fn writeBuffer(ptr: *anyopaque, buffer: Buffer, buffer_offset: u64, data: [*]const u8, size: u64) void {
            c.wgpuCommandEncoderWriteBuffer(
                @ptrCast(c.WGPUCommandEncoder, ptr),
                @ptrCast(c.WGPUBuffer, buffer.ptr),
@@ -2162,7 +2162,7 @@ const compute_pass_encoder_vtable = ComputePassEncoder.VTable{
                group_index,
                @ptrCast(c.WGPUBindGroup, group.ptr),
                @intCast(u32, dynamic_offsets.len),
-                &dynamic_offsets[0],
+                dynamic_offsets.ptr,
            );
        }
    }).setBindGroup,
@@ -55,7 +55,7 @@ pub inline fn writeBuffer(queue: Queue, buffer: Buffer, buffer_offset: u64, data
         queue.ptr,
         buffer,
         buffer_offset,
-        @ptrCast(*const anyopaque, &data[0]),
+        @ptrCast(*const anyopaque, data.ptr),
         @intCast(u64, data.len) * @sizeOf(std.meta.Elem(@TypeOf(data))),
     );
 }
@@ -70,7 +70,7 @@ pub inline fn writeTexture(
     queue.vtable.writeTexture(
         queue.ptr,
         destination,
-        @ptrCast(*const anyopaque, &data[0]),
+        @ptrCast(*const anyopaque, data.ptr),
         @intCast(u64, data.len) * @sizeOf(std.meta.Elem(@TypeOf(data))),
         data_layout,
         write_size,