all: refactor: cleanup module structure

Signed-off-by: Stephen Gutekanst <stephen@hexops.com>
This commit is contained in:
Stephen Gutekanst 2023-09-22 08:24:16 -07:00
parent 52c4eb5d74
commit c16cddd250
7 changed files with 88 additions and 67 deletions

368
src/gfx/Sprite.zig Normal file
View file

@ -0,0 +1,368 @@
const std = @import("std");
const core = @import("mach-core");
const gpu = core.gpu;
const ecs = @import("mach-ecs");
const Engine = @import("../engine.zig").Engine;
const mach = @import("../main.zig");
const math = mach.math;
const vec2 = math.vec2;
const Vec2 = math.Vec2;
const Vec3 = math.Vec3;
const Mat3x3 = math.Mat3x3;
const Mat4x4 = math.Mat4x4;
/// Internal state: one Pipeline per user-assigned pipeline ID.
pipelines: std.AutoArrayHashMapUnmanaged(u32, Pipeline),

/// The unique name identifying this module.
pub const name = .mach_gfx_sprite;

pub const components = struct {
    /// The ID of the pipeline this sprite belongs to. By default, zero.
    ///
    /// This determines which shader, textures, etc. are used for rendering the sprite.
    pub const pipeline = u8;

    /// The sprite model transformation matrix. A sprite is measured in pixel units, starting from
    /// (0, 0) at the top-left corner and extending to the size of the sprite. By default, the world
    /// origin (0, 0) lives at the center of the window.
    ///
    /// Example: in a 500px by 500px window, a sprite located at (0, 0) with size (250, 250) will
    /// cover the top-right hand corner of the window.
    pub const transform = Mat4x4;

    /// UV coordinate transformation matrix describing top-left corner / origin of sprite, in pixels.
    pub const uv_transform = Mat3x3;

    /// The size of the sprite, in pixels.
    pub const size = Vec2;
};
/// Per-frame shader uniforms; must match the `Uniforms` struct declared in sprite.wgsl.
const Uniforms = extern struct {
    // WebGPU requires that the size of struct fields are multiples of 16
    // So we use align(16) and 'extern' to maintain field order
    /// The view * orthographic projection matrix
    view_projection: Mat4x4 align(16),
    /// Total size of the sprite texture in pixels
    texture_size: Vec2 align(16),
};
/// All GPU resources belonging to one sprite rendering pipeline.
const Pipeline = struct {
    render: *gpu.RenderPipeline,
    texture_sampler: *gpu.Sampler,
    texture: *gpu.Texture,
    texture2: ?*gpu.Texture,
    texture3: ?*gpu.Texture,
    texture4: ?*gpu.Texture,
    bind_group: *gpu.BindGroup,
    uniforms: *gpu.Buffer,

    // Storage buffers
    num_sprites: u32,
    transforms: *gpu.Buffer,
    uv_transforms: *gpu.Buffer,
    sizes: *gpu.Buffer,

    /// Acquires a reference to every GPU resource held by this pipeline.
    pub fn reference(self: *Pipeline) void {
        self.render.reference();
        self.texture_sampler.reference();
        self.texture.reference();
        if (self.texture2) |tex| tex.reference();
        if (self.texture3) |tex| tex.reference();
        if (self.texture4) |tex| tex.reference();
        self.bind_group.reference();
        self.uniforms.reference();
        self.transforms.reference();
        self.uv_transforms.reference();
        self.sizes.reference();
    }

    /// Releases every GPU resource held by this pipeline.
    pub fn deinit(self: *Pipeline) void {
        self.render.release();
        self.texture_sampler.release();
        self.texture.release();
        if (self.texture2) |tex| tex.release();
        if (self.texture3) |tex| tex.release();
        if (self.texture4) |tex| tex.release();
        self.bind_group.release();
        self.uniforms.release();
        self.transforms.release();
        self.uv_transforms.release();
        self.sizes.release();
    }
};
/// Options for creating (or re-creating) a sprite rendering pipeline.
pub const PipelineOptions = struct {
    /// The pipeline ID to create or replace.
    pipeline: u32,

    /// Shader program to use when rendering. Defaults to the built-in sprite.wgsl shader.
    shader: ?*gpu.ShaderModule = null,

    /// Whether to use linear (blurry) or nearest (pixelated) upscaling/downscaling.
    /// Defaults to nearest (pixelated).
    texture_sampler: ?*gpu.Sampler = null,

    /// Textures to use when rendering. The default shader can handle one texture.
    texture: *gpu.Texture,
    texture2: ?*gpu.Texture = null,
    texture3: ?*gpu.Texture = null,
    texture4: ?*gpu.Texture = null,

    /// Alpha and color blending options.
    blend_state: ?gpu.BlendState = null,

    /// Pipeline overrides, these can be used to e.g. pass additional things to your shader program.
    bind_group_layout: ?*gpu.BindGroupLayout = null,
    bind_group: ?*gpu.BindGroup = null,
    color_target_state: ?gpu.ColorTargetState = null,
    fragment_state: ?gpu.FragmentState = null,
    pipeline_layout: ?*gpu.PipelineLayout = null,
};
/// Initializes the sprite module's internal state with an empty pipeline map.
pub fn machGfxSpriteInit(
    sprite_mod: *mach.Mod(.mach_gfx_sprite),
) !void {
    // TODO: struct default value initializers don't work
    sprite_mod.state = .{ .pipelines = .{} };
}
/// Creates the render pipeline identified by `opt.pipeline`, allocating the GPU resources
/// (storage buffers, sampler, uniform buffer, bind group, shader pipeline) used to batch-render
/// sprites. If a pipeline with this ID already exists, its resources are released and replaced.
pub fn machGfxSpriteInitPipeline(
    engine: *mach.Mod(.engine),
    sprite_mod: *mach.Mod(.mach_gfx_sprite),
    opt: PipelineOptions,
) !void {
    const device = engine.state.device;

    const pipeline = try sprite_mod.state.pipelines.getOrPut(engine.allocator, opt.pipeline);
    if (pipeline.found_existing) {
        // Re-initializing an existing pipeline: drop the old GPU resources first.
        pipeline.value_ptr.*.deinit();
    }

    // Storage buffers
    const sprite_buffer_cap = 1024 * 512; // TODO: allow user to specify preallocation
    const transforms = device.createBuffer(&.{
        .usage = .{ .storage = true, .copy_dst = true },
        .size = @sizeOf(Mat4x4) * sprite_buffer_cap,
        .mapped_at_creation = .false,
    });
    const uv_transforms = device.createBuffer(&.{
        .usage = .{ .storage = true, .copy_dst = true },
        .size = @sizeOf(Mat3x3) * sprite_buffer_cap,
        .mapped_at_creation = .false,
    });
    const sizes = device.createBuffer(&.{
        .usage = .{ .storage = true, .copy_dst = true },
        .size = @sizeOf(Vec2) * sprite_buffer_cap,
        .mapped_at_creation = .false,
    });

    // Nearest-neighbor (pixelated) sampling by default, unless the user supplied a sampler.
    const texture_sampler = opt.texture_sampler orelse device.createSampler(&.{
        .mag_filter = .nearest,
        .min_filter = .nearest,
    });
    const uniforms = device.createBuffer(&.{
        .usage = .{ .copy_dst = true, .uniform = true },
        .size = @sizeOf(Uniforms),
        .mapped_at_creation = .false,
    });
    const bind_group_layout = opt.bind_group_layout orelse device.createBindGroupLayout(
        &gpu.BindGroupLayout.Descriptor.init(.{
            .entries = &.{
                gpu.BindGroupLayout.Entry.buffer(0, .{ .vertex = true }, .uniform, false, 0),
                gpu.BindGroupLayout.Entry.buffer(1, .{ .vertex = true }, .read_only_storage, false, 0),
                gpu.BindGroupLayout.Entry.buffer(2, .{ .vertex = true }, .read_only_storage, false, 0),
                gpu.BindGroupLayout.Entry.buffer(3, .{ .vertex = true }, .read_only_storage, false, 0),
                gpu.BindGroupLayout.Entry.sampler(4, .{ .fragment = true }, .filtering),
                gpu.BindGroupLayout.Entry.texture(5, .{ .fragment = true }, .float, .dimension_2d, false),
                gpu.BindGroupLayout.Entry.texture(6, .{ .fragment = true }, .float, .dimension_2d, false),
                gpu.BindGroupLayout.Entry.texture(7, .{ .fragment = true }, .float, .dimension_2d, false),
                gpu.BindGroupLayout.Entry.texture(8, .{ .fragment = true }, .float, .dimension_2d, false),
            },
        }),
    );
    defer bind_group_layout.release();

    // When an optional texture is absent, the primary texture's view is reused for that bind
    // group slot. Only release views this function actually created: previously the aliased
    // primary view was released once per slot, over-decrementing its reference count.
    const texture_view = opt.texture.createView(&gpu.TextureView.Descriptor{});
    defer texture_view.release();
    const texture2_view = if (opt.texture2) |tex| tex.createView(&gpu.TextureView.Descriptor{}) else texture_view;
    defer if (texture2_view != texture_view) texture2_view.release();
    const texture3_view = if (opt.texture3) |tex| tex.createView(&gpu.TextureView.Descriptor{}) else texture_view;
    defer if (texture3_view != texture_view) texture3_view.release();
    const texture4_view = if (opt.texture4) |tex| tex.createView(&gpu.TextureView.Descriptor{}) else texture_view;
    defer if (texture4_view != texture_view) texture4_view.release();

    const bind_group = opt.bind_group orelse device.createBindGroup(
        &gpu.BindGroup.Descriptor.init(.{
            .layout = bind_group_layout,
            .entries = &.{
                gpu.BindGroup.Entry.buffer(0, uniforms, 0, @sizeOf(Uniforms)),
                gpu.BindGroup.Entry.buffer(1, transforms, 0, @sizeOf(Mat4x4) * sprite_buffer_cap),
                gpu.BindGroup.Entry.buffer(2, uv_transforms, 0, @sizeOf(Mat3x3) * sprite_buffer_cap),
                gpu.BindGroup.Entry.buffer(3, sizes, 0, @sizeOf(Vec2) * sprite_buffer_cap),
                gpu.BindGroup.Entry.sampler(4, texture_sampler),
                gpu.BindGroup.Entry.textureView(5, texture_view),
                gpu.BindGroup.Entry.textureView(6, texture2_view),
                gpu.BindGroup.Entry.textureView(7, texture3_view),
                gpu.BindGroup.Entry.textureView(8, texture4_view),
            },
        }),
    );

    // Standard source-over alpha blending for color; destination alpha is replaced.
    const blend_state = opt.blend_state orelse gpu.BlendState{
        .color = .{
            .operation = .add,
            .src_factor = .src_alpha,
            .dst_factor = .one_minus_src_alpha,
        },
        .alpha = .{
            .operation = .add,
            .src_factor = .one,
            .dst_factor = .zero,
        },
    };
    const shader_module = opt.shader orelse device.createShaderModuleWGSL("sprite.wgsl", @embedFile("sprite.wgsl"));
    defer shader_module.release();
    const color_target = opt.color_target_state orelse gpu.ColorTargetState{
        .format = core.descriptor.format,
        .blend = &blend_state,
        .write_mask = gpu.ColorWriteMaskFlags.all,
    };
    const fragment = opt.fragment_state orelse gpu.FragmentState.init(.{
        .module = shader_module,
        .entry_point = "fragMain",
        .targets = &.{color_target},
    });
    const bind_group_layouts = [_]*gpu.BindGroupLayout{bind_group_layout};
    const pipeline_layout = opt.pipeline_layout orelse device.createPipelineLayout(&gpu.PipelineLayout.Descriptor.init(.{
        .bind_group_layouts = &bind_group_layouts,
    }));
    defer pipeline_layout.release();
    const render = device.createRenderPipeline(&gpu.RenderPipeline.Descriptor{
        .fragment = &fragment,
        .layout = pipeline_layout,
        .vertex = gpu.VertexState{
            .module = shader_module,
            .entry_point = "vertMain",
        },
    });

    pipeline.value_ptr.* = Pipeline{
        .render = render,
        .texture_sampler = texture_sampler,
        .texture = opt.texture,
        .texture2 = opt.texture2,
        .texture3 = opt.texture3,
        .texture4 = opt.texture4,
        .bind_group = bind_group,
        .uniforms = uniforms,
        .num_sprites = 0,
        .transforms = transforms,
        .uv_transforms = uv_transforms,
        .sizes = sizes,
    };
    // NOTE(review): reference() bumps the refcount of every held resource, including ones
    // created above that already carry an initial reference — confirm against mach-gpu
    // ownership rules that this does not leak one reference per created resource.
    pipeline.value_ptr.reference();
}
/// Releases every pipeline's GPU resources and frees the pipeline map itself.
pub fn deinit(sprite_mod: *mach.Mod(.mach_gfx_sprite)) !void {
    const pipelines = &sprite_mod.state.pipelines;
    for (pipelines.values()) |*pipeline| {
        pipeline.deinit();
    }
    pipelines.deinit(sprite_mod.allocator);
}
/// Uploads the transform / UV-transform / size component data of every sprite entity into the
/// pipeline's storage buffers and records the total sprite count used when drawing.
pub fn machGfxSpriteUpdated(
    engine: *mach.Mod(.engine),
    sprite_mod: *mach.Mod(.mach_gfx_sprite),
    pipeline_id: u32,
) !void {
    const pipeline = sprite_mod.state.pipelines.getPtr(pipeline_id).?;
    const device = engine.state.device;

    // TODO: make sure these entities only belong to the given pipeline
    // we need a better tagging mechanism
    var archetypes_iter = engine.entities.query(.{ .all = &.{
        .{ .mach_gfx_sprite = &.{
            .uv_transform,
            .transform,
            .size,
            .pipeline,
        } },
    } });

    const encoder = device.createCommandEncoder(null);
    defer encoder.release();

    pipeline.num_sprites = 0;
    // NOTE(review): these offsets advance by element count (`slice.len`), not byte size. If
    // encoder.writeBuffer expects a byte offset (as the underlying WebGPU API does), data from
    // a second archetype would land at the wrong position — confirm the expected unit and
    // multiply by @sizeOf(...) if needed.
    var transforms_offset: usize = 0;
    var uv_transforms_offset: usize = 0;
    var sizes_offset: usize = 0;
    while (archetypes_iter.next()) |archetype| {
        var transforms = archetype.slice(.mach_gfx_sprite, .transform);
        var uv_transforms = archetype.slice(.mach_gfx_sprite, .uv_transform);
        var sizes = archetype.slice(.mach_gfx_sprite, .size);
        // TODO: confirm the lifetime of these slices is OK for writeBuffer, how long do they need
        // to live?
        encoder.writeBuffer(pipeline.transforms, transforms_offset, transforms);
        encoder.writeBuffer(pipeline.uv_transforms, uv_transforms_offset, uv_transforms);
        encoder.writeBuffer(pipeline.sizes, sizes_offset, sizes);
        transforms_offset += transforms.len;
        uv_transforms_offset += uv_transforms.len;
        sizes_offset += sizes.len;
        pipeline.num_sprites += @intCast(transforms.len);
    }

    var command = encoder.finish(null);
    defer command.release();
    engine.state.queue.submit(&[_]*gpu.CommandBuffer{command});
}
/// Writes the per-frame uniform data (view * orthographic projection matrix and the primary
/// texture's size) for this pipeline into its uniform buffer via the engine's command encoder.
pub fn machGfxSpritePreRender(
    engine: *mach.Mod(.engine),
    sprite_mod: *mach.Mod(.mach_gfx_sprite),
    pipeline_id: u32,
) !void {
    const pipeline = sprite_mod.state.pipelines.get(pipeline_id).?;

    // Update uniform buffer. Query the window size once rather than once per plane.
    const size = core.size();
    const half_width = @as(f32, @floatFromInt(size.width)) / 2;
    const half_height = @as(f32, @floatFromInt(size.height)) / 2;
    // World origin (0, 0) is at the center of the window.
    const ortho = Mat4x4.ortho(
        -half_width,
        half_width,
        -half_height,
        half_height,
        -0.1,
        100000,
    );
    const uniforms = Uniforms{
        .view_projection = ortho,
        // TODO: dimensions of other textures, number of textures present
        .texture_size = vec2(
            @as(f32, @floatFromInt(pipeline.texture.getWidth())),
            @as(f32, @floatFromInt(pipeline.texture.getHeight())),
        ),
    };
    engine.state.encoder.writeBuffer(pipeline.uniforms, 0, &[_]Uniforms{uniforms});
}
/// Records draw commands for the sprite batch into the engine's active render pass.
pub fn machGfxSpriteRender(
    engine: *mach.Mod(.engine),
    sprite_mod: *mach.Mod(.mach_gfx_sprite),
    pipeline_id: u32,
) !void {
    const pipeline = sprite_mod.state.pipelines.get(pipeline_id).?;
    const pass = engine.state.pass;

    // Each sprite is drawn as two triangles (six vertices); the vertex shader derives
    // positions from the storage buffers, so no vertex buffer is bound.
    pass.setPipeline(pipeline.render);
    // TODO: remove dynamic offsets?
    pass.setBindGroup(0, pipeline.bind_group, &.{});
    pass.draw(pipeline.num_sprites * 6, 1, 0, 0);
}

521
src/gfx/Text.zig Normal file
View file

@ -0,0 +1,521 @@
const std = @import("std");
const core = @import("mach-core");
const gpu = core.gpu;
const ecs = @import("mach-ecs");
const Engine = @import("../engine.zig").Engine;
const FontRenderer = @import("font.zig").FontRenderer;
const mach = @import("../main.zig");
const math = mach.math;
const vec2 = math.vec2;
const Vec2 = math.Vec2;
const Vec3 = math.Vec3;
const Vec4 = math.Vec4;
const Mat3x3 = math.Mat3x3;
const Mat4x4 = math.Mat4x4;
/// Internal state: one Pipeline per user-assigned pipeline ID.
pipelines: std.AutoArrayHashMapUnmanaged(u32, Pipeline),

/// The unique name identifying this module.
pub const name = .mach_gfx_text;

/// Converts points to pixels. e.g. a 12pt font size `12.0 * points_to_pixels == 16.0`
pub const points_to_pixels = 4.0 / 3.0;

// TODO: italics/bold
//
// TODO: should users use multiple text entities for different italic/bold/color regions, or should
// we handle that internally?
//
// TODO: better/proper text layout, shaping
//
// TODO: integrate freetype
//
// TODO: allow user to specify projection matrix (3d-space flat text etc.)

pub const components = struct {
    /// The ID of the pipeline this text belongs to. By default, zero.
    ///
    /// This determines which shader, textures, etc. are used for rendering the text.
    pub const pipeline = u8;

    /// The text model transformation matrix. Text is measured in pixel units, starting from
    /// (0, 0) at the top-left corner and extending to the size of the text. By default, the world
    /// origin (0, 0) lives at the center of the window.
    pub const transform = Mat4x4;

    /// A string of UTF-8 encoded text.
    pub const text = []const u8;

    /// The font to be rendered.
    pub const font = FontRenderer;

    /// Font size in pixels. To convert from points to pixels, multiply by `points_to_pixels`.
    pub const font_size = f32;

    /// Text color.
    // TODO: actually respect color
    pub const color = Vec4;
};
/// Per-frame shader uniforms; must match the uniforms declared in text.wgsl.
const Uniforms = extern struct {
    // WebGPU requires that the size of struct fields are multiples of 16
    // So we use align(16) and 'extern' to maintain field order
    /// The view * orthographic projection matrix
    view_projection: Mat4x4 align(16),
    /// Total size of the font atlas texture in pixels
    texture_size: Vec2 align(16),
};
/// A single glyph instance, laid out for upload into the `glyphs` storage buffer.
const Glyph = extern struct {
    /// Position of this glyph (top-left corner.)
    pos: Vec2,
    /// Size (width, height) of the glyph in pixels.
    size: Vec2,
    /// Top-left UV coordinate of the glyph in the atlas.
    // NOTE(review): machGfxTextUpdated assigns atlas region pixel coordinates here, so
    // "normalized" in the original comment appears inaccurate unless the shader divides by
    // texture_size — confirm against text.wgsl.
    uv_pos: Vec2,
    /// Which text this glyph belongs to; this is the index for transforms[i], colors[i].
    text_index: u32,
};
/// Maps a Unicode codepoint to its packed region within the font atlas texture.
const RegionMap = std.AutoArrayHashMapUnmanaged(u21, mach.Atlas.Region);

/// All GPU and CPU resources belonging to one text rendering pipeline.
const Pipeline = struct {
    render: *gpu.RenderPipeline,
    texture_sampler: *gpu.Sampler,
    texture: *gpu.Texture,
    texture_atlas: mach.Atlas,
    texture2: ?*gpu.Texture,
    texture3: ?*gpu.Texture,
    texture4: ?*gpu.Texture,
    bind_group: *gpu.BindGroup,
    uniforms: *gpu.Buffer,
    regions: RegionMap = .{},

    // Storage buffers
    num_texts: u32,
    num_glyphs: u32,
    transforms: *gpu.Buffer,
    colors: *gpu.Buffer,
    glyphs: *gpu.Buffer,

    /// Acquires a reference to every GPU resource held by this pipeline.
    pub fn reference(self: *Pipeline) void {
        self.render.reference();
        self.texture_sampler.reference();
        self.texture.reference();
        if (self.texture2) |tex| tex.reference();
        if (self.texture3) |tex| tex.reference();
        if (self.texture4) |tex| tex.reference();
        self.bind_group.reference();
        self.uniforms.reference();
        self.transforms.reference();
        self.colors.reference();
        self.glyphs.reference();
    }

    /// Releases every GPU resource and frees the atlas / region map memory.
    pub fn deinit(self: *Pipeline, allocator: std.mem.Allocator) void {
        self.render.release();
        self.texture_sampler.release();
        self.texture.release();
        self.texture_atlas.deinit(allocator);
        if (self.texture2) |tex| tex.release();
        if (self.texture3) |tex| tex.release();
        if (self.texture4) |tex| tex.release();
        self.bind_group.release();
        self.uniforms.release();
        self.regions.deinit(allocator);
        self.transforms.release();
        self.colors.release();
        self.glyphs.release();
    }
};
/// Options for creating (or re-creating) a text rendering pipeline.
pub const PipelineOptions = struct {
    /// The pipeline ID to create or replace.
    pipeline: u32,

    /// Shader program to use when rendering. Defaults to the built-in text.wgsl shader.
    shader: ?*gpu.ShaderModule = null,

    /// Whether to use linear (blurry) or nearest (pixelated) upscaling/downscaling.
    /// Defaults to nearest (pixelated).
    texture_sampler: ?*gpu.Sampler = null,

    /// Textures to use when rendering. The default shader can handle one texture (the font atlas.)
    texture2: ?*gpu.Texture = null,
    texture3: ?*gpu.Texture = null,
    texture4: ?*gpu.Texture = null,

    /// Alpha and color blending options.
    blend_state: ?gpu.BlendState = null,

    /// Pipeline overrides, these can be used to e.g. pass additional things to your shader program.
    bind_group_layout: ?*gpu.BindGroupLayout = null,
    bind_group: ?*gpu.BindGroup = null,
    color_target_state: ?gpu.ColorTargetState = null,
    fragment_state: ?gpu.FragmentState = null,
    pipeline_layout: ?*gpu.PipelineLayout = null,
};
/// Initializes the text module's internal state with an empty pipeline map.
pub fn machGfxTextInit(
    text_mod: *mach.Mod(.mach_gfx_text),
) !void {
    // TODO: struct default value initializers don't work
    text_mod.state = .{ .pipelines = .{} };
}
/// Creates the render pipeline identified by `opt.pipeline` for batched text rendering,
/// allocating the font atlas texture, storage buffers, sampler, bind group, and shader
/// pipeline. If a pipeline with this ID already exists, its resources are released and replaced.
pub fn machGfxTextInitPipeline(
    engine: *mach.Mod(.engine),
    text_mod: *mach.Mod(.mach_gfx_text),
    opt: PipelineOptions,
) !void {
    const device = engine.state.device;

    const pipeline = try text_mod.state.pipelines.getOrPut(engine.allocator, opt.pipeline);
    if (pipeline.found_existing) {
        // Re-initializing an existing pipeline: drop the old resources first.
        pipeline.value_ptr.*.deinit(engine.allocator);
    }

    // Prepare texture for the font atlas.
    const img_size = gpu.Extent3D{ .width = 1024, .height = 1024 };
    const texture = device.createTexture(&.{
        .size = img_size,
        .format = .rgba8_unorm,
        .usage = .{
            .texture_binding = true,
            .copy_dst = true,
            .render_attachment = true,
        },
    });
    // Don't leak the texture if atlas allocation below fails.
    errdefer texture.release();
    const texture_atlas = try mach.Atlas.init(
        engine.allocator,
        img_size.width,
        .rgba,
    );

    // Storage buffers
    const buffer_cap = 1024 * 128; // TODO: allow user to specify preallocation
    const glyph_buffer_cap = 1024 * 512; // TODO: allow user to specify preallocation
    const transforms = device.createBuffer(&.{
        .usage = .{ .storage = true, .copy_dst = true },
        .size = @sizeOf(Mat4x4) * buffer_cap,
        .mapped_at_creation = .false,
    });
    const colors = device.createBuffer(&.{
        .usage = .{ .storage = true, .copy_dst = true },
        .size = @sizeOf(Vec4) * buffer_cap,
        .mapped_at_creation = .false,
    });
    const glyphs = device.createBuffer(&.{
        .usage = .{ .storage = true, .copy_dst = true },
        .size = @sizeOf(Glyph) * glyph_buffer_cap,
        .mapped_at_creation = .false,
    });

    // Nearest-neighbor (pixelated) sampling by default, unless the user supplied a sampler.
    const texture_sampler = opt.texture_sampler orelse device.createSampler(&.{
        .mag_filter = .nearest,
        .min_filter = .nearest,
    });
    const uniforms = device.createBuffer(&.{
        .usage = .{ .copy_dst = true, .uniform = true },
        .size = @sizeOf(Uniforms),
        .mapped_at_creation = .false,
    });
    const bind_group_layout = opt.bind_group_layout orelse device.createBindGroupLayout(
        &gpu.BindGroupLayout.Descriptor.init(.{
            .entries = &.{
                gpu.BindGroupLayout.Entry.buffer(0, .{ .vertex = true }, .uniform, false, 0),
                gpu.BindGroupLayout.Entry.buffer(1, .{ .vertex = true }, .read_only_storage, false, 0),
                gpu.BindGroupLayout.Entry.buffer(2, .{ .vertex = true }, .read_only_storage, false, 0),
                gpu.BindGroupLayout.Entry.buffer(3, .{ .vertex = true }, .read_only_storage, false, 0),
                gpu.BindGroupLayout.Entry.sampler(4, .{ .fragment = true }, .filtering),
                gpu.BindGroupLayout.Entry.texture(5, .{ .fragment = true }, .float, .dimension_2d, false),
                gpu.BindGroupLayout.Entry.texture(6, .{ .fragment = true }, .float, .dimension_2d, false),
                gpu.BindGroupLayout.Entry.texture(7, .{ .fragment = true }, .float, .dimension_2d, false),
                gpu.BindGroupLayout.Entry.texture(8, .{ .fragment = true }, .float, .dimension_2d, false),
            },
        }),
    );
    defer bind_group_layout.release();

    // When an optional texture is absent, the atlas texture's view is reused for that bind
    // group slot. Only release views this function actually created: previously the aliased
    // atlas view was released once per slot, over-decrementing its reference count.
    const texture_view = texture.createView(&gpu.TextureView.Descriptor{});
    defer texture_view.release();
    const texture2_view = if (opt.texture2) |tex| tex.createView(&gpu.TextureView.Descriptor{}) else texture_view;
    defer if (texture2_view != texture_view) texture2_view.release();
    const texture3_view = if (opt.texture3) |tex| tex.createView(&gpu.TextureView.Descriptor{}) else texture_view;
    defer if (texture3_view != texture_view) texture3_view.release();
    const texture4_view = if (opt.texture4) |tex| tex.createView(&gpu.TextureView.Descriptor{}) else texture_view;
    defer if (texture4_view != texture_view) texture4_view.release();

    const bind_group = opt.bind_group orelse device.createBindGroup(
        &gpu.BindGroup.Descriptor.init(.{
            .layout = bind_group_layout,
            .entries = &.{
                gpu.BindGroup.Entry.buffer(0, uniforms, 0, @sizeOf(Uniforms)),
                gpu.BindGroup.Entry.buffer(1, transforms, 0, @sizeOf(Mat4x4) * buffer_cap),
                gpu.BindGroup.Entry.buffer(2, colors, 0, @sizeOf(Vec4) * buffer_cap),
                gpu.BindGroup.Entry.buffer(3, glyphs, 0, @sizeOf(Glyph) * glyph_buffer_cap),
                gpu.BindGroup.Entry.sampler(4, texture_sampler),
                gpu.BindGroup.Entry.textureView(5, texture_view),
                gpu.BindGroup.Entry.textureView(6, texture2_view),
                gpu.BindGroup.Entry.textureView(7, texture3_view),
                gpu.BindGroup.Entry.textureView(8, texture4_view),
            },
        }),
    );

    // Standard source-over alpha blending for color; destination alpha is replaced.
    const blend_state = opt.blend_state orelse gpu.BlendState{
        .color = .{
            .operation = .add,
            .src_factor = .src_alpha,
            .dst_factor = .one_minus_src_alpha,
        },
        .alpha = .{
            .operation = .add,
            .src_factor = .one,
            .dst_factor = .zero,
        },
    };
    const shader_module = opt.shader orelse device.createShaderModuleWGSL("text.wgsl", @embedFile("text.wgsl"));
    defer shader_module.release();
    const color_target = opt.color_target_state orelse gpu.ColorTargetState{
        .format = core.descriptor.format,
        .blend = &blend_state,
        .write_mask = gpu.ColorWriteMaskFlags.all,
    };
    const fragment = opt.fragment_state orelse gpu.FragmentState.init(.{
        .module = shader_module,
        .entry_point = "fragMain",
        .targets = &.{color_target},
    });
    const bind_group_layouts = [_]*gpu.BindGroupLayout{bind_group_layout};
    const pipeline_layout = opt.pipeline_layout orelse device.createPipelineLayout(&gpu.PipelineLayout.Descriptor.init(.{
        .bind_group_layouts = &bind_group_layouts,
    }));
    defer pipeline_layout.release();
    const render = device.createRenderPipeline(&gpu.RenderPipeline.Descriptor{
        .fragment = &fragment,
        .layout = pipeline_layout,
        .vertex = gpu.VertexState{
            .module = shader_module,
            .entry_point = "vertMain",
        },
    });

    pipeline.value_ptr.* = Pipeline{
        .render = render,
        .texture_sampler = texture_sampler,
        .texture = texture,
        .texture_atlas = texture_atlas,
        .texture2 = opt.texture2,
        .texture3 = opt.texture3,
        .texture4 = opt.texture4,
        .bind_group = bind_group,
        .uniforms = uniforms,
        .num_texts = 0,
        .num_glyphs = 0,
        .transforms = transforms,
        .colors = colors,
        .glyphs = glyphs,
    };
    // NOTE(review): reference() bumps the refcount of every held resource, including ones
    // created above that already carry an initial reference — confirm against mach-gpu
    // ownership rules that this does not leak one reference per created resource.
    pipeline.value_ptr.reference();
}
/// Releases every pipeline's resources and frees the pipeline map itself.
pub fn deinit(text_mod: *mach.Mod(.mach_gfx_text)) !void {
    const pipelines = &text_mod.state.pipelines;
    for (pipelines.values()) |*pipeline| {
        pipeline.deinit(text_mod.allocator);
    }
    pipelines.deinit(text_mod.allocator);
}
/// Rebuilds the glyph geometry for every text entity: uploads transforms/colors to storage
/// buffers, lazily rasterizes any codepoints not yet in the font atlas, appends one Glyph
/// per visible codepoint, and re-uploads the atlas texture if it changed.
pub fn machGfxTextUpdated(
    engine: *mach.Mod(.engine),
    text_mod: *mach.Mod(.mach_gfx_text),
    pipeline_id: u32,
) !void {
    const pipeline = text_mod.state.pipelines.getPtr(pipeline_id).?;
    const device = engine.state.device;

    // TODO: make sure these entities only belong to the given pipeline
    // we need a better tagging mechanism
    var archetypes_iter = engine.entities.query(.{ .all = &.{
        .{ .mach_gfx_text = &.{
            .pipeline,
            .transform,
            .text,
            .font,
            .font_size,
            .color,
        } },
    } });

    const encoder = device.createCommandEncoder(null);
    defer encoder.release();

    pipeline.num_texts = 0;
    pipeline.num_glyphs = 0;
    var glyphs = std.ArrayListUnmanaged(Glyph){};
    // NOTE(review): these offsets advance by element count (`slice.len`), not byte size. If
    // encoder.writeBuffer expects a byte offset (as the underlying WebGPU API does), data from
    // a second archetype would land at the wrong position — confirm the expected unit and
    // multiply by @sizeOf(...) if needed.
    var transforms_offset: usize = 0;
    var colors_offset: usize = 0;
    var texture_update = false;
    while (archetypes_iter.next()) |archetype| {
        var transforms = archetype.slice(.mach_gfx_text, .transform);
        var colors = archetype.slice(.mach_gfx_text, .color);
        // TODO: confirm the lifetime of these slices is OK for writeBuffer, how long do they need
        // to live?
        encoder.writeBuffer(pipeline.transforms, transforms_offset, transforms);
        encoder.writeBuffer(pipeline.colors, colors_offset, colors);
        transforms_offset += transforms.len;
        colors_offset += colors.len;
        pipeline.num_texts += @intCast(transforms.len);

        // Render texts
        // TODO: this is very expensive and shouldn't be done here, should be done only on detected
        // text change.
        // Glyphs are rasterized at 2x the requested size and positions are scaled back down by
        // the same factor — presumably for high-DPI output; confirm the intended display density.
        const px_density = 2.0;
        var fonts = archetype.slice(.mach_gfx_text, .font);
        var font_sizes = archetype.slice(.mach_gfx_text, .font_size);
        var texts = archetype.slice(.mach_gfx_text, .text);
        for (fonts, font_sizes, texts) |font, font_size, text| {
            // Pen position in (native) pixels, relative to the text origin.
            var offset_x: f32 = 0.0;
            var offset_y: f32 = 0.0;
            var utf8 = (try std.unicode.Utf8View.init(text)).iterator();
            while (utf8.nextCodepoint()) |codepoint| {
                const m = try font.measure(codepoint, font_size * px_density);
                if (codepoint != '\n') {
                    // Look up this codepoint's atlas region, rasterizing and packing it on a miss.
                    var region = try pipeline.regions.getOrPut(engine.allocator, codepoint);
                    if (!region.found_existing) {
                        const glyph = try font.render(codepoint, font_size * px_density);
                        if (glyph.bitmap) |bitmap| {
                            var glyph_atlas_region = try pipeline.texture_atlas.reserve(engine.allocator, glyph.width, glyph.height);
                            pipeline.texture_atlas.set(glyph_atlas_region, @as([*]const u8, @ptrCast(bitmap.ptr))[0 .. bitmap.len * 4]);
                            texture_update = true;
                            // Exclude the 1px blank space margin when describing the region of the texture
                            // that actually represents the glyph.
                            const margin = 1;
                            glyph_atlas_region.x += margin;
                            glyph_atlas_region.y += margin;
                            glyph_atlas_region.width -= margin * 2;
                            glyph_atlas_region.height -= margin * 2;
                            region.value_ptr.* = glyph_atlas_region;
                        } else {
                            // whitespace
                            region.value_ptr.* = mach.Atlas.Region{
                                .width = 0,
                                .height = 0,
                                .x = 0,
                                .y = 0,
                            };
                        }
                    }
                    // Note: render(font_size) and render(font_size*px_density) is not equal in
                    // m.size.x() and m.size.x()*px_density, because font rendering may handle rounding
                    // differently. We always work in native pixels, and then convert to virtual pixels
                    // right before display in order to keep everything accurate.
                    //
                    // Also note that e.g. font_size*px_density may result in a different horizontal
                    // bearing than font_size with horizontal bearing * 2.0. These subtleties are
                    // important and decided by the font itself.
                    const r = region.value_ptr.*;
                    std.debug.assert(r.width == @as(u32, @intFromFloat(m.size.x())));
                    std.debug.assert(r.height == @as(u32, @intFromFloat(m.size.y())));
                    // NOTE(review): text_index is always 0, so every glyph uses transforms[0] /
                    // colors[0] — multiple text entities will not be positioned/colored
                    // independently. Confirm whether this is a known limitation or should track
                    // the entity's index.
                    try glyphs.append(engine.allocator, .{
                        .pos = vec2(
                            offset_x + m.bearing_horizontal.x(),
                            offset_y - (m.size.y() - m.bearing_horizontal.y()),
                        ).divScalar(px_density),
                        .size = m.size.divScalar(px_density),
                        .text_index = 0,
                        .uv_pos = vec2(@floatFromInt(r.x), @floatFromInt(r.y)),
                    });
                    pipeline.num_glyphs += 1;
                }
                // Advance the pen: newline resets X and moves down one line; otherwise move right.
                if (codepoint == '\n') {
                    offset_x = 0;
                    offset_y -= m.advance.y();
                } else {
                    offset_x += m.advance.x();
                }
            }
        }
    }
    encoder.writeBuffer(pipeline.glyphs, 0, glyphs.items);
    glyphs.deinit(engine.allocator);

    if (texture_update) {
        // rgba32_pixels
        // TODO: use proper texture dimensions here
        const img_size = gpu.Extent3D{ .width = 1024, .height = 1024 };
        const data_layout = gpu.Texture.DataLayout{
            .bytes_per_row = @as(u32, @intCast(img_size.width * 4)),
            .rows_per_image = @as(u32, @intCast(img_size.height)),
        };
        engine.state.queue.writeTexture(
            &.{ .texture = pipeline.texture },
            &data_layout,
            &img_size,
            pipeline.texture_atlas.data,
        );
    }

    var command = encoder.finish(null);
    defer command.release();
    engine.state.queue.submit(&[_]*gpu.CommandBuffer{command});
}
/// Writes the per-frame uniform data (view * orthographic projection matrix and the font atlas
/// texture's size) for this pipeline into its uniform buffer via the engine's command encoder.
pub fn machGfxTextPreRender(
    engine: *mach.Mod(.engine),
    text_mod: *mach.Mod(.mach_gfx_text),
    pipeline_id: u32,
) !void {
    const pipeline = text_mod.state.pipelines.get(pipeline_id).?;

    // Update uniform buffer. Query the window size once rather than once per plane.
    const size = core.size();
    const half_width = @as(f32, @floatFromInt(size.width)) / 2;
    const half_height = @as(f32, @floatFromInt(size.height)) / 2;
    // World origin (0, 0) is at the center of the window.
    const ortho = Mat4x4.ortho(
        -half_width,
        half_width,
        -half_height,
        half_height,
        -0.1,
        100000,
    );
    const uniforms = Uniforms{
        .view_projection = ortho,
        // TODO: dimensions of other textures, number of textures present
        .texture_size = vec2(
            @as(f32, @floatFromInt(pipeline.texture.getWidth())),
            @as(f32, @floatFromInt(pipeline.texture.getHeight())),
        ),
    };
    engine.state.encoder.writeBuffer(pipeline.uniforms, 0, &[_]Uniforms{uniforms});
}
/// Records draw commands for the text batch into the engine's active render pass.
pub fn machGfxTextRender(
    engine: *mach.Mod(.engine),
    text_mod: *mach.Mod(.mach_gfx_text),
    pipeline_id: u32,
) !void {
    const pipeline = text_mod.state.pipelines.get(pipeline_id).?;
    const pass = engine.state.pass;

    // Each glyph is drawn as two triangles (six vertices); the vertex shader derives
    // positions from the storage buffers, so no vertex buffer is bound.
    pass.setPipeline(pipeline.render);
    // TODO: remove dynamic offsets?
    pass.setBindGroup(0, pipeline.bind_group, &.{});
    pass.draw(pipeline.num_glyphs * 6, 1, 0, 0);
}

41
src/gfx/font.zig Normal file
View file

@ -0,0 +1,41 @@
const math = @import("../main.zig").math;
const std = @import("std");
/// A type-erased interface that can render Unicode codepoints into glyphs.
///
/// Implementations supply a context pointer plus a vtable of function pointers; the wrapper
/// methods below forward calls to the vtable with the stored context.
pub const FontRenderer = struct {
    ptr: *anyopaque,
    vtable: *const VTable,

    pub const VTable = struct {
        render: *const fn (ctx: *anyopaque, codepoint: u21, size: f32) error{RenderError}!Glyph,
        measure: *const fn (ctx: *anyopaque, codepoint: u21, size: f32) error{MeasureError}!GlyphMetrics,
    };

    /// Rasterizes `codepoint` at the given `size` into a glyph bitmap.
    pub fn render(self: FontRenderer, codepoint: u21, size: f32) error{RenderError}!Glyph {
        return self.vtable.render(self.ptr, codepoint, size);
    }

    /// Measures `codepoint` at the given `size` without rasterizing it.
    pub fn measure(self: FontRenderer, codepoint: u21, size: f32) error{MeasureError}!GlyphMetrics {
        return self.vtable.measure(self.ptr, codepoint, size);
    }
};
/// A single 8-bit-per-channel RGBA pixel.
pub const RGBA32 = extern struct {
    r: u8,
    g: u8,
    b: u8,
    a: u8,
};
/// A rasterized glyph produced by a FontRenderer.
pub const Glyph = struct {
    /// RGBA pixel data, or null when the glyph has no visible bitmap (e.g. whitespace).
    bitmap: ?[]const RGBA32,
    /// Bitmap width in pixels.
    width: u32,
    /// Bitmap height in pixels.
    height: u32,
};
/// Measurements for a glyph at a particular size, used for text layout.
pub const GlyphMetrics = struct {
    /// Size (width, height) of the glyph, in pixels.
    size: math.Vec2,
    /// How far the pen advances after drawing this glyph.
    advance: math.Vec2,
    /// Bearing offsets when laying out text horizontally.
    bearing_horizontal: math.Vec2,
    /// Bearing offsets when laying out text vertically.
    bearing_vertical: math.Vec2,
};

20
src/gfx/main.zig Normal file
View file

@ -0,0 +1,20 @@
const font = @import("font.zig");

pub const util = @import("util.zig");
pub const Sprite = @import("Sprite.zig");
pub const Text = @import("Text.zig");

// Re-exported font/glyph types from font.zig.
pub const FontRenderer = font.FontRenderer;
pub const RGBA32 = font.RGBA32;
pub const Glyph = font.Glyph;
pub const GlyphMetrics = font.GlyphMetrics;
test {
    const std = @import("std");
    // TODO: refactor code so we can use this here:
    // std.testing.refAllDeclsRecursive(@This());
    // std.testing.refAllDeclsRecursive(Sprite);
    // std.testing.refAllDeclsRecursive(Text);
    inline for (.{ util, FontRenderer, RGBA32, Glyph, GlyphMetrics }) |decl| {
        std.testing.refAllDeclsRecursive(decl);
    }
}

102
src/gfx/sprite.wgsl Normal file
View file

@ -0,0 +1,102 @@
//-----------------------------------------------------------------------------
// Vertex shader
//-----------------------------------------------------------------------------
// Output of the vertex stage, interpolated and passed to the fragment shader.
struct VertexOutput {
  // Vertex position (clip space)
  @builtin(position) Position : vec4<f32>,
  // UV coordinate (normalized, 0-1)
  @location(0) fragUV : vec2<f32>,
};

// Per-frame parameters our vertex shader will receive, shared by all sprites in the batch.
struct Uniforms {
  // The view * orthographic projection matrix
  view_projection: mat4x4<f32>,
  // Total size of the sprite texture in pixels
  texture_size: vec2<f32>,
};

@group(0) @binding(0) var<uniform> uniforms : Uniforms;

// Sprite model transformation matrices, one per sprite.
@group(0) @binding(1) var<storage, read> sprite_transforms: array<mat4x4<f32>>;

// Sprite UV coordinate transformation matrices. Sprite UV coordinates are (0, 0) at the top-left
// corner, and in pixels.
@group(0) @binding(2) var<storage, read> sprite_uv_transforms: array<mat3x3<f32>>;

// Sprite sizes, in pixels.
@group(0) @binding(3) var<storage, read> sprite_sizes: array<vec2<f32>>;
@vertex
fn vertMain(
  @builtin(vertex_index) VertexIndex : u32
) -> VertexOutput {
  // Our vertex shader will be called six times per sprite (2 triangles make up a sprite, so six
  // vertices.) The VertexIndex tells us which vertex we need to render, so we know e.g. vertices
  // 0-5 correspond to the first sprite, vertices 6-11 correspond to the second sprite, and so on.
  let sprite_transform = sprite_transforms[VertexIndex / 6];
  let sprite_uv_transform = sprite_uv_transforms[VertexIndex / 6];
  let sprite_size = sprite_sizes[VertexIndex / 6];

  // Imagine the vertices and UV coordinates of a card. There are two triangles, the UV coordinates
  // describe the corresponding location of each vertex on the texture. We hard-code the vertex
  // positions and UV coordinates here:
  //
  // NOTE(review): some WGSL revisions reject dynamic indexing (VertexIndex % 6) into a
  // 'let'-bound array; if the targeted implementation complains, declare these with 'var'.
  let positions = array<vec2<f32>, 6>(
      vec2<f32>(0, 0), // left, bottom
      vec2<f32>(0, 1), // left, top
      vec2<f32>(1, 0), // right, bottom
      vec2<f32>(1, 0), // right, bottom
      vec2<f32>(0, 1), // left, top
      vec2<f32>(1, 1), // right, top
  );
  let uvs = array<vec2<f32>, 6>(
      vec2<f32>(0, 1), // left, bottom
      vec2<f32>(0, 0), // left, top
      vec2<f32>(1, 1), // right, bottom
      vec2<f32>(1, 1), // right, bottom
      vec2<f32>(0, 0), // left, top
      vec2<f32>(1, 0), // right, top
  );

  // Based on the vertex index, we determine which positions[n] and uvs[n] we need to use. Our
  // vertex shader is invoked 6 times per sprite, we need to produce the right vertex/uv coordinates
  // each time to produce a textured card.
  let pos_2d = positions[VertexIndex % 6];
  var uv = uvs[VertexIndex % 6];

  // Currently, our pos_2d and uv coordinates describe a card that covers 1px by 1px; and the UV
  // coordinates describe using the entire texture. We alter the coordinates to describe the
  // desired sprite location, size, and apply a subset of the texture instead of the entire texture.
  var pos = vec4<f32>(pos_2d * sprite_size, 0, 1); // normalized -> pixels
  pos = sprite_transform * pos; // apply sprite transform (pixels)
  pos = uniforms.view_projection * pos; // pixels -> normalized

  uv *= sprite_size; // normalized -> pixels
  uv = (sprite_uv_transform * vec3<f32>(uv.xy, 1)).xy; // apply sprite UV transform (pixels)
  uv /= uniforms.texture_size; // pixels -> normalized

  var output : VertexOutput;
  output.Position = pos;
  output.fragUV = uv;
  return output;
}
//-----------------------------------------------------------------------------
// Fragment shader
//-----------------------------------------------------------------------------
@group(0) @binding(4) var spriteSampler: sampler;
@group(0) @binding(5) var spriteTexture: texture_2d<f32>;

@fragment
fn fragMain(
    @location(0) fragUV: vec2<f32>
) -> @location(0) vec4<f32> {
    // Sample the sprite texture; fully transparent texels are discarded so
    // they neither write color nor depth.
    let sampled = textureSample(spriteTexture, spriteSampler, fragUV);
    if (sampled.a <= 0.0) {
        discard;
    }
    return sampled;
}

105
src/gfx/text.wgsl Normal file
View file

@ -0,0 +1,105 @@
//-----------------------------------------------------------------------------
// Vertex shader
//-----------------------------------------------------------------------------

// Data interpolated from the vertex stage to the fragment stage.
struct VertexOutput {
  // Vertex position in clip space.
  @builtin(position) Position : vec4<f32>,
  // UV coordinate (normalized, 0..1) used to sample the font atlas.
  @location(0) fragUV : vec2<f32>,
};
// Parameters shared by every vertex invocation of a draw call.
struct Uniforms {
  // The view * orthographic projection matrix
  view_projection: mat4x4<f32>,
  // Total size of the font atlas texture in pixels
  texture_size: vec2<f32>,
};
// Per-glyph data; one entry per rendered glyph in the glyphs storage buffer.
struct Glyph {
  // Position of this glyph (top-left corner.)
  pos: vec2<f32>,
  // Size of the glyph in pixels.
  size: vec2<f32>,
  // Top-left UV coordinate of this glyph within the font atlas.
  // NOTE(review): vertMain adds this to pixel-space UVs *before* dividing by
  // uniforms.texture_size, so this appears to be expressed in pixels rather
  // than normalized coordinates — confirm against the CPU-side writer.
  uv_pos: vec2<f32>,
  // Which text this glyph belongs to; this is the index for transforms[i], colors[i]
  text_index: u32,
}
// Uniform data shared across the draw call (see Uniforms above).
@group(0) @binding(0) var<uniform> uniforms : Uniforms;
// Per-text model transformation matrices, indexed by Glyph.text_index.
@group(0) @binding(1) var<storage, read> transforms: array<mat4x4<f32>>;
// Per-text colors, indexed by Glyph.text_index.
@group(0) @binding(2) var<storage, read> colors: array<vec4<f32>>;
// Per-glyph data, indexed by glyph number (VertexIndex / 6 in vertMain).
@group(0) @binding(3) var<storage, read> glyphs: array<Glyph>;
@vertex
fn vertMain(
    @builtin(vertex_index) VertexIndex : u32
) -> VertexOutput {
    // Each glyph is a quad built from two triangles, i.e. six vertices; the
    // integer division maps this invocation onto the per-glyph storage buffer.
    var glyph = glyphs[VertexIndex / 6];
    let transform = transforms[glyph.text_index];
    // NOTE(review): color is fetched here but never forwarded — VertexOutput
    // has no color member yet; kept to match the original behavior.
    let color = colors[glyph.text_index];

    // Hard-coded unit-quad corner positions and the texture coordinate each
    // corner maps to (UV space has Y flipped relative to position space.)
    let corners = array<vec2<f32>, 6>(
        vec2<f32>(0, 0), // left, bottom
        vec2<f32>(0, 1), // left, top
        vec2<f32>(1, 0), // right, bottom
        vec2<f32>(1, 0), // right, bottom
        vec2<f32>(0, 1), // left, top
        vec2<f32>(1, 1), // right, top
    );
    let corner_uvs = array<vec2<f32>, 6>(
        vec2<f32>(0, 1), // left, bottom
        vec2<f32>(0, 0), // left, top
        vec2<f32>(1, 1), // right, bottom
        vec2<f32>(1, 1), // right, bottom
        vec2<f32>(0, 0), // left, top
        vec2<f32>(1, 0), // right, top
    );

    // Select this invocation's corner of the unit quad.
    let corner = corners[VertexIndex % 6];
    var uv = corner_uvs[VertexIndex % 6];

    // Scale the unit quad to the glyph's size, place it at the glyph's position
    // (pixels), apply the owning text's transform, then project pixels into
    // normalized device coordinates.
    var position = vec4<f32>((corner * glyph.size) + glyph.pos, 0, 1);
    position = transform * position;
    position = uniforms.view_projection * position;

    // TODO: elevate px_density out of shader
    let px_density = 2.0;
    // Scale UVs from the unit square to atlas pixels, offset them to this
    // glyph's region of the atlas, then normalize against the atlas size.
    uv *= glyph.size * px_density;
    uv += glyph.uv_pos;
    uv /= uniforms.texture_size;

    var out: VertexOutput;
    out.Position = position;
    out.fragUV = uv;
    return out;
}
//-----------------------------------------------------------------------------
// Fragment shader
//-----------------------------------------------------------------------------
@group(0) @binding(4) var glyphSampler: sampler;
@group(0) @binding(5) var glyphTexture: texture_2d<f32>;

@fragment
fn fragMain(
    @location(0) fragUV: vec2<f32>
) -> @location(0) vec4<f32> {
    // Sample the glyph atlas; fully transparent texels are discarded so they
    // neither write color nor depth.
    let sampled = textureSample(glyphTexture, glyphSampler, fragUV);
    if (sampled.a <= 0.0) {
        discard;
    }
    return sampled;
}