examples: remove gkurve test-bed for now (will come back later)
Signed-off-by: Stephen Gutekanst <stephen@hexops.com>
This commit is contained in:
parent d573a59d67
commit 2c73f8c518
8 changed files with 0 additions and 1748 deletions
@@ -1,150 +0,0 @@
const std = @import("std");

const mach = @import("mach");
const App = @import("main.zig").App;
const gpu = mach.gpu;
const math = mach.math;
const AtlasUV = mach.gfx.Atlas.Region.UV;

const Mat4x4 = math.Mat4x4;
const vec3 = math.vec3;
const Vec2 = @Vector(2, f32);

pub const Vertex = struct {
    pos: @Vector(4, f32),
    uv: Vec2,
};
const VERTEX_ATTRIBUTES = [_]gpu.VertexAttribute{
    .{ .format = .float32x4, .offset = @offsetOf(Vertex, "pos"), .shader_location = 0 },
    .{ .format = .float32x2, .offset = @offsetOf(Vertex, "uv"), .shader_location = 1 },
};
pub const VERTEX_BUFFER_LAYOUT = gpu.VertexBufferLayout{
    .array_stride = @sizeOf(Vertex),
    .step_mode = .vertex,
    .attribute_count = VERTEX_ATTRIBUTES.len,
    .attributes = &VERTEX_ATTRIBUTES,
};
pub const VertexUniform = Mat4x4;

const GkurveType = enum(u32) {
    quadratic_convex = 0,
    semicircle_convex = 1,
    quadratic_concave = 2,
    semicircle_concave = 3,
    triangle = 4,
};

pub const FragUniform = struct {
    type: GkurveType = .triangle,
    // Padding for struct alignment to 16 bytes (minimum in WebGPU uniform).
    padding: @Vector(3, f32) = undefined,
    blend_color: @Vector(4, f32) = @Vector(4, f32){ 1, 1, 1, 1 },
};

pub fn equilateralTriangle(app: *App, position: Vec2, scale: f32, uniform: FragUniform, uv: AtlasUV, height_scale: f32) !void {
    const triangle_height = scale * @sqrt(0.75) * height_scale;

    try app.vertices.appendSlice(&[3]Vertex{
        .{
            .pos = .{ position[0] + scale / 2, position[1] + triangle_height, 0, 1 },
            .uv = .{ uv.x + uv.width * 0.5, uv.y + uv.height },
        },
        .{
            .pos = .{ position[0], position[1], 0, 1 },
            .uv = .{ uv.x, uv.y },
        },
        .{
            .pos = .{ position[0] + scale, position[1], 0, 1 },
            .uv = .{ uv.x + uv.width, uv.y },
        },
    });

    try app.fragment_uniform_list.append(uniform);

    app.update_vertex_buffer = true;
    app.update_frag_uniform_buffer = true;
}

pub fn quad(app: *App, position: Vec2, scale: Vec2, uniform: FragUniform, uv: AtlasUV) !void {
    const bottom_left_uv = Vec2{ uv.x, uv.y };
    const bottom_right_uv = Vec2{ uv.x + uv.width, uv.y };
    const top_left_uv = Vec2{ uv.x, uv.y + uv.height };
    const top_right_uv = Vec2{ uv.x + uv.width, uv.y + uv.height };

    try app.vertices.appendSlice(&[6]Vertex{
        .{ .pos = .{ position[0], position[1] + scale[1], 0, 1 }, .uv = top_left_uv },
        .{ .pos = .{ position[0], position[1], 0, 1 }, .uv = bottom_left_uv },
        .{ .pos = .{ position[0] + scale[0], position[1], 0, 1 }, .uv = bottom_right_uv },

        .{ .pos = .{ position[0] + scale[0], position[1] + scale[1], 0, 1 }, .uv = top_right_uv },
        .{ .pos = .{ position[0], position[1] + scale[1], 0, 1 }, .uv = top_left_uv },
        .{ .pos = .{ position[0] + scale[0], position[1], 0, 1 }, .uv = bottom_right_uv },
    });

    try app.fragment_uniform_list.appendSlice(&.{ uniform, uniform });

    app.update_vertex_buffer = true;
    app.update_frag_uniform_buffer = true;
}

pub fn circle(app: *App, position: Vec2, radius: f32, blend_color: @Vector(4, f32), uv: AtlasUV) !void {
    const low_mid = Vertex{
        .pos = .{ position[0], position[1] - (radius * 2.0), 0, 1 },
        .uv = .{ uv.x + uv.width * 0.5, uv.y },
    };
    const high_mid = Vertex{
        .pos = .{ position[0], position[1] + (radius * 2.0), 0, 1 },
        .uv = .{ uv.x + uv.width * 0.5, uv.y + uv.height },
    };

    const mid_left = Vertex{
        .pos = .{ position[0] - radius, position[1], 0, 1 },
        .uv = .{ uv.x, uv.y + uv.height * 0.5 },
    };
    const mid_right = Vertex{
        .pos = .{ position[0] + radius, position[1], 0, 1 },
        .uv = .{ uv.x + uv.width, uv.y + uv.height * 0.5 },
    };

    try app.vertices.appendSlice(&[_]Vertex{
        high_mid,
        mid_left,
        mid_right,

        low_mid,
        mid_left,
        mid_right,
    });

    try app.fragment_uniform_list.appendSlice(&[_]FragUniform{
        .{
            .type = .semicircle_convex,
            .blend_color = blend_color,
        },
        .{
            .type = .semicircle_convex,
            .blend_color = blend_color,
        },
    });

    app.update_vertex_buffer = true;
    app.update_frag_uniform_buffer = true;
}

pub fn getVertexUniformBufferObject() !VertexUniform {
    // Note: We use window width/height here, not framebuffer width/height.
    // On e.g. macOS, window size may be 640x480 while framebuffer size may be
    // 1280x960 (subpixels.) Doing this lets us use a pixel, not subpixel,
    // coordinate system.
    const window_size = mach.core.size();
    const proj = Mat4x4.projection2D(.{
        .left = 0,
        .right = @floatFromInt(window_size.width),
        .bottom = 0,
        .top = @floatFromInt(window_size.height),
        .near = -0.1,
        .far = 100,
    });
    const mvp = proj.mul(&Mat4x4.translate(vec3(-1, -1, 0)));
    return mvp;
}
@@ -1,199 +0,0 @@
struct FragUniform {
|
||||
type_: u32,
|
||||
padding: vec3<f32>,
|
||||
blend_color: vec4<f32>,
|
||||
}
|
||||
@binding(1) @group(0) var<storage> ubos: array<FragUniform>;
|
||||
@binding(2) @group(0) var mySampler: sampler;
|
||||
@binding(3) @group(0) var myTexture: texture_2d<f32>;
|
||||
|
||||
const wireframe = false;
|
||||
const antialiased = true;
|
||||
const aa_px = 1.0; // pixels to consume for AA
|
||||
const dist_scale_px = 300.0; // TODO: do not hard code
|
||||
|
||||
@fragment fn main(
|
||||
@location(0) uv: vec2<f32>,
|
||||
@interpolate(linear) @location(1) bary_in: vec2<f32>,
|
||||
@interpolate(flat) @location(2) triangle_index: u32,
|
||||
) -> @location(0) vec4<f32> {
|
||||
// Example 1: Visualize barycentric coordinates:
|
||||
// let bary = bary_in;
|
||||
// return vec4<f32>(bary.x, bary.y, 0.0, 1.0);
|
||||
// return vec4<f32>(0.0, bary.x, 0.0, 1.0); // [1.0 (bottom-left vertex), 0.0 (bottom-right vertex)]
|
||||
// return vec4<f32>(0.0, bary.y, 0.0, 1.0); // [1.0 (bottom-left vertex), 0.0 (top-right face)]
|
||||
|
||||
// Example 2: Very simple quadratic bezier
|
||||
// let bary = bary_in;
|
||||
// if (bary.x * bary.x - bary.y) > 0 {
|
||||
// discard;
|
||||
// }
|
||||
// return vec4<f32>(0.0, 1.0, 0.0, 1.0);
|
||||
|
||||
// Example 3: Render gkurve primitives
|
||||
let inversion = select( 1.0, -1.0, ubos[triangle_index].type_ == 0u || ubos[triangle_index].type_ == 1u);
|
||||
// Texture uvs
|
||||
var correct_uv = uv;
|
||||
correct_uv.y = 1.0 - correct_uv.y;
|
||||
var color = textureSample(myTexture, mySampler, correct_uv) * ubos[triangle_index].blend_color;
|
||||
|
||||
// Curve rendering
|
||||
let border_color = vec4<f32>(1.0, 0.0, 0.0, 1.0);
|
||||
let border_px = 30.0;
|
||||
let is_semicircle = ubos[triangle_index].type_ == 1u || ubos[triangle_index].type_ == 3u;
|
||||
var result = select(
|
||||
curveColor(bary_in, border_px, border_color, color, inversion, is_semicircle),
|
||||
color,
|
||||
ubos[triangle_index].type_ == 4u, // triangle rendering
|
||||
);
|
||||
|
||||
// Wireframe rendering
|
||||
let wireframe_px = 1.0;
|
||||
let wireframe_color = vec4<f32>(0.5, 0.5, 0.5, 1.0);
|
||||
if (wireframe) {
|
||||
result = wireframeColor(bary_in, wireframe_px, wireframe_color, result);
|
||||
}
|
||||
|
||||
if (result.a == 0.0) { discard; }
|
||||
return result;
|
||||
}
|
||||
|
||||
// Performs alpha 'over' blending between two premultiplied-alpha colors.
|
||||
fn alphaOver(a: vec4<f32>, b: vec4<f32>) -> vec4<f32> {
|
||||
return a + (b * (1.0 - a.a));
|
||||
}
|
||||
|
||||
// Calculates signed distance to a quadratic bézier curve using barycentric coordinates.
|
||||
fn distanceToQuadratic(bary: vec2<f32>) -> f32 {
|
||||
// Gradients
|
||||
let px = dpdx(bary.xy);
|
||||
let py = dpdy(bary.xy);
|
||||
|
||||
// Chain rule
|
||||
let fx = (2.0 * bary.x) * px.x - px.y;
|
||||
let fy = (2.0 * bary.x) * py.x - py.y;
|
||||
|
||||
return (bary.x * bary.x - bary.y) / sqrt(fx * fx + fy * fy);
|
||||
}
|
||||
|
||||
// Calculates signed distance to a semicircle using barycentric coordinates.
|
||||
fn distanceToSemicircle(bary: vec2<f32>) -> f32 {
|
||||
let x = abs(((bary.x - 0.5) * 2.0)); // [0.0 left, 1.0 center, 0.0 right]
|
||||
let y = ((bary.x-bary.y) * 4.0); // [2.0 bottom, 0.0 top]
|
||||
let c = x*x + y*y;
|
||||
|
||||
// Gradients
|
||||
let px = dpdx(bary.xy);
|
||||
let py = dpdy(bary.xy);
|
||||
|
||||
// Chain rule
|
||||
let fx = c * px.x - px.y;
|
||||
let fy = c * py.x - py.y;
|
||||
|
||||
let d = (1.0 - (x*x + y*y)) - 0.2;
|
||||
return (-d / 6.0) / sqrt(fx * fx + fy * fy);
|
||||
}
|
||||
|
||||
// Calculates signed distance to the wireframe (i.e. faces) of the triangle using barycentric
|
||||
// coordinates.
|
||||
fn distanceToWireframe(bary: vec2<f32>) -> f32 {
|
||||
let normal = vec3<f32>(
|
||||
bary.y, // distance to right face
|
||||
(bary.x - bary.y) * 2.0, // distance to bottom face
|
||||
1.0 - (((bary.x - bary.y)) + bary.x), // distance to left face
|
||||
);
|
||||
let fw = sqrt(dpdx(normal)*dpdx(normal) + dpdy(normal)*dpdy(normal));
|
||||
let d = normal / fw;
|
||||
return min(min(d.x, d.y), d.z);
|
||||
}
|
||||
|
||||
// Calculates the color of the wireframe, taking into account antialiasing and alpha blending with
|
||||
// the desired background blend color.
|
||||
fn wireframeColor(bary: vec2<f32>, px: f32, color: vec4<f32>, blend_color: vec4<f32>) -> vec4<f32> {
|
||||
let dist = distanceToWireframe(bary);
|
||||
if (antialiased) {
|
||||
let outer = dist;
|
||||
let inner = (px + (aa_px * 2.0)) - dist;
|
||||
let in_wireframe = outer >= 0.0 && inner >= 0.0;
|
||||
if (in_wireframe) {
|
||||
// Note: If this is the outer edge of the wireframe, we do not want to perform alpha
|
||||
// blending with the background blend color, since it is an antialiased edge and should
|
||||
// be transparent. However, if it is the internal edge of the wireframe, we do want to
|
||||
// perform alpha blending as it should be an overlay, not transparent.
|
||||
let is_outer_edge = outer < inner;
|
||||
if (is_outer_edge) {
|
||||
let alpha = smoothstep(0.0, 1.0, outer*(1.0 / aa_px));
|
||||
return vec4<f32>((color.rgb/color.a)*alpha, alpha);
|
||||
} else {
|
||||
let aa_inner = inner - aa_px;
|
||||
let alpha = smoothstep(0.0, 1.0, aa_inner*(1.0 / aa_px));
|
||||
let wireframe_color = vec4<f32>((color.rgb/color.a)*alpha, alpha);
|
||||
return alphaOver(wireframe_color, blend_color);
|
||||
}
|
||||
}
|
||||
return blend_color;
|
||||
} else {
|
||||
// If we're at the edge use the wireframe color, otherwise use the background blend_color.
|
||||
return select(blend_color, color, (px - dist) >= 0.0);
|
||||
}
|
||||
}
|
||||
|
||||
// Calculates the color for a curve, taking into account antialiasing and alpha blending with
|
||||
// the desired background blend color.
|
||||
//
|
||||
// inversion: concave (-1.0) or convex (1.0)
|
||||
// is_semicircle: quadratic bezier (false) or semicircle (true)
|
||||
fn curveColor(
|
||||
bary: vec2<f32>,
|
||||
border_px: f32,
|
||||
border_color: vec4<f32>,
|
||||
blend_color: vec4<f32>,
|
||||
inversion: f32,
|
||||
is_semicircle: bool,
|
||||
) -> vec4<f32> {
|
||||
let dist = select(
|
||||
distanceToQuadratic(bary),
|
||||
distanceToSemicircle(bary),
|
||||
is_semicircle,
|
||||
) * inversion;
|
||||
let is_inverted = (inversion + 1.0) / 2.0; // 1.0 if inverted, 0.0 otherwise
|
||||
|
||||
if (antialiased) {
|
||||
let outer = dist + ((border_px + (aa_px * 2.0)) * is_inverted); // bottom
|
||||
let inner = ((border_px + (aa_px * 2.0)) * (1.0-is_inverted)) - dist; // top
|
||||
let in_border = outer >= 0.0 && inner >= 0.0;
|
||||
if (in_border) {
|
||||
// Note: If this is the outer edge of the curve, we do not want to perform alpha
|
||||
// blending with the background blend color, since it is an antialiased edge and should
|
||||
// be transparent. However, if it is the internal edge of the curve, we do want to
|
||||
// perform alpha blending as it should be an overlay, not transparent.
|
||||
let is_outer_edge = outer < inner;
|
||||
if (is_outer_edge) {
|
||||
let aa_outer = outer - (aa_px * is_inverted);
|
||||
let alpha = smoothstep(0.0, 1.0, aa_outer*(1.0 / aa_px));
|
||||
return vec4<f32>((border_color.rgb/border_color.a)*alpha, alpha);
|
||||
} else {
|
||||
let aa_inner = inner - (aa_px * (1.0 - is_inverted));
|
||||
let alpha = smoothstep(0.0, 1.0, aa_inner*(1.0 / aa_px));
|
||||
let new_border_color = vec4<f32>((border_color.rgb/border_color.a)*alpha, alpha);
|
||||
return alphaOver(new_border_color, blend_color);
|
||||
}
|
||||
return border_color;
|
||||
} else if (outer >= 0.0) {
|
||||
return blend_color;
|
||||
} else {
|
||||
return vec4<f32>(0.0);
|
||||
}
|
||||
} else {
|
||||
let outer = dist + (border_px * is_inverted);
|
||||
let inner = (border_px * (1.0-is_inverted)) - dist;
|
||||
let in_border = outer >= 0.0 && inner >= 0.0;
|
||||
if (in_border) {
|
||||
return border_color;
|
||||
} else if (outer >= 0.0) {
|
||||
return blend_color;
|
||||
} else {
|
||||
return vec4<f32>(0.0);
|
||||
}
|
||||
}
|
||||
}
@@ -1,146 +0,0 @@
//! At the moment we only use rgba32, but it might also be useful to support other formats
|
||||
|
||||
const std = @import("std");
|
||||
const mach = @import("mach");
|
||||
const ft = @import("freetype");
|
||||
const zigimg = @import("zigimg");
|
||||
const Atlas = mach.gfx.Atlas;
|
||||
const AtlasErr = Atlas.Error;
|
||||
const AtlasUV = Atlas.Region.UV;
|
||||
const App = @import("main.zig").App;
|
||||
const draw = @import("draw.zig");
|
||||
|
||||
pub const Label = @This();
|
||||
|
||||
const Vec2 = @Vector(2, f32);
|
||||
const Vec4 = @Vector(4, f32);
|
||||
|
||||
const GlyphInfo = struct {
|
||||
uv_data: AtlasUV,
|
||||
metrics: ft.GlyphMetrics,
|
||||
};
|
||||
|
||||
face: ft.Face,
|
||||
size: i32,
|
||||
char_map: std.AutoHashMap(u21, GlyphInfo),
|
||||
allocator: std.mem.Allocator,
|
||||
|
||||
const WriterContext = struct {
|
||||
label: *Label,
|
||||
app: *App,
|
||||
position: Vec2,
|
||||
text_color: Vec4,
|
||||
};
|
||||
const WriterError = ft.Error || std.mem.Allocator.Error || AtlasErr;
|
||||
const Writer = std.io.Writer(WriterContext, WriterError, write);
|
||||
|
||||
pub fn writer(label: *Label, app: *App, position: Vec2, text_color: Vec4) Writer {
|
||||
return Writer{
|
||||
.context = .{
|
||||
.label = label,
|
||||
.app = app,
|
||||
.position = position,
|
||||
.text_color = text_color,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
pub fn init(lib: ft.Library, font_path: [*:0]const u8, face_index: i32, char_size: i32, allocator: std.mem.Allocator) !Label {
|
||||
return Label{
|
||||
.face = try lib.createFace(font_path, face_index),
|
||||
.size = char_size,
|
||||
.char_map = std.AutoHashMap(u21, GlyphInfo).init(allocator),
|
||||
.allocator = allocator,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(label: *Label) void {
|
||||
label.face.deinit();
|
||||
label.char_map.deinit();
|
||||
}
|
||||
|
||||
fn write(ctx: WriterContext, bytes: []const u8) WriterError!usize {
|
||||
var offset = Vec2{ 0, 0 };
|
||||
var j: usize = 0;
|
||||
while (j < bytes.len) {
|
||||
const len = std.unicode.utf8ByteSequenceLength(bytes[j]) catch unreachable;
|
||||
const char = std.unicode.utf8Decode(bytes[j..(j + len)]) catch unreachable;
|
||||
j += len;
|
||||
switch (char) {
|
||||
'\n' => {
|
||||
offset[0] = 0;
|
||||
offset[1] -= @as(f32, @floatFromInt(ctx.label.face.size().metrics().height >> 6));
|
||||
},
|
||||
' ' => {
|
||||
const v = try ctx.label.char_map.getOrPut(char);
|
||||
if (!v.found_existing) {
|
||||
try ctx.label.face.setCharSize(ctx.label.size * 64, 0, 50, 0);
|
||||
try ctx.label.face.loadChar(char, .{ .render = true });
|
||||
const glyph = ctx.label.face.glyph();
|
||||
v.value_ptr.* = GlyphInfo{
|
||||
.uv_data = undefined,
|
||||
.metrics = glyph.metrics(),
|
||||
};
|
||||
}
|
||||
offset[0] += @as(f32, @floatFromInt(v.value_ptr.metrics.horiAdvance >> 6));
|
||||
},
|
||||
else => {
|
||||
const v = try ctx.label.char_map.getOrPut(char);
|
||||
if (!v.found_existing) {
|
||||
try ctx.label.face.setCharSize(ctx.label.size * 64, 0, 50, 0);
|
||||
try ctx.label.face.loadChar(char, .{ .render = true });
|
||||
const glyph = ctx.label.face.glyph();
|
||||
const glyph_bitmap = glyph.bitmap();
|
||||
const glyph_width = glyph_bitmap.width();
|
||||
const glyph_height = glyph_bitmap.rows();
|
||||
|
||||
// Add 1 pixel padding to texture to avoid bleeding over other textures
|
||||
const glyph_data = try ctx.label.allocator.alloc(zigimg.color.Rgba32, (glyph_width + 2) * (glyph_height + 2));
|
||||
defer ctx.label.allocator.free(glyph_data);
|
||||
const glyph_buffer = glyph_bitmap.buffer().?;
|
||||
for (glyph_data, 0..) |*data, i| {
|
||||
const x = i % (glyph_width + 2);
|
||||
const y = i / (glyph_width + 2);
|
||||
|
||||
// zig fmt: off
|
||||
const glyph_col =
|
||||
if (x == 0 or x == (glyph_width + 1) or y == 0 or y == (glyph_height + 1))
|
||||
0
|
||||
else
|
||||
glyph_buffer[(y - 1) * glyph_width + (x - 1)];
|
||||
// zig fmt: on
|
||||
|
||||
data.* = zigimg.color.Rgba32.initRgb(glyph_col, glyph_col, glyph_col);
|
||||
}
|
||||
var glyph_atlas_region = try ctx.app.texture_atlas_data.reserve(ctx.label.allocator, glyph_width + 2, glyph_height + 2);
|
||||
ctx.app.texture_atlas_data.set(glyph_atlas_region, @as([*]const u8, @ptrCast(glyph_data.ptr))[0 .. glyph_data.len * 4]);
|
||||
|
||||
glyph_atlas_region.x += 1;
|
||||
glyph_atlas_region.y += 1;
|
||||
glyph_atlas_region.width -= 2;
|
||||
glyph_atlas_region.height -= 2;
|
||||
|
||||
v.value_ptr.* = GlyphInfo{
|
||||
.uv_data = glyph_atlas_region.calculateUV(ctx.app.texture_atlas_data.size),
|
||||
.metrics = glyph.metrics(),
|
||||
};
|
||||
}
|
||||
|
||||
try draw.quad(
|
||||
ctx.app,
|
||||
ctx.position + offset + Vec2{ @as(f32, @floatFromInt(v.value_ptr.metrics.horiBearingX >> 6)), @as(f32, @floatFromInt((v.value_ptr.metrics.horiBearingY - v.value_ptr.metrics.height) >> 6)) },
|
||||
.{ @as(f32, @floatFromInt(v.value_ptr.metrics.width >> 6)), @as(f32, @floatFromInt(v.value_ptr.metrics.height >> 6)) },
|
||||
.{ .blend_color = ctx.text_color },
|
||||
v.value_ptr.uv_data,
|
||||
);
|
||||
offset[0] += @as(f32, @floatFromInt(v.value_ptr.metrics.horiAdvance >> 6));
|
||||
},
|
||||
}
|
||||
}
|
||||
return bytes.len;
|
||||
}
|
||||
|
||||
pub fn print(label: *Label, app: *App, comptime fmt: []const u8, args: anytype, position: Vec2, text_color: Vec4) !void {
|
||||
const w = writer(label, app, position, text_color);
|
||||
try w.print(fmt, args);
|
||||
}
@@ -1,339 +0,0 @@
// TODO:
|
||||
// - handle textures better with texture atlas
|
||||
// - handle adding and removing triangles and quads better
|
||||
|
||||
const std = @import("std");
|
||||
const mach = @import("mach");
|
||||
const core = mach.core;
|
||||
const ft = @import("freetype");
|
||||
const zigimg = @import("zigimg");
|
||||
const assets = @import("assets");
|
||||
const draw = @import("draw.zig");
|
||||
const Label = @import("label.zig");
|
||||
const ResizableLabel = @import("resizable_label.zig");
|
||||
const gpu = mach.gpu;
|
||||
const Atlas = mach.gfx.Atlas;
|
||||
|
||||
pub const App = @This();
|
||||
|
||||
pipeline: *gpu.RenderPipeline,
|
||||
vertex_buffer: *gpu.Buffer,
|
||||
vertices: std.ArrayList(draw.Vertex),
|
||||
update_vertex_buffer: bool,
|
||||
vertex_uniform_buffer: *gpu.Buffer,
|
||||
update_vertex_uniform_buffer: bool,
|
||||
frag_uniform_buffer: *gpu.Buffer,
|
||||
fragment_uniform_list: std.ArrayList(draw.FragUniform),
|
||||
update_frag_uniform_buffer: bool,
|
||||
bind_group: *gpu.BindGroup,
|
||||
texture_atlas_data: Atlas,
|
||||
|
||||
pub fn init(app: *App) !void {
|
||||
try core.init(.{});
|
||||
|
||||
// TODO: Refactor texture atlas size number
|
||||
app.texture_atlas_data = try Atlas.init(
|
||||
core.allocator,
|
||||
1280,
|
||||
.rgba,
|
||||
);
|
||||
const atlas_size = gpu.Extent3D{ .width = app.texture_atlas_data.size, .height = app.texture_atlas_data.size };
|
||||
|
||||
const texture = core.device.createTexture(&.{
|
||||
.size = atlas_size,
|
||||
.format = .rgba8_unorm,
|
||||
.usage = .{
|
||||
.texture_binding = true,
|
||||
.copy_dst = true,
|
||||
.render_attachment = true,
|
||||
},
|
||||
});
|
||||
const data_layout = gpu.Texture.DataLayout{
|
||||
.bytes_per_row = @as(u32, @intCast(atlas_size.width * 4)),
|
||||
.rows_per_image = @as(u32, @intCast(atlas_size.height)),
|
||||
};
|
||||
|
||||
var img = try zigimg.Image.fromMemory(core.allocator, assets.gotta_go_fast_png);
|
||||
defer img.deinit();
|
||||
|
||||
const atlas_img_region = try app.texture_atlas_data.reserve(core.allocator, @as(u32, @truncate(img.width)), @as(u32, @truncate(img.height)));
|
||||
const img_uv_data = atlas_img_region.calculateUV(app.texture_atlas_data.size);
|
||||
|
||||
switch (img.pixels) {
|
||||
.rgba32 => |pixels| app.texture_atlas_data.set(
|
||||
atlas_img_region,
|
||||
@as([*]const u8, @ptrCast(pixels.ptr))[0 .. pixels.len * 4],
|
||||
),
|
||||
.rgb24 => |pixels| {
|
||||
const data = try rgb24ToRgba32(core.allocator, pixels);
|
||||
defer data.deinit(core.allocator);
|
||||
app.texture_atlas_data.set(
|
||||
atlas_img_region,
|
||||
@as([*]const u8, @ptrCast(data.rgba32.ptr))[0 .. data.rgba32.len * 4],
|
||||
);
|
||||
},
|
||||
else => @panic("unsupported image color format"),
|
||||
}
|
||||
|
||||
const white_tex_scale = 80;
|
||||
var atlas_white_region = try app.texture_atlas_data.reserve(core.allocator, white_tex_scale, white_tex_scale);
|
||||
atlas_white_region.x += 1;
|
||||
atlas_white_region.y += 1;
|
||||
atlas_white_region.width -= 2;
|
||||
atlas_white_region.height -= 2;
|
||||
const white_texture_uv_data = atlas_white_region.calculateUV(app.texture_atlas_data.size);
|
||||
const white_tex_data = try core.allocator.alloc(zigimg.color.Rgba32, white_tex_scale * white_tex_scale);
|
||||
defer core.allocator.free(white_tex_data);
|
||||
@memset(white_tex_data, zigimg.color.Rgba32.initRgb(0xff, 0xff, 0xff));
|
||||
app.texture_atlas_data.set(atlas_white_region, @as([*]const u8, @ptrCast(white_tex_data.ptr))[0 .. white_tex_data.len * 4]);
|
||||
|
||||
app.vertices = try std.ArrayList(draw.Vertex).initCapacity(core.allocator, 9);
|
||||
app.fragment_uniform_list = try std.ArrayList(draw.FragUniform).initCapacity(core.allocator, 3);
|
||||
|
||||
// Quick test for using freetype
|
||||
const lib = try ft.Library.init();
|
||||
defer lib.deinit();
|
||||
|
||||
const DemoMode = enum {
|
||||
gkurves,
|
||||
bitmap_text, // TODO: broken
|
||||
text,
|
||||
quad,
|
||||
circle,
|
||||
};
|
||||
const demo_mode: DemoMode = .gkurves;
|
||||
|
||||
core.queue.writeTexture(
|
||||
&.{ .texture = texture },
|
||||
&data_layout,
|
||||
&.{ .width = app.texture_atlas_data.size, .height = app.texture_atlas_data.size },
|
||||
app.texture_atlas_data.data,
|
||||
);
|
||||
|
||||
const wsize = core.size();
|
||||
const window_width = @as(f32, @floatFromInt(wsize.width));
|
||||
const window_height = @as(f32, @floatFromInt(wsize.height));
|
||||
const triangle_scale = 250;
|
||||
switch (demo_mode) {
|
||||
.gkurves => {
|
||||
try draw.equilateralTriangle(app, .{ window_width / 2, window_height / 1.9 }, triangle_scale, .{}, img_uv_data, 1.0);
|
||||
try draw.equilateralTriangle(app, .{ window_width / 2, window_height / 1.9 - triangle_scale }, triangle_scale, .{ .type = .quadratic_concave }, img_uv_data, 1.0);
|
||||
try draw.equilateralTriangle(app, .{ window_width / 2 - triangle_scale, window_height / 1.9 }, triangle_scale, .{ .type = .quadratic_convex }, white_texture_uv_data, 1.0);
|
||||
try draw.equilateralTriangle(app, .{ window_width / 2 - triangle_scale, window_height / 1.9 - triangle_scale }, triangle_scale, .{ .type = .quadratic_convex }, white_texture_uv_data, 0.5);
|
||||
},
|
||||
else => @panic("disabled for now"),
|
||||
// TODO: disabled for now because these rely on a Label API that expects a font filepath,
|
||||
// rather than bytes. This gkurve example / experiment test bed should probably be moved
|
||||
// elsewhere anyway.
|
||||
//
|
||||
// .bitmap_text => {
|
||||
// // const character = "Gotta-go-fast!\n0123456789\n~!@#$%^&*()_+è\n:\"<>?`-=[];',./";
|
||||
// // const character = "ABCDEFGHIJ\nKLMNOPQRST\nUVWXYZ";
|
||||
// const size_multiplier = 5;
|
||||
// var label = try Label.init(lib, assets.roboto_medium_ttf.path, 0, 110 * size_multiplier, core.allocator);
|
||||
// defer label.deinit();
|
||||
// try label.print(app, "All your game's bases are belong to us èçòà", .{}, @Vector(2, f32){ 0, 420 }, @Vector(4, f32){ 1, 1, 1, 1 });
|
||||
// try label.print(app, "wow!", .{}, @Vector(2, f32){ 70 * size_multiplier, 70 }, @Vector(4, f32){ 1, 1, 1, 1 });
|
||||
// },
|
||||
// .text => {
|
||||
// const character = "Gotta-go-fast!\n0123456789\n~!@#$%^&*()_+è\n:\"<>?`-=[];',./";
|
||||
// const size_multiplier = 5;
|
||||
|
||||
// var resizable_label: ResizableLabel = undefined;
|
||||
// try resizable_label.init(lib, assets.roboto_medium_ttf.path, 0, core.allocator, white_texture_uv_data);
|
||||
// defer resizable_label.deinit();
|
||||
// try resizable_label.print(app, character, .{}, @Vector(4, f32){ 20, 300, 0, 0 }, @Vector(4, f32){ 1, 1, 1, 1 }, 20 * size_multiplier);
|
||||
// // try resizable_label.print(app, "@", .{}, @Vector(4, f32){ 20, 150, 0, 0 }, @Vector(4, f32){ 1, 1, 1, 1 }, 130 * size_multiplier);
|
||||
// },
|
||||
.quad => {
|
||||
try draw.quad(app, .{ 0, 0 }, .{ 480, 480 }, .{}, .{ .x = 0, .y = 0, .width = 1, .height = 1 });
|
||||
},
|
||||
.circle => {
|
||||
try draw.circle(app, .{ window_width / 2, window_height / 2 }, window_height / 2 - 10, .{ 0, 0.5, 0.75, 1.0 }, white_texture_uv_data);
|
||||
},
|
||||
}
|
||||
|
||||
const vs_module = core.device.createShaderModuleWGSL("vert", @embedFile("vert.wgsl"));
|
||||
const fs_module = core.device.createShaderModuleWGSL("frag", @embedFile("frag.wgsl"));
|
||||
|
||||
const blend = gpu.BlendState{
|
||||
.color = .{
|
||||
.operation = .add,
|
||||
.src_factor = .src_alpha,
|
||||
.dst_factor = .one_minus_src_alpha,
|
||||
},
|
||||
.alpha = .{
|
||||
.operation = .add,
|
||||
.src_factor = .one,
|
||||
.dst_factor = .zero,
|
||||
},
|
||||
};
|
||||
|
||||
const color_target = gpu.ColorTargetState{
|
||||
.format = core.descriptor.format,
|
||||
.blend = &blend,
|
||||
.write_mask = gpu.ColorWriteMaskFlags.all,
|
||||
};
|
||||
const fragment = gpu.FragmentState.init(.{
|
||||
.module = fs_module,
|
||||
.entry_point = "main",
|
||||
.targets = &.{color_target},
|
||||
});
|
||||
|
||||
const vbgle = gpu.BindGroupLayout.Entry.buffer(0, .{ .vertex = true }, .uniform, true, 0);
|
||||
const fbgle = gpu.BindGroupLayout.Entry.buffer(1, .{ .fragment = true }, .read_only_storage, true, 0);
|
||||
const sbgle = gpu.BindGroupLayout.Entry.sampler(2, .{ .fragment = true }, .filtering);
|
||||
const tbgle = gpu.BindGroupLayout.Entry.texture(3, .{ .fragment = true }, .float, .dimension_2d, false);
|
||||
const bgl = core.device.createBindGroupLayout(
|
||||
&gpu.BindGroupLayout.Descriptor.init(.{
|
||||
.entries = &.{ vbgle, fbgle, sbgle, tbgle },
|
||||
}),
|
||||
);
|
||||
const bind_group_layouts = [_]*gpu.BindGroupLayout{bgl};
|
||||
const pipeline_layout = core.device.createPipelineLayout(&gpu.PipelineLayout.Descriptor.init(.{
|
||||
.bind_group_layouts = &bind_group_layouts,
|
||||
}));
|
||||
|
||||
const pipeline_descriptor = gpu.RenderPipeline.Descriptor{
|
||||
.fragment = &fragment,
|
||||
.layout = pipeline_layout,
|
||||
.vertex = gpu.VertexState.init(.{
|
||||
.module = vs_module,
|
||||
.entry_point = "main",
|
||||
.buffers = &.{draw.VERTEX_BUFFER_LAYOUT},
|
||||
}),
|
||||
};
|
||||
|
||||
const vertex_buffer = core.device.createBuffer(&.{
|
||||
.usage = .{ .copy_dst = true, .vertex = true },
|
||||
.size = @sizeOf(draw.Vertex) * app.vertices.items.len,
|
||||
.mapped_at_creation = .false,
|
||||
});
|
||||
|
||||
const vertex_uniform_buffer = core.device.createBuffer(&.{
|
||||
.usage = .{ .copy_dst = true, .uniform = true },
|
||||
.size = @sizeOf(draw.VertexUniform),
|
||||
.mapped_at_creation = .false,
|
||||
});
|
||||
|
||||
const frag_uniform_buffer = core.device.createBuffer(&.{
|
||||
.usage = .{ .copy_dst = true, .storage = true },
|
||||
.size = @sizeOf(draw.FragUniform) * app.fragment_uniform_list.items.len,
|
||||
.mapped_at_creation = .false,
|
||||
});
|
||||
|
||||
const sampler = core.device.createSampler(&.{
|
||||
// .mag_filter = .linear,
|
||||
// .min_filter = .linear,
|
||||
});
|
||||
|
||||
std.debug.assert((app.vertices.items.len / 3) == app.fragment_uniform_list.items.len);
|
||||
const bind_group = core.device.createBindGroup(
|
||||
&gpu.BindGroup.Descriptor.init(.{
|
||||
.layout = bgl,
|
||||
.entries = &.{
|
||||
gpu.BindGroup.Entry.buffer(0, vertex_uniform_buffer, 0, @sizeOf(draw.VertexUniform)),
|
||||
gpu.BindGroup.Entry.buffer(1, frag_uniform_buffer, 0, @sizeOf(draw.FragUniform) * app.fragment_uniform_list.items.len),
|
||||
gpu.BindGroup.Entry.sampler(2, sampler),
|
||||
gpu.BindGroup.Entry.textureView(3, texture.createView(&gpu.TextureView.Descriptor{ .dimension = .dimension_2d })),
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
app.pipeline = core.device.createRenderPipeline(&pipeline_descriptor);
|
||||
app.vertex_buffer = vertex_buffer;
|
||||
app.vertex_uniform_buffer = vertex_uniform_buffer;
|
||||
app.frag_uniform_buffer = frag_uniform_buffer;
|
||||
app.bind_group = bind_group;
|
||||
app.update_vertex_buffer = true;
|
||||
app.update_vertex_uniform_buffer = true;
|
||||
app.update_frag_uniform_buffer = true;
|
||||
|
||||
vs_module.release();
|
||||
fs_module.release();
|
||||
pipeline_layout.release();
|
||||
bgl.release();
|
||||
}
|
||||
|
||||
pub fn deinit(app: *App) void {
|
||||
defer core.deinit();
|
||||
|
||||
app.vertex_buffer.release();
|
||||
app.vertex_uniform_buffer.release();
|
||||
app.frag_uniform_buffer.release();
|
||||
app.bind_group.release();
|
||||
app.vertices.deinit();
|
||||
app.fragment_uniform_list.deinit();
|
||||
app.texture_atlas_data.deinit(core.allocator);
|
||||
}
|
||||
|
||||
pub fn update(app: *App) !bool {
|
||||
var iter = core.pollEvents();
|
||||
while (iter.next()) |event| {
|
||||
switch (event) {
|
||||
.key_press => |ev| {
|
||||
if (ev.key == .space) return true;
|
||||
},
|
||||
.framebuffer_resize => {
|
||||
app.update_vertex_uniform_buffer = true;
|
||||
},
|
||||
.close => return true,
|
||||
else => {},
|
||||
}
|
||||
}
|
||||
|
||||
const back_buffer_view = core.swap_chain.getCurrentTextureView().?;
|
||||
const color_attachment = gpu.RenderPassColorAttachment{
|
||||
.view = back_buffer_view,
|
||||
.clear_value = std.mem.zeroes(gpu.Color),
|
||||
.load_op = .clear,
|
||||
.store_op = .store,
|
||||
};
|
||||
|
||||
const encoder = core.device.createCommandEncoder(null);
|
||||
const render_pass_info = gpu.RenderPassDescriptor.init(.{
|
||||
.color_attachments = &.{color_attachment},
|
||||
});
|
||||
|
||||
{
|
||||
if (app.update_vertex_buffer) {
|
||||
encoder.writeBuffer(app.vertex_buffer, 0, app.vertices.items);
|
||||
app.update_vertex_buffer = false;
|
||||
}
|
||||
if (app.update_frag_uniform_buffer) {
|
||||
encoder.writeBuffer(app.frag_uniform_buffer, 0, app.fragment_uniform_list.items);
|
||||
app.update_frag_uniform_buffer = false;
|
||||
}
|
||||
if (app.update_vertex_uniform_buffer) {
|
||||
encoder.writeBuffer(app.vertex_uniform_buffer, 0, &[_]draw.VertexUniform{try draw.getVertexUniformBufferObject()});
|
||||
app.update_vertex_uniform_buffer = false;
|
||||
}
|
||||
}
|
||||
|
||||
const pass = encoder.beginRenderPass(&render_pass_info);
|
||||
pass.setPipeline(app.pipeline);
|
||||
pass.setVertexBuffer(0, app.vertex_buffer, 0, @sizeOf(draw.Vertex) * app.vertices.items.len);
|
||||
pass.setBindGroup(0, app.bind_group, &.{ 0, 0 });
|
||||
pass.draw(@as(u32, @truncate(app.vertices.items.len)), 1, 0, 0);
|
||||
pass.end();
|
||||
pass.release();
|
||||
|
||||
var command = encoder.finish(null);
|
||||
encoder.release();
|
||||
|
||||
core.queue.submit(&[_]*gpu.CommandBuffer{command});
|
||||
command.release();
|
||||
core.swap_chain.present();
|
||||
back_buffer_view.release();
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
fn rgb24ToRgba32(allocator: std.mem.Allocator, in: []zigimg.color.Rgb24) !zigimg.color.PixelStorage {
|
||||
const out = try zigimg.color.PixelStorage.init(allocator, .rgba32, in.len);
|
||||
var i: usize = 0;
|
||||
while (i < in.len) : (i += 1) {
|
||||
out.rgba32[i] = zigimg.color.Rgba32{ .r = in[i].r, .g = in[i].g, .b = in[i].b, .a = 255 };
|
||||
}
|
||||
return out;
|
||||
}
@@ -1,555 +0,0 @@
//! TODO: Refactor the API; maybe use a handle that contains the ft.Library and related state, and controls their init and deinit
|
||||
|
||||
const std = @import("std");
|
||||
const mach = @import("mach");
|
||||
const ft = @import("freetype");
|
||||
const App = @import("main.zig").App;
|
||||
const Vertex = @import("draw.zig").Vertex;
|
||||
const math = mach.math;
|
||||
const earcut = mach.earcut;
|
||||
const Atlas = mach.gfx.Atlas;
|
||||
const AtlasErr = Atlas.Error;
|
||||
const AtlasUV = Atlas.Region.UV;
|
||||
|
||||
// If true, show the filled triangles green, the concave beziers blue and the convex ones red
|
||||
const debug_colors = false;
|
||||
|
||||
pub const ResizableLabel = @This();
|
||||
|
||||
const Vec2 = @Vector(2, f32);
|
||||
const Vec4 = @Vector(4, f32);
|
||||
const VertexList = std.ArrayList(Vertex);
|
||||
|
||||
// All the data that a single character needs to be rendered
|
||||
// TODO: hori/vert advance, write file format
|
||||
const CharVertices = struct {
|
||||
filled_vertices: VertexList,
|
||||
filled_vertices_indices: std.ArrayList(u16),
|
||||
// Concave vertices belong to the filled_vertices list, so just index them
|
||||
concave_vertices: std.ArrayList(u16),
|
||||
// The point outside of the convex bezier doesn't belong to the filled vertices,
// but the other two points do, so put those in the indices
|
||||
convex_vertices: VertexList,
|
||||
convex_vertices_indices: std.ArrayList(u16),
|
||||
|
||||
fn deinit(self: CharVertices) void {
|
||||
self.filled_vertices.deinit();
|
||||
self.filled_vertices_indices.deinit();
|
||||
self.concave_vertices.deinit();
|
||||
self.convex_vertices.deinit();
|
||||
self.convex_vertices_indices.deinit();
|
||||
}
|
||||
};
|
||||
|
||||
face: ft.Face,
|
||||
char_map: std.AutoHashMap(u21, CharVertices),
|
||||
allocator: std.mem.Allocator,
|
||||
tessellator: earcut.Processor(f32),
|
||||
white_texture: AtlasUV,
|
||||
|
||||
// The data that the write function needs
|
||||
// TODO: move texture here, don't limit to just white_texture
|
||||
const WriterContext = struct {
|
||||
label: *ResizableLabel,
|
||||
app: *App,
|
||||
position: Vec4,
|
||||
text_color: Vec4,
|
||||
text_size: u32,
|
||||
};
|
||||
const WriterError = ft.Error || std.mem.Allocator.Error || AtlasErr;
|
||||
const Writer = std.io.Writer(WriterContext, WriterError, write);
|
||||
|
||||
pub fn writer(label: *ResizableLabel, app: *App, position: Vec4, text_color: Vec4, text_size: u32) Writer {
|
||||
return Writer{
|
||||
.context = .{
|
||||
.label = label,
|
||||
.app = app,
|
||||
.position = position,
|
||||
.text_color = text_color,
|
||||
.text_size = text_size,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
pub fn init(self: *ResizableLabel, lib: ft.Library, font_path: [*:0]const u8, face_index: i32, allocator: std.mem.Allocator, white_texture: AtlasUV) !void {
|
||||
self.* = ResizableLabel{
|
||||
.face = try lib.createFace(font_path, face_index),
|
||||
.char_map = std.AutoHashMap(u21, CharVertices).init(allocator),
|
||||
.allocator = allocator,
|
||||
.tessellator = earcut.Processor(f32){},
|
||||
.white_texture = white_texture,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(label: *ResizableLabel) void {
|
||||
label.face.deinit();
|
||||
label.tessellator.deinit(label.allocator);
|
||||
|
||||
var iter = label.char_map.valueIterator();
|
||||
while (iter.next()) |ptr| {
|
||||
ptr.deinit();
|
||||
}
|
||||
|
||||
label.char_map.deinit();
|
||||
}
|
||||
|
||||
// TODO: handle offsets
|
||||
// FIXME: many useless allocations for the arraylists
|
||||
fn write(ctx: WriterContext, bytes: []const u8) WriterError!usize {
|
||||
var offset = Vec4{ 0, 0, 0, 0 };
|
||||
var c: usize = 0;
|
||||
while (c < bytes.len) {
|
||||
const len = std.unicode.utf8ByteSequenceLength(bytes[c]) catch unreachable;
|
||||
const char = std.unicode.utf8Decode(bytes[c..(c + len)]) catch unreachable;
|
||||
c += len;
|
||||
switch (char) {
|
||||
'\n' => {
|
||||
offset[0] = 0;
|
||||
offset[1] -= @as(f32, @floatFromInt(ctx.label.face.glyph().metrics().vertAdvance)) * (@as(f32, @floatFromInt(ctx.text_size)) / 1024);
|
||||
},
|
||||
' ' => {
|
||||
@panic("TODO: Space character not implemented yet");
|
||||
// const v = try ctx.label.char_map.getOrPut(char);
|
||||
// if (!v.found_existing) {
|
||||
// try ctx.label.face.setCharSize(ctx.label.size * 64, 0, 50, 0);
|
||||
// try ctx.label.face.loadChar(char, .{ .render = true });
|
||||
// const glyph = ctx.label.face.glyph;
|
||||
// v.value_ptr.* = GlyphInfo{
|
||||
// .uv_data = undefined,
|
||||
// .metrics = glyph.metrics(),
|
||||
// };
|
||||
// }
|
||||
// offset[0] += @intToFloat(f32, v.value_ptr.metrics.horiAdvance >> 6);
|
||||
},
|
||||
else => {
|
||||
const v = try ctx.label.char_map.getOrPut(char);
|
||||
if (!v.found_existing) {
|
||||
try ctx.label.face.loadChar(char, .{ .no_scale = true, .no_bitmap = true });
|
||||
const glyph = ctx.label.face.glyph();
|
||||
|
||||
// Use a big scale and then scale to the actual text size
|
||||
const multiplier = 1024 << 6;
|
||||
const matrix = ft.Matrix{
|
||||
.xx = 1 * multiplier,
|
||||
.xy = 0 * multiplier,
|
||||
.yx = 0 * multiplier,
|
||||
.yy = 1 * multiplier,
|
||||
};
|
||||
glyph.outline().?.transform(matrix);
|
||||
|
||||
v.value_ptr.* = CharVertices{
|
||||
.filled_vertices = VertexList.init(ctx.label.allocator),
|
||||
.filled_vertices_indices = std.ArrayList(u16).init(ctx.label.allocator),
|
||||
.concave_vertices = std.ArrayList(u16).init(ctx.label.allocator),
|
||||
.convex_vertices = VertexList.init(ctx.label.allocator),
|
||||
.convex_vertices_indices = std.ArrayList(u16).init(ctx.label.allocator),
|
||||
};
|
||||
|
||||
var outline_ctx = OutlineContext{
|
||||
.outline_verts = std.ArrayList(std.ArrayList(Vec2)).init(ctx.label.allocator),
|
||||
.inside_verts = std.ArrayList(Vec2).init(ctx.label.allocator),
|
||||
.concave_vertices = std.ArrayList(Vec2).init(ctx.label.allocator),
|
||||
.convex_vertices = std.ArrayList(Vec2).init(ctx.label.allocator),
|
||||
};
|
||||
defer outline_ctx.outline_verts.deinit();
|
||||
defer {
|
||||
for (outline_ctx.outline_verts.items) |*item| item.deinit();
|
||||
}
|
||||
defer outline_ctx.inside_verts.deinit();
|
||||
defer outline_ctx.concave_vertices.deinit();
|
||||
defer outline_ctx.convex_vertices.deinit();
|
||||
|
||||
const callbacks = ft.Outline.Funcs(*OutlineContext){
|
||||
.move_to = moveToFunction,
|
||||
.line_to = lineToFunction,
|
||||
.conic_to = conicToFunction,
|
||||
.cubic_to = cubicToFunction,
|
||||
.shift = 0,
|
||||
.delta = 0,
|
||||
};
|
||||
try ctx.label.face.glyph().outline().?.decompose(&outline_ctx, callbacks);
|
||||
uniteOutsideAndInsideVertices(&outline_ctx);
|
||||
|
||||
// Tessellator.triangulatePolygons() doesn't seem to work, so just
|
||||
// call triangulatePolygon() for each polygon, and put the results all
|
||||
// in all_outlines and all_indices
|
||||
var all_outlines = std.ArrayList(Vec2).init(ctx.label.allocator);
|
||||
defer all_outlines.deinit();
|
||||
var all_indices = std.ArrayList(u16).init(ctx.label.allocator);
|
||||
defer all_indices.deinit();
|
||||
var idx_offset: u16 = 0;
|
||||
for (outline_ctx.outline_verts.items) |item| {
|
||||
if (item.items.len == 0) continue;
|
||||
// TODO(gkurve): don't discard this, make tessellator use Vec2 / avoid copy?
|
||||
var polygon = std.ArrayListUnmanaged(f32){};
|
||||
defer polygon.deinit(ctx.label.allocator);
|
||||
if (ctx.label.face.glyph().outline().?.orientation() == .truetype) {
|
||||
// TrueType orientation has clockwise contours, so reverse the list
|
||||
// since we need CCW.
|
||||
var i = item.items.len - 1;
|
||||
while (i > 0) : (i -= 1) {
|
||||
try polygon.append(ctx.label.allocator, item.items[i][0]);
|
||||
try polygon.append(ctx.label.allocator, item.items[i][1]);
|
||||
}
|
||||
} else {
|
||||
for (item.items) |vert| {
|
||||
try polygon.append(ctx.label.allocator, vert[0]);
|
||||
try polygon.append(ctx.label.allocator, vert[1]);
|
||||
}
|
||||
}
|
||||
|
||||
try ctx.label.tessellator.process(ctx.label.allocator, polygon.items, null, 2);
|
||||
|
||||
for (ctx.label.tessellator.triangles.items) |idx| {
|
||||
try all_outlines.append(Vec2{ polygon.items[idx * 2], polygon.items[(idx * 2) + 1] });
|
||||
try all_indices.append(@as(u16, @intCast((idx * 2) + idx_offset)));
|
||||
}
|
||||
idx_offset += @as(u16, @intCast(ctx.label.tessellator.triangles.items.len));
|
||||
}
|
||||
|
||||
for (all_outlines.items) |item| {
|
||||
// FIXME: The uv_data is wrong; it should be pushed up by the lowest point a character can reach
|
||||
const vertex_uv = item / math.vec.splat(@Vector(2, f32), 1024 << 6);
|
||||
const vertex_pos = Vec4{ item[0], item[1], 0, 1 };
|
||||
try v.value_ptr.filled_vertices.append(Vertex{ .pos = vertex_pos, .uv = vertex_uv });
|
||||
}
|
||||
try v.value_ptr.filled_vertices_indices.appendSlice(all_indices.items);
|
||||
|
||||
// TODO(gkurve): could more optimally find index (e.g. already know it from
|
||||
// data structure, instead of finding equal point.)
|
||||
for (outline_ctx.concave_vertices.items) |concave_control| {
|
||||
for (all_outlines.items, 0..) |item, j| {
|
||||
if (vec2Equal(item, concave_control)) {
|
||||
try v.value_ptr.concave_vertices.append(@as(u16, @truncate(j)));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std.debug.assert((outline_ctx.convex_vertices.items.len % 3) == 0);
|
||||
var i: usize = 0;
|
||||
while (i < outline_ctx.convex_vertices.items.len) : (i += 3) {
|
||||
const vert = outline_ctx.convex_vertices.items[i];
|
||||
const vertex_uv = vert / math.vec.splat(@Vector(2, f32), 1024 << 6);
|
||||
const vertex_pos = Vec4{ vert[0], vert[1], 0, 1 };
|
||||
try v.value_ptr.convex_vertices.append(Vertex{ .pos = vertex_pos, .uv = vertex_uv });
|
||||
|
||||
var found: usize = 0;
|
||||
for (all_outlines.items, 0..) |item, j| {
|
||||
if (vec2Equal(item, outline_ctx.convex_vertices.items[i + 1])) {
|
||||
try v.value_ptr.convex_vertices_indices.append(@as(u16, @truncate(j)));
|
||||
found += 1;
|
||||
}
|
||||
if (vec2Equal(item, outline_ctx.convex_vertices.items[i + 2])) {
|
||||
try v.value_ptr.convex_vertices_indices.append(@as(u16, @truncate(j)));
|
||||
found += 1;
|
||||
}
|
||||
if (found == 2) break;
|
||||
}
|
||||
std.debug.assert(found == 2);
|
||||
}
|
||||
std.debug.assert(((v.value_ptr.convex_vertices.items.len + v.value_ptr.convex_vertices_indices.items.len) % 3) == 0);
|
||||
}
|
||||
|
||||
// Read the data and apply resizing of pos and uv
|
||||
const filled_vertices_after_offset = try ctx.label.allocator.alloc(Vertex, v.value_ptr.filled_vertices.items.len);
|
||||
defer ctx.label.allocator.free(filled_vertices_after_offset);
|
||||
for (filled_vertices_after_offset, 0..) |*vert, i| {
|
||||
vert.* = v.value_ptr.filled_vertices.items[i];
|
||||
vert.pos *= Vec4{ @as(f32, @floatFromInt(ctx.text_size)) / 1024, @as(f32, @floatFromInt(ctx.text_size)) / 1024, 0, 1 };
|
||||
vert.pos += ctx.position + offset;
|
||||
vert.uv = .{
|
||||
vert.uv[0] * ctx.label.white_texture.width + ctx.label.white_texture.x,
|
||||
vert.uv[1] * ctx.label.white_texture.height + ctx.label.white_texture.y,
|
||||
};
|
||||
}
|
||||
try ctx.app.vertices.appendSlice(filled_vertices_after_offset);
|
||||
|
||||
if (debug_colors) {
|
||||
try ctx.app.fragment_uniform_list.appendNTimes(.{ .blend_color = .{ 0, 1, 0, 1 } }, filled_vertices_after_offset.len / 3);
|
||||
} else {
|
||||
try ctx.app.fragment_uniform_list.appendNTimes(.{ .blend_color = ctx.text_color }, filled_vertices_after_offset.len / 3);
|
||||
}
|
||||
|
||||
var convex_vertices_after_offset = try ctx.label.allocator.alloc(Vertex, v.value_ptr.convex_vertices.items.len + v.value_ptr.convex_vertices_indices.items.len);
|
||||
defer ctx.label.allocator.free(convex_vertices_after_offset);
|
||||
var j: u16 = 0;
|
||||
var k: u16 = 0;
|
||||
var convex_vertices_consumed: usize = 0;
|
||||
while (j < convex_vertices_after_offset.len) : (j += 3) {
|
||||
convex_vertices_after_offset[j] = v.value_ptr.convex_vertices.items[j / 3];
|
||||
convex_vertices_consumed += 1;
|
||||
|
||||
convex_vertices_after_offset[j].pos *= Vec4{ @as(f32, @floatFromInt(ctx.text_size)) / 1024, @as(f32, @floatFromInt(ctx.text_size)) / 1024, 0, 1 };
|
||||
convex_vertices_after_offset[j].pos += ctx.position + offset;
|
||||
convex_vertices_after_offset[j].uv = .{
|
||||
convex_vertices_after_offset[j].uv[0] * ctx.label.white_texture.width + ctx.label.white_texture.x,
|
||||
convex_vertices_after_offset[j].uv[1] * ctx.label.white_texture.height + ctx.label.white_texture.y,
|
||||
};
|
||||
|
||||
convex_vertices_after_offset[j + 1] = filled_vertices_after_offset[v.value_ptr.convex_vertices_indices.items[k]];
|
||||
convex_vertices_after_offset[j + 2] = filled_vertices_after_offset[v.value_ptr.convex_vertices_indices.items[k + 1]];
|
||||
k += 2;
|
||||
}
|
||||
std.debug.assert(convex_vertices_consumed == v.value_ptr.convex_vertices.items.len);
|
||||
try ctx.app.vertices.appendSlice(convex_vertices_after_offset);
|
||||
|
||||
if (debug_colors) {
|
||||
try ctx.app.fragment_uniform_list.appendNTimes(.{ .type = .quadratic_convex, .blend_color = .{ 1, 0, 0, 1 } }, convex_vertices_after_offset.len / 3);
|
||||
} else {
|
||||
try ctx.app.fragment_uniform_list.appendNTimes(.{ .type = .quadratic_convex, .blend_color = ctx.text_color }, convex_vertices_after_offset.len / 3);
|
||||
}
|
||||
|
||||
const concave_vertices_after_offset = try ctx.label.allocator.alloc(Vertex, v.value_ptr.concave_vertices.items.len);
|
||||
defer ctx.label.allocator.free(concave_vertices_after_offset);
|
||||
for (concave_vertices_after_offset, 0..) |*vert, i| {
|
||||
vert.* = filled_vertices_after_offset[v.value_ptr.concave_vertices.items[i]];
|
||||
}
|
||||
try ctx.app.vertices.appendSlice(concave_vertices_after_offset);
|
||||
|
||||
if (debug_colors) {
|
||||
try ctx.app.fragment_uniform_list.appendNTimes(.{ .type = .quadratic_concave, .blend_color = .{ 0, 0, 1, 1 } }, concave_vertices_after_offset.len / 3);
|
||||
} else {
|
||||
try ctx.app.fragment_uniform_list.appendNTimes(.{ .type = .quadratic_concave, .blend_color = ctx.text_color }, concave_vertices_after_offset.len / 3);
|
||||
}
|
||||
|
||||
ctx.app.update_vertex_buffer = true;
|
||||
ctx.app.update_frag_uniform_buffer = true;
|
||||
|
||||
offset[0] += @as(f32, @floatFromInt(ctx.label.face.glyph().metrics().horiAdvance)) * (@as(f32, @floatFromInt(ctx.text_size)) / 1024);
|
||||
},
|
||||
}
|
||||
}
|
||||
return bytes.len;
|
||||
}
|
||||
|
||||
// The first M (move_to) initializes the outline (first point).
// After many Q, L, or C segments we come back to the first point, and M is called again if the shape
// needs to be hollowed out. On that second M we instead use an L to connect the first point to the
// start of the hollow path, follow it as normal, and close the hollow path with another L at the end.
|
||||
|
||||
// This is basically how an o would be drawn, each ┌... character is a Vertex
|
||||
// ┌--------┐
|
||||
// | |
|
||||
// | |
|
||||
// | |
|
||||
// | ┌----┐ |
|
||||
// └-┘ | | Consider the vertices here and below to be at the same height, they are coincident
|
||||
// ┌-┐ | |
|
||||
// | └----┘ |
|
||||
// | |
|
||||
// | |
|
||||
// | |
|
||||
// └--------┘
|
||||
|
||||
const OutlineContext = struct {
|
||||
/// There may be more than one polygon (for example with 'i' we have the polygon of the base and
|
||||
/// another for the circle)
|
||||
outline_verts: std.ArrayList(std.ArrayList(Vec2)),
|
||||
|
||||
/// The internal outline, used for carving the shape. For example in 'a', we would first get the
|
||||
/// outline of the entire 'a', but if we stopped there, the center hole would be filled, so we
|
||||
/// need another outline for carving the filled polygon.
|
||||
inside_verts: std.ArrayList(Vec2),
|
||||
|
||||
/// For the concave (inner 'o') and convex (outer 'o') beziers
|
||||
concave_vertices: std.ArrayList(Vec2),
|
||||
convex_vertices: std.ArrayList(Vec2),
|
||||
};
|
||||
|
||||
/// If there are elements in inside_verts, unite them with the outline_verts, effectively carving
|
||||
/// the shape
|
||||
fn uniteOutsideAndInsideVertices(ctx: *OutlineContext) void {
|
||||
if (ctx.inside_verts.items.len != 0) {
|
||||
// Check which point of outline is closer to the first of inside
|
||||
var last_outline = &ctx.outline_verts.items[ctx.outline_verts.items.len - 1];
|
||||
if (last_outline.items.len == 0 and ctx.outline_verts.items.len >= 2) {
|
||||
last_outline = &ctx.outline_verts.items[ctx.outline_verts.items.len - 2];
|
||||
}
|
||||
std.debug.assert(last_outline.items.len != 0);
|
||||
const closest_to_inside: usize = blk: {
|
||||
const first_point_inside = ctx.inside_verts.items[0];
|
||||
var min = math.floatMax(f32);
|
||||
var closest_index: usize = undefined;
|
||||
|
||||
for (last_outline.items, 0..) |item, i| {
|
||||
const dist = @reduce(.Add, (item - first_point_inside) * (item - first_point_inside));
|
||||
if (dist < min) {
|
||||
min = dist;
|
||||
closest_index = i;
|
||||
}
|
||||
}
|
||||
break :blk closest_index;
|
||||
};
|
||||
|
||||
ctx.inside_verts.append(last_outline.items[closest_to_inside]) catch unreachable;
|
||||
last_outline.insertSlice(closest_to_inside + 1, ctx.inside_verts.items) catch unreachable;
|
||||
ctx.inside_verts.clearRetainingCapacity();
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Also return allocation errors
|
||||
fn moveToFunction(ctx: *OutlineContext, _to: ft.Vector) ft.Error!void {
|
||||
uniteOutsideAndInsideVertices(ctx);
|
||||
|
||||
const to = Vec2{ @as(f32, @floatFromInt(_to.x)), @as(f32, @floatFromInt(_to.y)) };
|
||||
|
||||
// To check whether a point is carving a polygon, use the point-in-polygon test to determine if
|
||||
// we're inside or outside of the polygon.
|
||||
const new_point_is_inside = pointInPolygon(to, ctx.outline_verts.items);
|
||||
|
||||
if (ctx.outline_verts.items.len == 0 or ctx.outline_verts.items[ctx.outline_verts.items.len - 1].items.len > 0) {
|
||||
// The last polygon we were building is now finished.
|
||||
const new_outline_list = std.ArrayList(Vec2).init(ctx.outline_verts.allocator);
|
||||
ctx.outline_verts.append(new_outline_list) catch unreachable;
|
||||
}
|
||||
|
||||
if (new_point_is_inside) {
|
||||
ctx.inside_verts.append(to) catch unreachable;
|
||||
} else {
|
||||
ctx.outline_verts.items[ctx.outline_verts.items.len - 1].append(to) catch unreachable;
|
||||
}
|
||||
}
|
||||
|
||||
fn lineToFunction(ctx: *OutlineContext, to: ft.Vector) ft.Error!void {
|
||||
// std.log.info("L {} {}", .{ to.x, to.y });
|
||||
|
||||
// If inside_verts is not empty, we need to fill it
|
||||
if (ctx.inside_verts.items.len != 0) {
|
||||
ctx.inside_verts.append(.{ @as(f32, @floatFromInt(to.x)), @as(f32, @floatFromInt(to.y)) }) catch unreachable;
|
||||
} else {
|
||||
// Otherwise append the new point to the last polygon
|
||||
ctx.outline_verts.items[ctx.outline_verts.items.len - 1].append(.{ @as(f32, @floatFromInt(to.x)), @as(f32, @floatFromInt(to.y)) }) catch unreachable;
|
||||
}
|
||||
}
|
||||
|
||||
/// Called to indicate that a quadratic bezier curve occurred between the previous point on the glyph
|
||||
/// outline to the `_to` point on the path, with the specified `_control` quadratic bezier control
|
||||
/// point.
|
||||
fn conicToFunction(ctx: *OutlineContext, _control: ft.Vector, _to: ft.Vector) ft.Error!void {
|
||||
// std.log.info("C {} {} {} {}", .{ control.x, control.y, to.x, to.y });
|
||||
const control = Vec2{ @as(f32, @floatFromInt(_control.x)), @as(f32, @floatFromInt(_control.y)) };
|
||||
const to = Vec2{ @as(f32, @floatFromInt(_to.x)), @as(f32, @floatFromInt(_to.y)) };
|
||||
|
||||
// If our last point was inside the glyph (e.g. the hole in the letter 'o') then this is a
|
||||
// continuation of that path, and we should write this vertex to inside_verts. Otherwise we're
|
||||
// on the outside and the vertex should go in outline_verts.
|
||||
//
|
||||
// We derive if we're on the inside or outside based on whether inside_verts has items in it,
|
||||
// because only a lineTo callback can move us from the inside to the outside or vice-versa. A
|
||||
// quadratic bezier would *always* be the continuation of an inside or outside path.
|
||||
var verts_to_write = if (ctx.inside_verts.items.len != 0) &ctx.inside_verts else &ctx.outline_verts.items[ctx.outline_verts.items.len - 1];
|
||||
const previous_point = verts_to_write.items[verts_to_write.items.len - 1];
|
||||
|
||||
var vertices = [_]Vec2{ control, to, previous_point };
|
||||
|
||||
const vec1 = control - previous_point;
|
||||
const vec2 = to - control;
|
||||
|
||||
// CCW (convex) or CW (concave)?
|
||||
if ((vec1[0] * vec2[1] - vec1[1] * vec2[0]) <= 0) {
|
||||
// Convex
|
||||
ctx.convex_vertices.appendSlice(&vertices) catch unreachable;
|
||||
verts_to_write.append(to) catch unreachable;
|
||||
return;
|
||||
}
|
||||
|
||||
// Concave
|
||||
//
|
||||
// In this case, we need to write a vertex (for the filled triangle) to the quadratic
|
||||
// control point. However, since this is the concave case the control point could be outside
|
||||
// the shape itself. We need to ensure it is not, otherwise the triangle would end up filling
|
||||
// space outside the shape.
|
||||
//
|
||||
// Diagram: https://user-images.githubusercontent.com/3173176/189944586-bc1b109a-62c4-4ef5-a605-4c6a7e4a1abd.png
|
||||
//
|
||||
// To fix this, we must determine if the control point intersects with any of our outline
|
||||
// segments. If it does, we use that intersection point as the vertex. Otherwise, it doesn't go
|
||||
// past an outline segment and we can use the control point just fine.
|
||||
var intersection: ?Vec2 = null;
|
||||
for (ctx.outline_verts.items) |polygon| {
|
||||
var i: usize = 1;
|
||||
while (i < polygon.items.len) : (i += 1) {
|
||||
const v1 = polygon.items[i - 1];
|
||||
const v2 = polygon.items[i];
|
||||
if (vec2Equal(v1, previous_point) or vec2Equal(v1, control) or vec2Equal(v1, to) or vec2Equal(v2, previous_point) or vec2Equal(v2, control) or vec2Equal(v2, to)) continue;
|
||||
|
||||
intersection = intersectLineSegments(v1, v2, previous_point, control);
|
||||
if (intersection != null) break;
|
||||
}
|
||||
if (intersection != null) break;
|
||||
}
|
||||
|
||||
if (intersection) |intersect| {
|
||||
// TODO: properly scale control/intersection point a little bit towards the previous_point,
|
||||
// so our tessellator doesn't get confused about it being exactly on the path.
|
||||
//
|
||||
// TODO(gkurve): Moving this control point changes the bezier shape (obviously) which means
|
||||
// it is no longer true to the original shape. Need to fix this with some type of negative
|
||||
// border on the gkurve primitive.
|
||||
vertices[0] = Vec2{ intersect[0] * 0.99, intersect[1] * 0.99 };
|
||||
}
|
||||
ctx.concave_vertices.appendSlice(&vertices) catch unreachable;
|
||||
verts_to_write.append(vertices[0]) catch unreachable;
|
||||
verts_to_write.append(to) catch unreachable;
|
||||
}
|
||||
|
||||
// Doesn't seem to be used much
|
||||
fn cubicToFunction(ctx: *OutlineContext, control_0: ft.Vector, control_1: ft.Vector, to: ft.Vector) ft.Error!void {
|
||||
_ = ctx;
|
||||
_ = control_0;
|
||||
_ = control_1;
|
||||
_ = to;
|
||||
@panic("TODO: search how to approximate cubic bezier with quadratic ones");
|
||||
}
|
||||
|

pub fn print(label: *ResizableLabel, app: *App, comptime fmt: []const u8, args: anytype, position: Vec4, text_color: Vec4, text_size: u32) !void {
    const w = writer(label, app, position, text_color, text_size);
    try w.print(fmt, args);
}

/// Intersects the line segments [p0, p1] and [p2, p3], returning the intersection point if any.
fn intersectLineSegments(p0: Vec2, p1: Vec2, p2: Vec2, p3: Vec2) ?Vec2 {
    const s1 = Vec2{ p1[0] - p0[0], p1[1] - p0[1] };
    const s2 = Vec2{ p3[0] - p2[0], p3[1] - p2[1] };
    // Solve for the two segment parameters: s runs along [p2, p3], t runs along [p0, p1].
    const s = (-s1[1] * (p0[0] - p2[0]) + s1[0] * (p0[1] - p2[1])) / (-s2[0] * s1[1] + s1[0] * s2[1]);
    const t = (s2[0] * (p0[1] - p2[1]) - s2[1] * (p0[0] - p2[0])) / (-s2[0] * s1[1] + s1[0] * s2[1]);

    if (s >= 0 and s <= 1 and t >= 0 and t <= 1) {
        // Collision
        return Vec2{ p0[0] + (t * s1[0]), p0[1] + (t * s1[1]) };
    }
    return null; // No collision
}
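
// A quick worked check of the parametric intersection above (illustrative only, with made-up
// coordinates): the diagonals of a 2x2 square cross at (1,1), while parallel segments produce a
// zero denominator whose non-finite quotient fails the 0..1 range test and falls through to null.
test "intersectLineSegments finds the crossing of two diagonals" {
    const hit = intersectLineSegments(Vec2{ 0, 0 }, Vec2{ 2, 2 }, Vec2{ 0, 2 }, Vec2{ 2, 0 }).?;
    try std.testing.expect(hit[0] == 1 and hit[1] == 1);

    // Parallel, non-touching segments: no intersection.
    try std.testing.expect(intersectLineSegments(Vec2{ 0, 0 }, Vec2{ 1, 0 }, Vec2{ 0, 1 }, Vec2{ 1, 1 }) == null);
}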

fn intersectRayToLineSegment(ray_origin: Vec2, ray_direction: Vec2, p1: Vec2, p2: Vec2) ?Vec2 {
    // Approximate the ray as a very long segment whose far endpoint is
    // ray_origin + ray_direction * 10,000,000.
    return intersectLineSegments(ray_origin, ray_origin + ray_direction * Vec2{ 10000000.0, 10000000.0 }, p1, p2);
}

fn vec2Equal(a: Vec2, b: Vec2) bool {
    return a[0] == b[0] and a[1] == b[1];
}

fn vec2CrossProduct(a: Vec2, b: Vec2) f32 {
    return (a[0] * b[1]) - (a[1] * b[0]);
}

fn pointInPolygon(p: Vec2, polygon: []std.ArrayList(Vec2)) bool {
    // Cast a ray to the right of the point and count how many polygon edges it crosses:
    // an odd number of crossings means the point is inside, an even number means outside.
    var is_inside = false;
    for (polygon) |contour| {
        var i: usize = 1;
        while (i < contour.items.len) : (i += 1) {
            const v1 = contour.items[i - 1];
            const v2 = contour.items[i];

            if (intersectRayToLineSegment(p, Vec2{ 1, 0 }, v1, v2)) |_| {
                is_inside = !is_inside;
            }
        }
    }
    return is_inside;
}
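
// A small sketch of the ray cast above in action, assuming the rightward ray direction (1, 0);
// the square contour and test points are made up for illustration. A point inside the closed
// square sees one crossing (odd), a point to its right sees none (even).
test "pointInPolygon counts rightward ray crossings" {
    var contour = std.ArrayList(Vec2).init(std.testing.allocator);
    defer contour.deinit();
    try contour.appendSlice(&[_]Vec2{
        Vec2{ 0, 0 }, Vec2{ 2, 0 }, Vec2{ 2, 2 }, Vec2{ 0, 2 }, Vec2{ 0, 0 },
    });
    var polygon = [_]std.ArrayList(Vec2){contour};

    try std.testing.expect(pointInPolygon(Vec2{ 1, 1 }, &polygon));
    try std.testing.expect(!pointInPolygon(Vec2{ 3, 1 }, &polygon));
}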

@@ -1,314 +0,0 @@
// Copied from zig/src/tracy.zig

const std = @import("std");
const builtin = @import("builtin");
// TODO: integrate with tracy?
// const build_options = @import("build_options");

// pub const enable = if (builtin.is_test) false else build_options.enable_tracy;
// pub const enable_allocation = enable and build_options.enable_tracy_allocation;
// pub const enable_callstack = enable and build_options.enable_tracy_callstack;
pub const enable = false;
pub const enable_allocation = enable and false;
pub const enable_callstack = enable and false;

// TODO: make this configurable
const callstack_depth = 10;

const ___tracy_c_zone_context = extern struct {
    id: u32,
    active: c_int,

    pub inline fn end(self: @This()) void {
        ___tracy_emit_zone_end(self);
    }

    pub inline fn addText(self: @This(), text: []const u8) void {
        ___tracy_emit_zone_text(self, text.ptr, text.len);
    }

    pub inline fn setName(self: @This(), name: []const u8) void {
        ___tracy_emit_zone_name(self, name.ptr, name.len);
    }

    pub inline fn setColor(self: @This(), color: u32) void {
        ___tracy_emit_zone_color(self, color);
    }

    pub inline fn setValue(self: @This(), value: u64) void {
        ___tracy_emit_zone_value(self, value);
    }
};

pub const Ctx = if (enable) ___tracy_c_zone_context else struct {
    pub inline fn end(self: @This()) void {
        _ = self;
    }

    pub inline fn addText(self: @This(), text: []const u8) void {
        _ = self;
        _ = text;
    }

    pub inline fn setName(self: @This(), name: []const u8) void {
        _ = self;
        _ = name;
    }

    pub inline fn setColor(self: @This(), color: u32) void {
        _ = self;
        _ = color;
    }

    pub inline fn setValue(self: @This(), value: u64) void {
        _ = self;
        _ = value;
    }
};

pub inline fn trace(comptime src: std.builtin.SourceLocation) Ctx {
    if (!enable) return .{};

    if (enable_callstack) {
        return ___tracy_emit_zone_begin_callstack(&.{
            .name = null,
            .function = src.fn_name.ptr,
            .file = src.file.ptr,
            .line = src.line,
            .color = 0,
        }, callstack_depth, 1);
    } else {
        return ___tracy_emit_zone_begin(&.{
            .name = null,
            .function = src.fn_name.ptr,
            .file = src.file.ptr,
            .line = src.line,
            .color = 0,
        }, 1);
    }
}

pub inline fn traceNamed(comptime src: std.builtin.SourceLocation, comptime name: [:0]const u8) Ctx {
    if (!enable) return .{};

    if (enable_callstack) {
        return ___tracy_emit_zone_begin_callstack(&.{
            .name = name.ptr,
            .function = src.fn_name.ptr,
            .file = src.file.ptr,
            .line = src.line,
            .color = 0,
        }, callstack_depth, 1);
    } else {
        return ___tracy_emit_zone_begin(&.{
            .name = name.ptr,
            .function = src.fn_name.ptr,
            .file = src.file.ptr,
            .line = src.line,
            .color = 0,
        }, 1);
    }
}
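
// A minimal usage sketch for the zone API above (the function and its workload are illustrative
// assumptions). With `enable = false`, Ctx is the no-op struct and the whole zone collapses at
// compile time, so instrumented call sites cost nothing when Tracy is absent.
fn exampleTracedWork() void {
    const zone = trace(@src());
    defer zone.end();

    const inner = traceNamed(@src(), "example inner zone");
    defer inner.end();
    // ... the work to be measured would go here ...
}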

pub fn tracyAllocator(allocator: std.mem.Allocator) TracyAllocator(null) {
    return TracyAllocator(null).init(allocator);
}

pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
    return struct {
        parent_allocator: std.mem.Allocator,

        const Self = @This();

        pub fn init(parent_allocator: std.mem.Allocator) Self {
            return .{
                .parent_allocator = parent_allocator,
            };
        }

        pub fn allocator(self: *Self) std.mem.Allocator {
            return std.mem.Allocator.init(self, allocFn, resizeFn, freeFn);
        }

        fn allocFn(self: *Self, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 {
            const result = self.parent_allocator.rawAlloc(len, ptr_align, len_align, ret_addr);
            if (result) |data| {
                if (data.len != 0) {
                    if (name) |n| {
                        allocNamed(data.ptr, data.len, n);
                    } else {
                        alloc(data.ptr, data.len);
                    }
                }
            } else |_| {
                messageColor("allocation failed", 0xFF0000);
            }
            return result;
        }

        fn resizeFn(self: *Self, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize {
            if (self.parent_allocator.rawResize(buf, buf_align, new_len, len_align, ret_addr)) |resized_len| {
                if (name) |n| {
                    freeNamed(buf.ptr, n);
                    allocNamed(buf.ptr, resized_len, n);
                } else {
                    free(buf.ptr);
                    alloc(buf.ptr, resized_len);
                }

                return resized_len;
            }

            // During normal operation the compiler hits this case thousands of times, so emitting
            // messages for it would be both slow and cluttered.
            return null;
        }

        fn freeFn(self: *Self, buf: []u8, buf_align: u29, ret_addr: usize) void {
            self.parent_allocator.rawFree(buf, buf_align, ret_addr);
            // This condition handles free being called on an empty slice that was never allocated,
            // for example `std.process.getSelfExeSharedLibPaths` can return `&[_][:0]u8{}`.
            if (buf.len != 0) {
                if (name) |n| {
                    freeNamed(buf.ptr, n);
                } else {
                    free(buf.ptr);
                }
            }
        }
    };
}
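
// A usage sketch for the allocator wrapper above (the parent allocator and buffer size are
// arbitrary, illustrative choices): every allocation and free made through `profiled` is reported
// to Tracy when profiling is enabled, and forwards straight to the parent allocator when it is not.
fn exampleProfiledAllocation() !void {
    var tracy_allocator = tracyAllocator(std.heap.page_allocator);
    const profiled = tracy_allocator.allocator();

    const buf = try profiled.alloc(u8, 1024);
    defer profiled.free(buf);
}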

// This function only accepts comptime-known strings; see `messageCopy` for runtime strings.
pub inline fn message(comptime msg: [:0]const u8) void {
    if (!enable) return;
    ___tracy_emit_messageL(msg.ptr, if (enable_callstack) callstack_depth else 0);
}

// This function only accepts comptime-known strings; see `messageColorCopy` for runtime strings.
pub inline fn messageColor(comptime msg: [:0]const u8, color: u32) void {
    if (!enable) return;
    ___tracy_emit_messageLC(msg.ptr, color, if (enable_callstack) callstack_depth else 0);
}

pub inline fn messageCopy(msg: []const u8) void {
    if (!enable) return;
    ___tracy_emit_message(msg.ptr, msg.len, if (enable_callstack) callstack_depth else 0);
}

pub inline fn messageColorCopy(msg: [:0]const u8, color: u32) void {
    if (!enable) return;
    ___tracy_emit_messageC(msg.ptr, msg.len, color, if (enable_callstack) callstack_depth else 0);
}

pub inline fn frameMark() void {
    if (!enable) return;
    ___tracy_emit_frame_mark(null);
}

pub inline fn frameMarkNamed(comptime name: [:0]const u8) void {
    if (!enable) return;
    ___tracy_emit_frame_mark(name.ptr);
}

pub inline fn namedFrame(comptime name: [:0]const u8) Frame(name) {
    frameMarkStart(name);
    return .{};
}

pub fn Frame(comptime name: [:0]const u8) type {
    return struct {
        pub fn end(_: @This()) void {
            frameMarkEnd(name);
        }
    };
}
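
// A per-frame usage sketch for the frame-marking helpers above (the loop body and the "physics"
// name are illustrative): call `frameMark()` once per rendered frame, or wrap a named sub-frame
// with `namedFrame` so it shows up as its own timeline in Tracy.
fn exampleFrameLoopIteration() void {
    const physics = namedFrame("physics");
    // ... simulate one physics step ...
    physics.end();

    // ... record and submit rendering work, then mark the end of the frame ...
    frameMark();
}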

inline fn frameMarkStart(comptime name: [:0]const u8) void {
    if (!enable) return;
    ___tracy_emit_frame_mark_start(name.ptr);
}

inline fn frameMarkEnd(comptime name: [:0]const u8) void {
    if (!enable) return;
    ___tracy_emit_frame_mark_end(name.ptr);
}

extern fn ___tracy_emit_frame_mark_start(name: [*:0]const u8) void;
extern fn ___tracy_emit_frame_mark_end(name: [*:0]const u8) void;

inline fn alloc(ptr: [*]u8, len: usize) void {
    if (!enable) return;

    if (enable_callstack) {
        ___tracy_emit_memory_alloc_callstack(ptr, len, callstack_depth, 0);
    } else {
        ___tracy_emit_memory_alloc(ptr, len, 0);
    }
}

inline fn allocNamed(ptr: [*]u8, len: usize, comptime name: [:0]const u8) void {
    if (!enable) return;

    if (enable_callstack) {
        ___tracy_emit_memory_alloc_callstack_named(ptr, len, callstack_depth, 0, name.ptr);
    } else {
        ___tracy_emit_memory_alloc_named(ptr, len, 0, name.ptr);
    }
}

inline fn free(ptr: [*]u8) void {
    if (!enable) return;

    if (enable_callstack) {
        ___tracy_emit_memory_free_callstack(ptr, callstack_depth, 0);
    } else {
        ___tracy_emit_memory_free(ptr, 0);
    }
}

inline fn freeNamed(ptr: [*]u8, comptime name: [:0]const u8) void {
    if (!enable) return;

    if (enable_callstack) {
        ___tracy_emit_memory_free_callstack_named(ptr, callstack_depth, 0, name.ptr);
    } else {
        ___tracy_emit_memory_free_named(ptr, 0, name.ptr);
    }
}

extern fn ___tracy_emit_zone_begin(
    srcloc: *const ___tracy_source_location_data,
    active: c_int,
) ___tracy_c_zone_context;
extern fn ___tracy_emit_zone_begin_callstack(
    srcloc: *const ___tracy_source_location_data,
    depth: c_int,
    active: c_int,
) ___tracy_c_zone_context;
extern fn ___tracy_emit_zone_text(ctx: ___tracy_c_zone_context, txt: [*]const u8, size: usize) void;
extern fn ___tracy_emit_zone_name(ctx: ___tracy_c_zone_context, txt: [*]const u8, size: usize) void;
extern fn ___tracy_emit_zone_color(ctx: ___tracy_c_zone_context, color: u32) void;
extern fn ___tracy_emit_zone_value(ctx: ___tracy_c_zone_context, value: u64) void;
extern fn ___tracy_emit_zone_end(ctx: ___tracy_c_zone_context) void;
extern fn ___tracy_emit_memory_alloc(ptr: *const anyopaque, size: usize, secure: c_int) void;
extern fn ___tracy_emit_memory_alloc_callstack(ptr: *const anyopaque, size: usize, depth: c_int, secure: c_int) void;
extern fn ___tracy_emit_memory_free(ptr: *const anyopaque, secure: c_int) void;
extern fn ___tracy_emit_memory_free_callstack(ptr: *const anyopaque, depth: c_int, secure: c_int) void;
extern fn ___tracy_emit_memory_alloc_named(ptr: *const anyopaque, size: usize, secure: c_int, name: [*:0]const u8) void;
extern fn ___tracy_emit_memory_alloc_callstack_named(ptr: *const anyopaque, size: usize, depth: c_int, secure: c_int, name: [*:0]const u8) void;
extern fn ___tracy_emit_memory_free_named(ptr: *const anyopaque, secure: c_int, name: [*:0]const u8) void;
extern fn ___tracy_emit_memory_free_callstack_named(ptr: *const anyopaque, depth: c_int, secure: c_int, name: [*:0]const u8) void;
extern fn ___tracy_emit_message(txt: [*]const u8, size: usize, callstack: c_int) void;
extern fn ___tracy_emit_messageL(txt: [*:0]const u8, callstack: c_int) void;
extern fn ___tracy_emit_messageC(txt: [*]const u8, size: usize, color: u32, callstack: c_int) void;
extern fn ___tracy_emit_messageLC(txt: [*:0]const u8, color: u32, callstack: c_int) void;
extern fn ___tracy_emit_frame_mark(name: ?[*:0]const u8) void;

const ___tracy_source_location_data = extern struct {
    name: ?[*:0]const u8,
    function: [*:0]const u8,
    file: [*:0]const u8,
    line: u32,
    color: u32,
};

@@ -1,40 +0,0 @@
struct VertexUniform {
    matrix: mat4x4<f32>,
}
@binding(0) @group(0) var<uniform> ubo: VertexUniform;

struct VertexOut {
    @builtin(position) position_clip: vec4<f32>,
    @location(0) frag_uv: vec2<f32>,
    @interpolate(linear) @location(1) frag_bary: vec2<f32>,
    @interpolate(flat) @location(2) triangle_index: u32,
}

@vertex fn main(
    @builtin(vertex_index) vertex_index: u32,
    @location(0) position: vec4<f32>,
    @location(1) uv: vec2<f32>,
) -> VertexOut {
    var output : VertexOut;
    output.position_clip = ubo.matrix * position;
    output.frag_uv = uv;

    // Generates [0.5, 0.0], [1.0, 1.0], [0.0, 0.0] for the first, second, and third vertex of
    // each triangle.
    //
    // Equal to:
    //
    // if ((vertex_index+1u) % 3u == 0u) {
    //     output.frag_bary = vec2<f32>(0.0, 0.0);
    // } else if ((vertex_index+1u) % 3u == 1u) {
    //     output.frag_bary = vec2<f32>(0.5, 0.0);
    // } else {
    //     output.frag_bary = vec2<f32>(1.0, 1.0);
    // }
    //
    output.frag_bary = vec2<f32>(
        f32((vertex_index+1u) % 3u) * 0.5,
        1.0 - f32((((vertex_index + 3u) % 3u) + 1u) % 2u),
    );
    output.triangle_index = vertex_index / 3u;
    return output;
}