{mach,examples}: move examples to github.com/hexops/mach-examples

Signed-off-by: Stephen Gutekanst <stephen@hexops.com>
This commit is contained in:
Stephen Gutekanst 2022-10-16 12:20:30 -07:00
parent 1cbef1f7e1
commit 189997c279
77 changed files with 2 additions and 11016 deletions

View file

@ -1,7 +0,0 @@
Copyright 2022 Mitchell Hashimoto
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View file

@ -1 +0,0 @@
TODO: add license; the tessellator implementation comes from https://github.com/fubark/cosmic/blob/master/graphics/src/tessellator.zig

@ -1 +0,0 @@
Subproject commit b5a8404715e6cfc57e66c4a7cc0625e64b3b3c56

View file

@ -1,325 +0,0 @@
//! Implements a texture atlas (https://en.wikipedia.org/wiki/Texture_atlas).
//!
//! The implementation is based on "A Thousand Ways to Pack the Bin - A
//! Practical Approach to Two-Dimensional Rectangle Bin Packing" by Jukka
//! Jylänki. This specific implementation is based heavily on
//! Nicolas P. Rougier's freetype-gl project as well as Jukka's C++
//! implementation: https://github.com/juj/RectangleBinPack
//!
//! Limitations that are easy to fix, but I didn't need them:
//!
//! * Written data must be packed, no support for custom strides.
//! * Texture is always a square, no ability to set width != height. Note
//! that regions written INTO the atlas do not have to be square, only
//! the full atlas texture itself.
//!
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const testing = std.testing;
/// A free-space rectangle tracked by the skyline packer. Only the top edge
/// matters: the node spans [x, x+width) horizontally at height y.
const Node = struct {
    x: u32,
    y: u32,
    width: u32,
};

pub const Error = error{
    /// Atlas cannot fit the desired region. You must enlarge the atlas.
    AtlasFull,
};
/// A region within the texture atlas. These can be acquired using the
/// "reserve" function. A region reservation is required to write data.
pub const Region = struct {
    x: u32,
    y: u32,
    width: u32,
    height: u32,

    /// Convert this pixel-space region into normalized UV coordinates for an
    /// atlas texture of the given (square) size. The y axis is flipped so the
    /// returned origin is the bottom-left corner.
    pub fn getUVData(region: Region, atlas_float_size: f32) UVData {
        const left = @intToFloat(f32, region.x);
        const top = @intToFloat(f32, region.y + region.height);
        const w = @intToFloat(f32, region.width);
        const h = @intToFloat(f32, region.height);
        return UVData{
            .bottom_left = .{ left / atlas_float_size, (atlas_float_size - top) / atlas_float_size },
            .width_and_height = .{ w / atlas_float_size, h / atlas_float_size },
        };
    }
};
/// Normalized texture coordinates for a region: bottom-left origin plus
/// extent, both in [0, 1]. `extern` so it can be uploaded to the GPU directly.
pub const UVData = extern struct {
    bottom_left: @Vector(2, f32),
    width_and_height: @Vector(2, f32),
};
/// Returns a square texture atlas type whose texels are values of type T.
pub fn Atlas(comptime T: type) type {
    return struct {
        /// Data is the raw texture data.
        data: []T,

        /// Width and height of the atlas texture. The current implementation is
        /// always square so this is both the width and the height.
        size: u32 = 0,

        /// The nodes (rectangles) of available space.
        nodes: std.ArrayListUnmanaged(Node) = .{},

        const Self = @This();

        /// Allocate a size*size atlas. Caller must deinit with the same
        /// allocator. The atlas keeps a 1px border (see clear), so usable
        /// space is (size-2) x (size-2).
        pub fn init(alloc: Allocator, size: u32) !Self {
            var result = Self{
                .data = try alloc.alloc(T, size * size),
                .size = size,
                .nodes = .{},
            };
            // TODO: figure out optimal prealloc based on real world usage
            try result.nodes.ensureUnusedCapacity(alloc, 64);
            // This sets up our initial state
            result.clear();
            return result;
        }

        /// Release all memory. The atlas must not be used afterwards.
        pub fn deinit(self: *Self, alloc: Allocator) void {
            self.nodes.deinit(alloc);
            alloc.free(self.data);
            self.* = undefined;
        }

        /// Reserve a region within the atlas with the given width and height.
        ///
        /// May allocate to add a new rectangle into the internal list of rectangles.
        /// This will not automatically enlarge the texture if it is full.
        pub fn reserve(self: *Self, alloc: Allocator, width: u32, height: u32) !Region {
            // x, y are populated within :best_idx below
            var region: Region = .{ .x = 0, .y = 0, .width = width, .height = height };

            // Find the location in our nodes list to insert the new node for this region.
            var best_idx: usize = best_idx: {
                var best_height: u32 = std.math.maxInt(u32);
                var best_width: u32 = best_height;
                var chosen: ?usize = null;

                var i: usize = 0;
                while (i < self.nodes.items.len) : (i += 1) {
                    // Check if our region fits within this node.
                    const y = self.fit(i, width, height) orelse continue;

                    const node = self.nodes.items[i];
                    // Best-fit heuristic: prefer the placement with the lowest
                    // resulting top edge, breaking ties by the narrowest node.
                    if ((y + height) < best_height or
                        ((y + height) == best_height and
                        (node.width > 0 and node.width < best_width)))
                    {
                        chosen = i;
                        best_width = node.width;
                        best_height = y + height;
                        region.x = node.x;
                        region.y = y;
                    }
                }

                // If we never found a chosen index, the atlas cannot fit our region.
                break :best_idx chosen orelse return Error.AtlasFull;
            };

            // Insert our new node for this rectangle at the exact best index
            try self.nodes.insert(alloc, best_idx, .{
                .x = region.x,
                .y = region.y + height,
                .width = width,
            });

            // Optimize our rectangles: shrink or drop nodes that the newly
            // inserted node now overlaps horizontally.
            var i: usize = best_idx + 1;
            while (i < self.nodes.items.len) : (i += 1) {
                const node = &self.nodes.items[i];
                const prev = self.nodes.items[i - 1];
                if (node.x < (prev.x + prev.width)) {
                    const shrink = prev.x + prev.width - node.x;
                    node.x += shrink;
                    // Saturating subtraction: width clamps at 0 rather than wrapping.
                    node.width -|= shrink;
                    if (node.width <= 0) {
                        _ = self.nodes.orderedRemove(i);
                        // The while continue-expression re-adds 1, so this keeps
                        // i pointing at the element that shifted into this slot.
                        i -= 1;
                        continue;
                    }
                }

                break;
            }

            self.merge();
            return region;
        }

        /// Attempts to fit a rectangle of width x height into the node at idx.
        /// The return value is the y within the texture where the rectangle can be
        /// placed. The x is the same as the node.
        fn fit(self: Self, idx: usize, width: u32, height: u32) ?u32 {
            // If the added width exceeds our texture size, it doesn't fit.
            const node = self.nodes.items[idx];
            if ((node.x + width) > (self.size - 1)) return null;

            // Go node by node looking for space that can fit our width.
            var y = node.y;
            var i = idx;
            var width_left = width;
            while (width_left > 0) : (i += 1) {
                const n = self.nodes.items[i];
                // The placement y is the max top edge across all spanned nodes.
                if (n.y > y) y = n.y;

                // If the added height exceeds our texture size, it doesn't fit.
                if ((y + height) > (self.size - 1)) return null;

                width_left -|= n.width;
            }

            return y;
        }

        /// Merge adjacent nodes with the same y value.
        fn merge(self: *Self) void {
            var i: usize = 0;
            while (i < self.nodes.items.len - 1) {
                const node = &self.nodes.items[i];
                const next = self.nodes.items[i + 1];
                if (node.y == next.y) {
                    node.width += next.width;
                    _ = self.nodes.orderedRemove(i + 1);
                    // Stay on i: the merged node may also merge with its new neighbor.
                    continue;
                }

                i += 1;
            }
        }

        /// Set the data associated with a reserved region. The data is expected
        /// to fit exactly within the region.
        pub fn set(self: *Self, reg: Region, data: []const T) void {
            assert(reg.x < (self.size - 1));
            assert((reg.x + reg.width) <= (self.size - 1));
            assert(reg.y < (self.size - 1));
            assert((reg.y + reg.height) <= (self.size - 1));

            // Copy row by row; regions are packed (stride == reg.width).
            var i: u32 = 0;
            while (i < reg.height) : (i += 1) {
                const tex_offset = ((reg.y + i) * self.size) + reg.x;
                const data_offset = i * reg.width;
                std.mem.copy(
                    T,
                    self.data[tex_offset..],
                    data[data_offset .. data_offset + reg.width],
                );
            }
        }

        // Grow the texture to the new size, preserving all previously written data.
        pub fn grow(self: *Self, alloc: Allocator, size_new: u32) Allocator.Error!void {
            assert(size_new >= self.size);
            if (size_new == self.size) return;

            // Preserve our old values so we can copy the old data
            const data_old = self.data;
            const size_old = self.size;

            self.data = try alloc.alloc(T, size_new * size_new);
            defer alloc.free(data_old); // Only defer after new data succeeded
            self.size = size_new; // Only set size after new alloc succeeded
            std.mem.set(T, self.data, std.mem.zeroes(T));
            self.set(.{
                .x = 0, // don't bother skipping border so we can avoid strides
                .y = 1, // skip the first border row
                .width = size_old,
                .height = size_old - 2, // skip the last border row
            }, data_old[size_old..]);

            // Add our new rectangle for our added righthand space
            try self.nodes.append(alloc, .{
                .x = size_old - 1,
                .y = 1,
                .width = size_new - size_old,
            });
        }

        // Empty the atlas. This doesn't reclaim any previously allocated memory.
        pub fn clear(self: *Self) void {
            std.mem.set(T, self.data, std.mem.zeroes(T));
            self.nodes.clearRetainingCapacity();

            // Add our initial rectangle. This is the size of the full texture
            // and is the initial rectangle we fit our regions in. We keep a 1px border
            // to avoid artifacting when sampling the texture.
            self.nodes.appendAssumeCapacity(.{ .x = 1, .y = 1, .width = self.size - 2 });
        }
    };
}
test "exact fit" {
    const alloc = testing.allocator;
    var atlas = try Atlas(u32).init(alloc, 34); // +2 for 1px border
    defer atlas.deinit(alloc);

    // Consumes the entire usable interior, so nothing else fits.
    _ = try atlas.reserve(alloc, 32, 32);
    try testing.expectError(Error.AtlasFull, atlas.reserve(alloc, 1, 1));
}

test "doesnt fit" {
    const alloc = testing.allocator;
    var atlas = try Atlas(f32).init(alloc, 32);
    defer atlas.deinit(alloc);

    // doesn't fit due to border
    try testing.expectError(Error.AtlasFull, atlas.reserve(alloc, 32, 32));
}

test "fit multiple" {
    const alloc = testing.allocator;
    var atlas = try Atlas(u16).init(alloc, 32);
    defer atlas.deinit(alloc);

    // Two 15x30 regions fill the 30x30 interior side by side.
    _ = try atlas.reserve(alloc, 15, 30);
    _ = try atlas.reserve(alloc, 15, 30);
    try testing.expectError(Error.AtlasFull, atlas.reserve(alloc, 1, 1));
}

test "writing data" {
    const alloc = testing.allocator;
    var atlas = try Atlas(u64).init(alloc, 32);
    defer atlas.deinit(alloc);

    const reg = try atlas.reserve(alloc, 2, 2);
    atlas.set(reg, &[_]u64{ 1, 2, 3, 4 });

    // 33 because of the 1px border and so on
    try testing.expectEqual(@as(u64, 1), atlas.data[33]);
    try testing.expectEqual(@as(u64, 2), atlas.data[34]);
    try testing.expectEqual(@as(u64, 3), atlas.data[65]);
    try testing.expectEqual(@as(u64, 4), atlas.data[66]);
}

test "grow" {
    const alloc = testing.allocator;
    var atlas = try Atlas(u32).init(alloc, 4); // +2 for 1px border
    defer atlas.deinit(alloc);

    const reg = try atlas.reserve(alloc, 2, 2);
    try testing.expectError(Error.AtlasFull, atlas.reserve(alloc, 1, 1));

    // Write some data so we can verify that growing doesn't mess it up
    atlas.set(reg, &[_]u32{ 1, 2, 3, 4 });
    try testing.expectEqual(@as(u32, 1), atlas.data[5]);
    try testing.expectEqual(@as(u32, 2), atlas.data[6]);
    try testing.expectEqual(@as(u32, 3), atlas.data[9]);
    try testing.expectEqual(@as(u32, 4), atlas.data[10]);

    // Expand by exactly 1 should fit our new 1x1 block.
    try atlas.grow(alloc, atlas.size + 1);
    _ = try atlas.reserve(alloc, 1, 1);

    // Ensure our data is still set. Note the offsets change due to size.
    try testing.expectEqual(@as(u32, 1), atlas.data[atlas.size + 1]);
    try testing.expectEqual(@as(u32, 2), atlas.data[atlas.size + 2]);
    try testing.expectEqual(@as(u32, 3), atlas.data[atlas.size * 2 + 1]);
    try testing.expectEqual(@as(u32, 4), atlas.data[atlas.size * 2 + 2]);
}

View file

@ -1,67 +0,0 @@
const std = @import("std");
// std.DynamicBitSet doesn't behave like std.ArrayList, it will realloc on every resize.
// For now provide a bitset api and use std.ArrayList(bool) as the implementation.
pub const BitArrayList = struct {
    const Self = @This();

    buf: std.ArrayList(bool),

    /// Create an empty bit list backed by the given allocator.
    pub fn init(alloc: std.mem.Allocator) Self {
        return Self{ .buf = std.ArrayList(bool).init(alloc) };
    }

    /// Release the backing storage.
    pub fn deinit(self: Self) void {
        self.buf.deinit();
    }

    /// Drop all bits but keep the allocated capacity.
    pub fn clearRetainingCapacity(self: *Self) void {
        self.buf.clearRetainingCapacity();
    }

    /// Append a single false bit.
    pub fn appendUnset(self: *Self) !void {
        try self.buf.append(false);
    }

    /// Append a single true bit.
    pub fn appendSet(self: *Self) !void {
        try self.buf.append(true);
    }

    pub fn isSet(self: Self, idx: usize) bool {
        return self.buf.items[idx];
    }

    pub fn set(self: *Self, idx: usize) void {
        self.buf.items[idx] = true;
    }

    pub fn unset(self: *Self, idx: usize) void {
        self.buf.items[idx] = false;
    }

    /// Set every bit in [start, end) to true.
    pub fn setRange(self: *Self, start: usize, end: usize) void {
        std.mem.set(bool, self.buf.items[start..end], true);
    }

    /// Set every bit in [start, end) to false.
    pub fn unsetRange(self: *Self, start: usize, end: usize) void {
        std.mem.set(bool, self.buf.items[start..end], false);
    }

    /// Resize the list; newly exposed bits are uninitialized.
    pub fn resize(self: *Self, size: usize) !void {
        try self.buf.resize(size);
    }

    /// Resize the list and initialize any newly added bits to `fill`.
    pub fn resizeFillNew(self: *Self, size: usize, comptime fill: bool) !void {
        const old_len = self.buf.items.len;
        try self.resize(size);
        if (self.buf.items.len > old_len) {
            std.mem.set(bool, self.buf.items[old_len..], fill);
        }
    }
};

View file

@ -1,780 +0,0 @@
const std = @import("std");
const t = std.testing;
const BitArrayList = @import("bit_array_list.zig").BitArrayList;
const log = std.log.scoped(.compact);
/// Useful for keeping elements closer together in memory when you're using a bunch of insert/delete,
/// while keeping realloc to a minimum and preserving the element's initial insert index.
/// Backed by std.ArrayList.
/// Item ids are reused once removed.
/// Items are assigned an id and have O(1) access time by id.
/// TODO: Iterating can be just as fast as a dense array if CompactIdGenerator kept a sorted list of freed id ranges.
/// Although that also means delete ops would need to be O(logn).
pub fn CompactUnorderedList(comptime Id: type, comptime T: type) type {
    if (@typeInfo(Id).Int.signedness != .unsigned) {
        @compileError("Unsigned id type required.");
    }
    return struct {
        /// Hands out ids, reusing ids freed by remove().
        id_gen: CompactIdGenerator(Id),

        // TODO: Rename to buf.
        data: std.ArrayList(T),

        // Keep track of whether an item exists at id in order to perform iteration.
        // TODO: Rename to exists.
        // TODO: Maybe the user should provide this if it's important. It would also simplify the api and remove optional return types. It also means iteration won't be possible.
        data_exists: BitArrayList,

        const Self = @This();

        const Iterator = struct {
            // The current id should reflect the id of the value returned from next or nextPtr.
            cur_id: Id,
            list: *const Self,

            fn init(list: *const Self) @This() {
                return .{
                    // maxInt so the first wrapping +1 lands on id 0.
                    .cur_id = std.math.maxInt(Id),
                    .list = list,
                };
            }

            /// Rewind the iterator to the beginning.
            pub fn reset(self: *@This()) void {
                // Fixed: previously assigned to a nonexistent `idx` field,
                // which was a compile error once reset() was referenced.
                self.cur_id = std.math.maxInt(Id);
            }

            /// Advance to the next live item and return a pointer to it,
            /// or null when exhausted. Skips removed slots.
            pub fn nextPtr(self: *@This()) ?*T {
                self.cur_id +%= 1;
                while (true) {
                    if (self.cur_id < self.list.data.items.len) {
                        if (!self.list.data_exists.isSet(self.cur_id)) {
                            self.cur_id += 1;
                            continue;
                        } else {
                            return &self.list.data.items[self.cur_id];
                        }
                    } else {
                        return null;
                    }
                }
            }

            /// Advance to the next live item and return it by value,
            /// or null when exhausted. Skips removed slots.
            pub fn next(self: *@This()) ?T {
                self.cur_id +%= 1;
                while (true) {
                    if (self.cur_id < self.list.data.items.len) {
                        if (!self.list.data_exists.isSet(self.cur_id)) {
                            self.cur_id += 1;
                            continue;
                        } else {
                            return self.list.data.items[self.cur_id];
                        }
                    } else {
                        return null;
                    }
                }
            }
        };

        pub fn init(alloc: std.mem.Allocator) @This() {
            const new = @This(){
                .id_gen = CompactIdGenerator(Id).init(alloc, 0),
                .data = std.ArrayList(T).init(alloc),
                .data_exists = BitArrayList.init(alloc),
            };
            return new;
        }

        pub fn deinit(self: Self) void {
            self.id_gen.deinit();
            self.data.deinit();
            self.data_exists.deinit();
        }

        pub fn iterator(self: *const Self) Iterator {
            return Iterator.init(self);
        }

        // Returns the id of the item.
        pub fn add(self: *Self, item: T) !Id {
            const new_id = self.id_gen.getNextId();
            errdefer self.id_gen.deleteId(new_id);
            if (new_id >= self.data.items.len) {
                try self.data.resize(new_id + 1);
                try self.data_exists.resize(new_id + 1);
            }
            self.data.items[new_id] = item;
            self.data_exists.set(new_id);
            return new_id;
        }

        /// Overwrite the item at id. Does not check that the id is live.
        pub fn set(self: *Self, id: Id, item: T) void {
            self.data.items[id] = item;
        }

        /// Mark the slot dead and recycle the id. The slot's memory is kept.
        pub fn remove(self: *Self, id: Id) void {
            self.data_exists.unset(id);
            self.id_gen.deleteId(id);
        }

        pub fn clearRetainingCapacity(self: *Self) void {
            self.data_exists.clearRetainingCapacity();
            self.id_gen.clearRetainingCapacity();
            self.data.clearRetainingCapacity();
        }

        /// Return the item at id, or null if the slot is dead.
        pub fn get(self: Self, id: Id) ?T {
            if (self.has(id)) {
                return self.data.items[id];
            } else return null;
        }

        pub fn getNoCheck(self: Self, id: Id) T {
            return self.data.items[id];
        }

        /// Return a pointer to the item at id, or null if the slot is dead.
        pub fn getPtr(self: *const Self, id: Id) ?*T {
            if (self.has(id)) {
                return &self.data.items[id];
            } else return null;
        }

        pub fn getPtrNoCheck(self: Self, id: Id) *T {
            return &self.data.items[id];
        }

        /// Whether a live item exists at id.
        pub fn has(self: Self, id: Id) bool {
            return self.data_exists.isSet(id);
        }

        /// Number of live items (slots minus pending recycled ids).
        pub fn size(self: Self) usize {
            return self.data.items.len - self.id_gen.next_ids.count;
        }
    };
}
test "CompactUnorderedList" {
    {
        // General test.
        var arr = CompactUnorderedList(u32, u8).init(t.allocator);
        defer arr.deinit();

        _ = try arr.add(1);
        const id = try arr.add(2);
        _ = try arr.add(3);
        arr.remove(id);
        // Test adding to a removed slot.
        _ = try arr.add(4);
        const id2 = try arr.add(5);
        // Test iterator skips removed slot.
        arr.remove(id2);

        // Iteration is in id order, so the reused slot (4) comes second.
        var iter = arr.iterator();
        try t.expectEqual(iter.next(), 1);
        try t.expectEqual(iter.next(), 4);
        try t.expectEqual(iter.next(), 3);
        try t.expectEqual(iter.next(), null);
        try t.expectEqual(arr.size(), 3);
    }
    {
        // Empty test.
        var arr = CompactUnorderedList(u32, u8).init(t.allocator);
        defer arr.deinit();

        var iter = arr.iterator();
        try t.expectEqual(iter.next(), null);
        try t.expectEqual(arr.size(), 0);
    }
}
/// Buffer is a CompactUnorderedList.
pub fn CompactSinglyLinkedList(comptime Id: type, comptime T: type) type {
    const Null = CompactNull(Id);
    const Node = CompactSinglyLinkedListNode(Id, T);
    return struct {
        const Self = @This();

        /// Id of the head node, or Null (maxInt sentinel) when empty.
        first: Id,

        nodes: CompactUnorderedList(Id, Node),

        pub fn init(alloc: std.mem.Allocator) Self {
            return .{
                .first = Null,
                .nodes = CompactUnorderedList(Id, Node).init(alloc),
            };
        }

        pub fn deinit(self: Self) void {
            self.nodes.deinit();
        }

        /// Insert data after the node at id. Returns the new node's id, or
        /// error.NoElement if id is not a live node.
        pub fn insertAfter(self: *Self, id: Id, data: T) !Id {
            if (self.nodes.has(id)) {
                const new = try self.nodes.add(.{
                    .next = self.nodes.getNoCheck(id).next,
                    .data = data,
                });
                self.nodes.getPtrNoCheck(id).next = new;
                return new;
            } else return error.NoElement;
        }

        /// Unlink and free the node after id. Returns false when id is the
        /// tail, error.NoElement when id is not a live node.
        pub fn removeNext(self: *Self, id: Id) !bool {
            if (self.nodes.has(id)) {
                const at = self.nodes.getPtrNoCheck(id);
                if (at.next != Null) {
                    const next = at.next;
                    at.next = self.nodes.getNoCheck(next).next;
                    self.nodes.remove(next);
                    return true;
                } else return false;
            } else return error.NoElement;
        }

        pub fn getNode(self: *const Self, id: Id) ?Node {
            return self.nodes.get(id);
        }

        pub fn getNodeAssumeExists(self: *const Self, id: Id) Node {
            return self.nodes.getNoCheck(id);
        }

        pub fn get(self: *const Self, id: Id) ?T {
            if (self.nodes.has(id)) {
                return self.nodes.getNoCheck(id).data;
            } else return null;
        }

        pub fn getNoCheck(self: *const Self, id: Id) T {
            return self.nodes.getNoCheck(id).data;
        }

        /// Walk idx links from the head and return the id found there.
        /// Asserts (via `.?`) that the list is long enough.
        pub fn getAt(self: *const Self, idx: usize) Id {
            var i: u32 = 0;
            // Fixed: `first` is a plain Id (Null sentinel), not an optional, so
            // the original `self.first.?` was a compile error when instantiated.
            var cur = self.first;
            while (i != idx) : (i += 1) {
                cur = self.getNext(cur).?;
            }
            return cur;
        }

        pub fn getFirst(self: *const Self) Id {
            return self.first;
        }

        pub fn getNext(self: Self, id: Id) ?Id {
            if (self.nodes.has(id)) {
                return self.nodes.getNoCheck(id).next;
            } else return null;
        }

        /// Insert data at the head. Returns the new head id.
        pub fn prepend(self: *Self, data: T) !Id {
            const node = Node{
                .next = self.first,
                .data = data,
            };
            self.first = try self.nodes.add(node);
            return self.first;
        }

        /// Remove the head node. Returns false when the list is empty.
        pub fn removeFirst(self: *Self) bool {
            if (self.first != Null) {
                const next = self.getNodeAssumeExists(self.first).next;
                self.nodes.remove(self.first);
                self.first = next;
                return true;
            } else return false;
        }
    };
}
test "CompactSinglyLinkedList" {
    const Null = CompactNull(u32);
    {
        // General test.
        var list = CompactSinglyLinkedList(u32, u8).init(t.allocator);
        defer list.deinit();

        const first = try list.prepend(1);
        var last = first;
        last = try list.insertAfter(last, 2);
        last = try list.insertAfter(last, 3);
        // Test remove next.
        _ = try list.removeNext(first);
        // Test remove first.
        _ = list.removeFirst();

        // Only the value 3 remains.
        var id = list.getFirst();
        try t.expectEqual(list.get(id), 3);
        id = list.getNext(id).?;
        try t.expectEqual(id, Null);
    }
    {
        // Empty test.
        var list = CompactSinglyLinkedList(u32, u8).init(t.allocator);
        defer list.deinit();
        try t.expectEqual(list.getFirst(), Null);
    }
}
/// Id should be an unsigned integer type.
/// Max value of Id is used to indicate null. (An optional would increase the struct size.)
pub fn CompactSinglyLinkedListNode(comptime Id: type, comptime T: type) type {
    return struct {
        // Id of the following node, or CompactNull(Id) for the tail.
        next: Id,
        data: T,
    };
}
/// The sentinel "null" id for Id: its maximum value. Using a sentinel instead
/// of an optional keeps node structs small.
pub fn CompactNull(comptime Id: type) Id {
    return std.math.maxInt(Id);
}
/// Stores multiple linked lists together in memory.
pub fn CompactManySinglyLinkedList(comptime ListId: type, comptime Index: type, comptime T: type) type {
    const Node = CompactSinglyLinkedListNode(Index, T);
    const Null = CompactNull(Index);
    return struct {
        const Self = @This();

        const List = struct {
            // Head node id; null or the Null sentinel both mean "empty".
            head: ?Index,
        };

        nodes: CompactUnorderedList(Index, Node),
        lists: CompactUnorderedList(ListId, List),

        pub fn init(alloc: std.mem.Allocator) Self {
            return .{
                .nodes = CompactUnorderedList(Index, Node).init(alloc),
                .lists = CompactUnorderedList(ListId, List).init(alloc),
            };
        }

        pub fn deinit(self: Self) void {
            self.nodes.deinit();
            self.lists.deinit();
        }

        // Returns detached item.
        pub fn detachAfter(self: *Self, id: Index) !Index {
            if (self.nodes.has(id)) {
                const item = self.getNodePtrAssumeExists(id);
                const detached = item.next;
                item.next = Null;
                return detached;
            } else return error.NoElement;
        }

        /// Insert data after node id. Returns the new node's index.
        pub fn insertAfter(self: *Self, id: Index, data: T) !Index {
            if (self.nodes.has(id)) {
                const new = try self.nodes.add(.{
                    .next = self.nodes.getNoCheck(id).next,
                    .data = data,
                });
                self.nodes.getPtrNoCheck(id).next = new;
                return new;
            } else return error.NoElement;
        }

        /// Re-attach a previously detached chain after node id.
        pub fn setDetachedToEnd(self: *Self, id: Index, detached_id: Index) void {
            const item = self.nodes.getPtr(id).?;
            item.next = detached_id;
        }

        pub fn addListWithDetachedHead(self: *Self, id: Index) !ListId {
            return self.lists.add(.{ .head = id });
        }

        pub fn addListWithHead(self: *Self, data: T) !ListId {
            const item_id = try self.addDetachedItem(data);
            return self.addListWithDetachedHead(item_id);
        }

        pub fn addEmptyList(self: *Self) !ListId {
            return self.lists.add(.{ .head = Null });
        }

        /// Add a node that is not linked into any list yet.
        pub fn addDetachedItem(self: *Self, data: T) !Index {
            return try self.nodes.add(.{
                .next = Null,
                .data = data,
            });
        }

        /// Prepend data to the given list; returns the new head index.
        /// Fixed: the original referenced a nonexistent `getList` helper and a
        /// nonexistent `first` field on List (the field is `head`).
        pub fn prepend(self: *Self, list_id: ListId, data: T) !Index {
            const list = self.getListPtr(list_id);
            const item = Node{
                .next = list.head orelse Null,
                .data = data,
            };
            list.head = try self.nodes.add(item);
            return list.head.?;
        }

        /// Remove the head of the given list. Returns false if the list is empty.
        pub fn removeFirst(self: *Self, list_id: ListId) bool {
            const list = self.getListPtr(list_id);
            const head = list.head orelse return false;
            // Null sentinel also means empty (see addEmptyList).
            if (head == Null) return false;
            const next = self.getNextIdNoCheck(head);
            self.nodes.remove(head);
            list.head = next;
            return true;
        }

        pub fn removeNext(self: *Self, id: Index) !bool {
            if (self.nodes.has(id)) {
                const at = self.nodes.getPtrNoCheck(id);
                if (at.next != Null) {
                    const next = at.next;
                    at.next = self.nodes.getNoCheck(next).next;
                    self.nodes.remove(next);
                    return true;
                } else return false;
            } else return error.NoElement;
        }

        pub fn removeDetached(self: *Self, id: Index) void {
            self.nodes.remove(id);
        }

        /// Asserts (via `.?`) that the list exists.
        /// Fixed: the original returned the optional from getPtr without unwrapping.
        pub fn getListPtr(self: *const Self, id: ListId) *List {
            return self.lists.getPtr(id).?;
        }

        pub fn getListHead(self: *const Self, id: ListId) ?Index {
            if (self.lists.has(id)) {
                return self.lists.getNoCheck(id).head;
            } else return null;
        }

        /// Linear search through a list; returns the first node id for which
        /// pred returns true, or null.
        pub fn findInList(self: Self, list_id: ListId, ctx: anytype, pred: fn (ctx: @TypeOf(ctx), buf: Self, item_id: Index) bool) ?Index {
            var id = self.getListHead(list_id) orelse return null;
            while (id != Null) {
                if (pred(ctx, self, id)) {
                    return id;
                }
                id = self.getNextIdNoCheck(id);
            }
            return null;
        }

        pub fn has(self: Self, id: Index) bool {
            return self.nodes.has(id);
        }

        pub fn getNode(self: Self, id: Index) ?Node {
            return self.nodes.get(id);
        }

        pub fn getNodeAssumeExists(self: Self, id: Index) Node {
            return self.nodes.getNoCheck(id);
        }

        pub fn getNodePtr(self: Self, id: Index) ?*Node {
            return self.nodes.getPtr(id);
        }

        pub fn getNodePtrAssumeExists(self: Self, id: Index) *Node {
            return self.nodes.getPtrNoCheck(id);
        }

        pub fn get(self: Self, id: Index) ?T {
            if (self.nodes.has(id)) {
                return self.nodes.getNoCheck(id).data;
            } else return null;
        }

        pub fn getNoCheck(self: Self, id: Index) T {
            return self.nodes.getNoCheck(id).data;
        }

        /// Walk idx links from the list head. Asserts the list is long enough.
        pub fn getIdAt(self: Self, list_id: ListId, idx: usize) Index {
            var i: u32 = 0;
            var cur: Index = self.getListHead(list_id).?;
            while (i != idx) : (i += 1) {
                cur = self.getNextId(cur).?;
            }
            return cur;
        }

        pub fn getPtr(self: Self, id: Index) ?*T {
            if (self.nodes.has(id)) {
                return &self.nodes.getPtrNoCheck(id).data;
            } else return null;
        }

        pub fn getPtrNoCheck(self: Self, id: Index) *T {
            return &self.nodes.getPtrNoCheck(id).data;
        }

        pub fn getNextId(self: Self, id: Index) ?Index {
            if (self.nodes.get(id)) |node| {
                return node.next;
            } else return null;
        }

        pub fn getNextIdNoCheck(self: Self, id: Index) Index {
            return self.nodes.getNoCheck(id).next;
        }

        /// Node after id, or null at the tail or for a dead id.
        /// Fixed: the original called a nonexistent `getNext` and did not guard
        /// against the Null sentinel before the lookup.
        pub fn getNextNode(self: Self, id: Index) ?Node {
            const next = self.getNextId(id) orelse return null;
            if (next == Null) return null;
            return self.getNode(next);
        }

        /// Data of the node after id, or null at the tail or for a dead id.
        pub fn getNextData(self: *const Self, id: Index) ?T {
            const next = self.getNextId(id) orelse return null;
            if (next == Null) return null;
            return self.get(next);
        }
    };
}
test "CompactManySinglyLinkedList" {
    const Null = CompactNull(u32);
    var lists = CompactManySinglyLinkedList(u32, u32, u32).init(t.allocator);
    defer lists.deinit();

    const list_id = try lists.addListWithHead(10);
    const head = lists.getListHead(list_id).?;

    // Test detachAfter: the detached node is returned and the head's next
    // link becomes the Null sentinel.
    const after = try lists.insertAfter(head, 20);
    try t.expectEqual(lists.getNextIdNoCheck(head), after);
    try t.expectEqual(lists.detachAfter(head), after);
    try t.expectEqual(lists.getNextIdNoCheck(head), Null);
}
/// Reuses deleted ids.
/// Uses a fifo id buffer to get the next id if not empty, otherwise it uses the next id counter.
pub fn CompactIdGenerator(comptime T: type) type {
    return struct {
        const Self = @This();

        /// The lowest id this generator ever hands out.
        start_id: T,
        /// The next never-before-used id.
        next_default_id: T,
        /// FIFO of freed ids waiting to be reused.
        next_ids: std.fifo.LinearFifo(T, .Dynamic),

        pub fn init(alloc: std.mem.Allocator, start_id: T) Self {
            return Self{
                .start_id = start_id,
                .next_default_id = start_id,
                .next_ids = std.fifo.LinearFifo(T, .Dynamic).init(alloc),
            };
        }

        /// Return the id getNextId would hand out, without consuming it.
        pub fn peekNextId(self: Self) T {
            if (self.next_ids.readableLength() > 0) {
                return self.next_ids.peekItem(0);
            }
            return self.next_default_id;
        }

        /// Hand out the next id: a recycled one if any, else a fresh counter value.
        pub fn getNextId(self: *Self) T {
            if (self.next_ids.readItem()) |recycled| {
                return recycled;
            }
            const fresh = self.next_default_id;
            self.next_default_id += 1;
            return fresh;
        }

        /// Forget all handed-out and recycled ids without freeing fifo storage.
        pub fn clearRetainingCapacity(self: *Self) void {
            self.next_default_id = self.start_id;
            self.next_ids.head = 0;
            self.next_ids.count = 0;
        }

        /// Queue id for reuse. Assumes the fifo write cannot fail.
        pub fn deleteId(self: *Self, id: T) void {
            self.next_ids.writeItem(id) catch unreachable;
        }

        pub fn deinit(self: Self) void {
            self.next_ids.deinit();
        }
    };
}
test "CompactIdGenerator" {
    var gen = CompactIdGenerator(u16).init(t.allocator, 1);
    defer gen.deinit();
    try t.expectEqual(gen.getNextId(), 1);
    try t.expectEqual(gen.getNextId(), 2);
    // A deleted id is handed out again before the counter advances.
    gen.deleteId(1);
    try t.expectEqual(gen.getNextId(), 1);
    try t.expectEqual(gen.getNextId(), 3);
}
/// Holds linked lists in a compact buffer. Does not keep track of list heads.
/// This might replace CompactManySinglyLinkedList.
pub fn CompactSinglyLinkedListBuffer(comptime Id: type, comptime T: type) type {
    const Null = comptime CompactNull(Id);
    const OptId = Id;
    return struct {
        const Self = @This();

        pub const Node = CompactSinglyLinkedListNode(Id, T);

        nodes: CompactUnorderedList(Id, Node),

        pub fn init(alloc: std.mem.Allocator) Self {
            return .{
                .nodes = CompactUnorderedList(Id, Node).init(alloc),
            };
        }

        pub fn deinit(self: Self) void {
            self.nodes.deinit();
        }

        pub fn clearRetainingCapacity(self: *Self) void {
            self.nodes.clearRetainingCapacity();
        }

        pub fn getNode(self: Self, idx: Id) ?Node {
            return self.nodes.get(idx);
        }

        pub fn getNodeNoCheck(self: Self, idx: Id) Node {
            return self.nodes.getNoCheck(idx);
        }

        pub fn getNodePtrNoCheck(self: Self, idx: Id) *Node {
            return self.nodes.getPtrNoCheck(idx);
        }

        pub fn iterator(self: Self) CompactUnorderedList(Id, Node).Iterator {
            return self.nodes.iterator();
        }

        /// Id of the first live node. Assumes the buffer is non-empty.
        pub fn iterFirstNoCheck(self: Self) Id {
            var iter = self.nodes.iterator();
            _ = iter.next();
            return iter.cur_id;
        }

        /// Value of the first live node. Assumes the buffer is non-empty.
        pub fn iterFirstValueNoCheck(self: Self) T {
            var iter = self.nodes.iterator();
            return iter.next().?.data;
        }

        pub fn size(self: Self) usize {
            return self.nodes.size();
        }

        /// Walk from id to the end of its chain and return the last node's id,
        /// or null if id is Null or not a live node.
        pub fn getLast(self: Self, id: Id) ?Id {
            if (id == Null) {
                return null;
            }
            if (self.nodes.has(id)) {
                var cur = id;
                while (cur != Null) {
                    const next = self.getNextNoCheck(cur);
                    if (next == Null) {
                        return cur;
                    }
                    cur = next;
                }
                unreachable;
            } else return null;
        }

        pub fn get(self: Self, id: Id) ?T {
            if (self.nodes.has(id)) {
                return self.nodes.getNoCheck(id).data;
            } else return null;
        }

        pub fn getNoCheck(self: Self, idx: Id) T {
            return self.nodes.getNoCheck(idx).data;
        }

        pub fn getPtrNoCheck(self: Self, idx: Id) *T {
            return &self.nodes.getPtrNoCheck(idx).data;
        }

        pub fn getNextNoCheck(self: Self, id: Id) OptId {
            return self.nodes.getNoCheck(id).next;
        }

        pub fn getNext(self: Self, id: Id) ?OptId {
            if (self.nodes.has(id)) {
                return self.nodes.getNoCheck(id).next;
            } else return null;
        }

        /// Adds a new head node.
        pub fn add(self: *Self, data: T) !Id {
            return try self.nodes.add(.{
                .next = Null,
                .data = data,
            });
        }

        pub fn insertBeforeHead(self: *Self, head_id: Id, data: T) !Id {
            if (self.nodes.has(head_id)) {
                return try self.nodes.add(.{
                    .next = head_id,
                    .data = data,
                });
            } else return error.NoElement;
        }

        pub fn insertBeforeHeadNoCheck(self: *Self, head_id: Id, data: T) !Id {
            return try self.nodes.add(.{
                .next = head_id,
                .data = data,
            });
        }

        pub fn insertAfter(self: *Self, id: Id, data: T) !Id {
            if (self.nodes.has(id)) {
                const new = try self.nodes.add(.{
                    .next = self.nodes.getNoCheck(id).next,
                    .data = data,
                });
                self.nodes.getPtrNoCheck(id).next = new;
                return new;
            } else return error.NoElement;
        }

        /// Unlink and free the node after id, if any.
        pub fn removeAfter(self: *Self, id: Id) !void {
            if (self.nodes.has(id)) {
                const next = self.getNextNoCheck(id);
                if (next != Null) {
                    const next_next = self.getNextNoCheck(next);
                    // Fixed: getNoCheck returns the Node by value, so the
                    // original wrote the new next link into a temporary copy
                    // and never actually unlinked the node.
                    self.nodes.getPtrNoCheck(id).next = next_next;
                    self.nodes.remove(next);
                }
            } else return error.NoElement;
        }

        pub fn removeAssumeNoPrev(self: *Self, id: Id) !void {
            if (self.nodes.has(id)) {
                self.nodes.remove(id);
            } else return error.NoElement;
        }
    };
}
test "CompactSinglyLinkedListBuffer" {
    var buf = CompactSinglyLinkedListBuffer(u32, u32).init(t.allocator);
    defer buf.deinit();

    const head = try buf.add(1);
    try t.expectEqual(buf.get(head).?, 1);
    try t.expectEqual(buf.getNoCheck(head), 1);
    try t.expectEqual(buf.getNode(head).?.data, 1);
    try t.expectEqual(buf.getNodeNoCheck(head).data, 1);

    const second = try buf.insertAfter(head, 2);
    try t.expectEqual(buf.getNodeNoCheck(head).next, second);
    try t.expectEqual(buf.getNoCheck(second), 2);

    // Removing the head leaves it unreachable via get.
    try buf.removeAssumeNoPrev(head);
    try t.expectEqual(buf.get(head), null);
}

File diff suppressed because it is too large Load diff

View file

@ -1,170 +0,0 @@
const std = @import("std");
const ArrayList = std.ArrayList;
const gpu = @import("gpu");
const App = @import("main.zig").App;
const zm = @import("zmath");
const UVData = @import("atlas.zig").UVData;
const Vec2 = @Vector(2, f32);
/// A single vertex: position plus texture coordinates. `extern` so the layout
/// matches the GPU vertex buffer layout below.
pub const Vertex = extern struct {
    pos: @Vector(4, f32),
    uv: Vec2,
};

// Attribute descriptions matching Vertex: pos at shader location 0,
// uv at shader location 1.
const VERTEX_ATTRIBUTES = [_]gpu.VertexAttribute{
    .{ .format = .float32x4, .offset = @offsetOf(Vertex, "pos"), .shader_location = 0 },
    .{ .format = .float32x2, .offset = @offsetOf(Vertex, "uv"), .shader_location = 1 },
};

/// Buffer layout for a tightly packed, per-vertex stream of Vertex values.
pub const VERTEX_BUFFER_LAYOUT = gpu.VertexBufferLayout{
    .array_stride = @sizeOf(Vertex),
    .step_mode = .vertex,
    .attribute_count = VERTEX_ATTRIBUTES.len,
    .attributes = &VERTEX_ATTRIBUTES,
};
/// Vertex-stage uniform data: a single transform matrix.
pub const VertexUniform = struct {
    mat: zm.Mat,
};

// The kind of gkurve primitive a triangle represents. The integer values are
// presumably mirrored by the shader — TODO confirm against the WGSL source.
const GkurveType = enum(u32) {
    concave = 0,
    convex = 1,
    filled = 2,
};

/// Per-triangle fragment-stage uniform data.
pub const FragUniform = extern struct {
    type: GkurveType = .filled,
    // Padding for struct alignment to 16 bytes (minimum in WebGPU uniform).
    padding: @Vector(3, f32) = undefined,
    blend_color: @Vector(4, f32) = @Vector(4, f32){ 1, 1, 1, 1 },
};
/// Append an equilateral triangle (side length `scale`, bottom-left corner at
/// `position`) to the app's vertex and fragment-uniform lists, and flag both
/// GPU buffers for re-upload.
pub fn equilateralTriangle(app: *App, position: Vec2, scale: f32, uniform: FragUniform, uv_data: UVData) !void {
    // Height of an equilateral triangle: side * sqrt(3)/2 == side * sqrt(0.75).
    const triangle_height = scale * @sqrt(0.75);

    const apex = Vertex{
        .pos = .{ position[0] + scale / 2, position[1] + triangle_height, 0, 1 },
        .uv = uv_data.bottom_left + uv_data.width_and_height * Vec2{ 0.5, 1 },
    };
    const bottom_left = Vertex{
        .pos = .{ position[0], position[1], 0, 1 },
        .uv = uv_data.bottom_left,
    };
    const bottom_right = Vertex{
        .pos = .{ position[0] + scale, position[1], 0, 1 },
        .uv = uv_data.bottom_left + uv_data.width_and_height * Vec2{ 1, 0 },
    };

    try app.vertices.appendSlice(&[3]Vertex{ apex, bottom_left, bottom_right });
    try app.fragment_uniform_list.append(uniform);

    app.update_vertex_buffer = true;
    app.update_frag_uniform_buffer = true;
}
/// Appends an axis-aligned quad (two triangles) with bottom-left corner at
/// `position` and size `scale`, textured with `uv_data`'s sub-rectangle.
/// Adds two identical FragUniform entries (one per triangle) and marks both
/// GPU buffers dirty.
pub fn quad(app: *App, position: Vec2, scale: Vec2, uniform: FragUniform, uv_data: UVData) !void {
    // UV corners derived from the atlas sub-rectangle.
    const uv_bl = uv_data.bottom_left;
    const uv_br = uv_bl + uv_data.width_and_height * Vec2{ 1, 0 };
    const uv_tl = uv_bl + uv_data.width_and_height * Vec2{ 0, 1 };
    const uv_tr = uv_bl + uv_data.width_and_height;
    // Position corners.
    const bottom_left = Vertex{ .pos = .{ position[0], position[1], 0, 1 }, .uv = uv_bl };
    const bottom_right = Vertex{ .pos = .{ position[0] + scale[0], position[1], 0, 1 }, .uv = uv_br };
    const top_left = Vertex{ .pos = .{ position[0], position[1] + scale[1], 0, 1 }, .uv = uv_tl };
    const top_right = Vertex{ .pos = .{ position[0] + scale[0], position[1] + scale[1], 0, 1 }, .uv = uv_tr };
    // Two triangles covering the quad, same winding/order as before.
    try app.vertices.appendSlice(&[6]Vertex{
        top_left,
        bottom_left,
        bottom_right,
        top_right,
        top_left,
        bottom_right,
    });
    try app.fragment_uniform_list.appendSlice(&.{ uniform, uniform });
    app.update_vertex_buffer = true;
    app.update_frag_uniform_buffer = true;
}
/// Appends a gkurve approximation of a circle centered at `position`:
/// 18 vertices = 6 triangles — 2 filled inner triangles spanning the four
/// cardinal points, plus 4 convex bezier triangles rounding out the edges.
/// Pushes one FragUniform per triangle (order must match the vertex order
/// below) and marks both GPU buffers dirty.
pub fn circle(app: *App, position: Vec2, radius: f32, blend_color: @Vector(4, f32), uv_data: UVData) !void {
    // Cardinal points on the circle (bottom, top, left, right).
    const low_mid = Vertex{
        .pos = .{ position[0], position[1] - radius, 0, 1 },
        .uv = uv_data.bottom_left + uv_data.width_and_height * Vec2{ 0.5, 0 },
    };
    const high_mid = Vertex{
        .pos = .{ position[0], position[1] + radius, 0, 1 },
        .uv = uv_data.bottom_left + uv_data.width_and_height * Vec2{ 0.5, 1 },
    };
    const mid_left = Vertex{
        .pos = .{ position[0] - radius, position[1], 0, 1 },
        .uv = uv_data.bottom_left + uv_data.width_and_height * Vec2{ 0, 0.5 },
    };
    const mid_right = Vertex{
        .pos = .{ position[0] + radius, position[1], 0, 1 },
        .uv = uv_data.bottom_left + uv_data.width_and_height * Vec2{ 1, 0.5 },
    };
    // Diagonal bezier control points, pulled slightly inside the corner
    // (0.95 appears to be a hand-tuned factor — TODO confirm intent).
    const p = 0.95 * radius;
    const high_right = Vertex{
        .pos = .{ position[0] + p, position[1] + p, 0, 1 },
        .uv = uv_data.bottom_left + uv_data.width_and_height * Vec2{ 1, 0.75 },
    };
    const high_left = Vertex{
        .pos = .{ position[0] - p, position[1] + p, 0, 1 },
        .uv = uv_data.bottom_left + uv_data.width_and_height * Vec2{ 0, 0.75 },
    };
    const low_right = Vertex{
        .pos = .{ position[0] + p, position[1] - p, 0, 1 },
        .uv = uv_data.bottom_left + uv_data.width_and_height * Vec2{ 1, 0.25 },
    };
    const low_left = Vertex{
        .pos = .{ position[0] - p, position[1] - p, 0, 1 },
        .uv = uv_data.bottom_left + uv_data.width_and_height * Vec2{ 0, 0.25 },
    };
    try app.vertices.appendSlice(&[_]Vertex{
        // Triangle 1 (filled): bottom half of the inner quad.
        low_mid,
        mid_right,
        high_mid,
        // Triangle 2 (filled): top half of the inner quad.
        high_mid,
        mid_left,
        low_mid,
        // Triangles 3-6 (convex bezier edges); first vertex of each is the
        // off-curve control point.
        low_right,
        mid_right,
        low_mid,
        high_right,
        high_mid,
        mid_right,
        high_left,
        mid_left,
        high_mid,
        low_left,
        low_mid,
        mid_left,
    });
    // One uniform per triangle, in the same order as the triangles above:
    // 2 filled, then 4 convex.
    try app.fragment_uniform_list.appendSlice(&[_]FragUniform{
        .{
            .type = .filled,
            .blend_color = blend_color,
        },
        .{
            .type = .filled,
            .blend_color = blend_color,
        },
        .{
            .type = .convex,
            .blend_color = blend_color,
        },
        .{
            .type = .convex,
            .blend_color = blend_color,
        },
        .{
            .type = .convex,
            .blend_color = blend_color,
        },
        .{
            .type = .convex,
            .blend_color = blend_color,
        },
    });
    app.update_vertex_buffer = true;
    app.update_frag_uniform_buffer = true;
}

View file

@ -1,73 +0,0 @@
// Per-triangle uniform; must match draw.zig's FragUniform layout:
// type_ is the GkurveType value (0 = concave, 1 = convex, 2 = filled).
struct FragUniform {
    type_: u32,
    padding: vec3<f32>,
    blend_color: vec4<f32>,
}
// One FragUniform per triangle, indexed by triangle_index below.
@binding(1) @group(0) var<storage> ubos: array<FragUniform>;
@binding(2) @group(0) var mySampler: sampler;
@binding(3) @group(0) var myTexture: texture_2d<f32>;
// Renders a gkurve triangle: filled triangles pass through; bezier triangles
// are clipped against the quadratic curve y = x^2 in barycentric space
// (Loop/Blinn-style signed distance), with the sign flipped for one kind.
@fragment fn main(
    @location(0) uv: vec2<f32>,
    @interpolate(linear) @location(1) bary: vec2<f32>,
    @interpolate(flat) @location(2) triangle_index: u32,
) -> @location(0) vec4<f32> {
    // Example 1: Visualize barycentric coordinates:
    // return vec4<f32>(bary.x, bary.y, 0.0, 1.0);
    // return vec4<f32>(0.0, bary.x, 0.0, 1.0); // [1.0 (bottom-left vertex), 0.0 (bottom-right vertex)]
    // return vec4<f32>(0.0, bary.y, 0.0, 1.0); // [1.0 (bottom-left vertex), 1.0 (top-right face)]
    // Example 2: Render gkurve primitives
    // Concave (inverted quadratic bezier curve)
    // inversion = -1.0;
    // Convex (inverted quadratic bezier curve)
    // inversion = 1.0;
    // NOTE(review): select(false_val, true_val, cond) yields -1.0 when
    // type_ == 1u (convex per the Zig enum), which contradicts the comment
    // above saying concave should get -1.0 — confirm which mapping is intended.
    let inversion = select( 1.0, -1.0, ubos[triangle_index].type_ == 1u);
    // Texture uvs
    // (These two could be cut with vec2(0.0,1.0) + uv * vec2(1.0,-1.0))
    var correct_uv = uv;
    correct_uv.y = 1.0 - correct_uv.y;
    var color = textureSample(myTexture, mySampler, correct_uv) * ubos[triangle_index].blend_color;
    // Gradients
    let px = dpdx(bary.xy);
    let py = dpdy(bary.xy);
    // Chain rule
    let fx = (2.0 * bary.x) * px.x - px.y;
    let fy = (2.0 * bary.x) * py.x - py.y;
    // Signed distance
    var dist = (bary.x * bary.x - bary.y) / sqrt(fx * fx + fy * fy);
    dist *= inversion;
    // NOTE(review): magic smoothing/scale factor — purpose not evident here.
    dist /= 300.0;
    // Border rendering.
    let border_color = vec4<f32>(1.0, 0.0, 0.0, 1.0);
    let border_width = 3.0;
    let border_smoothing = 1.0;
    // if (dist > 0.0 && dist <= 0.1) { return vec4<f32>(1.0, 0.0, 0.0, 1.0); }
    // if (dist > 0.2 && dist <= 0.3) { return vec4<f32>(0.0, 0.0, 1.0, 1.0); }
    // // Wireframe rendering.
    // let right_face_dist = bary.y;
    // let bottom_face_dist = bary.x-bary.y;
    // let left_face_dist = 1.0 - ((bottom_face_dist*2.0) + bary.y);
    // let normal_bary = vec3<f32>(right_face_dist, bottom_face_dist, left_face_dist);
    // let fwd = fwidth(normal_bary);
    // let w = smoothstep(border_width * fwd, (border_width + border_smoothing) * fwd, normal_bary);
    // let width = 1.0 - min(min(w.x, w.y), w.z);
    // let epsilon = 0.001;
    // if (right_face_dist >= -epsilon && right_face_dist <= width
    //     || left_face_dist >= -epsilon && left_face_dist <= width
    //     || bottom_face_dist >= -epsilon && bottom_face_dist <= width) {
    //     color = mix(color, border_color, width);
    //     if (dist < 0.0 && ubos[triangle_index].type_ != 2u) {
    //         return vec4<f32>(border_color.rgb, width);
    //     }
    // }
    // Filled triangles (type_ == 2u) always keep their color; bezier triangles
    // are multiplied to black/zero on the outside of the curve (dist < 0).
    return color * f32(dist >= 0.0 || ubos[triangle_index].type_ == 2u);
}

View file

@ -1,146 +0,0 @@
//! At the moment we use only rgba32, but maybe it could be useful to use also other types
const std = @import("std");
const ft = @import("freetype");
const zigimg = @import("zigimg");
const Atlas = @import("atlas.zig").Atlas;
const AtlasErr = @import("atlas.zig").Error;
const UVData = @import("atlas.zig").UVData;
const App = @import("main.zig").App;
const draw = @import("draw.zig");
/// Fixed-size bitmap text renderer: rasterizes glyphs via FreeType into the
/// app's texture atlas and emits textured quads through draw.quad.
pub const Label = @This();

const Vec2 = @Vector(2, f32);
const Vec4 = @Vector(4, f32);

// Cached per-codepoint data: atlas UV rectangle plus FreeType metrics.
const GlyphInfo = struct {
    uv_data: UVData,
    metrics: ft.GlyphMetrics,
};

// FreeType face the glyphs are rasterized from.
face: ft.Face,
// Character size in points (passed to setCharSize as size * 64).
size: i32,
// Codepoint -> rasterized glyph cache.
char_map: std.AutoHashMap(u21, GlyphInfo),
allocator: std.mem.Allocator,

// State threaded through std.io.Writer into write().
const WriterContext = struct {
    label: *Label,
    app: *App,
    position: Vec2,
    text_color: Vec4,
};
const WriterError = ft.Error || std.mem.Allocator.Error || AtlasErr;
const Writer = std.io.Writer(WriterContext, WriterError, write);
/// Returns a std.io.Writer that renders any bytes written to it as text at
/// `position` in `text_color`, using this label's face and cached glyphs.
pub fn writer(label: *Label, app: *App, position: Vec2, text_color: Vec4) Writer {
    return Writer{
        .context = .{
            .label = label,
            .app = app,
            .position = position,
            .text_color = text_color,
        },
    };
}
/// Creates a Label for the font at `font_path` (face `face_index`) rendered at
/// `char_size`. Caller must call deinit(); glyphs are rasterized lazily.
pub fn init(lib: ft.Library, font_path: []const u8, face_index: i32, char_size: i32, allocator: std.mem.Allocator) !Label {
    return Label{
        .face = try lib.createFace(font_path, face_index),
        .size = char_size,
        .char_map = std.AutoHashMap(u21, GlyphInfo).init(allocator),
        .allocator = allocator,
    };
}
/// Releases the FreeType face and the glyph cache. GlyphInfo values hold no
/// separate allocations, so dropping the map is sufficient.
pub fn deinit(label: *Label) void {
    label.face.deinit();
    label.char_map.deinit();
}
/// Writer backend: decodes `bytes` as UTF-8 and emits one textured quad per
/// visible glyph, rasterizing and caching any glyph not seen before.
/// Always reports the full `bytes.len` as written.
fn write(ctx: WriterContext, bytes: []const u8) WriterError!usize {
    // Pen offset from ctx.position, advanced per glyph; y decreases on newline.
    var offset = Vec2{ 0, 0 };
    var j: usize = 0;
    while (j < bytes.len) {
        // NOTE(review): invalid UTF-8 input hits `catch unreachable` here —
        // undefined behavior in release builds; consider propagating an error.
        const len = std.unicode.utf8ByteSequenceLength(bytes[j]) catch unreachable;
        const char = std.unicode.utf8Decode(bytes[j..(j + len)]) catch unreachable;
        j += len;
        switch (char) {
            '\n' => {
                // Carriage return + move down one line (FreeType metrics are
                // in 26.6 fixed point, hence the >> 6).
                offset[0] = 0;
                offset[1] -= @intToFloat(f32, ctx.label.face.size().metrics().height >> 6);
            },
            ' ' => {
                // Spaces produce no quad; only the horizontal advance is used.
                const v = try ctx.label.char_map.getOrPut(char);
                if (!v.found_existing) {
                    try ctx.label.face.setCharSize(ctx.label.size * 64, 0, 50, 0);
                    try ctx.label.face.loadChar(char, .{ .render = true });
                    const glyph = ctx.label.face.glyph();
                    v.value_ptr.* = GlyphInfo{
                        .uv_data = undefined,
                        .metrics = glyph.metrics(),
                    };
                }
                offset[0] += @intToFloat(f32, v.value_ptr.metrics.horiAdvance >> 6);
            },
            else => {
                const v = try ctx.label.char_map.getOrPut(char);
                if (!v.found_existing) {
                    // First time we see this codepoint: rasterize it and copy
                    // the bitmap into the shared texture atlas.
                    try ctx.label.face.setCharSize(ctx.label.size * 64, 0, 50, 0);
                    try ctx.label.face.loadChar(char, .{ .render = true });
                    const glyph = ctx.label.face.glyph();
                    const glyph_bitmap = glyph.bitmap();
                    const glyph_width = glyph_bitmap.width();
                    const glyph_height = glyph_bitmap.rows();
                    // Add 1 pixel padding to texture to avoid bleeding over other textures
                    var glyph_data = try ctx.label.allocator.alloc(zigimg.color.Rgba32, (glyph_width + 2) * (glyph_height + 2));
                    defer ctx.label.allocator.free(glyph_data);
                    const glyph_buffer = glyph_bitmap.buffer().?;
                    for (glyph_data) |*data, i| {
                        const x = i % (glyph_width + 2);
                        const y = i / (glyph_width + 2);
                        // zig fmt: off
                        // Border ring is transparent-black; interior copies the
                        // grayscale coverage from the FreeType bitmap.
                        const glyph_col =
                            if (x == 0 or x == (glyph_width + 1) or y == 0 or y == (glyph_height + 1))
                                0
                            else
                                glyph_buffer[(y - 1) * glyph_width + (x - 1)];
                        // zig fmt: on
                        data.* = zigimg.color.Rgba32.initRgb(glyph_col, glyph_col, glyph_col);
                    }
                    var glyph_atlas_region = try ctx.app.texture_atlas_data.reserve(ctx.label.allocator, glyph_width + 2, glyph_height + 2);
                    ctx.app.texture_atlas_data.set(glyph_atlas_region, glyph_data);
                    // Shrink the recorded region so the UVs exclude the padding.
                    glyph_atlas_region.x += 1;
                    glyph_atlas_region.y += 1;
                    glyph_atlas_region.width -= 2;
                    glyph_atlas_region.height -= 2;
                    const glyph_uv_data = glyph_atlas_region.getUVData(@intToFloat(f32, ctx.app.texture_atlas_data.size));
                    v.value_ptr.* = GlyphInfo{
                        .uv_data = glyph_uv_data,
                        .metrics = glyph.metrics(),
                    };
                }
                // Place the quad using the glyph's bearing (26.6 fixed point).
                try draw.quad(
                    ctx.app,
                    ctx.position + offset + Vec2{ @intToFloat(f32, v.value_ptr.metrics.horiBearingX >> 6), @intToFloat(f32, (v.value_ptr.metrics.horiBearingY - v.value_ptr.metrics.height) >> 6) },
                    .{ @intToFloat(f32, v.value_ptr.metrics.width >> 6), @intToFloat(f32, v.value_ptr.metrics.height >> 6) },
                    .{ .blend_color = ctx.text_color },
                    v.value_ptr.uv_data,
                );
                offset[0] += @intToFloat(f32, v.value_ptr.metrics.horiAdvance >> 6);
            },
        }
    }
    return bytes.len;
}
/// Formats `fmt`/`args` and renders the result at `position` in `text_color`.
/// Convenience wrapper over writer().
pub fn print(label: *Label, app: *App, comptime fmt: []const u8, args: anytype, position: Vec2, text_color: Vec4) !void {
    try writer(label, app, position, text_color).print(fmt, args);
}

View file

@ -1,323 +0,0 @@
// TODO:
// - handle textures better with texture atlas
// - handle adding and removing triangles and quads better
const std = @import("std");
const mach = @import("mach");
const gpu = @import("gpu");
const zm = @import("zmath");
const zigimg = @import("zigimg");
const glfw = @import("glfw");
const draw = @import("draw.zig");
const Atlas = @import("atlas.zig").Atlas;
const ft = @import("freetype");
const Label = @import("label.zig");
const ResizableLabel = @import("resizable_label.zig");
/// gkurve example application state (mach app file-struct).
pub const App = @This();

const AtlasRGB8 = Atlas(zigimg.color.Rgba32);

pipeline: *gpu.RenderPipeline,
queue: *gpu.Queue,
vertex_buffer: *gpu.Buffer,
// CPU-side vertex list; flushed to vertex_buffer when the dirty flag is set.
vertices: std.ArrayList(draw.Vertex),
update_vertex_buffer: bool,
vertex_uniform_buffer: *gpu.Buffer,
update_vertex_uniform_buffer: bool,
frag_uniform_buffer: *gpu.Buffer,
// One FragUniform per triangle (vertices.items.len / 3 entries).
fragment_uniform_list: std.ArrayList(draw.FragUniform),
update_frag_uniform_buffer: bool,
bind_group: *gpu.BindGroup,
// CPU-side copy of the shared texture atlas.
texture_atlas_data: AtlasRGB8,
/// mach entry point: builds the texture atlas (sample image, white patch,
/// lazily-added glyphs), renders demo text into the CPU-side vertex/uniform
/// lists, uploads the atlas, and creates the GPU pipeline, buffers and bind
/// group. Note: buffer sizes are fixed to the vertex counts present at init.
pub fn init(app: *App, core: *mach.Core) !void {
    const queue = core.device.getQueue();
    // TODO: Refactor texture atlas size number
    app.texture_atlas_data = try AtlasRGB8.init(core.allocator, 1280);
    const atlas_size = gpu.Extent3D{ .width = app.texture_atlas_data.size, .height = app.texture_atlas_data.size };
    const atlas_float_size = @intToFloat(f32, app.texture_atlas_data.size);
    const texture = core.device.createTexture(&.{
        .size = atlas_size,
        .format = .rgba8_unorm,
        .usage = .{
            .texture_binding = true,
            .copy_dst = true,
            .render_attachment = true,
        },
    });
    const data_layout = gpu.Texture.DataLayout{
        .bytes_per_row = @intCast(u32, atlas_size.width * 4),
        .rows_per_image = @intCast(u32, atlas_size.height),
    };
    // Decode the embedded sample image and copy it into the atlas, converting
    // RGB24 to RGBA32 if needed.
    var img = try zigimg.Image.fromMemory(core.allocator, @embedFile("./assets/gotta-go-fast.png"));
    defer img.deinit();
    const atlas_img_region = try app.texture_atlas_data.reserve(core.allocator, @truncate(u32, img.width), @truncate(u32, img.height));
    const img_uv_data = atlas_img_region.getUVData(atlas_float_size);
    switch (img.pixels) {
        .rgba32 => |pixels| app.texture_atlas_data.set(atlas_img_region, pixels),
        .rgb24 => |pixels| {
            const data = try rgb24ToRgba32(core.allocator, pixels);
            defer data.deinit(core.allocator);
            app.texture_atlas_data.set(atlas_img_region, data.rgba32);
        },
        else => @panic("unsupported image color format"),
    }
    // Solid-white atlas patch used for untextured (flat color) geometry; the
    // 1px shrink keeps sampling away from neighboring atlas entries.
    const white_tex_scale = 80;
    var atlas_white_region = try app.texture_atlas_data.reserve(core.allocator, white_tex_scale, white_tex_scale);
    atlas_white_region.x += 1;
    atlas_white_region.y += 1;
    atlas_white_region.width -= 2;
    atlas_white_region.height -= 2;
    const white_texture_uv_data = atlas_white_region.getUVData(atlas_float_size);
    var white_tex_data = try core.allocator.alloc(zigimg.color.Rgba32, white_tex_scale * white_tex_scale);
    defer core.allocator.free(white_tex_data);
    std.mem.set(zigimg.color.Rgba32, white_tex_data, zigimg.color.Rgba32.initRgb(0xff, 0xff, 0xff))
;
    app.texture_atlas_data.set(atlas_white_region, white_tex_data);
    app.vertices = try std.ArrayList(draw.Vertex).initCapacity(core.allocator, 9);
    app.fragment_uniform_list = try std.ArrayList(draw.FragUniform).initCapacity(core.allocator, 3);
    // Quick test for using freetype
    const lib = try ft.Library.init();
    defer lib.deinit();
    const size_multiplier = 5;
    const character = "è";
    var label = try Label.init(lib, "libs/freetype/upstream/assets/FiraSans-Regular.ttf", 0, 110 * size_multiplier, core.allocator);
    defer label.deinit();
    // try label.print(app, "All your game's bases are belong to us èçòà", .{}, @Vector(2, f32){ 0, 420 }, @Vector(4, f32){ 1, 1, 1, 1 });
    try label.print(app, character, .{}, @Vector(2, f32){ 50 * size_multiplier, 40 }, @Vector(4, f32){ 1, 1, 1, 1 });
    var resizable_label: ResizableLabel = undefined;
    try resizable_label.init(lib, "libs/freetype/upstream/assets/FiraSans-Regular.ttf", 0, core.allocator, white_texture_uv_data);
    defer resizable_label.deinit();
    try resizable_label.print(app, character, .{}, @Vector(4, f32){ 0, 40, 0, 0 }, @Vector(4, f32){ 1, 1, 1, 1 }, 80 * size_multiplier);
    // Upload the finished atlas (image + white patch + any glyphs added by the
    // print calls above) to the GPU texture.
    queue.writeTexture(
        &.{ .texture = texture },
        &data_layout,
        &.{ .width = app.texture_atlas_data.size, .height = app.texture_atlas_data.size },
        app.texture_atlas_data.data,
    );
    const wsize = core.getWindowSize();
    const window_width = @intToFloat(f32, wsize.width);
    const window_height = @intToFloat(f32, wsize.height);
    const triangle_scale = 250;
    _ = window_width;
    _ = window_height;
    _ = triangle_scale;
    _ = img_uv_data;
    // try draw.equilateralTriangle(app, .{ window_width / 2, window_height / 2 }, triangle_scale, .{}, img_uv_data);
    // try draw.equilateralTriangle(app, .{ window_width / 2, window_height / 2 - triangle_scale }, triangle_scale, .{ .type = .concave }, img_uv_data);
    // try draw.equilateralTriangle(app, .{ window_width / 2 - triangle_scale, window_height / 2 - triangle_scale / 2 }, triangle_scale, .{ .type = .convex }, white_texture_uv_data);
    // try draw.quad(app, .{ 0, 0 }, .{ 480, 480 }, .{}, .{ .bottom_left = .{ 0, 0 }, .width_and_height = .{ 1, 1 } });
    // try draw.circle(app, .{ window_width / 2, window_height / 2 }, window_height / 2 - 10, .{ 0, 0.5, 0.75, 1.0 }, white_texture_uv_data);
    // Pipeline setup: standard alpha blending, binding layout must match
    // vert.wgsl/frag.wgsl (@binding 0-3).
    const vs_module = core.device.createShaderModuleWGSL("vert.wgsl", @embedFile("vert.wgsl"));
    const fs_module = core.device.createShaderModuleWGSL("frag.wgsl", @embedFile("frag.wgsl"));
    const blend = gpu.BlendState{
        .color = .{
            .operation = .add,
            .src_factor = .src_alpha,
            .dst_factor = .one_minus_src_alpha,
        },
        .alpha = .{
            .operation = .add,
            .src_factor = .one,
            .dst_factor = .zero,
        },
    };
    const color_target = gpu.ColorTargetState{
        .format = core.swap_chain_format,
        .blend = &blend,
        .write_mask = gpu.ColorWriteMaskFlags.all,
    };
    const fragment = gpu.FragmentState.init(.{
        .module = fs_module,
        .entry_point = "main",
        .targets = &.{color_target},
    });
    const vbgle = gpu.BindGroupLayout.Entry.buffer(0, .{ .vertex = true }, .uniform, true, 0);
    const fbgle = gpu.BindGroupLayout.Entry.buffer(1, .{ .fragment = true }, .read_only_storage, true, 0);
    const sbgle = gpu.BindGroupLayout.Entry.sampler(2, .{ .fragment = true }, .filtering);
    const tbgle = gpu.BindGroupLayout.Entry.texture(3, .{ .fragment = true }, .float, .dimension_2d, false);
    const bgl = core.device.createBindGroupLayout(
        &gpu.BindGroupLayout.Descriptor.init(.{
            .entries = &.{ vbgle, fbgle, sbgle, tbgle },
        }),
    );
    const bind_group_layouts = [_]*gpu.BindGroupLayout{bgl};
    const pipeline_layout = core.device.createPipelineLayout(&gpu.PipelineLayout.Descriptor.init(.{
        .bind_group_layouts = &bind_group_layouts,
    }));
    const pipeline_descriptor = gpu.RenderPipeline.Descriptor{
        .fragment = &fragment,
        .layout = pipeline_layout,
        .vertex = gpu.VertexState.init(.{
            .module = vs_module,
            .entry_point = "main",
            .buffers = &.{draw.VERTEX_BUFFER_LAYOUT},
        }),
    };
    // Buffers sized from the geometry generated above; one FragUniform per
    // triangle (vertices / 3).
    const vertex_buffer = core.device.createBuffer(&.{
        .usage = .{ .copy_dst = true, .vertex = true },
        .size = @sizeOf(draw.Vertex) * app.vertices.items.len,
        .mapped_at_creation = false,
    });
    const vertex_uniform_buffer = core.device.createBuffer(&.{
        .usage = .{ .copy_dst = true, .uniform = true },
        .size = @sizeOf(draw.VertexUniform),
        .mapped_at_creation = false,
    });
    const frag_uniform_buffer = core.device.createBuffer(&.{
        .usage = .{ .copy_dst = true, .storage = true },
        .size = @sizeOf(draw.FragUniform) * app.fragment_uniform_list.items.len,
        .mapped_at_creation = false,
    });
    const sampler = core.device.createSampler(&.{
        // .mag_filter = .linear,
        // .min_filter = .linear,
    });
    const bind_group = core.device.createBindGroup(
        &gpu.BindGroup.Descriptor.init(.{
            .layout = bgl,
            .entries = &.{
                gpu.BindGroup.Entry.buffer(0, vertex_uniform_buffer, 0, @sizeOf(draw.VertexUniform)),
                gpu.BindGroup.Entry.buffer(1, frag_uniform_buffer, 0, @sizeOf(draw.FragUniform) * app.vertices.items.len / 3),
                gpu.BindGroup.Entry.sampler(2, sampler),
                gpu.BindGroup.Entry.textureView(3, texture.createView(&gpu.TextureView.Descriptor{ .dimension = .dimension_2d })),
            },
        }),
    );
    app.pipeline = core.device.createRenderPipeline(&pipeline_descriptor);
    app.queue = queue;
    app.vertex_buffer = vertex_buffer;
    app.vertex_uniform_buffer = vertex_uniform_buffer;
    app.frag_uniform_buffer = frag_uniform_buffer;
    app.bind_group = bind_group;
    app.update_vertex_buffer = true;
    app.update_vertex_uniform_buffer = true;
    app.update_frag_uniform_buffer = true;
    // Release creation-time objects the pipeline/bind group now own references to.
    vs_module.release();
    fs_module.release();
    pipeline_layout.release();
    bgl.release();
}
/// Releases GPU resources and frees the CPU-side geometry lists and atlas.
pub fn deinit(app: *App, core: *mach.Core) void {
    app.vertex_buffer.release();
    app.vertex_uniform_buffer.release();
    app.frag_uniform_buffer.release();
    app.bind_group.release();
    app.vertices.deinit();
    app.fragment_uniform_list.deinit();
    app.texture_atlas_data.deinit(core.allocator);
}
/// Per-frame callback: handles input (space closes the window), flushes any
/// dirty CPU-side buffers to the GPU, records one render pass drawing all
/// accumulated triangles, and presents the frame.
pub fn update(app: *App, core: *mach.Core) !void {
    while (core.pollEvent()) |event| {
        switch (event) {
            .key_press => |ev| {
                if (ev.key == .space)
                    core.close();
            },
            else => {},
        }
    }
    const back_buffer_view = core.swap_chain.?.getCurrentTextureView();
    const color_attachment = gpu.RenderPassColorAttachment{
        .view = back_buffer_view,
        .clear_value = std.mem.zeroes(gpu.Color),
        .load_op = .clear,
        .store_op = .store,
    };
    const encoder = core.device.createCommandEncoder(null);
    const render_pass_info = gpu.RenderPassDescriptor.init(.{
        .color_attachments = &.{color_attachment},
    });
    {
        // Upload only what changed since the last frame; the dirty flags are
        // set by draw.* and the label writers.
        if (app.update_vertex_buffer) {
            encoder.writeBuffer(app.vertex_buffer, 0, app.vertices.items);
            app.update_vertex_buffer = false;
        }
        if (app.update_frag_uniform_buffer) {
            encoder.writeBuffer(app.frag_uniform_buffer, 0, app.fragment_uniform_list.items);
            app.update_frag_uniform_buffer = false;
        }
        if (app.update_vertex_uniform_buffer) {
            encoder.writeBuffer(app.vertex_uniform_buffer, 0, &[_]draw.VertexUniform{try getVertexUniformBufferObject(core)});
            app.update_vertex_uniform_buffer = false;
        }
    }
    const pass = encoder.beginRenderPass(&render_pass_info);
    pass.setPipeline(app.pipeline);
    pass.setVertexBuffer(0, app.vertex_buffer, 0, @sizeOf(draw.Vertex) * app.vertices.items.len);
    // Dynamic offsets for the two dynamic-offset buffer bindings (0 and 1).
    pass.setBindGroup(0, app.bind_group, &.{ 0, 0 });
    pass.draw(@truncate(u32, app.vertices.items.len), 1, 0, 0);
    pass.end();
    pass.release();
    var command = encoder.finish(null);
    encoder.release();
    app.queue.submit(&.{command});
    command.release();
    core.swap_chain.?.present();
    back_buffer_view.release();
}
/// Window-resize callback: the projection depends on window size, so mark the
/// vertex uniform for re-upload on the next frame.
pub fn resize(app: *App, _: *mach.Core, _: u32, _: u32) !void {
    app.update_vertex_uniform_buffer = true;
}
/// Expands RGB24 pixels to RGBA32 with fully opaque alpha.
/// Caller owns the returned storage and must free it via deinit(allocator).
fn rgb24ToRgba32(allocator: std.mem.Allocator, in: []zigimg.color.Rgb24) !zigimg.color.PixelStorage {
    const out = try zigimg.color.PixelStorage.init(allocator, .rgba32, in.len);
    for (in) |px, idx| {
        out.rgba32[idx] = zigimg.color.Rgba32{ .r = px.r, .g = px.g, .b = px.b, .a = 255 };
    }
    return out;
}
// Move to draw.zig
/// Builds the vertex-stage uniform: an orthographic projection over the window
/// in pixel units, translated so (0,0) maps to the bottom-left corner.
pub fn getVertexUniformBufferObject(core: *mach.Core) !draw.VertexUniform {
    // Note: We use window width/height here, not framebuffer width/height.
    // On e.g. macOS, window size may be 640x480 while framebuffer size may be
    // 1280x960 (subpixels.) Doing this lets us use a pixel, not subpixel,
    // coordinate system.
    const window_size = core.getWindowSize();
    const proj = zm.orthographicRh(
        @intToFloat(f32, window_size.width),
        @intToFloat(f32, window_size.height),
        -100,
        100,
    );
    // Shift the ortho volume so pixel coordinates start at the corner rather
    // than the center.
    const mvp = zm.mul(proj, zm.translation(-1, -1, 0));
    return draw.VertexUniform{
        .mat = mvp,
    };
}

View file

@ -1,453 +0,0 @@
//! TODO: Refactor the API, maybe use a handle that contains the lib and other things and controls init and deinit of ft.Lib and other things
const std = @import("std");
const ft = @import("freetype");
const zigimg = @import("zigimg");
const Atlas = @import("atlas.zig").Atlas;
const AtlasErr = @import("atlas.zig").Error;
const UVData = @import("atlas.zig").UVData;
const App = @import("main.zig").App;
const draw = @import("draw.zig");
const Vertex = draw.Vertex;
const Tessellator = @import("tessellator.zig").Tessellator;
// If true, show the filled triangles green, the concave beziers blue and the convex ones red
const debug_colors = false;

/// Resolution-independent text renderer: decomposes glyph outlines via
/// FreeType, tessellates them into gkurve triangles, and caches the result
/// per codepoint so it can be re-emitted at any size.
pub const ResizableLabel = @This();

const Vec2 = @Vector(2, f32);
const Vec4 = @Vector(4, f32);
const VertexList = std.ArrayList(Vertex);

// All the data that a single character needs to be rendered
// TODO: hori/vert advance, write file format
const CharVertices = struct {
    filled_vertices: VertexList,
    filled_vertices_indices: std.ArrayList(u16),
    // Concave vertices belong to the filled_vertices list, so just index them
    concave_vertices: std.ArrayList(u16),
    // The point outside of the convex bezier, doesn't belong to the filled vertices,
    // But the other two points do, so put those in the indices
    convex_vertices: VertexList,
    convex_vertices_indices: std.ArrayList(u16),

    // Frees all five lists owned by this cache entry.
    fn deinit(self: CharVertices) void {
        self.filled_vertices.deinit();
        self.filled_vertices_indices.deinit();
        self.concave_vertices.deinit();
        self.convex_vertices.deinit();
        self.convex_vertices_indices.deinit();
    }
};

face: ft.Face,
// Codepoint -> tessellated outline cache.
char_map: std.AutoHashMap(u21, CharVertices),
allocator: std.mem.Allocator,
tessellator: Tessellator,
// Atlas UV region of a solid-white patch, used so glyphs render flat color.
white_texture: UVData,

// The data that the write function needs
// TODO: move texture here, don't limit to just white_texture
const WriterContext = struct {
    label: *ResizableLabel,
    app: *App,
    position: Vec4,
    text_color: Vec4,
    text_size: u32,
};
const WriterError = ft.Error || std.mem.Allocator.Error || AtlasErr;
const Writer = std.io.Writer(WriterContext, WriterError, write);
/// Returns a std.io.Writer that renders written bytes as vector text at
/// `position`, scaled to `text_size`, in `text_color`.
pub fn writer(label: *ResizableLabel, app: *App, position: Vec4, text_color: Vec4, text_size: u32) Writer {
    return Writer{
        .context = .{
            .label = label,
            .app = app,
            .position = position,
            .text_color = text_color,
            .text_size = text_size,
        },
    };
}
/// Initializes in place (the tessellator appears to require a stable address,
/// hence init-by-pointer rather than returning a value — TODO confirm).
/// Caller must call deinit().
pub fn init(self: *ResizableLabel, lib: ft.Library, font_path: []const u8, face_index: i32, allocator: std.mem.Allocator, white_texture: UVData) !void {
    self.* = ResizableLabel{
        .face = try lib.createFace(font_path, face_index),
        .char_map = std.AutoHashMap(u21, CharVertices).init(allocator),
        .allocator = allocator,
        .tessellator = undefined,
        .white_texture = white_texture,
    };
    // Initialized after self.* so the tessellator isn't overwritten above.
    self.tessellator.init(self.allocator);
}
/// Releases the face, the tessellator, and every cached CharVertices entry.
pub fn deinit(label: *ResizableLabel) void {
    label.face.deinit();
    label.tessellator.deinit();
    // Each map value owns five ArrayLists; free them before dropping the map.
    var iter = label.char_map.valueIterator();
    while (iter.next()) |ptr| {
        ptr.deinit();
    }
    label.char_map.deinit();
}
// TODO: handle offsets
// FIXME: many useless allocations for the arraylists
/// Writer backend: decodes `bytes` as UTF-8; for each glyph, tessellates its
/// outline on first use (cached in char_map), then scales/offsets the cached
/// geometry and appends filled, convex and concave gkurve triangles to the app.
/// Newlines and spaces are currently unimplemented (std.debug.todo).
/// Always reports the full `bytes.len` as written.
fn write(ctx: WriterContext, bytes: []const u8) WriterError!usize {
    var offset = Vec4{ 0, 0, 0, 0 };
    var c: usize = 0;
    while (c < bytes.len) {
        // NOTE(review): invalid UTF-8 hits `catch unreachable` — UB in release
        // builds; consider propagating an error instead.
        const len = std.unicode.utf8ByteSequenceLength(bytes[c]) catch unreachable;
        const char = std.unicode.utf8Decode(bytes[c..(c + len)]) catch unreachable;
        c += len;
        switch (char) {
            '\n' => {
                offset[0] = 0;
                offset[1] -= @intToFloat(f32, ctx.label.face.size().metrics().height >> 6);
                std.debug.todo("New line not implemented yet");
            },
            ' ' => {
                std.debug.todo("Space character not implemented yet");
                // const v = try ctx.label.char_map.getOrPut(char);
                // if (!v.found_existing) {
                //     try ctx.label.face.setCharSize(ctx.label.size * 64, 0, 50, 0);
                //     try ctx.label.face.loadChar(char, .{ .render = true });
                //     const glyph = ctx.label.face.glyph;
                //     v.value_ptr.* = GlyphInfo{
                //         .uv_data = undefined,
                //         .metrics = glyph.metrics(),
                //     };
                // }
                // offset[0] += @intToFloat(f32, v.value_ptr.metrics.horiAdvance >> 6);
            },
            else => {
                const v = try ctx.label.char_map.getOrPut(char);
                if (!v.found_existing) {
                    // First use of this codepoint: load the unscaled outline
                    // and tessellate it into the cache entry.
                    try ctx.label.face.loadChar(char, .{ .no_scale = true, .no_bitmap = true });
                    const glyph = ctx.label.face.glyph();
                    // Use a big scale and then scale to the actual text size
                    const multiplier = 1024 << 6;
                    const matrix = ft.Matrix{
                        .xx = 1 * multiplier,
                        .xy = 0 * multiplier,
                        .yx = 0 * multiplier,
                        .yy = 1 * multiplier,
                    };
                    glyph.outline().?.transform(matrix);
                    v.value_ptr.* = CharVertices{
                        .filled_vertices = VertexList.init(ctx.label.allocator),
                        .filled_vertices_indices = std.ArrayList(u16).init(ctx.label.allocator),
                        .concave_vertices = std.ArrayList(u16).init(ctx.label.allocator),
                        .convex_vertices = VertexList.init(ctx.label.allocator),
                        .convex_vertices_indices = std.ArrayList(u16).init(ctx.label.allocator),
                    };
                    // Scratch buffers filled by the FT_Outline_Decompose
                    // callbacks (moveTo/lineTo/conicTo/cubicTo).
                    var outline_ctx = OutlineContext{
                        .outline_verts = std.ArrayList(std.ArrayList(Vec2)).init(ctx.label.allocator),
                        .inside_verts = std.ArrayList(Vec2).init(ctx.label.allocator),
                        .concave_vertices = std.ArrayList(Vec2).init(ctx.label.allocator),
                        .convex_vertices = std.ArrayList(Vec2).init(ctx.label.allocator),
                    };
                    defer outline_ctx.outline_verts.deinit();
                    defer {
                        for (outline_ctx.outline_verts.items) |*item| {
                            item.deinit();
                        }
                    }
                    defer outline_ctx.inside_verts.deinit();
                    defer outline_ctx.concave_vertices.deinit();
                    defer outline_ctx.convex_vertices.deinit();
                    const callbacks = ft.Outline.Funcs(*OutlineContext){
                        .move_to = moveToFunction,
                        .line_to = lineToFunction,
                        .conic_to = conicToFunction,
                        .cubic_to = cubicToFunction,
                        .shift = 0,
                        .delta = 0,
                    };
                    try ctx.label.face.glyph().outline().?.decompose(&outline_ctx, callbacks);
                    uniteOutsideAndInsideVertices(&outline_ctx);
                    // Tessellator.triangulatePolygons() doesn't seem to work, so just
                    // call triangulatePolygon() for each polygon, and put the results all
                    // in all_outlines and all_indices
                    var all_outlines = std.ArrayList(Vec2).init(ctx.label.allocator);
                    defer all_outlines.deinit();
                    var all_indices = std.ArrayList(u16).init(ctx.label.allocator);
                    defer all_indices.deinit();
                    var idx_offset: u16 = 0;
                    for (outline_ctx.outline_verts.items) |item| {
                        ctx.label.tessellator.triangulatePolygon(item.items);
                        defer ctx.label.tessellator.clearBuffers();
                        try all_outlines.appendSlice(ctx.label.tessellator.out_verts.items);
                        // Re-base each polygon's indices into the combined list.
                        for (ctx.label.tessellator.out_idxes.items) |idx| {
                            try all_indices.append(idx + idx_offset);
                        }
                        idx_offset += @intCast(u16, ctx.label.tessellator.out_verts.items.len);
                    }
                    // Convert the tessellated 2D points into cached vertices;
                    // UVs normalize by the 1024<<6 scale applied above.
                    for (all_outlines.items) |item| {
                        // FIXME: The uv_data is wrong, should be pushed up by the lowest a character can be
                        const vertex_uv = item / @splat(2, @as(f32, 1024 << 6));
                        const vertex_pos = Vec4{ item[0], item[1], 0, 1 };
                        try v.value_ptr.filled_vertices.append(Vertex{ .pos = vertex_pos, .uv = vertex_uv });
                    }
                    try v.value_ptr.filled_vertices_indices.appendSlice(all_indices.items);
                    // FIXME: instead of finding the closest vertex and use its index maybe use indices directly in the moveTo,... functions
                    // Match each concave control point to its tessellated
                    // vertex by nearest-neighbor (squared distance < 0.1).
                    var i: usize = 0;
                    while (i < outline_ctx.concave_vertices.items.len) : (i += 1) {
                        for (all_outlines.items) |item, j| {
                            const dist = @reduce(.Add, (item - outline_ctx.concave_vertices.items[i]) * (item - outline_ctx.concave_vertices.items[i]));
                            if (dist < 0.1) {
                                try v.value_ptr.concave_vertices.append(@truncate(u16, j));
                                break;
                            }
                        }
                    }
                    // Convex beziers come in triples: [control point, on-curve,
                    // on-curve]; the control point is stored as a vertex, the
                    // other two are matched to filled-vertex indices.
                    i = 0;
                    while (i < outline_ctx.convex_vertices.items.len) : (i += 3) {
                        const vert = outline_ctx.convex_vertices.items[i];
                        const vertex_uv = vert / @splat(2, @as(f32, 1024 << 6));
                        const vertex_pos = Vec4{ vert[0], vert[1], 0, 1 };
                        try v.value_ptr.convex_vertices.append(Vertex{ .pos = vertex_pos, .uv = vertex_uv });
                        for (all_outlines.items) |item, j| {
                            const dist1 = @reduce(.Add, (item - outline_ctx.convex_vertices.items[i + 1]) * (item - outline_ctx.convex_vertices.items[i + 1]));
                            if (dist1 < 0.1) {
                                try v.value_ptr.convex_vertices_indices.append(@truncate(u16, j));
                            }
                            const dist2 = @reduce(.Add, (item - outline_ctx.convex_vertices.items[i + 2]) * (item - outline_ctx.convex_vertices.items[i + 2]));
                            if (dist2 < 0.1) {
                                try v.value_ptr.convex_vertices_indices.append(@truncate(u16, j));
                            }
                        }
                    }
                    ctx.label.tessellator.clearBuffers();
                }
                // Read the data and apply resizing of pos and uv
                var filled_vertices_after_offset = try ctx.label.allocator.alloc(Vertex, v.value_ptr.filled_vertices.items.len);
                defer ctx.label.allocator.free(filled_vertices_after_offset);
                for (filled_vertices_after_offset) |*vert, i| {
                    vert.* = v.value_ptr.filled_vertices.items[i];
                    // Scale from the 1024-unit cache space down to text_size.
                    vert.pos *= Vec4{ @intToFloat(f32, ctx.text_size) / 1024, @intToFloat(f32, ctx.text_size) / 1024, 0, 1 };
                    vert.pos += ctx.position + offset;
                    vert.uv = vert.uv * ctx.label.white_texture.width_and_height + ctx.label.white_texture.bottom_left;
                }
                var actual_filled_vertices_to_use = try ctx.label.allocator.alloc(Vertex, v.value_ptr.filled_vertices_indices.items.len);
                defer ctx.label.allocator.free(actual_filled_vertices_to_use);
                for (actual_filled_vertices_to_use) |*vert, i| {
                    vert.* = filled_vertices_after_offset[v.value_ptr.filled_vertices_indices.items[i]];
                }
                try ctx.app.vertices.appendSlice(actual_filled_vertices_to_use);
                if (debug_colors) {
                    try ctx.app.fragment_uniform_list.appendNTimes(.{ .blend_color = .{ 0, 1, 0, 1 } }, actual_filled_vertices_to_use.len / 3);
                } else {
                    try ctx.app.fragment_uniform_list.appendNTimes(.{ .blend_color = ctx.text_color }, actual_filled_vertices_to_use.len / 3);
                }
                // Convex triangles: [cached control point, two indexed filled
                // vertices] per triangle, re-assembled after offsetting.
                var convex_vertices_after_offset = try ctx.label.allocator.alloc(Vertex, v.value_ptr.convex_vertices.items.len + v.value_ptr.convex_vertices_indices.items.len);
                defer ctx.label.allocator.free(convex_vertices_after_offset);
                var j: u16 = 0;
                var k: u16 = 0;
                while (j < convex_vertices_after_offset.len) : (j += 3) {
                    convex_vertices_after_offset[j] = v.value_ptr.convex_vertices.items[j / 3];
                    convex_vertices_after_offset[j].pos *= Vec4{ @intToFloat(f32, ctx.text_size) / 1024, @intToFloat(f32, ctx.text_size) / 1024, 0, 1 };
                    convex_vertices_after_offset[j].pos += ctx.position + offset;
                    convex_vertices_after_offset[j].uv = convex_vertices_after_offset[j].uv * ctx.label.white_texture.width_and_height + ctx.label.white_texture.bottom_left;
                    convex_vertices_after_offset[j + 1] = filled_vertices_after_offset[v.value_ptr.convex_vertices_indices.items[k]];
                    convex_vertices_after_offset[j + 2] = filled_vertices_after_offset[v.value_ptr.convex_vertices_indices.items[k + 1]];
                    k += 2;
                }
                try ctx.app.vertices.appendSlice(convex_vertices_after_offset);
                if (debug_colors) {
                    try ctx.app.fragment_uniform_list.appendNTimes(.{ .type = .convex, .blend_color = .{ 1, 0, 0, 1 } }, convex_vertices_after_offset.len / 3);
                } else {
                    try ctx.app.fragment_uniform_list.appendNTimes(.{ .type = .convex, .blend_color = ctx.text_color }, convex_vertices_after_offset.len / 3);
                }
                // Concave triangles reuse already-offset filled vertices.
                var concave_vertices_after_offset = try ctx.label.allocator.alloc(Vertex, v.value_ptr.concave_vertices.items.len);
                defer ctx.label.allocator.free(concave_vertices_after_offset);
                for (concave_vertices_after_offset) |*vert, i| {
                    vert.* = filled_vertices_after_offset[v.value_ptr.concave_vertices.items[i]];
                }
                try ctx.app.vertices.appendSlice(concave_vertices_after_offset);
                if (debug_colors) {
                    try ctx.app.fragment_uniform_list.appendNTimes(.{ .type = .concave, .blend_color = .{ 0, 0, 1, 1 } }, concave_vertices_after_offset.len / 3);
                } else {
                    try ctx.app.fragment_uniform_list.appendNTimes(.{ .type = .concave, .blend_color = ctx.text_color }, concave_vertices_after_offset.len / 3);
                }
                ctx.app.update_vertex_buffer = true;
                ctx.app.update_frag_uniform_buffer = true;
                // offset[0] += @intToFloat(f32, v.value_ptr.metrics.horiAdvance >> 6);
            },
        }
    }
    return bytes.len;
}
// First move to initialize the outline, (first point),
// After many Q L or C, we will come back to the first point and then call M again if we need to hollow
// On the second M, we instead use an L to connect the first point to the start of the hollow path.
// We then follow like normal and at the end of the hollow path we use another L to close the path.
// This is basically how an o would be drawn, each ... character is a Vertex
// --------
// | |
// | |
// | |
// | ---- |
// - | | Consider the vertices here and below to be at the same height, they are coincident
// - | |
// | ---- |
// | |
// | |
// | |
// --------
// Accumulator for one glyph's outline while FreeType decomposes it
// (via moveToFunction/lineToFunction/conicToFunction below).
const OutlineContext = struct {
    // There may be more than one polygon (for example with 'i' we have the polygon of the base and another for the circle)
    outline_verts: std.ArrayList(std.ArrayList(Vec2)),
    // The internal outline, used for carving the shape (for example in 'a', we would first get the outline of the a, but if we stopped there, it would
    // be filled, so we need another outline for carving the filled polygon)
    inside_verts: std.ArrayList(Vec2),
    // For the concave and convex beziers: each quadratic segment contributes a
    // (control, end, previous) vertex triple — see conicToFunction.
    concave_vertices: std.ArrayList(Vec2),
    convex_vertices: std.ArrayList(Vec2),
};
// If there are elements in inside_verts, unite them with the outline_verts, effectively carving the shape.
// The inner (hole) path is spliced into the outer polygon at the outline vertex
// nearest to the hole's first point, forming one self-touching polygon.
fn uniteOutsideAndInsideVertices(ctx: *OutlineContext) void {
    if (ctx.inside_verts.items.len != 0) {
        // Check which point of outline is closer to the first of inside
        var last_outline = &ctx.outline_verts.items[ctx.outline_verts.items.len - 1];
        const closest_to_inside: usize = blk: {
            const first_point_inside = ctx.inside_verts.items[0];
            var min: f32 = std.math.f32_max;
            // NOTE(review): stays `undefined` if last_outline is empty — presumably
            // a moveTo always precedes carving, so it never is; confirm.
            var closest_index: usize = undefined;
            for (last_outline.items) |item, i| {
                // Squared euclidean distance — no sqrt needed for comparison.
                const dist = @reduce(.Add, (item - first_point_inside) * (item - first_point_inside));
                if (dist < min) {
                    min = dist;
                    closest_index = i;
                }
            }
            break :blk closest_index;
        };
        // Close the hole path back onto the chosen outline vertex, then splice the
        // whole hole path into the outline right after that vertex.
        ctx.inside_verts.append(last_outline.items[closest_to_inside]) catch unreachable;
        last_outline.insertSlice(closest_to_inside + 1, ctx.inside_verts.items) catch unreachable;
        ctx.inside_verts.clearRetainingCapacity();
    }
}
// TODO: Return also allocation error
// FreeType "move to" callback: starts a new contour at `_to`. If the point lies
// inside an existing polygon, the new contour is a hole (goes to inside_verts);
// otherwise it starts a fresh outline polygon.
fn moveToFunction(ctx: *OutlineContext, _to: ft.Vector) ft.Error!void {
    // Finish any pending hole before starting a new contour.
    uniteOutsideAndInsideVertices(ctx);
    const to = Vec2{ @intToFloat(f32, _to.x), @intToFloat(f32, _to.y) };
    // To check wether a point is carving a polygon,
    // Cast a ray to the right of the point and check
    // when this ray intersects the edges of the polygons,
    // if the number of intersections is odd -> inside,
    // if it's even -> outside
    var new_point_is_inside = false;
    for (ctx.outline_verts.items) |polygon| {
        var i: usize = 1;
        while (i < polygon.items.len) : (i += 1) {
            // Edge v1->v2 of this polygon.
            const v1 = polygon.items[i - 1];
            const v2 = polygon.items[i];
            const min_y = @minimum(v1[1], v2[1]);
            const max_y = @maximum(v1[1], v2[1]);
            const min_x = @minimum(v1[0], v2[0]);
            // If the point is at the same y as another, it may be counted twice,
            // That's why we add the last !=
            if (to[1] >= min_y and to[1] <= max_y and to[0] >= min_x and to[1] != v2[1]) {
                new_point_is_inside = !new_point_is_inside;
            }
        }
    }
    // If the point is inside, put it in the inside verts
    if (new_point_is_inside) {
        ctx.inside_verts.append(to) catch unreachable;
    } else {
        // Otherwise create a new polygon
        var new_outline_list = std.ArrayList(Vec2).init(ctx.outline_verts.allocator);
        new_outline_list.append(to) catch unreachable;
        ctx.outline_verts.append(new_outline_list) catch unreachable;
    }
}
// FreeType "line to" callback: appends a straight-segment endpoint to the
// contour currently being built — the hole path when one is active,
// otherwise the most recent outline polygon.
fn lineToFunction(ctx: *OutlineContext, to: ft.Vector) ft.Error!void {
    const point = Vec2{ @intToFloat(f32, to.x), @intToFloat(f32, to.y) };
    if (ctx.inside_verts.items.len == 0) {
        // No carving in progress: extend the newest outline polygon.
        ctx.outline_verts.items[ctx.outline_verts.items.len - 1].append(point) catch unreachable;
    } else {
        // A hole path is active: extend it instead.
        ctx.inside_verts.append(point) catch unreachable;
    }
}
// FreeType quadratic ("conic") Bézier callback. Records the (control, end,
// previous) triple for shader-side curve rendering and advances the active
// contour to `_to`.
fn conicToFunction(ctx: *OutlineContext, _control: ft.Vector, _to: ft.Vector) ft.Error!void {
    // std.log.info("C {} {} {} {}", .{ control.x, control.y, to.x, to.y });
    const control = Vec2{ @intToFloat(f32, _control.x), @intToFloat(f32, _control.y) };
    const to = Vec2{ @intToFloat(f32, _to.x), @intToFloat(f32, _to.y) };
    // Either the inside verts or the outine ones
    var verts_to_write = if (ctx.inside_verts.items.len != 0) &ctx.inside_verts else &ctx.outline_verts.items[ctx.outline_verts.items.len - 1];
    const previous_point = verts_to_write.items[verts_to_write.items.len - 1];
    const vertices = [_]Vec2{ control, to, previous_point };
    // Orientation of the turn previous->control->to via the 2D cross product.
    const vec1 = control - previous_point;
    const vec2 = to - control;
    // if ccw, it's concave, else it's convex.
    // Note the asymmetry: only a concave curve pushes its control point onto
    // the contour — presumably a convex control point lies outside the filled
    // region, so the contour skips it; confirm against the renderer.
    if ((vec1[0] * vec2[1] - vec1[1] * vec2[0]) > 0) {
        ctx.concave_vertices.appendSlice(&vertices) catch unreachable;
        verts_to_write.append(control) catch unreachable;
    } else {
        ctx.convex_vertices.appendSlice(&vertices) catch unreachable;
    }
    verts_to_write.append(to) catch unreachable;
}
// Doesn't seem to be used much
// FreeType cubic Bézier callback — unimplemented: this pipeline only handles
// quadratic segments, so hitting a cubic segment panics.
fn cubicToFunction(ctx: *OutlineContext, control_0: ft.Vector, control_1: ft.Vector, to: ft.Vector) ft.Error!void {
    _ = ctx;
    _ = control_0;
    _ = control_1;
    _ = to;
    @panic("TODO: search how to approximate cubic bezier with quadratic ones");
}
// Formats `fmt` with `args` and emits the resulting text geometry at
// `position`, using `text_color` and `text_size`.
pub fn print(label: *ResizableLabel, app: *App, comptime fmt: []const u8, args: anytype, position: Vec4, text_color: Vec4, text_size: u32) !void {
    return writer(label, app, position, text_color, text_size).print(fmt, args);
}

File diff suppressed because it is too large Load diff

View file

@ -1,314 +0,0 @@
// Copied from zig/src/tracy.zig
const std = @import("std");
const builtin = @import("builtin");
// TODO: integrate with tracy?
// const build_options = @import("build_options");
// pub const enable = if (builtin.is_test) false else build_options.enable_tracy;
// pub const enable_allocation = enable and build_options.enable_tracy_allocation;
// pub const enable_callstack = enable and build_options.enable_tracy_callstack;
pub const enable = false;
pub const enable_allocation = enable and false;
pub const enable_callstack = enable and false;
// TODO: make this configurable
const callstack_depth = 10;
// Mirrors Tracy's C zone context (TracyCZoneCtx). Layout must match the C ABI;
// the methods are thin wrappers over the corresponding ___tracy_emit_* calls.
const ___tracy_c_zone_context = extern struct {
    id: u32,
    active: c_int,
    // Closes the zone opened by trace/traceNamed.
    pub inline fn end(self: @This()) void {
        ___tracy_emit_zone_end(self);
    }
    // Attaches free-form text to the zone.
    pub inline fn addText(self: @This(), text: []const u8) void {
        ___tracy_emit_zone_text(self, text.ptr, text.len);
    }
    // Overrides the zone's displayed name.
    pub inline fn setName(self: @This(), name: []const u8) void {
        ___tracy_emit_zone_name(self, name.ptr, name.len);
    }
    // Sets the zone's color (0xRRGGBB).
    pub inline fn setColor(self: @This(), color: u32) void {
        ___tracy_emit_zone_color(self, color);
    }
    // Attaches a numeric value to the zone.
    pub inline fn setValue(self: @This(), value: u64) void {
        ___tracy_emit_zone_value(self, value);
    }
};
// Zone context returned by trace/traceNamed. When tracing is disabled this is
// an empty struct whose methods are no-ops, so call sites need no guards and
// the whole thing compiles away.
pub const Ctx = if (enable) ___tracy_c_zone_context else struct {
    pub inline fn end(self: @This()) void {
        _ = self;
    }
    pub inline fn addText(self: @This(), text: []const u8) void {
        _ = self;
        _ = text;
    }
    pub inline fn setName(self: @This(), name: []const u8) void {
        _ = self;
        _ = name;
    }
    pub inline fn setColor(self: @This(), color: u32) void {
        _ = self;
        _ = color;
    }
    pub inline fn setValue(self: @This(), value: u64) void {
        _ = self;
        _ = value;
    }
};
// Opens an unnamed profiling zone at the caller's source location; close it
// with `.end()` on the returned Ctx. No-op (returns empty Ctx) when disabled.
// The source-location literal is comptime-known; Tracy retains the pointer, so
// it must not point at stack memory — keep the `&.{...}` form.
pub inline fn trace(comptime src: std.builtin.SourceLocation) Ctx {
    if (!enable) return .{};

    if (enable_callstack) {
        return ___tracy_emit_zone_begin_callstack(&.{
            .name = null,
            .function = src.fn_name.ptr,
            .file = src.file.ptr,
            .line = src.line,
            .color = 0,
        }, callstack_depth, 1);
    } else {
        return ___tracy_emit_zone_begin(&.{
            .name = null,
            .function = src.fn_name.ptr,
            .file = src.file.ptr,
            .line = src.line,
            .color = 0,
        }, 1);
    }
}
// Same as `trace`, but the zone carries an explicit comptime name instead of
// only the function/file/line of the call site.
pub inline fn traceNamed(comptime src: std.builtin.SourceLocation, comptime name: [:0]const u8) Ctx {
    if (!enable) return .{};

    if (enable_callstack) {
        return ___tracy_emit_zone_begin_callstack(&.{
            .name = name.ptr,
            .function = src.fn_name.ptr,
            .file = src.file.ptr,
            .line = src.line,
            .color = 0,
        }, callstack_depth, 1);
    } else {
        return ___tracy_emit_zone_begin(&.{
            .name = name.ptr,
            .function = src.fn_name.ptr,
            .file = src.file.ptr,
            .line = src.line,
            .color = 0,
        }, 1);
    }
}
// Convenience constructor: wraps `allocator` in an unnamed Tracy-instrumented
// allocator so every alloc/resize/free is reported to the profiler.
pub fn tracyAllocator(allocator: std.mem.Allocator) TracyAllocator(null) {
    const Unnamed = TracyAllocator(null);
    return Unnamed.init(allocator);
}
// Allocator wrapper that forwards to `parent_allocator` and reports every
// allocation/resize/free to Tracy, optionally under a comptime pool `name`.
// Uses the pre-0.11 std.mem.Allocator.init vtable API.
pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
    return struct {
        parent_allocator: std.mem.Allocator,

        const Self = @This();

        pub fn init(parent_allocator: std.mem.Allocator) Self {
            return .{
                .parent_allocator = parent_allocator,
            };
        }

        // Returns the std.mem.Allocator interface backed by this wrapper.
        pub fn allocator(self: *Self) std.mem.Allocator {
            return std.mem.Allocator.init(self, allocFn, resizeFn, freeFn);
        }

        // Forwards the allocation, then reports success to Tracy (named or
        // unnamed pool); failures are flagged in red on the timeline.
        fn allocFn(self: *Self, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 {
            const result = self.parent_allocator.rawAlloc(len, ptr_align, len_align, ret_addr);
            if (result) |data| {
                if (data.len != 0) {
                    if (name) |n| {
                        allocNamed(data.ptr, data.len, n);
                    } else {
                        alloc(data.ptr, data.len);
                    }
                }
            } else |_| {
                messageColor("allocation failed", 0xFF0000);
            }
            return result;
        }

        // Tracy has no "resize" event, so a successful resize is reported as
        // free + alloc of the same pointer.
        fn resizeFn(self: *Self, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize {
            if (self.parent_allocator.rawResize(buf, buf_align, new_len, len_align, ret_addr)) |resized_len| {
                if (name) |n| {
                    freeNamed(buf.ptr, n);
                    allocNamed(buf.ptr, resized_len, n);
                } else {
                    free(buf.ptr);
                    alloc(buf.ptr, resized_len);
                }

                return resized_len;
            }

            // during normal operation the compiler hits this case thousands of times due to this
            // emitting messages for it is both slow and causes clutter
            return null;
        }

        fn freeFn(self: *Self, buf: []u8, buf_align: u29, ret_addr: usize) void {
            self.parent_allocator.rawFree(buf, buf_align, ret_addr);
            // this condition is to handle free being called on an empty slice that was never even allocated
            // example case: `std.process.getSelfExeSharedLibPaths` can return `&[_][:0]u8{}`
            if (buf.len != 0) {
                if (name) |n| {
                    freeNamed(buf.ptr, n);
                } else {
                    free(buf.ptr);
                }
            }
        }
    };
}
// This function only accepts comptime known strings, see `messageCopy` for runtime strings.
// Emits the message into the Tracy timeline; no-op when tracing is disabled.
pub inline fn message(comptime msg: [:0]const u8) void {
    if (!enable) return;
    const depth: c_int = if (enable_callstack) callstack_depth else 0;
    ___tracy_emit_messageL(msg.ptr, depth);
}
// This function only accepts comptime known strings, see `messageColorCopy` for runtime strings.
// Emits a colored message (0xRRGGBB) into the Tracy timeline; no-op when disabled.
pub inline fn messageColor(comptime msg: [:0]const u8, color: u32) void {
    if (!enable) return;
    const depth: c_int = if (enable_callstack) callstack_depth else 0;
    ___tracy_emit_messageLC(msg.ptr, color, depth);
}
// Runtime-string variant of `message`: Tracy copies the bytes, so the slice
// only needs to live for the duration of the call.
pub inline fn messageCopy(msg: []const u8) void {
    if (!enable) return;
    const depth: c_int = if (enable_callstack) callstack_depth else 0;
    ___tracy_emit_message(msg.ptr, msg.len, depth);
}
// Runtime-string variant of `messageColor`. Accepts any byte slice — the
// length is passed explicitly to ___tracy_emit_messageC, so no 0-sentinel is
// needed; this makes the signature consistent with `messageCopy` (sentinel
// slices still coerce, so existing callers are unaffected).
pub inline fn messageColorCopy(msg: []const u8, color: u32) void {
    if (!enable) return;
    ___tracy_emit_messageC(msg.ptr, msg.len, color, if (enable_callstack) callstack_depth else 0);
}
// Marks the end of a frame on Tracy's default (unnamed) frame timeline.
pub inline fn frameMark() void {
    if (enable) ___tracy_emit_frame_mark(null);
}
// Marks the end of a frame on the named frame timeline `name`.
pub inline fn frameMarkNamed(comptime name: [:0]const u8) void {
    if (enable) ___tracy_emit_frame_mark(name.ptr);
}
// Starts a discontinuous named frame region; call `.end()` on the returned
// value to close it.
pub inline fn namedFrame(comptime name: [:0]const u8) Frame(name) {
    frameMarkStart(name);
    return Frame(name){};
}
// Handle type for a named frame region opened by `namedFrame`; its only
// method closes the region.
pub fn Frame(comptime name: [:0]const u8) type {
    return struct {
        pub fn end(_: @This()) void {
            frameMarkEnd(name);
        }
    };
}
// Opens a discontinuous named frame region (paired with frameMarkEnd).
inline fn frameMarkStart(comptime name: [:0]const u8) void {
    if (!enable) return;
    ___tracy_emit_frame_mark_start(name.ptr);
}
// Closes the discontinuous named frame region opened by frameMarkStart.
inline fn frameMarkEnd(comptime name: [:0]const u8) void {
    if (!enable) return;
    ___tracy_emit_frame_mark_end(name.ptr);
}
extern fn ___tracy_emit_frame_mark_start(name: [*:0]const u8) void;
extern fn ___tracy_emit_frame_mark_end(name: [*:0]const u8) void;
// Reports an allocation of `len` bytes at `ptr` to Tracy's memory profiler,
// with a captured callstack when enabled.
inline fn alloc(ptr: [*]u8, len: usize) void {
    if (!enable) return;

    if (!enable_callstack) {
        ___tracy_emit_memory_alloc(ptr, len, 0);
    } else {
        ___tracy_emit_memory_alloc_callstack(ptr, len, callstack_depth, 0);
    }
}
// Like `alloc`, but attributes the allocation to the named memory pool.
inline fn allocNamed(ptr: [*]u8, len: usize, comptime name: [:0]const u8) void {
    if (!enable) return;

    if (!enable_callstack) {
        ___tracy_emit_memory_alloc_named(ptr, len, 0, name.ptr);
    } else {
        ___tracy_emit_memory_alloc_callstack_named(ptr, len, callstack_depth, 0, name.ptr);
    }
}
// Reports the free of `ptr` to Tracy's memory profiler, with a captured
// callstack when enabled.
inline fn free(ptr: [*]u8) void {
    if (!enable) return;

    if (!enable_callstack) {
        ___tracy_emit_memory_free(ptr, 0);
    } else {
        ___tracy_emit_memory_free_callstack(ptr, callstack_depth, 0);
    }
}
// Like `free`, but attributes the free to the named memory pool.
inline fn freeNamed(ptr: [*]u8, comptime name: [:0]const u8) void {
    if (!enable) return;

    if (!enable_callstack) {
        ___tracy_emit_memory_free_named(ptr, 0, name.ptr);
    } else {
        ___tracy_emit_memory_free_callstack_named(ptr, callstack_depth, 0, name.ptr);
    }
}
extern fn ___tracy_emit_zone_begin(
srcloc: *const ___tracy_source_location_data,
active: c_int,
) ___tracy_c_zone_context;
extern fn ___tracy_emit_zone_begin_callstack(
srcloc: *const ___tracy_source_location_data,
depth: c_int,
active: c_int,
) ___tracy_c_zone_context;
extern fn ___tracy_emit_zone_text(ctx: ___tracy_c_zone_context, txt: [*]const u8, size: usize) void;
extern fn ___tracy_emit_zone_name(ctx: ___tracy_c_zone_context, txt: [*]const u8, size: usize) void;
extern fn ___tracy_emit_zone_color(ctx: ___tracy_c_zone_context, color: u32) void;
extern fn ___tracy_emit_zone_value(ctx: ___tracy_c_zone_context, value: u64) void;
extern fn ___tracy_emit_zone_end(ctx: ___tracy_c_zone_context) void;
extern fn ___tracy_emit_memory_alloc(ptr: *const anyopaque, size: usize, secure: c_int) void;
extern fn ___tracy_emit_memory_alloc_callstack(ptr: *const anyopaque, size: usize, depth: c_int, secure: c_int) void;
extern fn ___tracy_emit_memory_free(ptr: *const anyopaque, secure: c_int) void;
extern fn ___tracy_emit_memory_free_callstack(ptr: *const anyopaque, depth: c_int, secure: c_int) void;
extern fn ___tracy_emit_memory_alloc_named(ptr: *const anyopaque, size: usize, secure: c_int, name: [*:0]const u8) void;
extern fn ___tracy_emit_memory_alloc_callstack_named(ptr: *const anyopaque, size: usize, depth: c_int, secure: c_int, name: [*:0]const u8) void;
extern fn ___tracy_emit_memory_free_named(ptr: *const anyopaque, secure: c_int, name: [*:0]const u8) void;
extern fn ___tracy_emit_memory_free_callstack_named(ptr: *const anyopaque, depth: c_int, secure: c_int, name: [*:0]const u8) void;
extern fn ___tracy_emit_message(txt: [*]const u8, size: usize, callstack: c_int) void;
extern fn ___tracy_emit_messageL(txt: [*:0]const u8, callstack: c_int) void;
extern fn ___tracy_emit_messageC(txt: [*]const u8, size: usize, color: u32, callstack: c_int) void;
extern fn ___tracy_emit_messageLC(txt: [*:0]const u8, color: u32, callstack: c_int) void;
extern fn ___tracy_emit_frame_mark(name: ?[*:0]const u8) void;
// Mirrors Tracy's C ___tracy_source_location_data; pointers passed to the
// ___tracy_emit_zone_begin* functions must outlive the zone.
const ___tracy_source_location_data = extern struct {
    name: ?[*:0]const u8,
    function: [*:0]const u8,
    file: [*:0]const u8,
    line: u32,
    color: u32,
};

View file

@ -1,40 +0,0 @@
// Per-draw uniforms: the transform applied to every vertex position.
struct VertexUniform {
    matrix: mat4x4<f32>,
}
@binding(0) @group(0) var<uniform> ubo: VertexUniform;
// Vertex -> fragment interface. `frag_bary` is linearly interpolated
// (barycentric-style coordinates used by the fragment stage); the triangle
// index is flat so every fragment of a triangle sees the same value.
struct VertexOut {
    @builtin(position) position_clip: vec4<f32>,
    @location(0) frag_uv: vec2<f32>,
    @interpolate(linear) @location(1) frag_bary: vec2<f32>,
    @interpolate(flat) @location(2) triangle_index: u32,
}
// Vertex shader: transforms the position, passes UVs through, and derives a
// per-corner barycentric coordinate plus the triangle index from the
// non-indexed vertex_index (three consecutive vertices per triangle).
@vertex fn main(
    @builtin(vertex_index) vertex_index: u32,
    @location(0) position: vec4<f32>,
    @location(1) uv: vec2<f32>,
) -> VertexOut {
    var output : VertexOut;
    output.position_clip = ubo.matrix * position;
    output.frag_uv = uv;
    // Generates [0.0, 0.0], [0.5, 0.0], [1.0, 1.0]
    // (emitted in rotated order per corner: vi%3==0 -> (0.5,0),
    //  vi%3==1 -> (1,1), vi%3==2 -> (0,0))
    //
    // Equal to:
    //
    //  if ((vertex_index+1u) % 3u == 0u) {
    //      output.frag_bary = vec2<f32>(0.0, 0.0);
    //  } else if ((vertex_index+1u) % 3u == 1u) {
    //      output.frag_bary = vec2<f32>(0.5, 0.0);
    //  } else {
    //      output.frag_bary = vec2<f32>(1.0, 1.0);
    //  }
    //
    output.frag_bary = vec2<f32>(
        f32((vertex_index+1u) % 3u) * 0.5,
        1.0 - f32((((vertex_index + 3u) % 3u) + 1u) % 2u),
    );
    // Three vertices per triangle (non-indexed draw).
    output.triangle_index = vertex_index / 3u;
    return output;
}