module: object recycling
Signed-off-by: Stephen Gutekanst <stephen@hexops.com>
parent 8054d03b4d
commit 9d134dc72d
15 changed files with 396 additions and 90 deletions
@@ -233,12 +233,12 @@ pub fn init(core: *Core) !void {
    try core.input.start();
}

-pub fn tick(core: *Core, core_mod: mach.Functions(Core)) void {
+pub fn tick(core: *Core, core_mod: mach.Mod(Core)) void {
    core_mod.run(core.on_tick.?);
    core_mod.call(.presentFrame);
}

-pub fn main(core: *Core, core_mod: mach.Functions(Core)) !void {
+pub fn main(core: *Core, core_mod: mach.Mod(Core)) !void {
    if (core.on_tick == null) @panic("core.on_tick callback must be set");
    if (core.on_exit == null) @panic("core.on_exit callback must be set");

@@ -277,7 +277,7 @@ pub fn main(core: *Core, core_mod: mach.Functions(Core)) !void {
    }
}

-fn platform_update_callback(core: *Core, core_mod: mach.Functions(Core)) !bool {
+fn platform_update_callback(core: *Core, core_mod: mach.Mod(Core)) !bool {
    core_mod.run(core.on_tick.?);
    core_mod.call(.presentFrame);

@@ -563,7 +563,7 @@ pub fn mousePosition(core: *@This()) Position {
// return core.platform.nativeWindowWin32();
// }

-pub fn presentFrame(core: *Core, core_mod: mach.Functions(Core)) !void {
+pub fn presentFrame(core: *Core, core_mod: mach.Mod(Core)) !void {
    // TODO(object)(window-title)
    // // Update windows title
    // var num_windows: usize = 0;
108  src/StringTable.zig  Normal file
@@ -0,0 +1,108 @@
//! Stores null-terminated strings and maps them to unique 32-bit indices.
//!
//! Lookups are omnidirectional: both (string -> index) and (index -> string) are supported
//! operations.
//!
//! The implementation is based on:
//! https://zig.news/andrewrk/how-to-use-hash-map-contexts-to-save-memory-when-doing-a-string-table-3l33

const std = @import("std");

const StringTable = @This();

string_bytes: std.ArrayListUnmanaged(u8) = .{},

/// Key is string_bytes index.
string_table: std.HashMapUnmanaged(u32, void, IndexContext, std.hash_map.default_max_load_percentage) = .{},

pub const Index = u32;

/// Returns the index of a string key, if it exists
/// complexity: hashmap lookup
pub fn index(table: *StringTable, key: []const u8) ?Index {
    const slice_context: SliceAdapter = .{ .string_bytes = &table.string_bytes };
    const found_entry = table.string_table.getEntryAdapted(key, slice_context);
    if (found_entry) |e| return e.key_ptr.*;
    return null;
}

/// Returns the index of a string key, inserting if not exists
/// complexity: hashmap lookup / update
pub fn indexOrPut(table: *StringTable, allocator: std.mem.Allocator, key: []const u8) !Index {
    const slice_context: SliceAdapter = .{ .string_bytes = &table.string_bytes };
    const index_context: IndexContext = .{ .string_bytes = &table.string_bytes };
    const entry = try table.string_table.getOrPutContextAdapted(allocator, key, slice_context, index_context);
    if (!entry.found_existing) {
        entry.key_ptr.* = @intCast(table.string_bytes.items.len);
        try table.string_bytes.appendSlice(allocator, key);
        try table.string_bytes.append(allocator, '\x00');
    }
    return entry.key_ptr.*;
}

/// Returns a null-terminated string given the index
/// complexity: O(1)
pub fn string(table: *StringTable, idx: Index) [:0]const u8 {
    return std.mem.span(@as([*:0]const u8, @ptrCast(table.string_bytes.items.ptr)) + idx);
}

pub fn deinit(table: *StringTable, allocator: std.mem.Allocator) void {
    table.string_bytes.deinit(allocator);
    table.string_table.deinit(allocator);
}

const IndexContext = struct {
    string_bytes: *std.ArrayListUnmanaged(u8),

    pub fn eql(ctx: IndexContext, a: u32, b: u32) bool {
        _ = ctx;
        return a == b;
    }

    pub fn hash(ctx: IndexContext, x: u32) u64 {
        const x_slice = std.mem.span(@as([*:0]const u8, @ptrCast(ctx.string_bytes.items.ptr)) + x);
        return std.hash_map.hashString(x_slice);
    }
};

const SliceAdapter = struct {
    string_bytes: *std.ArrayListUnmanaged(u8),

    pub fn eql(adapter: SliceAdapter, a_slice: []const u8, b: u32) bool {
        const b_slice = std.mem.span(@as([*:0]const u8, @ptrCast(adapter.string_bytes.items.ptr)) + b);
        return std.mem.eql(u8, a_slice, b_slice);
    }

    pub fn hash(adapter: SliceAdapter, adapted_key: []const u8) u64 {
        _ = adapter;
        return std.hash_map.hashString(adapted_key);
    }
};

test {
    const gpa = std.testing.allocator;

    var table: StringTable = .{};
    defer table.deinit(gpa);

    const index_context: IndexContext = .{ .string_bytes = &table.string_bytes };
    _ = index_context;

    // "hello" -> index 0
    const hello_index = try table.indexOrPut(gpa, "hello");
    try std.testing.expectEqual(@as(Index, 0), hello_index);

    try std.testing.expectEqual(@as(Index, 6), try table.indexOrPut(gpa, "world"));
    try std.testing.expectEqual(@as(Index, 12), try table.indexOrPut(gpa, "foo"));
    try std.testing.expectEqual(@as(Index, 16), try table.indexOrPut(gpa, "bar"));
    try std.testing.expectEqual(@as(Index, 20), try table.indexOrPut(gpa, "baz"));

    // index 0 -> "hello"
    try std.testing.expectEqualStrings("hello", table.string(hello_index));

    // Lookup "hello" -> index 0
    try std.testing.expectEqual(hello_index, table.index("hello").?);

    // Lookup "foobar" -> null
    try std.testing.expectEqual(@as(?Index, null), table.index("foobar"));
}
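For orientation, here is a small usage sketch of the new string table. It is not part of the commit; the import path and the literal names are illustrative. Modules.init (in src/module.zig below) uses the table in the same way to intern module and object-list names, where an index is simply the string's byte offset into `string_bytes`:

```zig
const std = @import("std");
const StringTable = @import("StringTable.zig"); // assumes the caller lives next to src/StringTable.zig

fn internExample(gpa: std.mem.Allocator) !void {
    var names: StringTable = .{};
    defer names.deinit(gpa);

    // string -> index (interning); repeated inserts return the same index.
    const core_id = try names.indexOrPut(gpa, "mach.Core");
    std.debug.assert(core_id == try names.indexOrPut(gpa, "mach.Core"));

    // index -> string is O(1): the index is a byte offset into string_bytes.
    std.debug.print("{s}\n", .{names.string(core_id)});

    // Strings that were never interned report null.
    std.debug.assert(names.index("never-interned") == null);
}
```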
85  src/main.zig
@@ -11,8 +11,10 @@ pub const Core = if (build_options.want_core) @import("Core.zig") else struct {}

// note: gamemode requires libc on linux
pub const gamemode = if (builtin.os.tag != .linux or builtin.link_libc) @import("gamemode.zig");
-pub const gfx = if (build_options.want_mach) @import("gfx/main.zig") else struct {};
-pub const Audio = if (build_options.want_sysaudio) @import("Audio.zig") else struct {};
+// TODO(object)
+// pub const gfx = if (build_options.want_mach) @import("gfx/main.zig") else struct {};
+// TODO(object)
+// pub const Audio = if (build_options.want_sysaudio) @import("Audio.zig") else struct {};
pub const math = @import("math/main.zig");
pub const testing = @import("testing.zig");
pub const time = @import("time/main.zig");

@@ -26,77 +28,11 @@ pub const Modules = @import("module.zig").Modules;
pub const ModuleID = @import("module.zig").ModuleID;
pub const ModuleFunctionID = @import("module.zig").ModuleFunctionID;
pub const FunctionID = @import("module.zig").FunctionID;
-pub const Functions = @import("module.zig").Functions;
-
-pub const ObjectID = u32;
-
-pub fn Objects(comptime T: type) type {
-    return struct {
-        internal: struct {
-            allocator: std.mem.Allocator,
-            id_counter: ObjectID = 0,
-            ids: std.AutoArrayHashMapUnmanaged(ObjectID, u32) = .{},
-            data: std.MultiArrayList(T) = .{},
-        },
-
-        pub const IsMachObjects = void;
-
-        // Only iteration, get(i) and set(i) are supported currently.
-        pub const Slice = struct {
-            len: usize,
-
-            internal: std.MultiArrayList(T).Slice,
-
-            pub fn set(s: *Slice, index: usize, elem: T) void {
-                s.internal.set(index, elem);
-            }
-
-            pub fn get(s: Slice, index: usize) T {
-                return s.internal.get(index);
-            }
-        };
-
-        pub fn new(objs: *@This(), value: T) std.mem.Allocator.Error!ObjectID {
-            const allocator = objs.internal.allocator;
-            const ids = &objs.internal.ids;
-            const data = &objs.internal.data;
-
-            const new_index = try data.addOne(allocator);
-            errdefer _ = data.pop();
-
-            const new_object_id = objs.internal.id_counter;
-            try ids.putNoClobber(allocator, new_object_id, @intCast(new_index));
-            objs.internal.id_counter += 1;
-            data.set(new_index, value);
-            return new_object_id;
-        }
-
-        pub fn set(objs: *@This(), id: ObjectID, value: T) void {
-            const ids = &objs.internal.ids;
-            const data = &objs.internal.data;
-
-            const index = ids.get(id) orelse std.debug.panic("invalid object: {any}", .{id});
-            data.set(index, value);
-        }
-
-        pub fn get(objs: *@This(), id: ObjectID) ?T {
-            const ids = &objs.internal.ids;
-            const data = &objs.internal.data;
-
-            const index = ids.get(id) orelse return null;
-            return data.get(index);
-        }
-
-        pub fn slice(objs: *@This()) Slice {
-            return Slice{ .len = objs.internal.data.len, .internal = objs.internal.data };
-        }
-    };
-}
-
-pub fn Object(comptime T: type) type {
-    return T;
-}
+pub const Mod = @import("module.zig").Mod;
+pub const ObjectID = @import("module.zig").ObjectID;
+pub const Objects = @import("module.zig").Objects;

// TODO(object): remove this?
pub fn schedule(v: anytype) @TypeOf(v) {
    return v;
}

@@ -108,7 +44,10 @@ test {
    _ = gpu;
    _ = sysaudio;
    _ = sysgpu;
-    _ = gfx;
+    // TODO(object)
+    // _ = gfx;
+    // TODO(object)
+    // _ = Audio;
    _ = math;
    _ = testing;
    _ = time;
263  src/module.zig
@@ -1,4 +1,249 @@
const std = @import("std");
const mach = @import("../main.zig");
const StringTable = @import("StringTable.zig");

/// An ID representing a mach object. This is an opaque identifier which effectively encodes:
///
/// * An array index that can be used to O(1) lookup the actual data / struct fields of the object.
/// * The generation (or 'version') of the object, enabling detecting use-after-object-delete in
///   many (but not all) cases.
/// * Which module the object came from, allowing looking up type information or the module name
///   from ID alone.
/// * Which list of objects in a module the object came from, allowing looking up type information
///   or the object type name - which enables debugging and type safety when passing opaque IDs
///   around.
///
pub const ObjectID = u64;

const ObjectTypeID = u16;

const PackedObjectTypeID = packed struct(u16) {
    // 2^10 (1024) modules in an application
    module_name_id: u10,
    // 2^6 (64) lists of objects per module
    object_name_id: u6,
};

pub fn Objects(comptime T: type) type {
    return struct {
        internal: struct {
            allocator: std.mem.Allocator,

            /// Mutex to be held when operating on these objects.
            mu: std.Thread.Mutex = .{},

            /// A registered ID indicating the type of objects being represented. This can be
            /// thought of as a hash of the module name + field name where this objects list is
            /// stored.
            type_id: ObjectTypeID,

            /// The actual object data
            data: std.MultiArrayList(T) = .{},

            /// Whether a given slot in data[i] is dead or not
            dead: std.bit_set.DynamicBitSetUnmanaged = .{},

            /// The current generation number of data[i], when data[i] becomes dead and then alive
            /// again, this number is incremented by one.
            generation: std.ArrayListUnmanaged(Generation) = .{},

            /// The recycling bin which tells which data indices are dead and can be reused.
            recycling_bin: std.ArrayListUnmanaged(Index) = .{},

            /// The number of objects that could not fit in the recycling bin and hence were thrown
            /// on the floor and forgotten about. This means there are dead items recorded by dead.set(index)
            /// which aren't in the recycling_bin, and the next call to new() may consider cleaning up.
            thrown_on_the_floor: u32 = 0,
        },

        pub const IsMachObjects = void;

        const Generation = u16;
        const Index = u32;

        const PackedID = packed struct(u64) {
            type_id: ObjectTypeID,
            generation: Generation,
            index: Index,
        };

        pub const Slice = struct {
            index: Index,
            objs: *Objects(T),

            /// Same as Objects(T).set but doesn't employ safety checks
            pub fn set(s: *Slice, id: ObjectID, value: T) void {
                const data = &s.objs.internal.data;
                const unpacked: PackedID = @bitCast(id);
                data.set(unpacked.index, value);
            }

            /// Same as Objects(T).get but doesn't employ safety checks
            pub fn get(s: *Slice, id: ObjectID) ?T {
                const data = &s.objs.internal.data;
                const unpacked: PackedID = @bitCast(id);
                return data.get(unpacked.index);
            }

            /// Same as Objects(T).delete but doesn't employ safety checks
            pub fn delete(s: *Slice, id: ObjectID) void {
                const dead = &s.objs.internal.dead;
                const recycling_bin = &s.objs.internal.recycling_bin;

                const unpacked: PackedID = @bitCast(id);
                if (recycling_bin.items.len < recycling_bin.capacity) {
                    recycling_bin.appendAssumeCapacity(unpacked.index);
                } else s.objs.internal.thrown_on_the_floor += 1;

                dead.set(unpacked.index);
            }

            pub fn next(iter: *Slice) ?ObjectID {
                const dead = &iter.objs.internal.dead;
                const generation = &iter.objs.internal.generation;
                const num_objects = generation.items.len;

                while (true) {
                    if (iter.index == num_objects) {
                        iter.index = 0;
                        return null;
                    }
                    defer iter.index += 1;

                    if (!dead.isSet(iter.index)) return @bitCast(PackedID{
                        .type_id = iter.objs.internal.type_id,
                        .generation = generation.items[iter.index],
                        .index = iter.index,
                    });
                }
            }
        };

        /// Tries to acquire the mutex without blocking the caller's thread.
        /// Returns `false` if the calling thread would have to block to acquire it.
        /// Otherwise, returns `true` and the caller should `unlock()` the Mutex to release it.
        pub fn tryLock(objs: *@This()) bool {
            return objs.internal.mu.tryLock();
        }

        /// Acquires the mutex, blocking the caller's thread until it can.
        /// It is undefined behavior if the mutex is already held by the caller's thread.
        /// Once acquired, call `unlock()` on the Mutex to release it.
        pub fn lock(objs: *@This()) void {
            objs.internal.mu.lock();
        }

        /// Releases the mutex which was previously acquired with `lock()` or `tryLock()`.
        /// It is undefined behavior if the mutex is unlocked from a different thread that it was locked from.
        pub fn unlock(objs: *@This()) void {
            objs.internal.mu.unlock();
        }

        pub inline fn new(objs: *@This(), value: T) std.mem.Allocator.Error!ObjectID {
            const allocator = objs.internal.allocator;
            const data = &objs.internal.data;
            const dead = &objs.internal.dead;
            const generation = &objs.internal.generation;
            const recycling_bin = &objs.internal.recycling_bin;

            // The recycling bin should always be big enough, but we check at this point if 10% of
            // all objects have been thrown on the floor. If they have, we find them and grow the
            // recycling bin to fit them.
            if (objs.internal.thrown_on_the_floor >= (data.len / 10)) {
                var iter = dead.iterator(.{});
                while (iter.next()) |index| try recycling_bin.append(allocator, @intCast(index));
                objs.internal.thrown_on_the_floor = 0;
            }

            if (recycling_bin.popOrNull()) |index| {
                // Reuse a free slot from the recycling bin.
                dead.unset(index);
                const gen = generation.items[index] + 1;
                generation.items[index] = gen;
                return @bitCast(PackedID{
                    .type_id = objs.internal.type_id,
                    .generation = gen,
                    .index = index,
                });
            }

            // Ensure we have space for the new object
            try data.ensureUnusedCapacity(allocator, 1);
            try dead.resize(allocator, data.capacity, true);
            try generation.ensureUnusedCapacity(allocator, 1);

            const index = data.len;
            data.appendAssumeCapacity(value);
            dead.unset(index);
            generation.appendAssumeCapacity(0);
            return @bitCast(PackedID{
                .type_id = objs.internal.type_id,
                .generation = 0,
                .index = @intCast(index),
            });
        }

        pub fn set(objs: *@This(), id: ObjectID, value: T) void {
            const data = &objs.internal.data;
            const dead = &objs.internal.dead;
            const generation = &objs.internal.generation;

            const unpacked: PackedID = @bitCast(id);
            if (unpacked.generation != generation.items[unpacked.index]) {
                @panic("mach: set() called with an object that is no longer valid");
            }
            if (dead.isSet(unpacked.index)) {
                @panic("mach: set() called on a dead object");
            }
            data.set(unpacked.index, value);
        }

        pub fn get(objs: *@This(), id: ObjectID) ?T {
            const data = &objs.internal.data;
            const dead = &objs.internal.dead;
            const generation = &objs.internal.generation;

            const unpacked: PackedID = @bitCast(id);
            if (unpacked.generation != generation.items[unpacked.index]) {
                @panic("mach: get() called with an object that is no longer valid");
            }
            if (dead.isSet(unpacked.index)) {
                @panic("mach: get() called on a dead object");
            }
            return data.get(unpacked.index);
        }

        pub fn delete(objs: *@This(), id: ObjectID) void {
            const data = &objs.internal.data;
            const dead = &objs.internal.dead;
            const generation = &objs.internal.generation;
            const recycling_bin = &objs.internal.recycling_bin;

            // TODO(object): decide whether to disable safety checks like this in some conditions,
            // e.g. in release builds
            const unpacked: PackedID = @bitCast(id);
            if (unpacked.generation != generation.items[unpacked.index]) {
                @panic("mach: delete() called with an object that is no longer valid");
            }
            if (dead.isSet(unpacked.index)) {
                @panic("mach: delete() called on a dead object");
            }

            if (recycling_bin.items.len < recycling_bin.capacity) {
                recycling_bin.appendAssumeCapacity(unpacked.index);
            } else objs.internal.thrown_on_the_floor += 1;

            dead.set(unpacked.index);
            if (mach.is_debug) data.set(unpacked.index, undefined);
        }

        pub fn slice(objs: *@This()) Slice {
            return Slice{
                .index = 0,
                .objs = objs,
            };
        }
    };
}

/// Unique identifier for every module in the program, including those only known at runtime.
pub const ModuleID = u32;
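To make the recycling and generation behavior above concrete, here is a hedged test sketch. It is not part of the commit; the `Window` struct, the import path, and the hand-wired `internal` fields are assumptions for illustration, since in mach it is `Modules.init` (below) that supplies `allocator` and `type_id` for each object list:

```zig
const std = @import("std");
const Objects = @import("module.zig").Objects; // re-exported as mach.Objects

test "deleted slots are recycled and the generation is bumped" {
    // An arena keeps the sketch short; this commit does not add a deinit() for Objects(T).
    var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena.deinit();
    const gpa = arena.allocator();

    const Window = struct { width: u32, height: u32 };
    var windows: Objects(Window) = .{ .internal = .{ .allocator = gpa, .type_id = 0 } };

    const a = try windows.new(.{ .width = 640, .height = 480 });
    windows.delete(a);

    // new() reuses the freed slot: same index, generation incremented by one,
    // so the stale ID `a` no longer compares equal and get(a)/set(a) would panic.
    const b = try windows.new(.{ .width = 1920, .height = 1080 });
    try std.testing.expect(a != b);
    try std.testing.expectEqual(@as(u32, 1920), windows.get(b).?.width);

    // Iteration skips dead slots and yields valid ObjectIDs.
    var iter = windows.slice();
    while (iter.next()) |id| _ = windows.get(id);
}
```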
@@ -86,6 +331,9 @@ pub fn Modules(module_lists: anytype) type {

        mods: ModulesByName(modules),

+        module_names: StringTable = .{},
+        object_names: StringTable = .{},
+
        /// Enum describing all declarations for a given comptime-known module.
        fn ModuleFunctionName(comptime module_name: ModuleName) type {
            const module = @field(ModuleTypesByName(modules){}, @tagName(module_name));

@@ -108,17 +356,28 @@ pub fn Modules(module_lists: anytype) type {
            });
        }

-        pub fn init(allocator: std.mem.Allocator) @This() {
+        pub fn init(allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() {
            var m: @This() = .{
                .mods = undefined,
            };
            inline for (@typeInfo(@TypeOf(m.mods)).Struct.fields) |field| {
                // TODO(objects): module-state-init
-                var mod: @TypeOf(@field(m.mods, field.name)) = undefined;
+                const Mod2 = @TypeOf(@field(m.mods, field.name));
+                var mod: Mod2 = undefined;
+                const module_name_id = try m.module_names.indexOrPut(allocator, @tagName(Mod2.mach_module));
                inline for (@typeInfo(@TypeOf(mod)).Struct.fields) |mod_field| {
                    if (@typeInfo(mod_field.type) == .Struct and @hasDecl(mod_field.type, "IsMachObjects")) {
+                        const object_name_id = try m.module_names.indexOrPut(allocator, mod_field.name);
+
+                        // TODO: use packed struct(TypeID) here. Same thing, just get the type from central location
+                        const object_type_id: u16 = @bitCast(PackedObjectTypeID{
+                            .module_name_id = @intCast(module_name_id),
+                            .object_name_id = @intCast(object_name_id),
+                        });
+
                        @field(mod, mod_field.name).internal = .{
                            .allocator = allocator,
+                            .type_id = object_type_id,
                        };
                    }
                }
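As a quick sanity check of the bit layout used above, here is a standalone sketch. The two packed structs are reproduced from this file purely so the example is self-contained; the canonical definitions are the ones in src/module.zig:

```zig
const std = @import("std");

// Mirrors of the layouts defined in src/module.zig, repeated here only for illustration.
const PackedObjectTypeID = packed struct(u16) { module_name_id: u10, object_name_id: u6 };
const PackedID = packed struct(u64) { type_id: u16, generation: u16, index: u32 };

test "an ObjectID round-trips through its packed representation" {
    const type_id: u16 = @bitCast(PackedObjectTypeID{ .module_name_id = 3, .object_name_id = 1 });
    const id: u64 = @bitCast(PackedID{ .type_id = type_id, .generation = 2, .index = 7 });

    // Any holder of the opaque u64 can recover the slot index, the generation,
    // and which module / object list the object belongs to.
    const unpacked: PackedID = @bitCast(id);
    try std.testing.expectEqual(@as(u32, 7), unpacked.index);
    try std.testing.expectEqual(@as(u16, 2), unpacked.generation);

    const tid: PackedObjectTypeID = @bitCast(unpacked.type_id);
    try std.testing.expectEqual(@as(u10, 3), tid.module_name_id);
    try std.testing.expectEqual(@as(u6, 1), tid.object_name_id);
}
```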