feat: error reporting for global reference usage

This commit is contained in:
Brett Broadhurst 2026-03-19 01:54:33 -06:00
parent 47351cd6f9
commit be297047d1
Failed to generate hash of commit
8 changed files with 401 additions and 371 deletions

View file

@ -12,9 +12,9 @@ arena: std.mem.Allocator,
tree: Ast,
ir: Ir,
constants: std.ArrayListUnmanaged(Compilation.Constant) = .empty,
constant_map: std.AutoHashMapUnmanaged(Compilation.Constant, u32) = .empty,
constants_map: std.AutoHashMapUnmanaged(Compilation.Constant, u32) = .empty,
globals_map: std.AutoHashMapUnmanaged(u32, u32) = .empty,
knots: std.ArrayListUnmanaged(Compilation.Knot) = .empty,
globals: std.ArrayListUnmanaged(u32) = .empty,
errors: *std.ArrayListUnmanaged(Compilation.Error),
const InnerError = error{
@ -34,17 +34,8 @@ const Ref = union(enum) {
local: u32,
};
/// NOTE(review): this is the removal-side `deinit` of a rendered diff (still
/// deinits the old `constant_map`/`globals` fields); the current version with
/// `globals_map` appears later in this file. Real source holds only one.
pub fn deinit(sema: *Sema) void {
    const gpa = sema.gpa;
    sema.constants.deinit(gpa);
    sema.constant_map.deinit(gpa);
    sema.globals.deinit(gpa);
    sema.knots.deinit(gpa);
    // Poison the struct so use-after-deinit trips safety checks.
    sema.* = undefined;
}
/// Location of a diagnostic in the original source text.
/// NOTE(review): the diff renames `byte_offset` -> `src_offset`; both fields
/// appear here only because this is a rendered diff, not the final file.
pub const SrcLoc = struct {
    byte_offset: u32,
    src_offset: u32,
};
fn fail(
@ -55,7 +46,7 @@ fn fail(
) error{ OutOfMemory, AnalysisFail } {
// TODO: Revisit this
const message = try std.fmt.allocPrint(sema.arena, format, args);
const loc = compile.findLineColumn(sema.tree.source, src.byte_offset);
const loc = compile.findLineColumn(sema.tree.source, src.src_offset);
try sema.errors.append(sema.gpa, .{
.line = loc.line,
.column = loc.column,
@ -65,47 +56,247 @@ fn fail(
return error.AnalysisFail;
}
// NOTE(review): the next three lines are the removal-side opening of the old
// `getConstant` (no closing brace); diff residue, not a live function.
fn getConstant(sema: *Sema, data: Compilation.Constant) !Ref {
const gpa = sema.gpa;
if (sema.constant_map.get(data)) |index| {
/// Intern a constant.
/// Returns the existing pool index when `data` was seen before; otherwise
/// appends it to `sema.constants` and records the index in `sema.constants_map`.
fn getOrPutConstant(sema: *Sema, data: Compilation.Constant) error{OutOfMemory}!Ref {
    if (sema.constants_map.get(data)) |index| {
        return .{ .constant = index };
    } else {
        const gpa = sema.gpa;
        const index = sema.constants.items.len;
        try sema.constants.append(gpa, data);
        // NOTE(review): the next line is removal-side residue (old map name);
        // the line after it is the current insertion.
        try sema.constant_map.put(gpa, data, @intCast(index));
        try sema.constants_map.put(gpa, data, @intCast(index));
        return .{ .constant = @intCast(index) };
    }
}
// NOTE(review): removal-side fragment of the old `addGlobal`; no closing brace
// because the rendered diff splices it with the added helpers below.
fn addGlobal(sema: *Sema, name: Ir.NullTerminatedString) !Ref {
const gpa = sema.gpa;
const interned = try sema.getConstant(.{ .string = name });
try sema.globals.append(gpa, interned.constant);
return .{ .global = interned.constant };
/// Intern an integer as a story constant.
fn getOrPutInt(sema: *Sema, value: u64) error{OutOfMemory}!Ref {
    return sema.getOrPutConstant(.{ .integer = value });
}
// NOTE(review): removal-side fragment of the old `getGlobal` (linear scan over
// `ir.globals`), superseded in this commit by the `globals_map` lookup.
fn getGlobal(sema: *Sema, name: Ir.NullTerminatedString) !?Ref {
const interned = try sema.getConstant(.{ .string = name });
for (sema.ir.globals) |global| {
if (global.name == name) {
return .{ .global = interned.constant };
/// Intern a string as a story constant.
fn getOrPutStr(sema: *Sema, value: Ir.NullTerminatedString) error{OutOfMemory}!Ref {
    return sema.getOrPutConstant(.{ .string = value });
}
/// Release every container owned by `sema`, then poison the struct so any
/// use-after-deinit is caught by safety checks.
pub fn deinit(sema: *Sema) void {
    sema.constants.deinit(sema.gpa);
    sema.constants_map.deinit(sema.gpa);
    sema.globals_map.deinit(sema.gpa);
    sema.knots.deinit(sema.gpa);
    sema.* = undefined;
}
/// Per-knot code-generation state: the emitted bytecode plus the bookkeeping
/// (labels, jump fixups, instruction and constant maps) needed to finish it.
const Chunk = struct {
    sema: *Sema,
    knot: *Compilation.Knot,
    labels: std.ArrayListUnmanaged(Label) = .empty,
    fixups: std.ArrayListUnmanaged(Fixup) = .empty,
    // Maps each lowered IR instruction to the Ref it produced.
    inst_map: std.AutoHashMapUnmanaged(Ir.Inst.Index, Ref) = .empty,
    // Maps a story-wide constant index to this knot's local constant slot,
    // so each constant enters the knot's pool at most once.
    constant_map: std.AutoHashMapUnmanaged(u32, u32) = .empty,

    // Sentinel offset for labels that have not been placed yet.
    const dummy_address = 0xffffffff;

    const Label = struct {
        code_offset: usize,
    };

    const Fixup = struct {
        mode: enum {
            relative,
            absolute,
        },
        label_index: u32,
        code_offset: u32,
    };

    fn deinit(chunk: *Chunk, gpa: std.mem.Allocator) void {
        chunk.fixups.deinit(gpa);
        chunk.labels.deinit(gpa);
        chunk.inst_map.deinit(gpa);
        chunk.constant_map.deinit(gpa);
    }

    /// Append a one-byte opcode; returns the byte index it was written at.
    fn addByteOp(chunk: *Chunk, op: Story.Opcode) error{OutOfMemory}!Ref {
        const gpa = chunk.sema.gpa;
        const bytecode = &chunk.knot.bytecode;
        const byte_index = bytecode.items.len;
        try bytecode.append(gpa, @intFromEnum(op));
        return .{ .index = @intCast(byte_index) };
    }

    /// Append an opcode followed by a one-byte operand.
    fn addConstOp(chunk: *Chunk, op: Story.Opcode, arg: u8) error{OutOfMemory}!Ref {
        const gpa = chunk.sema.gpa;
        const bytecode = &chunk.knot.bytecode;
        const byte_index = bytecode.items.len;
        try bytecode.ensureUnusedCapacity(gpa, 2);
        bytecode.appendAssumeCapacity(@intFromEnum(op));
        bytecode.appendAssumeCapacity(arg);
        return .{ .index = @intCast(byte_index) };
    }

    /// Append a jump opcode with a 0xffff placeholder operand; returns the
    /// index of the two operand bytes so a fixup can patch them later.
    fn addJumpOp(chunk: *Chunk, op: Story.Opcode) error{OutOfMemory}!Ref {
        const gpa = chunk.sema.gpa;
        const bytecode = &chunk.knot.bytecode;
        try bytecode.ensureUnusedCapacity(gpa, 3);
        bytecode.appendAssumeCapacity(@intFromEnum(op));
        bytecode.appendAssumeCapacity(0xff);
        bytecode.appendAssumeCapacity(0xff);
        return .{ .index = @intCast(bytecode.items.len - 2) };
    }

    /// Emit a relative jump to `label` and queue it for patching.
    fn addFixup(chunk: *Chunk, op: Story.Opcode, label: usize) !void {
        const code_ref = try chunk.addJumpOp(op);
        return chunk.fixups.append(chunk.sema.gpa, .{
            .mode = .relative,
            .label_index = @intCast(label),
            .code_offset = code_ref.index,
        });
    }

    /// Emit an absolute jump to `label` and queue it for patching.
    fn addFixupAbsolute(chunk: *Chunk, op: Story.Opcode, label: usize) !void {
        const code_ref = try chunk.addJumpOp(op);
        return chunk.fixups.append(chunk.sema.gpa, .{
            .mode = .absolute,
            .label_index = @intCast(label),
            .code_offset = code_ref.index,
        });
    }

    /// Reserve a new, not-yet-placed label and return its index.
    fn addLabel(chunk: *Chunk) error{OutOfMemory}!usize {
        const label_index = chunk.labels.items.len;
        try chunk.labels.append(chunk.sema.gpa, .{
            .code_offset = dummy_address,
        });
        return label_index;
    }

    /// Point `label_index` at the current end of the bytecode.
    fn setLabel(chunk: *Chunk, label_index: usize) void {
        const code_offset = chunk.knot.bytecode.items.len;
        // NOTE(review): `<=` looks off — an index equal to `len` is out of
        // bounds for the indexing on the next line. TODO confirm `<` intended.
        assert(label_index <= chunk.labels.items.len);
        const label_data = &chunk.labels.items[label_index];
        label_data.code_offset = code_offset;
    }

    /// Map an IR ref to the Ref recorded when it was lowered; boolean literals
    /// and interned constants resolve without a map lookup.
    fn resolveInst(chunk: *Chunk, ref: Ir.Inst.Ref) Ref {
        if (ref.toIndex()) |index| {
            return chunk.inst_map.get(index).?;
        }
        switch (ref) {
            .bool_true => return .bool_true,
            .bool_false => return .bool_false,
            else => return .{ .constant = @intFromEnum(ref) },
        }
    }
    // NOTE(review): stray statement — removal-side residue of this rendered
    // diff (from the old `getGlobal`); not valid Zig inside a struct body.
    return null;

    /// Patch every queued fixup with its label's final offset (big-endian u16).
    fn resolveLabels(chunk: *Chunk) !void {
        const start_index = 0;
        const end_index = chunk.fixups.items.len;
        const bytecode = &chunk.knot.bytecode;
        for (chunk.fixups.items[start_index..end_index]) |fixup| {
            const label = chunk.labels.items[fixup.label_index];
            assert(label.code_offset != dummy_address);
            const target_offset: usize = switch (fixup.mode) {
                // Relative jumps are measured from just past the 2 operand bytes.
                .relative => label.code_offset - fixup.code_offset - 2,
                .absolute => label.code_offset,
            };
            if (target_offset >= std.math.maxInt(u16)) {
                std.debug.print("Too much code to jump over!\n", .{});
                return error.InvalidJump;
            }
            assert(bytecode.capacity >= label.code_offset + 2);
            bytecode.items[fixup.code_offset] = @intCast((target_offset >> 8) & 0xff);
            bytecode.items[fixup.code_offset + 1] = @intCast(target_offset & 0xff);
        }
    }

    /// Intern a reference to a global constant within this chunk.
    fn getOrPutConstantIndex(chunk: *Chunk, global_index: u32) !u32 {
        const gpa = chunk.sema.gpa;
        if (chunk.constant_map.get(global_index)) |local_index| return local_index;
        const local_index: u32 = @intCast(chunk.knot.constants.items.len);
        try chunk.knot.constants.append(gpa, global_index);
        try chunk.constant_map.put(gpa, global_index, local_index);
        return local_index;
    }

    /// Emit the load instruction that pushes `ref` onto the stack; `.none`
    /// and `.index` refs produce no code and are returned unchanged.
    fn doLoad(chunk: *Chunk, ref: Ref) InnerError!Ref {
        switch (ref) {
            .bool_true => return chunk.addByteOp(.true),
            .bool_false => return chunk.addByteOp(.false),
            .none => return ref,
            .constant => |global_index| {
                const local_index = try chunk.getOrPutConstantIndex(global_index);
                return chunk.addConstOp(.load_const, @intCast(local_index));
            },
            .global => |global_index| {
                const local_index = try chunk.getOrPutConstantIndex(global_index);
                return chunk.addConstOp(.load_global, @intCast(local_index));
            },
            .local => |id| return chunk.addConstOp(.load, @intCast(id)),
            .index => return ref,
        }
    }
};
/// Emit the load for one arithmetic operand. Variable globals and constants
/// are loaded through the chunk's local constant pool; a knot used as an
/// operand is reported as an error at `arg_src`. Other refs emit nothing.
fn analyzeArithmeticArg(
    sema: *Sema,
    chunk: *Chunk,
    arg: Ref,
    arg_src: SrcLoc,
) !void {
    switch (arg) {
        .global => |global_index| switch (sema.ir.globals[global_index].tag) {
            .variable => {
                const name_ref = try sema.getOrPutStr(sema.ir.globals[global_index].name);
                const slot = try chunk.getOrPutConstantIndex(name_ref.constant);
                _ = try chunk.addConstOp(.load_global, @intCast(slot));
            },
            .knot => return fail(sema, arg_src, "invalid operand to arithmetic expression", .{}),
        },
        .constant => |constant_index| {
            const slot = try chunk.getOrPutConstantIndex(constant_index);
            _ = try chunk.addConstOp(.load_const, @intCast(slot));
        },
        else => {},
    }
}
/// Lower a divert callee. A knot global becomes a `load_global` of its
/// interned name; a variable global is reported as an invalid divert target.
/// Non-global callees are unreachable here.
fn analyzeDivertTarget(sema: *Sema, chunk: *Chunk, src: SrcLoc, callee: Ref) !Ref {
    switch (callee) {
        .global => |gi| {
            const global = sema.ir.globals[gi];
            if (global.tag == .variable) {
                return fail(sema, src, "invalid divert target", .{});
            }
            const name_ref = try sema.getOrPutStr(global.name);
            const slot = try chunk.getOrPutConstantIndex(name_ref.constant);
            return chunk.addConstOp(.load_global, @intCast(slot));
        },
        else => unreachable,
    }
}
/// Lower an IR integer literal by interning it into the constant pool.
fn irInteger(sema: *Sema, inst: Ir.Inst.Index) InnerError!Ref {
    const value = sema.ir.instructions[@intFromEnum(inst)].data.int;
    // NOTE(review): the next line is removal-side diff residue (old helper
    // name); the line after it is the current call.
    return sema.getConstant(.{ .integer = value });
    return sema.getOrPutConstant(.{ .integer = value });
}
/// Lower an IR string literal by interning its start offset as a constant.
fn irString(sema: *Sema, inst: Ir.Inst.Index) InnerError!Ref {
    const data = sema.ir.instructions[@intFromEnum(inst)].data.str;
    // NOTE(review): the next line is removal-side diff residue (old helper
    // name); the line after it is the current call.
    return sema.getConstant(.{ .string = data.start });
    return sema.getOrPutStr(data.start);
}
fn irUnary(
fn irUnaryOp(
sema: *Sema,
chunk: *Chunk,
inst: Ir.Inst.Index,
@ -113,11 +304,11 @@ fn irUnary(
) InnerError!Ref {
const data = sema.ir.instructions[@intFromEnum(inst)].data.un;
const lhs = chunk.resolveInst(data.lhs);
_ = try chunk.doLoad(lhs);
try sema.analyzeArithmeticArg(chunk, lhs, .{ .src_offset = 0 });
return chunk.addByteOp(op);
}
fn irBinary(
fn irBinaryOp(
sema: *Sema,
chunk: *Chunk,
inst: Ir.Inst.Index,
@ -127,8 +318,8 @@ fn irBinary(
const lhs = chunk.resolveInst(data.lhs);
const rhs = chunk.resolveInst(data.rhs);
_ = try chunk.doLoad(lhs);
_ = try chunk.doLoad(rhs);
try sema.analyzeArithmeticArg(chunk, lhs, .{ .src_offset = 0 });
try sema.analyzeArithmeticArg(chunk, rhs, .{ .src_offset = 0 });
return chunk.addByteOp(op);
}
@ -165,7 +356,7 @@ fn irLoad(sema: *Sema, chunk: *Chunk, inst: Ir.Inst.Index) InnerError!Ref {
fn irCondBr(sema: *Sema, chunk: *Chunk, inst: Ir.Inst.Index) InnerError!Ref {
const data = sema.ir.instructions[@intFromEnum(inst)].data.payload;
const extra = sema.ir.extraData(Ir.Inst.CondBr, data.payload_index);
const extra = sema.ir.extraData(Ir.Inst.CondBr, data.extra_index);
const then_body = sema.ir.bodySlice(extra.end, extra.data.then_body_len);
const else_body = sema.ir.bodySlice(extra.end + then_body.len, extra.data.else_body_len);
const else_label = try chunk.addLabel();
@ -193,14 +384,14 @@ fn irBreak(sema: *Sema, inst: Ir.Inst.Index) InnerError!void {
/// Lower a `block` instruction by lowering each instruction in its body.
fn irBlock(sema: *Sema, chunk: *Chunk, inst: Ir.Inst.Index) InnerError!void {
    const data = sema.ir.instructions[@intFromEnum(inst)].data.payload;
    // NOTE(review): removal/addition pair from the rendered diff
    // (`payload_index` -> `extra_index`); only one exists in real source.
    const extra = sema.ir.extraData(Ir.Inst.Block, data.payload_index);
    const extra = sema.ir.extraData(Ir.Inst.Block, data.extra_index);
    const body = sema.ir.bodySlice(extra.end, extra.data.body_len);
    return blockBodyInner(sema, chunk, body);
}
fn irSwitchBr(sema: *Sema, chunk: *Chunk, inst: Ir.Inst.Index) InnerError!void {
const data = sema.ir.instructions[@intFromEnum(inst)].data.payload;
const extra = sema.ir.extraData(Ir.Inst.SwitchBr, data.payload_index);
const extra = sema.ir.extraData(Ir.Inst.SwitchBr, data.extra_index);
const cases_slice = sema.ir.bodySlice(extra.end, extra.data.cases_len);
var case_labels: std.ArrayListUnmanaged(usize) = .empty;
@ -264,7 +455,7 @@ fn irContentFlush(_: *Sema, chunk: *Chunk, _: Ir.Inst.Index) InnerError!Ref {
fn irChoiceBr(sema: *Sema, chunk: *Chunk, inst: Ir.Inst.Index) InnerError!void {
const data = sema.ir.instructions[@intFromEnum(inst)].data.payload;
const choice_extra = sema.ir.extraData(Ir.Inst.ChoiceBr, data.payload_index);
const choice_extra = sema.ir.extraData(Ir.Inst.ChoiceBr, data.extra_index);
const options_slice = sema.ir.bodySlice(choice_extra.end, choice_extra.data.cases_len);
var branch_labels: std.ArrayListUnmanaged(usize) = .empty;
@ -332,20 +523,13 @@ fn irImplicitRet(_: *Sema, chunk: *Chunk, _: Ir.Inst.Index) InnerError!Ref {
return chunk.addByteOp(.exit);
}
/// NOTE(review): removal-side function of this rendered diff — the old global
/// lookup path (linear scan via `getGlobal`), replaced in this commit by the
/// direct `globals_map` lookup in `irDeclRef`.
fn resolveGlobal(
    sema: *Sema,
    byte_offset: u32,
    global_name: Ir.NullTerminatedString,
) !Ref {
    if (try sema.getGlobal(global_name)) |global| {
        return global;
    }
    return fail(sema, .{ .byte_offset = byte_offset }, "unknown global variable", .{});
}
/// Resolve a declaration reference: intern the referenced name and look it up
/// in `globals_map`; an unknown name produces an "unknown global variable"
/// diagnostic at the reference's source offset.
fn irDeclRef(sema: *Sema, _: *Chunk, inst: Ir.Inst.Index) InnerError!Ref {
    const data = sema.ir.instructions[@intFromEnum(inst)].data.str_tok;
    // NOTE(review): the next line is removal-side diff residue — the old call
    // into `resolveGlobal`.
    return resolveGlobal(sema, data.src_offset, data.start);
    const str = try sema.getOrPutStr(data.start);
    if (sema.globals_map.get(str.constant)) |global_index| {
        return .{ .global = global_index };
    }
    return fail(sema, .{ .src_offset = data.src_offset }, "unknown global variable", .{});
}
fn irDeclVar(
@ -355,7 +539,7 @@ fn irDeclVar(
inst: Ir.Inst.Index,
) InnerError!void {
const data = sema.ir.instructions[@intFromEnum(inst)].data.payload;
const extra = sema.ir.extraData(Ir.Inst.Block, data.payload_index);
const extra = sema.ir.extraData(Ir.Inst.Block, data.extra_index);
const body = sema.ir.bodySlice(extra.end, extra.data.body_len);
try blockBodyInner(sema, chunk, body);
// FIXME: hack
@ -364,8 +548,8 @@ fn irDeclVar(
const val = chunk.resolveInst(last_inst);
_ = try chunk.doLoad(val);
}
const global = try sema.addGlobal(name);
_ = try chunk.addConstOp(.store_global, @intCast(global.global));
const interned_str = try sema.getOrPutStr(name);
_ = try chunk.addConstOp(.store_global, @intCast(interned_str.constant));
_ = try chunk.addByteOp(.pop);
}
@ -376,7 +560,7 @@ fn irDeclKnot(
) InnerError!void {
const gpa = sema.gpa;
const data = sema.ir.instructions[@intFromEnum(inst)].data.payload;
const extra = sema.ir.extraData(Ir.Inst.Knot, data.payload_index);
const extra = sema.ir.extraData(Ir.Inst.Knot, data.extra_index);
var knot: Compilation.Knot = .{
.name = name_ref,
@ -398,7 +582,7 @@ fn irDeclKnot(
fn irDeclaration(sema: *Sema, parent_chunk: ?*Chunk, inst: Ir.Inst.Index) !void {
const data = sema.ir.instructions[@intFromEnum(inst)].data.payload;
const extra = sema.ir.extraData(Ir.Inst.Declaration, data.payload_index).data;
const extra = sema.ir.extraData(Ir.Inst.Declaration, data.extra_index).data;
const value_data = sema.ir.instructions[@intFromEnum(extra.value)];
switch (value_data.tag) {
.decl_var => try irDeclVar(sema, parent_chunk.?, extra.name, extra.value),
@ -422,16 +606,16 @@ fn irDivert(
.field => Ir.Inst.FieldCall,
};
const data = sema.ir.instructions[@intFromEnum(inst)].data.payload;
const extra = sema.ir.extraData(ExtraType, data.payload_index);
const args_len = extra.data.args_len;
const extra = sema.ir.extraData(ExtraType, data.extra_index);
const body = sema.ir.extra[extra.end..];
const callee = switch (kind) {
.direct => chunk.resolveInst(extra.data.callee),
.field => chunk.resolveInst(extra.data.obj_ptr),
};
_ = try chunk.doLoad(callee);
const callee_src: SrcLoc = .{ .src_offset = data.src_offset };
_ = try analyzeDivertTarget(sema, chunk, callee_src, callee);
const args_len = extra.data.args_len;
var arg_start: u32 = args_len;
var i: u32 = 0;
while (i < args_len) : (i += 1) {
@ -487,23 +671,23 @@ fn blockBodyInner(sema: *Sema, chunk: *Chunk, body: []const Ir.Inst.Index) Inner
continue;
},
.load => try irLoad(sema, chunk, inst),
.add => try irBinary(sema, chunk, inst, .add),
.sub => try irBinary(sema, chunk, inst, .sub),
.mul => try irBinary(sema, chunk, inst, .mul),
.div => try irBinary(sema, chunk, inst, .div),
.mod => try irBinary(sema, chunk, inst, .mod),
.neg => try irUnary(sema, chunk, inst, .neg),
.not => try irUnary(sema, chunk, inst, .not),
.cmp_eq => try irBinary(sema, chunk, inst, .cmp_eq),
.add => try irBinaryOp(sema, chunk, inst, .add),
.sub => try irBinaryOp(sema, chunk, inst, .sub),
.mul => try irBinaryOp(sema, chunk, inst, .mul),
.div => try irBinaryOp(sema, chunk, inst, .div),
.mod => try irBinaryOp(sema, chunk, inst, .mod),
.neg => try irUnaryOp(sema, chunk, inst, .neg),
.not => try irUnaryOp(sema, chunk, inst, .not),
.cmp_eq => try irBinaryOp(sema, chunk, inst, .cmp_eq),
.cmp_neq => blk: {
_ = try irBinary(sema, chunk, inst, .cmp_eq);
_ = try irBinaryOp(sema, chunk, inst, .cmp_eq);
const tmp = try chunk.addByteOp(.not);
break :blk tmp;
},
.cmp_lt => try irBinary(sema, chunk, inst, .cmp_lt),
.cmp_lte => try irBinary(sema, chunk, inst, .cmp_lte),
.cmp_gt => try irBinary(sema, chunk, inst, .cmp_gt),
.cmp_gte => try irBinary(sema, chunk, inst, .cmp_gte),
.cmp_lt => try irBinaryOp(sema, chunk, inst, .cmp_lt),
.cmp_lte => try irBinaryOp(sema, chunk, inst, .cmp_lte),
.cmp_gt => try irBinaryOp(sema, chunk, inst, .cmp_gt),
.cmp_gte => try irBinaryOp(sema, chunk, inst, .cmp_gte),
.decl_ref => try irDeclRef(sema, chunk, inst),
.int => try irInteger(sema, inst),
.str => try irString(sema, inst),
@ -542,162 +726,23 @@ fn blockBodyInner(sema: *Sema, chunk: *Chunk, body: []const Ir.Inst.Index) Inner
/// Entry point: lower the file-level IR block. Seeds the constant pool with
/// common integers, builds `globals_map` (interned global name constant ->
/// index into `ir.globals`), then lowers each top-level declaration.
pub fn analyzeFile(sema: *Sema, inst: Ir.Inst.Index) InnerError!void {
    const data = sema.ir.instructions[@intFromEnum(inst)].data.payload;
    // NOTE(review): the next two `extra` lines are the removal/addition pair
    // of a rendered diff (`payload_index` -> `extra_index`).
    const extra = sema.ir.extraData(Ir.Inst.Block, data.payload_index);
    const extra = sema.ir.extraData(Ir.Inst.Block, data.extra_index);
    const body = sema.ir.bodySlice(extra.end, extra.data.body_len);
    // FIXME: We are going to get burned by this if we don't formalize it.
    // Adding common constants to the constant pool.
    // NOTE(review): the next two `getConstant` calls are removal-side diff
    // residue, superseded by the `static_constants` loop below.
    _ = try sema.getConstant(.{ .integer = 0 });
    _ = try sema.getConstant(.{ .integer = 1 });
    const static_constants = &[_]Compilation.Constant{
        .{ .integer = 0 },
        .{ .integer = 1 },
    };
    for (static_constants) |sc| {
        _ = try sema.getOrPutConstant(sc);
    }
    // One map entry per IR global, keyed by its interned name constant.
    try sema.globals_map.ensureUnusedCapacity(sema.gpa, @intCast(sema.ir.globals.len));
    for (sema.ir.globals, 0..) |global, global_index| {
        const interned = try sema.getOrPutStr(global.name);
        sema.globals_map.putAssumeCapacity(interned.constant, @intCast(global_index));
    }
    for (body) |body_index| try irDeclaration(sema, null, body_index);
}
// NOTE(review): removal-side version of `Chunk` from this rendered diff; the
// added version (with `constant_map` constant de-duplication) appears earlier
// in this file. A real source file would contain only one of the two.
const Chunk = struct {
    sema: *Sema,
    knot: *Compilation.Knot,
    labels: std.ArrayListUnmanaged(Label) = .empty,
    fixups: std.ArrayListUnmanaged(Fixup) = .empty,
    // Maps each lowered IR instruction to the Ref it produced.
    inst_map: std.AutoHashMapUnmanaged(Ir.Inst.Index, Ref) = .empty,

    // Sentinel offset for labels that have not been placed yet.
    const dummy_address = 0xffffffff;

    const Label = struct {
        code_offset: usize,
    };

    const Fixup = struct {
        mode: enum {
            relative,
            absolute,
        },
        label_index: u32,
        code_offset: u32,
    };

    fn deinit(chunk: *Chunk, gpa: std.mem.Allocator) void {
        chunk.fixups.deinit(gpa);
        chunk.labels.deinit(gpa);
        chunk.inst_map.deinit(gpa);
    }

    /// Append a one-byte opcode; returns the byte index it was written at.
    fn addByteOp(chunk: *Chunk, op: Story.Opcode) error{OutOfMemory}!Ref {
        const gpa = chunk.sema.gpa;
        const bytecode = &chunk.knot.bytecode;
        const byte_index = bytecode.items.len;
        try bytecode.append(gpa, @intFromEnum(op));
        return .{ .index = @intCast(byte_index) };
    }

    /// Append an opcode followed by a one-byte operand.
    fn addConstOp(chunk: *Chunk, op: Story.Opcode, arg: u8) error{OutOfMemory}!Ref {
        const gpa = chunk.sema.gpa;
        const bytecode = &chunk.knot.bytecode;
        const byte_index = bytecode.items.len;
        try bytecode.ensureUnusedCapacity(gpa, 2);
        bytecode.appendAssumeCapacity(@intFromEnum(op));
        bytecode.appendAssumeCapacity(arg);
        return .{ .index = @intCast(byte_index) };
    }

    /// Append a jump opcode with a 0xffff placeholder operand; returns the
    /// index of the two operand bytes so a fixup can patch them later.
    fn addJumpOp(chunk: *Chunk, op: Story.Opcode) error{OutOfMemory}!Ref {
        const gpa = chunk.sema.gpa;
        const bytecode = &chunk.knot.bytecode;
        try bytecode.ensureUnusedCapacity(gpa, 3);
        bytecode.appendAssumeCapacity(@intFromEnum(op));
        bytecode.appendAssumeCapacity(0xff);
        bytecode.appendAssumeCapacity(0xff);
        return .{ .index = @intCast(bytecode.items.len - 2) };
    }

    /// Map an IR ref to the Ref recorded when it was lowered.
    fn resolveInst(chunk: *Chunk, ref: Ir.Inst.Ref) Ref {
        if (ref.toIndex()) |index| {
            return chunk.inst_map.get(index).?;
        }
        switch (ref) {
            .bool_true => return .bool_true,
            .bool_false => return .bool_false,
            else => return .{ .constant = @intFromEnum(ref) },
        }
    }

    /// Emit a relative jump to `label` and queue it for patching.
    fn addFixup(chunk: *Chunk, op: Story.Opcode, label: usize) !void {
        const code_ref = try chunk.addJumpOp(op);
        return chunk.fixups.append(chunk.sema.gpa, .{
            .mode = .relative,
            .label_index = @intCast(label),
            .code_offset = code_ref.index,
        });
    }

    /// Emit an absolute jump to `label` and queue it for patching.
    fn addFixupAbsolute(chunk: *Chunk, op: Story.Opcode, label: usize) !void {
        const code_ref = try chunk.addJumpOp(op);
        return chunk.fixups.append(chunk.sema.gpa, .{
            .mode = .absolute,
            .label_index = @intCast(label),
            .code_offset = code_ref.index,
        });
    }

    /// Reserve a new, not-yet-placed label and return its index.
    fn addLabel(chunk: *Chunk) error{OutOfMemory}!usize {
        const label_index = chunk.labels.items.len;
        try chunk.labels.append(chunk.sema.gpa, .{
            .code_offset = dummy_address,
        });
        return label_index;
    }

    /// Point `label_index` at the current end of the bytecode.
    fn setLabel(chunk: *Chunk, label_index: usize) void {
        const code_offset = chunk.knot.bytecode.items.len;
        // NOTE(review): `<=` looks off — an index equal to `len` is out of
        // bounds for the indexing on the next line. TODO confirm `<` intended.
        assert(label_index <= chunk.labels.items.len);
        const label_data = &chunk.labels.items[label_index];
        label_data.code_offset = code_offset;
    }

    /// Patch every queued fixup with its label's final offset (big-endian u16).
    fn resolveLabels(chunk: *Chunk) !void {
        const start_index = 0;
        const end_index = chunk.fixups.items.len;
        const bytecode = &chunk.knot.bytecode;
        for (chunk.fixups.items[start_index..end_index]) |fixup| {
            const label = chunk.labels.items[fixup.label_index];
            assert(label.code_offset != dummy_address);
            const target_offset: usize = switch (fixup.mode) {
                .relative => label.code_offset - fixup.code_offset - 2,
                .absolute => label.code_offset,
            };
            if (target_offset >= std.math.maxInt(u16)) {
                std.debug.print("Too much code to jump over!\n", .{});
                return error.InvalidJump;
            }
            assert(bytecode.capacity >= label.code_offset + 2);
            bytecode.items[fixup.code_offset] = @intCast((target_offset >> 8) & 0xff);
            bytecode.items[fixup.code_offset + 1] = @intCast(target_offset & 0xff);
        }
    }

    /// Emit the load instruction that pushes `ref` onto the stack.
    fn doLoad(chunk: *Chunk, ref: Ref) InnerError!Ref {
        const gpa = chunk.sema.gpa;
        switch (ref) {
            .bool_true => return chunk.addByteOp(.true),
            .bool_false => return chunk.addByteOp(.false),
            .none => return ref,
            .constant => |id| {
                // TODO: This isn't great. New constant indexes are
                // created each time.
                const ref_const = chunk.knot.constants.items.len;
                try chunk.knot.constants.append(gpa, id);
                return chunk.addConstOp(.load_const, @intCast(ref_const));
            },
            .global => |id| {
                // TODO: This isn't great. New constant indexes are
                // created each time.
                const ref_const = chunk.knot.constants.items.len;
                try chunk.knot.constants.append(gpa, id);
                return chunk.addConstOp(.load_global, @intCast(ref_const));
            },
            .local => |id| return chunk.addConstOp(.load, @intCast(id)),
            .index => return ref,
        }
    }
};