beginning of haku2: a reimplementation of haku in Zig
the goal is to rewrite haku completely, starting with the VM, since it was the most obvious point of improvement: Rust is kinda too verbose for low level stuff like this. compare the line counts between haku1's and haku2's VMs, and how verbose those lines are; it's kind of an insane difference. it also feels like Zig's compilation model can work better for small wasm binary sizes. and of course, I also just wanted an excuse to try out Zig :3
parent 598c0348f6
commit 01d4514a65
19 changed files with 1946 additions and 11 deletions

crates/haku2/src/haku2.zig (new file, +100)
@@ -0,0 +1,100 @@
const std = @import("std");
const mem = std.mem;

const bytecode = @import("bytecode.zig");
const Scratch = @import("scratch.zig");
const value = @import("value.zig");
const Vm = @import("vm.zig");

const hostAllocator = @import("allocator.zig").hostAllocator;

// Scratch

export fn haku2_scratch_new(max: usize) ?*Scratch {
    return Scratch.create(hostAllocator, max) catch return null;
}

export fn haku2_scratch_destroy(scratch: *Scratch) void {
    scratch.destroy(hostAllocator);
}

/// Frees everything allocated from the scratch space at once, by rewinding
/// its fixed-buffer allocator back to the start of the buffer.
export fn haku2_scratch_reset(scratch: *Scratch) void {
    scratch.fixedBuffer = std.heap.FixedBufferAllocator.init(scratch.buffer);
}

// Limits

export fn haku2_limits_new() ?*Vm.Limits {
    return hostAllocator.create(Vm.Limits) catch null;
}

export fn haku2_limits_destroy(limits: *Vm.Limits) void {
    hostAllocator.destroy(limits);
}

export fn haku2_limits_set_stack_capacity(limits: *Vm.Limits, new: usize) void {
    limits.stack_capacity = new;
}

export fn haku2_limits_set_call_stack_capacity(limits: *Vm.Limits, new: usize) void {
    limits.call_stack_capacity = new;
}

export fn haku2_limits_set_fuel(limits: *Vm.Limits, new: u32) void {
    limits.fuel = new;
}

// Defs

/// Parses the defs and tags strings into a `Defs` table allocated with the
/// host allocator. Returns null if parsing fails or memory runs out.
export fn haku2_defs_parse(
    defs_string: [*]const u8,
    defs_len: usize,
    tags_string: [*]const u8,
    tags_len: usize,
) ?*bytecode.Defs {
    return bytecode.Defs.parse(
        hostAllocator,
        defs_string[0..defs_len],
        tags_string[0..tags_len],
    ) catch null;
}

export fn haku2_defs_destroy(defs: *bytecode.Defs) void {
    defs.destroy(hostAllocator);
}

// VM

export fn haku2_vm_new(s: *Scratch, defs: *const bytecode.Defs, limits: *const Vm.Limits) ?*Vm {
    const vm = hostAllocator.create(Vm) catch return null;

    vm.* = Vm.init(s.allocator(), defs, limits) catch {
        // `catch return null` is a normal return, not an error return, so an
        // errdefer would never fire here; free the allocation by hand instead.
        hostAllocator.destroy(vm);
        return null;
    };

    return vm;
}

export fn haku2_vm_run_main(
    vm: *Vm,
    scratch: *Scratch,
    code: [*]const u8,
    code_len: usize,
    local_count: u8,
) bool {
    // Wrap the raw bytecode in a zero-parameter closure and run it as the
    // program's entry point.
    const chunk = bytecode.Chunk{
        .bytecode = code[0..code_len],
    };
    const closure = value.Closure{
        .chunk = &chunk,
        .start = 0,
        .param_count = 0,
        .local_count = local_count,
        .captures = &[_]value.Value{},
    };
    vm.run(scratch.allocator(), &closure, vm.stack_top) catch return false;
    return true;
}

export fn haku2_vm_destroy(vm: *Vm) void {
    hostAllocator.destroy(vm);
}
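
for reference, here's roughly how a host would drive this surface end to end. this is a hypothetical sketch, not part of the commit: `defs_str`, `tags_str`, `code`, `local_count`, and all the limit values stand in for data that would come from haku's compiler and the host's configuration.

// Hypothetical host-side usage of the exports above (not part of the commit).
fn exampleRun(defs_str: []const u8, tags_str: []const u8, code: []const u8, local_count: u8) !void {
    const scratch = haku2_scratch_new(1024 * 1024) orelse return error.OutOfMemory;
    defer haku2_scratch_destroy(scratch);

    const limits = haku2_limits_new() orelse return error.OutOfMemory;
    defer haku2_limits_destroy(limits);
    // haku2_limits_new does not initialize the fields, so set all of them.
    haku2_limits_set_stack_capacity(limits, 1024);
    haku2_limits_set_call_stack_capacity(limits, 256);
    haku2_limits_set_fuel(limits, 65536);

    const defs = haku2_defs_parse(defs_str.ptr, defs_str.len, tags_str.ptr, tags_str.len) orelse
        return error.ParseFailed;
    defer haku2_defs_destroy(defs);

    const vm = haku2_vm_new(scratch, defs, limits) orelse return error.OutOfMemory;
    defer haku2_vm_destroy(vm);

    // false means the VM bailed out: a runtime error, or a blown limit.
    if (!haku2_vm_run_main(vm, scratch, code.ptr, code.len, local_count))
        return error.RunFailed;
}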