diff --git a/CMakeLists.txt b/CMakeLists.txt index 47bd0da03046..9f75c51dd74d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -643,9 +643,8 @@ set(ZIG_STAGE2_SOURCES src/link/StringTable.zig src/link/Wasm.zig src/link/Wasm/Archive.zig + src/link/Wasm/Flush.zig src/link/Wasm/Object.zig - src/link/Wasm/Symbol.zig - src/link/Wasm/ZigObject.zig src/link/aarch64.zig src/link/riscv.zig src/link/table_section.zig diff --git a/build.zig b/build.zig index c6f95ca80d36..dcc84509f9a8 100644 --- a/build.zig +++ b/build.zig @@ -447,6 +447,7 @@ pub fn build(b: *std.Build) !void { .skip_single_threaded = skip_single_threaded, .skip_non_native = skip_non_native, .skip_libc = skip_libc, + .use_llvm = use_llvm, .max_rss = 1 * 1024 * 1024 * 1024, })); @@ -462,6 +463,7 @@ pub fn build(b: *std.Build) !void { .skip_single_threaded = true, .skip_non_native = skip_non_native, .skip_libc = skip_libc, + .use_llvm = use_llvm, })); test_modules_step.dependOn(tests.addModuleTests(b, .{ @@ -476,6 +478,7 @@ pub fn build(b: *std.Build) !void { .skip_single_threaded = true, .skip_non_native = skip_non_native, .skip_libc = true, + .use_llvm = use_llvm, .no_builtin = true, })); @@ -491,6 +494,7 @@ pub fn build(b: *std.Build) !void { .skip_single_threaded = true, .skip_non_native = skip_non_native, .skip_libc = true, + .use_llvm = use_llvm, .no_builtin = true, })); @@ -506,6 +510,7 @@ pub fn build(b: *std.Build) !void { .skip_single_threaded = skip_single_threaded, .skip_non_native = skip_non_native, .skip_libc = skip_libc, + .use_llvm = use_llvm, // I observed a value of 4572626944 on the M2 CI. .max_rss = 5029889638, })); diff --git a/lib/std/Build/Step/CheckObject.zig b/lib/std/Build/Step/CheckObject.zig index 00ea5013dea2..289a4ff4f8b4 100644 --- a/lib/std/Build/Step/CheckObject.zig +++ b/lib/std/Build/Step/CheckObject.zig @@ -2424,7 +2424,22 @@ const WasmDumper = struct { } var output = std.ArrayList(u8).init(gpa); - errdefer output.deinit(); + defer output.deinit(); + parseAndDumpInner(step, check, bytes, &fbs, &output) catch |err| switch (err) { + error.EndOfStream => try output.appendSlice("\n"), + else => |e| return e, + }; + return output.toOwnedSlice(); + } + + fn parseAndDumpInner( + step: *Step, + check: Check, + bytes: []const u8, + fbs: *std.io.FixedBufferStream([]const u8), + output: *std.ArrayList(u8), + ) !void { + const reader = fbs.reader(); const writer = output.writer(); switch (check.kind) { @@ -2442,8 +2457,6 @@ const WasmDumper = struct { else => return step.fail("invalid check kind for Wasm file format: {s}", .{@tagName(check.kind)}), } - - return output.toOwnedSlice(); } fn parseAndDumpSection( @@ -2682,7 +2695,7 @@ const WasmDumper = struct { else => unreachable, } const end_opcode = try std.leb.readUleb128(u8, reader); - if (end_opcode != std.wasm.opcode(.end)) { + if (end_opcode != @intFromEnum(std.wasm.Opcode.end)) { return step.fail("expected 'end' opcode in init expression", .{}); } } diff --git a/lib/std/Target.zig b/lib/std/Target.zig index 95aef4a7c7bc..accb00098dd2 100644 --- a/lib/std/Target.zig +++ b/lib/std/Target.zig @@ -1219,6 +1219,12 @@ pub const Cpu = struct { } else true; } + pub fn count(set: Set) std.math.IntFittingRange(0, needed_bit_count) { + var sum: usize = 0; + for (set.ints) |x| sum += @popCount(x); + return @intCast(sum); + } + pub fn isEnabled(set: Set, arch_feature_index: Index) bool { const usize_index = arch_feature_index / @bitSizeOf(usize); const bit_index: ShiftInt = @intCast(arch_feature_index % @bitSizeOf(usize)); diff --git a/lib/std/Thread.zig 
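Editor's note: the new `std.Target.Cpu.Feature.Set.count` above counts enabled features by summing `@popCount` over the backing words. A minimal standalone sketch of that popcount-over-words pattern; `BitSet64x4` is a made-up type used only for illustration, not part of the patch:

const std = @import("std");

/// Hypothetical fixed-size bit set backed by an array of words,
/// mirroring how Target.Cpu.Feature.Set stores its bits.
const BitSet64x4 = struct {
    ints: [4]u64 = .{ 0, 0, 0, 0 },

    fn count(set: BitSet64x4) usize {
        var sum: usize = 0;
        // Summing the per-word popcounts gives the total number of set bits.
        for (set.ints) |x| sum += @popCount(x);
        return sum;
    }
};

test "count set bits across words" {
    var set: BitSet64x4 = .{};
    set.ints[0] = 0b1011; // 3 bits
    set.ints[3] = 1 << 63; // 1 bit
    try std.testing.expectEqual(@as(usize, 4), set.count());
}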
b/lib/std/Thread.zig index aa21a8a0ea6e..69dbcf39477c 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -1018,12 +1018,15 @@ const WasiThreadImpl = struct { return .{ .thread = &instance.thread }; } - /// Bootstrap procedure, called by the host environment after thread creation. - export fn wasi_thread_start(tid: i32, arg: *Instance) void { - if (builtin.single_threaded) { - // ensure function is not analyzed in single-threaded mode - return; + comptime { + if (!builtin.single_threaded) { + @export(wasi_thread_start, .{ .name = "wasi_thread_start" }); } + } + + /// Called by the host environment after thread creation. + fn wasi_thread_start(tid: i32, arg: *Instance) callconv(.c) void { + comptime assert(!builtin.single_threaded); __set_stack_pointer(arg.thread.memory.ptr + arg.stack_offset); __wasm_init_tls(arg.thread.memory.ptr + arg.tls_offset); @atomicStore(u32, &WasiThreadImpl.tls_thread_id, @intCast(tid), .seq_cst); diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig index 4e18aecf9def..22c736d0a5c0 100644 --- a/lib/std/array_hash_map.zig +++ b/lib/std/array_hash_map.zig @@ -641,10 +641,13 @@ pub fn ArrayHashMapUnmanaged( return self; } + /// An empty `value_list` may be passed, in which case the values array becomes `undefined`. pub fn reinit(self: *Self, gpa: Allocator, key_list: []const K, value_list: []const V) Oom!void { try self.entries.resize(gpa, key_list.len); @memcpy(self.keys(), key_list); - if (@sizeOf(V) != 0) { + if (value_list.len == 0) { + @memset(self.values(), undefined); + } else { assert(key_list.len == value_list.len); @memcpy(self.values(), value_list); } diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index 197b8c7fba33..5eb527e74204 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -267,8 +267,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { /// Never invalidates element pointers. /// Asserts that the list can hold one additional item. pub fn appendAssumeCapacity(self: *Self, item: T) void { - const new_item_ptr = self.addOneAssumeCapacity(); - new_item_ptr.* = item; + self.addOneAssumeCapacity().* = item; } /// Remove the element at index `i`, shift elements after index @@ -879,8 +878,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Never invalidates element pointers. /// Asserts that the list can hold one additional item. pub fn appendAssumeCapacity(self: *Self, item: T) void { - const new_item_ptr = self.addOneAssumeCapacity(); - new_item_ptr.* = item; + self.addOneAssumeCapacity().* = item; } /// Remove the element at index `i` from the list and return its value. diff --git a/lib/std/io.zig b/lib/std/io.zig index 640f575654e2..7336d85cd1f4 100644 --- a/lib/std/io.zig +++ b/lib/std/io.zig @@ -16,10 +16,6 @@ const Allocator = std.mem.Allocator; fn getStdOutHandle() posix.fd_t { if (is_windows) { - if (builtin.zig_backend == .stage2_aarch64) { - // TODO: this is just a temporary workaround until we advance aarch64 backend further along. - return windows.GetStdHandle(windows.STD_OUTPUT_HANDLE) catch windows.INVALID_HANDLE_VALUE; - } return windows.peb().ProcessParameters.hStdOutput; } @@ -36,10 +32,6 @@ pub fn getStdOut() File { fn getStdErrHandle() posix.fd_t { if (is_windows) { - if (builtin.zig_backend == .stage2_aarch64) { - // TODO: this is just a temporary workaround until we advance aarch64 backend further along. 
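Editor's note: the `reinit` change above allows an empty `value_list`, in which case the values array is left undefined via `@memset`. A tiny standalone sketch of setting memory to `undefined` (in safety-checked builds Zig typically fills such bytes with 0xAA):

const std = @import("std");

test "reset a buffer to undefined before reuse" {
    var values = [_]u32{ 1, 2, 3 };
    // Like reinit with an empty value_list: the old contents are discarded
    // and the memory must be re-assigned before it is read again.
    @memset(&values, undefined);
    values[0] = 42;
    try std.testing.expectEqual(@as(u32, 42), values[0]);
}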
- return windows.GetStdHandle(windows.STD_ERROR_HANDLE) catch windows.INVALID_HANDLE_VALUE; - } return windows.peb().ProcessParameters.hStdError; } @@ -56,10 +48,6 @@ pub fn getStdErr() File { fn getStdInHandle() posix.fd_t { if (is_windows) { - if (builtin.zig_backend == .stage2_aarch64) { - // TODO: this is just a temporary workaround until we advance aarch64 backend further along. - return windows.GetStdHandle(windows.STD_INPUT_HANDLE) catch windows.INVALID_HANDLE_VALUE; - } return windows.peb().ProcessParameters.hStdInput; } diff --git a/lib/std/wasm.zig b/lib/std/wasm.zig index 8996a174f155..403cc2f3b994 100644 --- a/lib/std/wasm.zig +++ b/lib/std/wasm.zig @@ -4,8 +4,6 @@ const std = @import("std.zig"); const testing = std.testing; -// TODO: Add support for multi-byte ops (e.g. table operations) - /// Wasm instruction opcodes /// /// All instructions are defined as per spec: @@ -195,27 +193,6 @@ pub const Opcode = enum(u8) { _, }; -/// Returns the integer value of an `Opcode`. Used by the Zig compiler -/// to write instructions to the wasm binary file -pub fn opcode(op: Opcode) u8 { - return @intFromEnum(op); -} - -test "opcodes" { - // Ensure our opcodes values remain intact as certain values are skipped due to them being reserved - const i32_const = opcode(.i32_const); - const end = opcode(.end); - const drop = opcode(.drop); - const local_get = opcode(.local_get); - const i64_extend32_s = opcode(.i64_extend32_s); - - try testing.expectEqual(@as(u16, 0x41), i32_const); - try testing.expectEqual(@as(u16, 0x0B), end); - try testing.expectEqual(@as(u16, 0x1A), drop); - try testing.expectEqual(@as(u16, 0x20), local_get); - try testing.expectEqual(@as(u16, 0xC4), i64_extend32_s); -} - /// Opcodes that require a prefix `0xFC`. /// Each opcode represents a varuint32, meaning /// they are encoded as leb128 in binary. @@ -241,12 +218,6 @@ pub const MiscOpcode = enum(u32) { _, }; -/// Returns the integer value of an `MiscOpcode`. Used by the Zig compiler -/// to write instructions to the wasm binary file -pub fn miscOpcode(op: MiscOpcode) u32 { - return @intFromEnum(op); -} - /// Simd opcodes that require a prefix `0xFD`. /// Each opcode represents a varuint32, meaning /// they are encoded as leb128 in binary. @@ -512,12 +483,6 @@ pub const SimdOpcode = enum(u32) { f32x4_relaxed_dot_bf16x8_add_f32x4 = 0x114, }; -/// Returns the integer value of an `SimdOpcode`. Used by the Zig compiler -/// to write instructions to the wasm binary file -pub fn simdOpcode(op: SimdOpcode) u32 { - return @intFromEnum(op); -} - /// Atomic opcodes that require a prefix `0xFE`. /// Each opcode represents a varuint32, meaning /// they are encoded as leb128 in binary. @@ -592,12 +557,6 @@ pub const AtomicsOpcode = enum(u32) { i64_atomic_rmw32_cmpxchg_u = 0x4E, }; -/// Returns the integer value of an `AtomicsOpcode`. Used by the Zig compiler -/// to write instructions to the wasm binary file -pub fn atomicsOpcode(op: AtomicsOpcode) u32 { - return @intFromEnum(op); -} - /// Enum representing all Wasm value types as per spec: /// https://webassembly.github.io/spec/core/binary/types.html pub const Valtype = enum(u8) { @@ -608,11 +567,6 @@ pub const Valtype = enum(u8) { v128 = 0x7B, }; -/// Returns the integer value of a `Valtype` -pub fn valtype(value: Valtype) u8 { - return @intFromEnum(value); -} - /// Reference types, where the funcref references to a function regardless of its type /// and ref references an object from the embedder. 
pub const RefType = enum(u8) { @@ -620,41 +574,17 @@ pub const RefType = enum(u8) { externref = 0x6F, }; -/// Returns the integer value of a `Reftype` -pub fn reftype(value: RefType) u8 { - return @intFromEnum(value); -} - -test "valtypes" { - const _i32 = valtype(.i32); - const _i64 = valtype(.i64); - const _f32 = valtype(.f32); - const _f64 = valtype(.f64); - - try testing.expectEqual(@as(u8, 0x7F), _i32); - try testing.expectEqual(@as(u8, 0x7E), _i64); - try testing.expectEqual(@as(u8, 0x7D), _f32); - try testing.expectEqual(@as(u8, 0x7C), _f64); -} - /// Limits classify the size range of resizeable storage associated with memory types and table types. pub const Limits = struct { - flags: u8, + flags: Flags, min: u32, max: u32, - pub const Flags = enum(u8) { - WASM_LIMITS_FLAG_HAS_MAX = 0x1, - WASM_LIMITS_FLAG_IS_SHARED = 0x2, + pub const Flags = packed struct(u8) { + has_max: bool, + is_shared: bool, + reserved: u6 = 0, }; - - pub fn hasFlag(limits: Limits, flag: Flags) bool { - return limits.flags & @intFromEnum(flag) != 0; - } - - pub fn setFlag(limits: *Limits, flag: Flags) void { - limits.flags |= @intFromEnum(flag); - } }; /// Initialization expressions are used to set the initial value on an object @@ -667,18 +597,6 @@ pub const InitExpression = union(enum) { global_get: u32, }; -/// Represents a function entry, holding the index to its type -pub const Func = struct { - type_index: u32, -}; - -/// Tables are used to hold pointers to opaque objects. -/// This can either by any function, or an object from the host. -pub const Table = struct { - limits: Limits, - reftype: RefType, -}; - /// Describes the layout of the memory where `min` represents /// the minimal amount of pages, and the optional `max` represents /// the max pages. When `null` will allow the host to determine the @@ -687,88 +605,6 @@ pub const Memory = struct { limits: Limits, }; -/// Represents the type of a `Global` or an imported global. -pub const GlobalType = struct { - valtype: Valtype, - mutable: bool, -}; - -pub const Global = struct { - global_type: GlobalType, - init: InitExpression, -}; - -/// Notates an object to be exported from wasm -/// to the host. -pub const Export = struct { - name: []const u8, - kind: ExternalKind, - index: u32, -}; - -/// Element describes the layout of the table that can -/// be found at `table_index` -pub const Element = struct { - table_index: u32, - offset: InitExpression, - func_indexes: []const u32, -}; - -/// Imports are used to import objects from the host -pub const Import = struct { - module_name: []const u8, - name: []const u8, - kind: Kind, - - pub const Kind = union(ExternalKind) { - function: u32, - table: Table, - memory: Limits, - global: GlobalType, - }; -}; - -/// `Type` represents a function signature type containing both -/// a slice of parameters as well as a slice of return values. -pub const Type = struct { - params: []const Valtype, - returns: []const Valtype, - - pub fn format(self: Type, comptime fmt: []const u8, opt: std.fmt.FormatOptions, writer: anytype) !void { - if (fmt.len != 0) std.fmt.invalidFmtError(fmt, self); - _ = opt; - try writer.writeByte('('); - for (self.params, 0..) |param, i| { - try writer.print("{s}", .{@tagName(param)}); - if (i + 1 != self.params.len) { - try writer.writeAll(", "); - } - } - try writer.writeAll(") -> "); - if (self.returns.len == 0) { - try writer.writeAll("nil"); - } else { - for (self.returns, 0..) 
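Editor's note: `Limits.flags` becomes a `packed struct(u8)` instead of a raw byte plus the old `WASM_LIMITS_FLAG_*` masks. A small sketch of how such a one-byte flags struct round-trips through the encoded byte, relying on the Zig rule that the first packed field occupies the least significant bit:

const std = @import("std");

/// Sketch of the packed-flags pattern: the whole struct is exactly one byte,
/// so it can be converted to and from the on-disk u8 with @bitCast instead
/// of manual masking.
const Flags = packed struct(u8) {
    has_max: bool,
    is_shared: bool,
    reserved: u6 = 0,
};

test "flags round-trip through a byte" {
    const flags: Flags = .{ .has_max = true, .is_shared = false };
    const byte: u8 = @bitCast(flags);
    try std.testing.expectEqual(@as(u8, 0x1), byte); // has_max is bit 0
    const back: Flags = @bitCast(byte);
    try std.testing.expect(back.has_max and !back.is_shared);
}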
|return_ty, i| { - try writer.print("{s}", .{@tagName(return_ty)}); - if (i + 1 != self.returns.len) { - try writer.writeAll(", "); - } - } - } - } - - pub fn eql(self: Type, other: Type) bool { - return std.mem.eql(Valtype, self.params, other.params) and - std.mem.eql(Valtype, self.returns, other.returns); - } - - pub fn deinit(self: *Type, gpa: std.mem.Allocator) void { - gpa.free(self.params); - gpa.free(self.returns); - self.* = undefined; - } -}; - /// Wasm module sections as per spec: /// https://webassembly.github.io/spec/core/binary/modules.html pub const Section = enum(u8) { @@ -788,11 +624,6 @@ pub const Section = enum(u8) { _, }; -/// Returns the integer value of a given `Section` -pub fn section(val: Section) u8 { - return @intFromEnum(val); -} - /// The kind of the type when importing or exporting to/from the host environment. /// https://webassembly.github.io/spec/core/syntax/modules.html pub const ExternalKind = enum(u8) { @@ -802,11 +633,6 @@ pub const ExternalKind = enum(u8) { global, }; -/// Returns the integer value of a given `ExternalKind` -pub fn externalKind(val: ExternalKind) u8 { - return @intFromEnum(val); -} - /// Defines the enum values for each subsection id for the "Names" custom section /// as described by: /// https://webassembly.github.io/spec/core/appendix/custom.html?highlight=name#name-section @@ -829,7 +655,18 @@ pub const function_type: u8 = 0x60; pub const result_type: u8 = 0x40; /// Represents a block which will not return a value -pub const block_empty: u8 = 0x40; +pub const BlockType = enum(u8) { + empty = 0x40, + i32 = 0x7F, + i64 = 0x7E, + f32 = 0x7D, + f64 = 0x7C, + v128 = 0x7B, + + pub fn fromValtype(valtype: Valtype) BlockType { + return @enumFromInt(@intFromEnum(valtype)); + } +}; // binary constants pub const magic = [_]u8{ 0x00, 0x61, 0x73, 0x6D }; // \0asm diff --git a/lib/std/zig/ErrorBundle.zig b/lib/std/zig/ErrorBundle.zig index 4612d0762d4c..c8098a219045 100644 --- a/lib/std/zig/ErrorBundle.zig +++ b/lib/std/zig/ErrorBundle.zig @@ -11,6 +11,11 @@ string_bytes: []const u8, /// The first thing in this array is an `ErrorMessageList`. extra: []const u32, +/// Index into `string_bytes`. +pub const String = u32; +/// Index into `string_bytes`, or null. +pub const OptionalString = u32; + /// Special encoding when there are no errors. pub const empty: ErrorBundle = .{ .string_bytes = &.{}, @@ -33,14 +38,13 @@ pub const ErrorMessageList = struct { len: u32, start: u32, /// null-terminated string index. 0 means no compile log text. - compile_log_text: u32, + compile_log_text: OptionalString, }; /// Trailing: /// * ReferenceTrace for each reference_trace_len pub const SourceLocation = struct { - /// null terminated string index - src_path: u32, + src_path: String, line: u32, column: u32, /// byte offset of starting token @@ -49,17 +53,15 @@ pub const SourceLocation = struct { span_main: u32, /// byte offset of end of last token span_end: u32, - /// null terminated string index, possibly null. /// Does not include the trailing newline. - source_line: u32 = 0, + source_line: OptionalString = 0, reference_trace_len: u32 = 0, }; /// Trailing: /// * MessageIndex for each notes_len. pub const ErrorMessage = struct { - /// null terminated string index - msg: u32, + msg: String, /// Usually one, but incremented for redundant messages. 
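Editor's note: with the `opcode`/`valtype`/`section` wrapper functions removed, callers use `@intFromEnum` directly, as the CheckObject change earlier in this patch already does. A test replicating the assertions from the deleted `opcodes`/`valtypes` tests, assuming the enum values in `std.wasm` stay as shown above:

const std = @import("std");

test "opcode and valtype integer values via @intFromEnum" {
    // Same values the removed helper-based tests checked.
    try std.testing.expectEqual(@as(u8, 0x41), @intFromEnum(std.wasm.Opcode.i32_const));
    try std.testing.expectEqual(@as(u8, 0x0B), @intFromEnum(std.wasm.Opcode.end));
    try std.testing.expectEqual(@as(u8, 0x7F), @intFromEnum(std.wasm.Valtype.i32));
    try std.testing.expectEqual(@as(u8, 0x7C), @intFromEnum(std.wasm.Valtype.f64));
}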
count: u32 = 1, src_loc: SourceLocationIndex = .none, @@ -71,7 +73,7 @@ pub const ReferenceTrace = struct { /// Except for the sentinel ReferenceTrace element, in which case: /// * 0 means remaining references hidden /// * >0 means N references hidden - decl_name: u32, + decl_name: String, /// Index into extra of a SourceLocation /// If this is 0, this is the sentinel ReferenceTrace element. src_loc: SourceLocationIndex, @@ -138,7 +140,7 @@ fn extraData(eb: ErrorBundle, comptime T: type, index: usize) struct { data: T, } /// Given an index into `string_bytes` returns the null-terminated string found there. -pub fn nullTerminatedString(eb: ErrorBundle, index: usize) [:0]const u8 { +pub fn nullTerminatedString(eb: ErrorBundle, index: String) [:0]const u8 { const string_bytes = eb.string_bytes; var end: usize = index; while (string_bytes[end] != 0) { @@ -384,18 +386,18 @@ pub const Wip = struct { }; } - pub fn addString(wip: *Wip, s: []const u8) Allocator.Error!u32 { + pub fn addString(wip: *Wip, s: []const u8) Allocator.Error!String { const gpa = wip.gpa; - const index: u32 = @intCast(wip.string_bytes.items.len); + const index: String = @intCast(wip.string_bytes.items.len); try wip.string_bytes.ensureUnusedCapacity(gpa, s.len + 1); wip.string_bytes.appendSliceAssumeCapacity(s); wip.string_bytes.appendAssumeCapacity(0); return index; } - pub fn printString(wip: *Wip, comptime fmt: []const u8, args: anytype) Allocator.Error!u32 { + pub fn printString(wip: *Wip, comptime fmt: []const u8, args: anytype) Allocator.Error!String { const gpa = wip.gpa; - const index: u32 = @intCast(wip.string_bytes.items.len); + const index: String = @intCast(wip.string_bytes.items.len); try wip.string_bytes.writer(gpa).print(fmt, args); try wip.string_bytes.append(gpa, 0); return index; diff --git a/src/Compilation.zig b/src/Compilation.zig index 7ec06402569c..77596e34ec74 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -113,6 +113,14 @@ link_diags: link.Diags, link_task_queue: ThreadSafeQueue(link.Task) = .empty, /// Ensure only 1 simultaneous call to `flushTaskQueue`. link_task_queue_safety: std.debug.SafetyLock = .{}, +/// If any tasks are queued up that depend on prelink being finished, they are moved +/// here until prelink finishes. +link_task_queue_postponed: std.ArrayListUnmanaged(link.Task) = .empty, +/// Initialized with how many link input tasks are expected. After this reaches zero +/// the linker will begin the prelink phase. +/// Initialized in the Compilation main thread before the pipeline; modified only in +/// the linker task thread. +remaining_prelink_tasks: u32, work_queues: [ len: { @@ -1515,6 +1523,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .file_system_inputs = options.file_system_inputs, .parent_whole_cache = options.parent_whole_cache, .link_diags = .init(gpa), + .remaining_prelink_tasks = 0, }; // Prevent some footguns by making the "any" fields of config reflect @@ -1587,6 +1596,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .pdb_source_path = options.pdb_source_path, .pdb_out_path = options.pdb_out_path, .entry_addr = null, // CLI does not expose this option (yet?) 
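Editor's note: the new `String`/`OptionalString` aliases document that these `u32`s index into `string_bytes`, where `addString` stores each string followed by a 0 sentinel and `nullTerminatedString` scans forward for it. A minimal standalone sketch of that string-table scheme (not the ErrorBundle API itself):

const std = @import("std");

test "null-terminated string table" {
    const gpa = std.testing.allocator;
    var string_bytes: std.ArrayListUnmanaged(u8) = .empty;
    defer string_bytes.deinit(gpa);

    // addString: record the current length as the index, append bytes plus a 0.
    const index: u32 = @intCast(string_bytes.items.len);
    try string_bytes.appendSlice(gpa, "hello");
    try string_bytes.append(gpa, 0);

    // nullTerminatedString: scan from the index to the sentinel.
    const bytes = string_bytes.items;
    var end: usize = index;
    while (bytes[end] != 0) end += 1;
    const s = bytes[index..end :0];
    try std.testing.expectEqualStrings("hello", s);
}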
+ .object_host_name = "env", }; switch (options.cache_mode) { @@ -1715,6 +1725,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil }; comp.c_object_table.putAssumeCapacityNoClobber(c_object, {}); } + comp.remaining_prelink_tasks += @intCast(comp.c_object_table.count()); // Add a `Win32Resource` for each `rc_source_files` and one for `manifest_file`. const win32_resource_count = @@ -1722,6 +1733,10 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil if (win32_resource_count > 0) { dev.check(.win32_resource); try comp.win32_resource_table.ensureTotalCapacity(gpa, win32_resource_count); + // Add this after adding logic to updateWin32Resource to pass the + // result into link.loadInput. loadInput integration is not implemented + // for Windows linking logic yet. + //comp.remaining_prelink_tasks += @intCast(win32_resource_count); for (options.rc_source_files) |rc_source_file| { const win32_resource = try gpa.create(Win32Resource); errdefer gpa.destroy(win32_resource); @@ -1732,6 +1747,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil }; comp.win32_resource_table.putAssumeCapacityNoClobber(win32_resource, {}); } + if (options.manifest_file) |manifest_path| { const win32_resource = try gpa.create(Win32Resource); errdefer gpa.destroy(win32_resource); @@ -1779,10 +1795,12 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil inline for (fields) |field| { if (@field(paths, field.name)) |path| { comp.link_task_queue.shared.appendAssumeCapacity(.{ .load_object = path }); + comp.remaining_prelink_tasks += 1; } } // Loads the libraries provided by `target_util.libcFullLinkFlags(target)`. comp.link_task_queue.shared.appendAssumeCapacity(.load_host_libc); + comp.remaining_prelink_tasks += 1; } else if (target.isMusl() and !target.isWasm()) { if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable; @@ -1791,14 +1809,17 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .{ .musl_crt_file = .crti_o }, .{ .musl_crt_file = .crtn_o }, }); + comp.remaining_prelink_tasks += 2; } if (musl.needsCrt0(comp.config.output_mode, comp.config.link_mode, comp.config.pie)) |f| { try comp.queueJobs(&.{.{ .musl_crt_file = f }}); + comp.remaining_prelink_tasks += 1; } try comp.queueJobs(&.{.{ .musl_crt_file = switch (comp.config.link_mode) { .static => .libc_a, .dynamic => .libc_so, } }}); + comp.remaining_prelink_tasks += 1; } else if (target.isGnuLibC()) { if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable; @@ -1807,14 +1828,18 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .{ .glibc_crt_file = .crti_o }, .{ .glibc_crt_file = .crtn_o }, }); + comp.remaining_prelink_tasks += 2; } if (glibc.needsCrt0(comp.config.output_mode)) |f| { try comp.queueJobs(&.{.{ .glibc_crt_file = f }}); + comp.remaining_prelink_tasks += 1; } try comp.queueJobs(&[_]Job{ .{ .glibc_shared_objects = {} }, .{ .glibc_crt_file = .libc_nonshared_a }, }); + comp.remaining_prelink_tasks += 1; + comp.remaining_prelink_tasks += glibc.sharedObjectsCount(&target); } else if (target.isWasm() and target.os.tag == .wasi) { if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable; @@ -1822,11 +1847,13 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil try comp.queueJob(.{ .wasi_libc_crt_file = crt_file, }); + comp.remaining_prelink_tasks += 1; } try comp.queueJobs(&[_]Job{ .{ 
.wasi_libc_crt_file = wasi_libc.execModelCrtFile(comp.config.wasi_exec_model) }, .{ .wasi_libc_crt_file = .libc_a }, }); + comp.remaining_prelink_tasks += 2; } else if (target.isMinGW()) { if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable; @@ -1835,6 +1862,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .{ .mingw_crt_file = .mingw32_lib }, crt_job, }); + comp.remaining_prelink_tasks += 2; // When linking mingw-w64 there are some import libs we always need. try comp.windows_libs.ensureUnusedCapacity(gpa, mingw.always_link_libs.len); @@ -1846,6 +1874,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil } } else if (target.os.tag == .freestanding and capable_of_building_zig_libc) { try comp.queueJob(.{ .zig_libc = {} }); + comp.remaining_prelink_tasks += 1; } else { return error.LibCUnavailable; } @@ -1860,13 +1889,16 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil } if (comp.wantBuildLibUnwindFromSource()) { try comp.queueJob(.{ .libunwind = {} }); + comp.remaining_prelink_tasks += 1; } if (build_options.have_llvm and is_exe_or_dyn_lib and comp.config.link_libcpp) { try comp.queueJob(.libcxx); try comp.queueJob(.libcxxabi); + comp.remaining_prelink_tasks += 2; } if (build_options.have_llvm and is_exe_or_dyn_lib and comp.config.any_sanitize_thread) { try comp.queueJob(.libtsan); + comp.remaining_prelink_tasks += 1; } if (target.isMinGW() and comp.config.any_non_single_threaded) { @@ -1885,22 +1917,27 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil if (is_exe_or_dyn_lib) { log.debug("queuing a job to build compiler_rt_lib", .{}); comp.job_queued_compiler_rt_lib = true; + comp.remaining_prelink_tasks += 1; } else if (output_mode != .Obj) { log.debug("queuing a job to build compiler_rt_obj", .{}); // In this case we are making a static library, so we ask // for a compiler-rt object to put in it. comp.job_queued_compiler_rt_obj = true; + comp.remaining_prelink_tasks += 1; } } if (is_exe_or_dyn_lib and comp.config.any_fuzz and capable_of_building_compiler_rt) { log.debug("queuing a job to build libfuzzer", .{}); comp.job_queued_fuzzer_lib = true; + comp.remaining_prelink_tasks += 1; } } try comp.link_task_queue.shared.append(gpa, .load_explicitly_provided); + comp.remaining_prelink_tasks += 1; } + log.debug("total prelink tasks: {d}", .{comp.remaining_prelink_tasks}); return comp; } @@ -1976,6 +2013,7 @@ pub fn destroy(comp: *Compilation) void { comp.link_diags.deinit(); comp.link_task_queue.deinit(gpa); + comp.link_task_queue_postponed.deinit(gpa); comp.clearMiscFailures(); @@ -2438,9 +2476,8 @@ fn flush( if (comp.bin_file) |lf| { // This is needed before reading the error flags. lf.flush(arena, tid, prog_node) catch |err| switch (err) { - error.FlushFailure, error.LinkFailure => {}, // error reported through link_diags.flags - error.LLDReportedFailure => {}, // error reported via lockAndParseLldStderr - else => |e| return e, + error.LinkFailure => {}, // Already reported. 
+ error.OutOfMemory => return error.OutOfMemory, }; } @@ -3025,8 +3062,120 @@ pub fn saveState(comp: *Compilation) !void { //// TODO: compilation errors //// TODO: namespaces //// TODO: decls - //// TODO: linker state } + + // linker state + switch (lf.tag) { + .wasm => { + const wasm = lf.cast(.wasm).?; + const is_obj = comp.config.output_mode == .Obj; + try bufs.ensureUnusedCapacity(85); + addBuf(&bufs, wasm.string_bytes.items); + // TODO make it well-defined memory layout + //addBuf(&bufs, mem.sliceAsBytes(wasm.objects.items)); + addBuf(&bufs, mem.sliceAsBytes(wasm.func_types.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_function_imports.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_function_imports.values())); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_functions.items)); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_global_imports.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_global_imports.values())); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_globals.items)); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_table_imports.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_table_imports.values())); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_tables.items)); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_memory_imports.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_memory_imports.values())); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_memories.items)); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_relocations.items(.tag))); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_relocations.items(.offset))); + // TODO handle the union safety field + //addBuf(&bufs, mem.sliceAsBytes(wasm.object_relocations.items(.pointee))); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_relocations.items(.addend))); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_init_funcs.items)); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_data_segments.items)); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_datas.items)); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_data_imports.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_data_imports.values())); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_custom_segments.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_custom_segments.values())); + // TODO make it well-defined memory layout + // addBuf(&bufs, mem.sliceAsBytes(wasm.object_comdats.items)); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_relocations_table.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_relocations_table.values())); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_comdat_symbols.items(.kind))); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_comdat_symbols.items(.index))); + addBuf(&bufs, mem.sliceAsBytes(wasm.out_relocs.items(.tag))); + addBuf(&bufs, mem.sliceAsBytes(wasm.out_relocs.items(.offset))); + // TODO handle the union safety field + //addBuf(&bufs, mem.sliceAsBytes(wasm.out_relocs.items(.pointee))); + addBuf(&bufs, mem.sliceAsBytes(wasm.out_relocs.items(.addend))); + addBuf(&bufs, mem.sliceAsBytes(wasm.uav_fixups.items)); + addBuf(&bufs, mem.sliceAsBytes(wasm.nav_fixups.items)); + addBuf(&bufs, mem.sliceAsBytes(wasm.func_table_fixups.items)); + if (is_obj) { + addBuf(&bufs, mem.sliceAsBytes(wasm.navs_obj.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.navs_obj.values())); + addBuf(&bufs, mem.sliceAsBytes(wasm.uavs_obj.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.uavs_obj.values())); + } else { + addBuf(&bufs, mem.sliceAsBytes(wasm.navs_exe.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.navs_exe.values())); + addBuf(&bufs, 
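Editor's note: `saveState` gathers linker state by taking a raw byte view of each array with `std.mem.sliceAsBytes`, which is why the TODOs below call out well-defined memory layout and union safety fields. A small sketch of the byte-view round trip:

const std = @import("std");

test "viewing a slice as raw bytes for serialization" {
    const values = [_]u32{ 0x11111111, 0x22222222 };
    const bytes = std.mem.sliceAsBytes(&values);
    // Every element contributes @sizeOf(u32) bytes, so the whole view is 8 bytes.
    try std.testing.expectEqual(@as(usize, 8), bytes.len);
    // The same bytes can be reinterpreted as u32s when restoring state.
    const restored = std.mem.bytesAsSlice(u32, bytes);
    try std.testing.expectEqual(@as(u32, 0x22222222), restored[1]);
}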
mem.sliceAsBytes(wasm.uavs_exe.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.uavs_exe.values())); + } + addBuf(&bufs, mem.sliceAsBytes(wasm.overaligned_uavs.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.overaligned_uavs.values())); + addBuf(&bufs, mem.sliceAsBytes(wasm.zcu_funcs.keys())); + // TODO handle the union safety field + // addBuf(&bufs, mem.sliceAsBytes(wasm.zcu_funcs.values())); + addBuf(&bufs, mem.sliceAsBytes(wasm.nav_exports.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.nav_exports.values())); + addBuf(&bufs, mem.sliceAsBytes(wasm.uav_exports.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.uav_exports.values())); + addBuf(&bufs, mem.sliceAsBytes(wasm.imports.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.missing_exports.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.function_exports.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.function_exports.values())); + addBuf(&bufs, mem.sliceAsBytes(wasm.hidden_function_exports.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.hidden_function_exports.values())); + addBuf(&bufs, mem.sliceAsBytes(wasm.global_exports.items)); + addBuf(&bufs, mem.sliceAsBytes(wasm.functions.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.function_imports.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.function_imports.values())); + addBuf(&bufs, mem.sliceAsBytes(wasm.data_imports.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.data_imports.values())); + addBuf(&bufs, mem.sliceAsBytes(wasm.data_segments.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.globals.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.global_imports.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.global_imports.values())); + addBuf(&bufs, mem.sliceAsBytes(wasm.tables.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.table_imports.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.table_imports.values())); + addBuf(&bufs, mem.sliceAsBytes(wasm.zcu_indirect_function_set.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_indirect_function_import_set.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.object_indirect_function_set.keys())); + addBuf(&bufs, mem.sliceAsBytes(wasm.mir_instructions.items(.tag))); + // TODO handle the union safety field + //addBuf(&bufs, mem.sliceAsBytes(wasm.mir_instructions.items(.data))); + addBuf(&bufs, mem.sliceAsBytes(wasm.mir_extra.items)); + addBuf(&bufs, mem.sliceAsBytes(wasm.all_zcu_locals.items)); + addBuf(&bufs, mem.sliceAsBytes(wasm.tag_name_bytes.items)); + addBuf(&bufs, mem.sliceAsBytes(wasm.tag_name_offs.items)); + + // TODO add as header fields + // entry_resolution: FunctionImport.Resolution + // function_exports_len: u32 + // global_exports_len: u32 + // functions_end_prelink: u32 + // globals_end_prelink: u32 + // error_name_table_ref_count: u32 + // tag_name_table_ref_count: u32 + // any_tls_relocs: bool + // any_passive_inits: bool + }, + else => log.err("TODO implement saving linker state for {s}", .{@tagName(lf.tag)}), + } + var basename_buf: [255]u8 = undefined; const basename = std.fmt.bufPrint(&basename_buf, "{s}.zcs", .{ comp.root_name, @@ -3209,6 +3358,10 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { if (!zcu.navFileScope(nav).okToReportErrors()) continue; try addModuleErrorMsg(zcu, &bundle, error_msg.*); } + for (zcu.failed_types.keys(), zcu.failed_types.values()) |ty_index, error_msg| { + if (!zcu.typeFileScope(ty_index).okToReportErrors()) continue; + try addModuleErrorMsg(zcu, &bundle, error_msg.*); + } for (zcu.failed_exports.values()) |value| { try addModuleErrorMsg(zcu, &bundle, value.*); } @@ -3252,7 +3405,7 
@@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { })); } - try comp.link_diags.addMessagesToBundle(&bundle); + try comp.link_diags.addMessagesToBundle(&bundle, comp.bin_file); if (comp.zcu) |zcu| { if (bundle.root_list.items.len == 0 and zcu.compile_log_sources.count() != 0) { @@ -3524,9 +3677,9 @@ pub fn performAllTheWork( defer if (comp.zcu) |zcu| { zcu.sema_prog_node.end(); - zcu.sema_prog_node = std.Progress.Node.none; + zcu.sema_prog_node = .none; zcu.codegen_prog_node.end(); - zcu.codegen_prog_node = std.Progress.Node.none; + zcu.codegen_prog_node = .none; zcu.generation += 1; }; @@ -3659,7 +3812,7 @@ fn performAllTheWorkInner( try zcu.flushRetryableFailures(); zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); - zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0); + zcu.codegen_prog_node = if (comp.bin_file != null) main_progress_node.start("Code Generation", 0) else .none; } if (!comp.separateCodegenThreadOk()) { @@ -3689,6 +3842,8 @@ fn performAllTheWorkInner( }); continue; } + zcu.sema_prog_node.end(); + zcu.sema_prog_node = .none; } break; } @@ -3962,6 +4117,7 @@ fn dispatchCodegenTask(comp: *Compilation, tid: usize, link_task: link.Task) voi if (comp.separateCodegenThreadOk()) { comp.queueLinkTasks(&.{link_task}); } else { + assert(comp.remaining_prelink_tasks == 0); link.doTask(comp, tid, link_task); } } diff --git a/src/InternPool.zig b/src/InternPool.zig index 3b9fb29b867c..30a0dd087c62 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -552,6 +552,15 @@ pub const Nav = struct { }; } + /// This function is intended to be used by code generation, since semantic + /// analysis will ensure that any `Nav` which is potentially `extern` is + /// fully resolved. + /// Asserts that `status == .fully_resolved`. + pub fn getResolvedExtern(nav: Nav, ip: *const InternPool) ?Key.Extern { + assert(nav.status == .fully_resolved); + return nav.getExtern(ip); + } + /// Always returns `null` for `status == .type_resolved`. This function is inteded /// to be used by code generation, since semantic analysis will ensure that any `Nav` /// which is potentially `extern` is fully resolved. @@ -585,6 +594,15 @@ pub const Nav = struct { }; } + /// Asserts that `status != .unresolved`. + pub fn getLinkSection(nav: Nav) OptionalNullTerminatedString { + return switch (nav.status) { + .unresolved => unreachable, + .type_resolved => |r| r.@"linksection", + .fully_resolved => |r| r.@"linksection", + }; + } + /// Asserts that `status != .unresolved`. pub fn isThreadlocal(nav: Nav, ip: *const InternPool) bool { return switch (nav.status) { @@ -598,6 +616,20 @@ pub const Nav = struct { }; } + pub fn isFn(nav: Nav, ip: *const InternPool) bool { + return switch (nav.status) { + .unresolved => unreachable, + .type_resolved => |r| { + const tag = ip.zigTypeTagOrPoison(r.type) catch unreachable; + return tag == .@"fn"; + }, + .fully_resolved => |r| { + const tag = ip.zigTypeTagOrPoison(ip.typeOf(r.val)) catch unreachable; + return tag == .@"fn"; + }, + }; + } + /// If this returns `true`, then a pointer to this `Nav` might actually be encoded as a pointer /// to some other `Nav` due to an extern definition or extern alias (see #21027). /// This query is valid on `Nav`s for whom only the type is resolved. 
@@ -3360,6 +3392,10 @@ pub const LoadedUnionType = struct { return flags.status == .field_types_wip; } + pub fn requiresComptime(u: LoadedUnionType, ip: *const InternPool) RequiresComptime { + return u.flagsUnordered(ip).requires_comptime; + } + pub fn setRequiresComptimeWip(u: LoadedUnionType, ip: *InternPool) RequiresComptime { const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex; extra_mutex.lock(); @@ -4014,7 +4050,7 @@ pub const LoadedStructType = struct { } } - pub fn haveLayout(s: LoadedStructType, ip: *InternPool) bool { + pub fn haveLayout(s: LoadedStructType, ip: *const InternPool) bool { return switch (s.layout) { .@"packed" => s.backingIntTypeUnordered(ip) != .none, .auto, .@"extern" => s.flagsUnordered(ip).layout_resolved, @@ -11797,6 +11833,10 @@ pub fn toEnum(ip: *const InternPool, comptime E: type, i: Index) E { return @enumFromInt(ip.indexToKey(int).int.storage.u64); } +pub fn toFunc(ip: *const InternPool, i: Index) Key.Func { + return ip.indexToKey(i).func; +} + pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 { return switch (ip.indexToKey(ty)) { .struct_type => ip.loadStructType(ty).field_types.len, diff --git a/src/Sema.zig b/src/Sema.zig index be98c4fef269..4a62e495c7f7 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -38298,7 +38298,7 @@ pub fn flushExports(sema: *Sema) !void { // So, pick up and delete any existing exports. This strategy performs // redundant work, but that's okay, because this case is exceedingly rare. if (zcu.single_exports.get(sema.owner)) |export_idx| { - try sema.exports.append(gpa, zcu.all_exports.items[export_idx]); + try sema.exports.append(gpa, export_idx.ptr(zcu).*); } else if (zcu.multi_exports.get(sema.owner)) |info| { try sema.exports.appendSlice(gpa, zcu.all_exports.items[info.index..][0..info.len]); } @@ -38307,12 +38307,12 @@ pub fn flushExports(sema: *Sema) !void { // `sema.exports` is completed; store the data into the `Zcu`. 
if (sema.exports.items.len == 1) { try zcu.single_exports.ensureUnusedCapacity(gpa, 1); - const export_idx = zcu.free_exports.popOrNull() orelse idx: { + const export_idx: Zcu.Export.Index = zcu.free_exports.popOrNull() orelse idx: { _ = try zcu.all_exports.addOne(gpa); - break :idx zcu.all_exports.items.len - 1; + break :idx @enumFromInt(zcu.all_exports.items.len - 1); }; - zcu.all_exports.items[export_idx] = sema.exports.items[0]; - zcu.single_exports.putAssumeCapacityNoClobber(sema.owner, @intCast(export_idx)); + export_idx.ptr(zcu).* = sema.exports.items[0]; + zcu.single_exports.putAssumeCapacityNoClobber(sema.owner, export_idx); } else { try zcu.multi_exports.ensureUnusedCapacity(gpa, 1); const exports_base = zcu.all_exports.items.len; diff --git a/src/Type.zig b/src/Type.zig index bfc883bede2a..023b763bf922 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -441,7 +441,7 @@ pub fn toValue(self: Type) Value { const RuntimeBitsError = SemaError || error{NeedLazy}; -pub fn hasRuntimeBits(ty: Type, zcu: *Zcu) bool { +pub fn hasRuntimeBits(ty: Type, zcu: *const Zcu) bool { return hasRuntimeBitsInner(ty, false, .eager, zcu, {}) catch unreachable; } @@ -452,7 +452,7 @@ pub fn hasRuntimeBitsSema(ty: Type, pt: Zcu.PerThread) SemaError!bool { }; } -pub fn hasRuntimeBitsIgnoreComptime(ty: Type, zcu: *Zcu) bool { +pub fn hasRuntimeBitsIgnoreComptime(ty: Type, zcu: *const Zcu) bool { return hasRuntimeBitsInner(ty, true, .eager, zcu, {}) catch unreachable; } @@ -471,7 +471,7 @@ pub fn hasRuntimeBitsInner( ty: Type, ignore_comptime_only: bool, comptime strat: ResolveStratLazy, - zcu: *Zcu, + zcu: strat.ZcuPtr(), tid: strat.Tid(), ) RuntimeBitsError!bool { const ip = &zcu.intern_pool; @@ -560,7 +560,7 @@ pub fn hasRuntimeBitsInner( }, .struct_type => { const struct_type = ip.loadStructType(ty.toIntern()); - if (struct_type.assumeRuntimeBitsIfFieldTypesWip(ip)) { + if (strat != .eager and struct_type.assumeRuntimeBitsIfFieldTypesWip(ip)) { // In this case, we guess that hasRuntimeBits() for this type is true, // and then later if our guess was incorrect, we emit a compile error. return true; @@ -596,7 +596,7 @@ pub fn hasRuntimeBitsInner( const union_type = ip.loadUnionType(ty.toIntern()); const union_flags = union_type.flagsUnordered(ip); switch (union_flags.runtime_tag) { - .none => { + .none => if (strat != .eager) { // In this case, we guess that hasRuntimeBits() for this type is true, // and then later if our guess was incorrect, we emit a compile error. if (union_type.assumeRuntimeBitsIfFieldTypesWip(ip)) return true; @@ -774,7 +774,7 @@ pub fn fnHasRuntimeBitsSema(ty: Type, pt: Zcu.PerThread) SemaError!bool { pub fn fnHasRuntimeBitsInner( ty: Type, comptime strat: ResolveStrat, - zcu: *Zcu, + zcu: strat.ZcuPtr(), tid: strat.Tid(), ) SemaError!bool { const fn_info = zcu.typeToFunc(ty).?; @@ -815,7 +815,7 @@ pub fn ptrAlignmentSema(ty: Type, pt: Zcu.PerThread) SemaError!Alignment { pub fn ptrAlignmentInner( ty: Type, comptime strat: ResolveStrat, - zcu: *Zcu, + zcu: strat.ZcuPtr(), tid: strat.Tid(), ) !Alignment { return switch (zcu.intern_pool.indexToKey(ty.toIntern())) { @@ -868,14 +868,25 @@ pub const ResolveStratLazy = enum { /// This should typically be used from semantic analysis. 
sema, - pub fn Tid(comptime strat: ResolveStratLazy) type { + pub fn Tid(strat: ResolveStratLazy) type { return switch (strat) { .lazy, .sema => Zcu.PerThread.Id, .eager => void, }; } - pub fn pt(comptime strat: ResolveStratLazy, zcu: *Zcu, tid: strat.Tid()) switch (strat) { + pub fn ZcuPtr(strat: ResolveStratLazy) type { + return switch (strat) { + .eager => *const Zcu, + .sema, .lazy => *Zcu, + }; + } + + pub fn pt( + comptime strat: ResolveStratLazy, + zcu: strat.ZcuPtr(), + tid: strat.Tid(), + ) switch (strat) { .lazy, .sema => Zcu.PerThread, .eager => void, } { @@ -896,14 +907,21 @@ pub const ResolveStrat = enum { /// This should typically be used from semantic analysis. sema, - pub fn Tid(comptime strat: ResolveStrat) type { + pub fn Tid(strat: ResolveStrat) type { return switch (strat) { .sema => Zcu.PerThread.Id, .normal => void, }; } - pub fn pt(comptime strat: ResolveStrat, zcu: *Zcu, tid: strat.Tid()) switch (strat) { + pub fn ZcuPtr(strat: ResolveStrat) type { + return switch (strat) { + .normal => *const Zcu, + .sema => *Zcu, + }; + } + + pub fn pt(comptime strat: ResolveStrat, zcu: strat.ZcuPtr(), tid: strat.Tid()) switch (strat) { .sema => Zcu.PerThread, .normal => void, } { @@ -922,7 +940,7 @@ pub const ResolveStrat = enum { }; /// Never returns `none`. Asserts that all necessary type resolution is already done. -pub fn abiAlignment(ty: Type, zcu: *Zcu) Alignment { +pub fn abiAlignment(ty: Type, zcu: *const Zcu) Alignment { return (ty.abiAlignmentInner(.eager, zcu, {}) catch unreachable).scalar; } @@ -939,7 +957,7 @@ pub fn abiAlignmentSema(ty: Type, pt: Zcu.PerThread) SemaError!Alignment { pub fn abiAlignmentInner( ty: Type, comptime strat: ResolveStratLazy, - zcu: *Zcu, + zcu: strat.ZcuPtr(), tid: strat.Tid(), ) SemaError!AbiAlignmentInner { const pt = strat.pt(zcu, tid); @@ -1156,7 +1174,7 @@ pub fn abiAlignmentInner( fn abiAlignmentInnerErrorUnion( ty: Type, comptime strat: ResolveStratLazy, - zcu: *Zcu, + zcu: strat.ZcuPtr(), tid: strat.Tid(), payload_ty: Type, ) SemaError!AbiAlignmentInner { @@ -1198,7 +1216,7 @@ fn abiAlignmentInnerErrorUnion( fn abiAlignmentInnerOptional( ty: Type, comptime strat: ResolveStratLazy, - zcu: *Zcu, + zcu: strat.ZcuPtr(), tid: strat.Tid(), ) SemaError!AbiAlignmentInner { const pt = strat.pt(zcu, tid); @@ -1244,7 +1262,7 @@ const AbiSizeInner = union(enum) { /// Asserts the type has the ABI size already resolved. /// Types that return false for hasRuntimeBits() return 0. 
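Editor's note: the new `ZcuPtr` helpers let one function accept `*const Zcu` for eager/normal resolution but `*Zcu` when Sema may mutate state, by computing the parameter type from the comptime strategy. A standalone sketch of the pattern; `State`, `Strat`, and `query` are hypothetical names, not part of the patch:

const std = @import("std");

const State = struct { counter: u32 = 0 };

const Strat = enum {
    eager,
    sema,

    fn StatePtr(strat: Strat) type {
        return switch (strat) {
            .eager => *const State, // read-only access is enough
            .sema => *State, // may mutate while resolving
        };
    }
};

fn query(comptime strat: Strat, state: strat.StatePtr()) u32 {
    // The condition is comptime-known, so the mutating branch is only
    // analyzed when the pointer type is mutable.
    if (strat == .sema) state.counter += 1;
    return state.counter;
}

test "strategy selects pointer constness" {
    var s: State = .{};
    try std.testing.expectEqual(@as(u32, 1), query(.sema, &s));
    const cs: *const State = &s;
    try std.testing.expectEqual(@as(u32, 1), query(.eager, cs));
}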
-pub fn abiSize(ty: Type, zcu: *Zcu) u64 { +pub fn abiSize(ty: Type, zcu: *const Zcu) u64 { return (abiSizeInner(ty, .eager, zcu, {}) catch unreachable).scalar; } @@ -1269,7 +1287,7 @@ pub fn abiSizeSema(ty: Type, pt: Zcu.PerThread) SemaError!u64 { pub fn abiSizeInner( ty: Type, comptime strat: ResolveStratLazy, - zcu: *Zcu, + zcu: strat.ZcuPtr(), tid: strat.Tid(), ) SemaError!AbiSizeInner { const target = zcu.getTarget(); @@ -1542,7 +1560,7 @@ pub fn abiSizeInner( fn abiSizeInnerOptional( ty: Type, comptime strat: ResolveStratLazy, - zcu: *Zcu, + zcu: strat.ZcuPtr(), tid: strat.Tid(), ) SemaError!AbiSizeInner { const child_ty = ty.optionalChild(zcu); @@ -1701,7 +1719,7 @@ pub fn maxIntAlignment(target: std.Target, use_llvm: bool) u16 { }; } -pub fn bitSize(ty: Type, zcu: *Zcu) u64 { +pub fn bitSize(ty: Type, zcu: *const Zcu) u64 { return bitSizeInner(ty, .normal, zcu, {}) catch unreachable; } @@ -1712,7 +1730,7 @@ pub fn bitSizeSema(ty: Type, pt: Zcu.PerThread) SemaError!u64 { pub fn bitSizeInner( ty: Type, comptime strat: ResolveStrat, - zcu: *Zcu, + zcu: strat.ZcuPtr(), tid: strat.Tid(), ) SemaError!u64 { const target = zcu.getTarget(); @@ -2148,7 +2166,7 @@ pub fn unionBackingType(ty: Type, pt: Zcu.PerThread) !Type { }; } -pub fn unionGetLayout(ty: Type, zcu: *Zcu) Zcu.UnionLayout { +pub fn unionGetLayout(ty: Type, zcu: *const Zcu) Zcu.UnionLayout { const union_obj = zcu.intern_pool.loadUnionType(ty.toIntern()); return Type.getUnionLayout(union_obj, zcu); } @@ -2746,7 +2764,7 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value { /// During semantic analysis, instead call `ty.comptimeOnlySema` which /// resolves field types rather than asserting they are already resolved. -pub fn comptimeOnly(ty: Type, zcu: *Zcu) bool { +pub fn comptimeOnly(ty: Type, zcu: *const Zcu) bool { return ty.comptimeOnlyInner(.normal, zcu, {}) catch unreachable; } @@ -2759,7 +2777,7 @@ pub fn comptimeOnlySema(ty: Type, pt: Zcu.PerThread) SemaError!bool { pub fn comptimeOnlyInner( ty: Type, comptime strat: ResolveStrat, - zcu: *Zcu, + zcu: strat.ZcuPtr(), tid: strat.Tid(), ) SemaError!bool { const ip = &zcu.intern_pool; @@ -2834,40 +2852,44 @@ pub fn comptimeOnlyInner( if (struct_type.layout == .@"packed") return false; - // A struct with no fields is not comptime-only. - return switch (struct_type.setRequiresComptimeWip(ip)) { - .no, .wip => false, - .yes => true, - .unknown => { - // Inlined `assert` so that the resolution calls below are not statically reachable. 
- if (strat != .sema) unreachable; - - if (struct_type.flagsUnordered(ip).field_types_wip) { - struct_type.setRequiresComptime(ip, .unknown); - return false; - } + return switch (strat) { + .normal => switch (struct_type.requiresComptime(ip)) { + .wip => unreachable, + .no => false, + .yes => true, + .unknown => unreachable, + }, + .sema => switch (struct_type.setRequiresComptimeWip(ip)) { + .no, .wip => false, + .yes => true, + .unknown => { + if (struct_type.flagsUnordered(ip).field_types_wip) { + struct_type.setRequiresComptime(ip, .unknown); + return false; + } - errdefer struct_type.setRequiresComptime(ip, .unknown); + errdefer struct_type.setRequiresComptime(ip, .unknown); - const pt = strat.pt(zcu, tid); - try ty.resolveFields(pt); - - for (0..struct_type.field_types.len) |i_usize| { - const i: u32 = @intCast(i_usize); - if (struct_type.fieldIsComptime(ip, i)) continue; - const field_ty = struct_type.field_types.get(ip)[i]; - if (try Type.fromInterned(field_ty).comptimeOnlyInner(strat, zcu, tid)) { - // Note that this does not cause the layout to - // be considered resolved. Comptime-only types - // still maintain a layout of their - // runtime-known fields. - struct_type.setRequiresComptime(ip, .yes); - return true; + const pt = strat.pt(zcu, tid); + try ty.resolveFields(pt); + + for (0..struct_type.field_types.len) |i_usize| { + const i: u32 = @intCast(i_usize); + if (struct_type.fieldIsComptime(ip, i)) continue; + const field_ty = struct_type.field_types.get(ip)[i]; + if (try Type.fromInterned(field_ty).comptimeOnlyInner(strat, zcu, tid)) { + // Note that this does not cause the layout to + // be considered resolved. Comptime-only types + // still maintain a layout of their + // runtime-known fields. + struct_type.setRequiresComptime(ip, .yes); + return true; + } } - } - struct_type.setRequiresComptime(ip, .no); - return false; + struct_type.setRequiresComptime(ip, .no); + return false; + }, }, }; }, @@ -2882,35 +2904,40 @@ pub fn comptimeOnlyInner( .union_type => { const union_type = ip.loadUnionType(ty.toIntern()); - switch (union_type.setRequiresComptimeWip(ip)) { - .no, .wip => return false, - .yes => return true, - .unknown => { - // Inlined `assert` so that the resolution calls below are not statically reachable. 
- if (strat != .sema) unreachable; - - if (union_type.flagsUnordered(ip).status == .field_types_wip) { - union_type.setRequiresComptime(ip, .unknown); - return false; - } + return switch (strat) { + .normal => switch (union_type.requiresComptime(ip)) { + .wip => unreachable, + .no => false, + .yes => true, + .unknown => unreachable, + }, + .sema => switch (union_type.setRequiresComptimeWip(ip)) { + .no, .wip => return false, + .yes => return true, + .unknown => { + if (union_type.flagsUnordered(ip).status == .field_types_wip) { + union_type.setRequiresComptime(ip, .unknown); + return false; + } - errdefer union_type.setRequiresComptime(ip, .unknown); + errdefer union_type.setRequiresComptime(ip, .unknown); - const pt = strat.pt(zcu, tid); - try ty.resolveFields(pt); + const pt = strat.pt(zcu, tid); + try ty.resolveFields(pt); - for (0..union_type.field_types.len) |field_idx| { - const field_ty = union_type.field_types.get(ip)[field_idx]; - if (try Type.fromInterned(field_ty).comptimeOnlyInner(strat, zcu, tid)) { - union_type.setRequiresComptime(ip, .yes); - return true; + for (0..union_type.field_types.len) |field_idx| { + const field_ty = union_type.field_types.get(ip)[field_idx]; + if (try Type.fromInterned(field_ty).comptimeOnlyInner(strat, zcu, tid)) { + union_type.setRequiresComptime(ip, .yes); + return true; + } } - } - union_type.setRequiresComptime(ip, .no); - return false; + union_type.setRequiresComptime(ip, .no); + return false; + }, }, - } + }; }, .opaque_type => false, @@ -3207,7 +3234,7 @@ pub fn fieldAlignmentInner( ty: Type, index: usize, comptime strat: ResolveStrat, - zcu: *Zcu, + zcu: strat.ZcuPtr(), tid: strat.Tid(), ) SemaError!Alignment { const ip = &zcu.intern_pool; @@ -3281,7 +3308,7 @@ pub fn structFieldAlignmentInner( explicit_alignment: Alignment, layout: std.builtin.Type.ContainerLayout, comptime strat: Type.ResolveStrat, - zcu: *Zcu, + zcu: strat.ZcuPtr(), tid: strat.Tid(), ) SemaError!Alignment { assert(layout != .@"packed"); @@ -3323,7 +3350,7 @@ pub fn unionFieldAlignmentInner( explicit_alignment: Alignment, layout: std.builtin.Type.ContainerLayout, comptime strat: Type.ResolveStrat, - zcu: *Zcu, + zcu: strat.ZcuPtr(), tid: strat.Tid(), ) SemaError!Alignment { assert(layout != .@"packed"); @@ -3392,11 +3419,7 @@ pub const FieldOffset = struct { }; /// Supports structs and unions. -pub fn structFieldOffset( - ty: Type, - index: usize, - zcu: *Zcu, -) u64 { +pub fn structFieldOffset(ty: Type, index: usize, zcu: *const Zcu) u64 { const ip = &zcu.intern_pool; switch (ip.indexToKey(ty.toIntern())) { .struct_type => { @@ -3944,7 +3967,7 @@ fn resolveUnionInner( }; } -pub fn getUnionLayout(loaded_union: InternPool.LoadedUnionType, zcu: *Zcu) Zcu.UnionLayout { +pub fn getUnionLayout(loaded_union: InternPool.LoadedUnionType, zcu: *const Zcu) Zcu.UnionLayout { const ip = &zcu.intern_pool; assert(loaded_union.haveLayout(ip)); var most_aligned_field: u32 = undefined; diff --git a/src/Value.zig b/src/Value.zig index 3048691a5550..3081a28b9b88 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -241,12 +241,12 @@ pub fn getVariable(val: Value, mod: *Zcu) ?InternPool.Key.Variable { /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. 
-pub fn getUnsignedInt(val: Value, zcu: *Zcu) ?u64 { +pub fn getUnsignedInt(val: Value, zcu: *const Zcu) ?u64 { return getUnsignedIntInner(val, .normal, zcu, {}) catch unreachable; } /// Asserts the value is an integer and it fits in a u64 -pub fn toUnsignedInt(val: Value, zcu: *Zcu) u64 { +pub fn toUnsignedInt(val: Value, zcu: *const Zcu) u64 { return getUnsignedInt(val, zcu).?; } @@ -259,7 +259,7 @@ pub fn getUnsignedIntSema(val: Value, pt: Zcu.PerThread) !?u64 { pub fn getUnsignedIntInner( val: Value, comptime strat: ResolveStrat, - zcu: *Zcu, + zcu: strat.ZcuPtr(), tid: strat.Tid(), ) !?u64 { return switch (val.toIntern()) { @@ -304,7 +304,7 @@ pub fn toUnsignedIntSema(val: Value, pt: Zcu.PerThread) !u64 { } /// Asserts the value is an integer and it fits in a i64 -pub fn toSignedInt(val: Value, zcu: *Zcu) i64 { +pub fn toSignedInt(val: Value, zcu: *const Zcu) i64 { return switch (val.toIntern()) { .bool_false => 0, .bool_true => 1, diff --git a/src/Zcu.zig b/src/Zcu.zig index 338c4c55281c..1defb8c2d727 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -19,8 +19,8 @@ const Ast = std.zig.Ast; const Zcu = @This(); const Compilation = @import("Compilation.zig"); const Cache = std.Build.Cache; -const Value = @import("Value.zig"); -const Type = @import("Type.zig"); +pub const Value = @import("Value.zig"); +pub const Type = @import("Type.zig"); const Package = @import("Package.zig"); const link = @import("link.zig"); const Air = @import("Air.zig"); @@ -79,11 +79,11 @@ local_zir_cache: Compilation.Directory, all_exports: std.ArrayListUnmanaged(Export) = .empty, /// This is a list of free indices in `all_exports`. These indices may be reused by exports from /// future semantic analysis. -free_exports: std.ArrayListUnmanaged(u32) = .empty, +free_exports: std.ArrayListUnmanaged(Export.Index) = .empty, /// Maps from an `AnalUnit` which performs a single export, to the index into `all_exports` of /// the export it performs. Note that the key is not the `Decl` being exported, but the `AnalUnit` /// whose analysis triggered the export. -single_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty, +single_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, Export.Index) = .empty, /// Like `single_exports`, but for `AnalUnit`s which perform multiple exports. /// The exports are `all_exports.items[index..][0..len]`. multi_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct { @@ -127,6 +127,7 @@ transitive_failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .emp /// This may be a simple "value" `Nav`, or it may be a function. /// The ErrorMsg memory is owned by the `AnalUnit`, using Module's general purpose allocator. failed_codegen: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, *ErrorMsg) = .empty, +failed_types: std.AutoArrayHashMapUnmanaged(InternPool.Index, *ErrorMsg) = .empty, /// Keep track of one `@compileLog` callsite per `AnalUnit`. /// The value is the source location of the `@compileLog` call, convertible to a `LazySrcLoc`. compile_log_sources: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct { @@ -144,8 +145,7 @@ compile_log_sources: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct { failed_files: std.AutoArrayHashMapUnmanaged(*File, ?*ErrorMsg) = .empty, /// The ErrorMsg memory is owned by the `EmbedFile`, using Module's general purpose allocator. failed_embed_files: std.AutoArrayHashMapUnmanaged(*EmbedFile, *ErrorMsg) = .empty, -/// Key is index into `all_exports`. 
-failed_exports: std.AutoArrayHashMapUnmanaged(u32, *ErrorMsg) = .empty, +failed_exports: std.AutoArrayHashMapUnmanaged(Export.Index, *ErrorMsg) = .empty, /// If analysis failed due to a cimport error, the corresponding Clang errors /// are stored here. cimport_errors: std.AutoArrayHashMapUnmanaged(AnalUnit, std.zig.ErrorBundle) = .empty, @@ -524,6 +524,15 @@ pub const Export = struct { section: InternPool.OptionalNullTerminatedString = .none, visibility: std.builtin.SymbolVisibility = .default, }; + + /// Index into `all_exports`. + pub const Index = enum(u32) { + _, + + pub fn ptr(i: Index, zcu: *const Zcu) *Export { + return &zcu.all_exports.items[@intFromEnum(i)]; + } + }; }; pub const Reference = struct { @@ -2439,16 +2448,14 @@ pub fn deinit(zcu: *Zcu) void { zcu.local_zir_cache.handle.close(); zcu.global_zir_cache.handle.close(); - for (zcu.failed_analysis.values()) |value| { - value.destroy(gpa); - } - for (zcu.failed_codegen.values()) |value| { - value.destroy(gpa); - } + for (zcu.failed_analysis.values()) |value| value.destroy(gpa); + for (zcu.failed_codegen.values()) |value| value.destroy(gpa); + for (zcu.failed_types.values()) |value| value.destroy(gpa); zcu.analysis_in_progress.deinit(gpa); zcu.failed_analysis.deinit(gpa); zcu.transitive_failed_analysis.deinit(gpa); zcu.failed_codegen.deinit(gpa); + zcu.failed_types.deinit(gpa); for (zcu.failed_files.values()) |value| { if (value) |msg| msg.destroy(gpa); @@ -3093,7 +3100,7 @@ pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void { const gpa = zcu.gpa; const exports_base, const exports_len = if (zcu.single_exports.fetchSwapRemove(anal_unit)) |kv| - .{ kv.value, 1 } + .{ @intFromEnum(kv.value), 1 } else if (zcu.multi_exports.fetchSwapRemove(anal_unit)) |info| .{ info.value.index, info.value.len } else @@ -3107,11 +3114,12 @@ pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void { // This case is needed because in some rare edge cases, `Sema` wants to add and delete exports // within a single update. if (dev.env.supports(.incremental)) { - for (exports, exports_base..) |exp, export_idx| { + for (exports, exports_base..) 
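Editor's note: `Export.Index` replaces bare `u32` indices with a non-exhaustive `enum(u32)` carrying a `ptr` accessor, so an export index can no longer be confused with other integers. A self-contained sketch of that typed-index pattern; `Thing`/`ThingList` are hypothetical names used only for illustration:

const std = @import("std");

const Thing = struct { value: u32 };

const ThingList = struct {
    items: std.ArrayListUnmanaged(Thing) = .empty,

    /// Typed index into `items`; cannot be mixed up with other u32s.
    const Index = enum(u32) {
        _,

        fn ptr(i: Index, list: *ThingList) *Thing {
            return &list.items.items[@intFromEnum(i)];
        }
    };
};

test "typed index wraps a plain array index" {
    const gpa = std.testing.allocator;
    var list: ThingList = .{};
    defer list.items.deinit(gpa);

    try list.items.append(gpa, .{ .value = 7 });
    const idx: ThingList.Index = @enumFromInt(list.items.items.len - 1);
    try std.testing.expectEqual(@as(u32, 7), idx.ptr(&list).value);
}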
|exp, export_index_usize| { + const export_idx: Export.Index = @enumFromInt(export_index_usize); if (zcu.comp.bin_file) |lf| { lf.deleteExport(exp.exported, exp.opts.name); } - if (zcu.failed_exports.fetchSwapRemove(@intCast(export_idx))) |failed_kv| { + if (zcu.failed_exports.fetchSwapRemove(export_idx)) |failed_kv| { failed_kv.value.destroy(gpa); } } @@ -3123,7 +3131,7 @@ pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void { return; }; for (exports_base..exports_base + exports_len) |export_idx| { - zcu.free_exports.appendAssumeCapacity(@intCast(export_idx)); + zcu.free_exports.appendAssumeCapacity(@enumFromInt(export_idx)); } } @@ -3269,7 +3277,7 @@ fn lockAndClearFileCompileError(zcu: *Zcu, file: *File) void { pub fn handleUpdateExports( zcu: *Zcu, - export_indices: []const u32, + export_indices: []const Export.Index, result: link.File.UpdateExportsError!void, ) Allocator.Error!void { const gpa = zcu.gpa; @@ -3277,12 +3285,10 @@ pub fn handleUpdateExports( error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { const export_idx = export_indices[0]; - const new_export = &zcu.all_exports.items[export_idx]; + const new_export = export_idx.ptr(zcu); new_export.status = .failed_retryable; try zcu.failed_exports.ensureUnusedCapacity(gpa, 1); - const msg = try ErrorMsg.create(gpa, new_export.src, "unable to export: {s}", .{ - @errorName(err), - }); + const msg = try ErrorMsg.create(gpa, new_export.src, "unable to export: {s}", .{@errorName(err)}); zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, msg); }, }; @@ -3443,7 +3449,7 @@ pub fn atomicPtrAlignment( /// * `@TypeOf(.{})` /// * A struct which has no fields (`struct {}`). /// * Not a struct. -pub fn typeToStruct(zcu: *Zcu, ty: Type) ?InternPool.LoadedStructType { +pub fn typeToStruct(zcu: *const Zcu, ty: Type) ?InternPool.LoadedStructType { if (ty.ip_index == .none) return null; const ip = &zcu.intern_pool; return switch (ip.indexToKey(ty.ip_index)) { @@ -3452,7 +3458,7 @@ pub fn typeToStruct(zcu: *Zcu, ty: Type) ?InternPool.LoadedStructType { }; } -pub fn typeToPackedStruct(zcu: *Zcu, ty: Type) ?InternPool.LoadedStructType { +pub fn typeToPackedStruct(zcu: *const Zcu, ty: Type) ?InternPool.LoadedStructType { const s = zcu.typeToStruct(ty) orelse return null; if (s.layout != .@"packed") return null; return s; @@ -3477,7 +3483,7 @@ pub fn iesFuncIndex(zcu: *const Zcu, ies_index: InternPool.Index) InternPool.Ind } pub fn funcInfo(zcu: *const Zcu, func_index: InternPool.Index) InternPool.Key.Func { - return zcu.intern_pool.indexToKey(func_index).func; + return zcu.intern_pool.toFunc(func_index); } pub fn toEnum(zcu: *const Zcu, comptime E: type, val: Value) E { @@ -3791,6 +3797,18 @@ pub fn navSrcLoc(zcu: *const Zcu, nav_index: InternPool.Nav.Index) LazySrcLoc { }; } +pub fn typeSrcLoc(zcu: *const Zcu, ty_index: InternPool.Index) LazySrcLoc { + _ = zcu; + _ = ty_index; + @panic("TODO"); +} + +pub fn typeFileScope(zcu: *Zcu, ty_index: InternPool.Index) *File { + _ = zcu; + _ = ty_index; + @panic("TODO"); +} + pub fn navSrcLine(zcu: *Zcu, nav_index: InternPool.Nav.Index) u32 { const ip = &zcu.intern_pool; const inst_info = ip.getNav(nav_index).srcInst(ip).resolveFull(ip).?; @@ -4051,3 +4069,54 @@ pub fn navValIsConst(zcu: *const Zcu, val: InternPool.Index) bool { else => true, }; } + +pub const CodegenFailError = error{ + /// Indicates the error message has been already stored at `Zcu.failed_codegen`. 
+    CodegenFail,
+    OutOfMemory,
+};
+
+pub fn codegenFail(
+    zcu: *Zcu,
+    nav_index: InternPool.Nav.Index,
+    comptime format: []const u8,
+    args: anytype,
+) CodegenFailError {
+    const gpa = zcu.gpa;
+    try zcu.failed_codegen.ensureUnusedCapacity(gpa, 1);
+    const msg = try Zcu.ErrorMsg.create(gpa, zcu.navSrcLoc(nav_index), format, args);
+    zcu.failed_codegen.putAssumeCapacityNoClobber(nav_index, msg);
+    return error.CodegenFail;
+}
+
+pub fn codegenFailMsg(zcu: *Zcu, nav_index: InternPool.Nav.Index, msg: *ErrorMsg) CodegenFailError {
+    const gpa = zcu.gpa;
+    {
+        errdefer msg.deinit(gpa);
+        try zcu.failed_codegen.putNoClobber(gpa, nav_index, msg);
+    }
+    return error.CodegenFail;
+}
+
+pub fn codegenFailType(
+    zcu: *Zcu,
+    ty_index: InternPool.Index,
+    comptime format: []const u8,
+    args: anytype,
+) CodegenFailError {
+    const gpa = zcu.gpa;
+    try zcu.failed_types.ensureUnusedCapacity(gpa, 1);
+    const msg = try Zcu.ErrorMsg.create(gpa, zcu.typeSrcLoc(ty_index), format, args);
+    zcu.failed_types.putAssumeCapacityNoClobber(ty_index, msg);
+    return error.CodegenFail;
+}
+
+pub fn codegenFailTypeMsg(zcu: *Zcu, ty_index: InternPool.Index, msg: *ErrorMsg) CodegenFailError {
+    const gpa = zcu.gpa;
+    {
+        errdefer msg.deinit(gpa);
+        try zcu.failed_types.ensureUnusedCapacity(gpa, 1);
+    }
+    zcu.failed_types.putAssumeCapacityNoClobber(ty_index, msg);
+    return error.CodegenFail;
+}
diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig
index 5de00a96cd9b..ab5f2b2994d0 100644
--- a/src/Zcu/PerThread.zig
+++ b/src/Zcu/PerThread.zig
@@ -1722,22 +1722,18 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai
         // Correcting this failure will involve changing a type this function
         // depends on, hence triggering re-analysis of this function, so this
         // interacts correctly with incremental compilation.
-        // TODO: do we need to mark this failure anywhere? I don't think so, since compilation
-        // will fail due to the type error anyway.
     } else if (comp.bin_file) |lf| {
         lf.updateFunc(pt, func_index, air, liveness) catch |err| switch (err) {
             error.OutOfMemory => return error.OutOfMemory,
-            error.AnalysisFail => {
-                assert(zcu.failed_codegen.contains(nav_index));
-            },
-            else => {
+            error.CodegenFail => assert(zcu.failed_codegen.contains(nav_index)),
+            error.Overflow => {
                 try zcu.failed_codegen.putNoClobber(gpa, nav_index, try Zcu.ErrorMsg.create(
                     gpa,
                     zcu.navSrcLoc(nav_index),
                     "unable to codegen: {s}",
                     .{@errorName(err)},
                 ));
-                try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .func = func_index }));
+                // Not a retryable failure.
             },
         };
     } else if (zcu.llvm_object) |llvm_object| {
@@ -2819,8 +2815,8 @@ pub fn processExports(pt: Zcu.PerThread) !void {
     const gpa = zcu.gpa;

     // First, construct a mapping of every exported value and Nav to the indices of all its different exports.
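The `codegenFail*` helpers added to Zcu.zig above are the producer side of the new error flow: they record the `ErrorMsg` under the failing `Nav` (or type) and return `error.CodegenFail`, so consumers such as `linkerUpdateFunc` only need to assert that a message is present. A minimal sketch of how a backend routine is expected to report an unsupported case through the helper; `genSomething` and its message are made up for illustration:

    fn genSomething(zcu: *Zcu, nav: InternPool.Nav.Index) Zcu.CodegenFailError!void {
        // Formats the message, files it under zcu.failed_codegen for `nav`,
        // and hands back error.CodegenFail for the caller to propagate.
        return zcu.codegenFail(nav, "TODO: lower {s} on this target", .{"some AIR instruction"});
    }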
- var nav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, std.ArrayListUnmanaged(u32)) = .empty; - var uav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Index, std.ArrayListUnmanaged(u32)) = .empty; + var nav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, std.ArrayListUnmanaged(Zcu.Export.Index)) = .empty; + var uav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Index, std.ArrayListUnmanaged(Zcu.Export.Index)) = .empty; defer { for (nav_exports.values()) |*exports| { exports.deinit(gpa); @@ -2839,7 +2835,7 @@ pub fn processExports(pt: Zcu.PerThread) !void { try nav_exports.ensureTotalCapacity(gpa, zcu.single_exports.count() + zcu.multi_exports.count()); for (zcu.single_exports.values()) |export_idx| { - const exp = zcu.all_exports.items[export_idx]; + const exp = export_idx.ptr(zcu); const value_ptr, const found_existing = switch (exp.exported) { .nav => |nav| gop: { const gop = try nav_exports.getOrPut(gpa, nav); @@ -2867,7 +2863,7 @@ pub fn processExports(pt: Zcu.PerThread) !void { }, }; if (!found_existing) value_ptr.* = .{}; - try value_ptr.append(gpa, @intCast(export_idx)); + try value_ptr.append(gpa, @enumFromInt(export_idx)); } } @@ -2886,20 +2882,20 @@ pub fn processExports(pt: Zcu.PerThread) !void { } } -const SymbolExports = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, u32); +const SymbolExports = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, Zcu.Export.Index); fn processExportsInner( pt: Zcu.PerThread, symbol_exports: *SymbolExports, exported: Zcu.Exported, - export_indices: []const u32, + export_indices: []const Zcu.Export.Index, ) error{OutOfMemory}!void { const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; for (export_indices) |export_idx| { - const new_export = &zcu.all_exports.items[export_idx]; + const new_export = export_idx.ptr(zcu); const gop = try symbol_exports.getOrPut(gpa, new_export.opts.name); if (gop.found_existing) { new_export.status = .failed_retryable; @@ -2908,7 +2904,7 @@ fn processExportsInner( new_export.opts.name.fmt(ip), }); errdefer msg.destroy(gpa); - const other_export = zcu.all_exports.items[gop.value_ptr.*]; + const other_export = gop.value_ptr.ptr(zcu); try zcu.errNote(other_export.src, msg, "other symbol here", .{}); zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, msg); new_export.status = .failed; @@ -3100,6 +3096,7 @@ pub fn populateTestFunctions( pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) error{OutOfMemory}!void { const zcu = pt.zcu; const comp = zcu.comp; + const gpa = zcu.gpa; const ip = &zcu.intern_pool; const nav = zcu.intern_pool.getNav(nav_index); @@ -3113,26 +3110,15 @@ pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) error } else if (comp.bin_file) |lf| { lf.updateNav(pt, nav_index) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => { - assert(zcu.failed_codegen.contains(nav_index)); - }, - else => { - const gpa = zcu.gpa; - try zcu.failed_codegen.ensureUnusedCapacity(gpa, 1); - zcu.failed_codegen.putAssumeCapacityNoClobber(nav_index, try Zcu.ErrorMsg.create( + error.CodegenFail => assert(zcu.failed_codegen.contains(nav_index)), + error.Overflow => { + try zcu.failed_codegen.putNoClobber(gpa, nav_index, try Zcu.ErrorMsg.create( gpa, zcu.navSrcLoc(nav_index), "unable to codegen: {s}", .{@errorName(err)}, )); - if (nav.analysis != null) { - try zcu.retryable_failures.append(zcu.gpa, .wrap(.{ .nav_val = nav_index })); - } else { - // TODO: 
we don't have a way to indicate that this failure is retryable! - // Since these are really rare, we could as a cop-out retry the whole build next update. - // But perhaps we can do better... - @panic("TODO: retryable failure codegenning non-declaration Nav"); - } + // Not a retryable failure. }, }; } else if (zcu.llvm_object) |llvm_object| { @@ -3142,24 +3128,26 @@ pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) error } } -pub fn linkerUpdateContainerType(pt: Zcu.PerThread, ty: InternPool.Index) !void { +pub fn linkerUpdateContainerType(pt: Zcu.PerThread, ty: InternPool.Index) error{OutOfMemory}!void { const zcu = pt.zcu; + const gpa = zcu.gpa; const comp = zcu.comp; const ip = &zcu.intern_pool; const codegen_prog_node = zcu.codegen_prog_node.start(Type.fromInterned(ty).containerTypeName(ip).toSlice(ip), 0); defer codegen_prog_node.end(); + if (zcu.failed_types.fetchSwapRemove(ty)) |*entry| entry.value.deinit(gpa); + if (!Air.typeFullyResolved(Type.fromInterned(ty), zcu)) { // This type failed to resolve. This is a transitive failure. - // TODO: do we need to mark this failure anywhere? I don't think so, since compilation - // will fail due to the type error anyway. - } else if (comp.bin_file) |lf| { - lf.updateContainerType(pt, ty) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - else => |e| log.err("codegen type failed: {s}", .{@errorName(e)}), - }; + return; } + + if (comp.bin_file) |lf| lf.updateContainerType(pt, ty) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.TypeFailureReported => assert(zcu.failed_types.contains(ty)), + }; } pub fn linkerUpdateLineNumber(pt: Zcu.PerThread, ti: InternPool.TrackedInst.Index) !void { diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 8fd27d4bb796..33f691959219 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -24,7 +24,6 @@ const build_options = @import("build_options"); const Alignment = InternPool.Alignment; const CodeGenError = codegen.CodeGenError; -const Result = codegen.Result; const bits = @import("bits.zig"); const abi = @import("abi.zig"); @@ -51,7 +50,6 @@ debug_output: link.File.DebugInfoOutput, target: *const std.Target, func_index: InternPool.Index, owner_nav: InternPool.Nav.Index, -err_msg: ?*ErrorMsg, args: []MCValue, ret_mcv: MCValue, fn_type: Type, @@ -325,9 +323,9 @@ pub fn generate( func_index: InternPool.Index, air: Air, liveness: Liveness, - code: *std.ArrayList(u8), + code: *std.ArrayListUnmanaged(u8), debug_output: link.File.DebugInfoOutput, -) CodeGenError!Result { +) CodeGenError!void { const zcu = pt.zcu; const gpa = zcu.gpa; const func = zcu.funcInfo(func_index); @@ -353,7 +351,6 @@ pub fn generate( .bin_file = lf, .func_index = func_index, .owner_nav = func.owner_nav, - .err_msg = null, .args = undefined, // populated after `resolveCallingConventionValues` .ret_mcv = undefined, // populated after `resolveCallingConventionValues` .fn_type = fn_type, @@ -370,10 +367,7 @@ pub fn generate( defer function.dbg_info_relocs.deinit(gpa); var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) { - error.CodegenFail => return Result{ .fail = function.err_msg.? }, - error.OutOfRegisters => return Result{ - .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. 
This is a bug in the Zig compiler.", .{}), - }, + error.CodegenFail => return error.CodegenFail, else => |e| return e, }; defer call_info.deinit(&function); @@ -384,24 +378,23 @@ pub fn generate( function.max_end_stack = call_info.stack_byte_count; function.gen() catch |err| switch (err) { - error.CodegenFail => return Result{ .fail = function.err_msg.? }, - error.OutOfRegisters => return Result{ - .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), - }, + error.CodegenFail => return error.CodegenFail, + error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}), else => |e| return e, }; for (function.dbg_info_relocs.items) |reloc| { - try reloc.genDbgInfo(function); + reloc.genDbgInfo(function) catch |err| + return function.fail("failed to generate debug info: {s}", .{@errorName(err)}); } - var mir = Mir{ + var mir: Mir = .{ .instructions = function.mir_instructions.toOwnedSlice(), .extra = try function.mir_extra.toOwnedSlice(gpa), }; defer mir.deinit(gpa); - var emit = Emit{ + var emit: Emit = .{ .mir = mir, .bin_file = lf, .debug_output = debug_output, @@ -417,15 +410,9 @@ pub fn generate( defer emit.deinit(); emit.emitMir() catch |err| switch (err) { - error.EmitFail => return Result{ .fail = emit.err_msg.? }, + error.EmitFail => return function.failMsg(emit.err_msg.?), else => |e| return e, }; - - if (function.err_msg) |em| { - return Result{ .fail = em }; - } else { - return Result.ok; - } } fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { @@ -567,7 +554,7 @@ fn gen(self: *Self) !void { .data = .{ .rr_imm12_sh = .{ .rd = .sp, .rn = .sp, .imm12 = size } }, }); } else { - return self.failSymbol("TODO AArch64: allow larger stacks", .{}); + @panic("TODO AArch64: allow larger stacks"); } _ = try self.addInst(.{ @@ -723,7 +710,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .cmp_gt => try self.airCmp(inst, .gt), .cmp_neq => try self.airCmp(inst, .neq), - .cmp_vector => try self.airCmpVector(inst), + .cmp_vector => try self.airCmpVector(inst), .cmp_lt_errors_len => try self.airCmpLtErrorsLen(inst), .alloc => try self.airAlloc(inst), @@ -744,7 +731,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .fpext => try self.airFpext(inst), .intcast => try self.airIntCast(inst), .trunc => try self.airTrunc(inst), - .int_from_bool => try self.airIntFromBool(inst), + .int_from_bool => try self.airIntFromBool(inst), .is_non_null => try self.airIsNonNull(inst), .is_non_null_ptr => try self.airIsNonNullPtr(inst), .is_null => try self.airIsNull(inst), @@ -756,7 +743,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .load => try self.airLoad(inst), .loop => try self.airLoop(inst), .not => try self.airNot(inst), - .int_from_ptr => try self.airIntFromPtr(inst), + .int_from_ptr => try self.airIntFromPtr(inst), .ret => try self.airRet(inst), .ret_safe => try self.airRet(inst), // TODO .ret_load => try self.airRetLoad(inst), @@ -765,8 +752,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .struct_field_ptr=> try self.airStructFieldPtr(inst), .struct_field_val=> try self.airStructFieldVal(inst), .array_to_slice => try self.airArrayToSlice(inst), - .float_from_int => try self.airFloatFromInt(inst), - .int_from_float => try self.airIntFromFloat(inst), + .float_from_int => try self.airFloatFromInt(inst), + .int_from_float => try self.airIntFromFloat(inst), .cmpxchg_strong => try 
self.airCmpxchg(inst), .cmpxchg_weak => try self.airCmpxchg(inst), .atomic_rmw => try self.airAtomicRmw(inst), @@ -1107,7 +1094,7 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void { /// Copies a value to a register without tracking the register. The register is not considered /// allocated. A second call to `copyToTmpRegister` may return the same register. /// This can have a side effect of spilling instructions to the stack to free up a register. -fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { +fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) InnerError!Register { const raw_reg = try self.register_manager.allocReg(null, gp); const reg = self.registerAlias(raw_reg, ty); try self.genSetReg(ty, reg, mcv); @@ -1125,12 +1112,12 @@ fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCVa return MCValue{ .register = reg }; } -fn airAlloc(self: *Self, inst: Air.Inst.Index) !void { +fn airAlloc(self: *Self, inst: Air.Inst.Index) InnerError!void { const stack_offset = try self.allocMemPtr(inst); return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none }); } -fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { +fn airRetPtr(self: *Self, inst: Air.Inst.Index) InnerError!void { const pt = self.pt; const zcu = pt.zcu; const result: MCValue = switch (self.ret_mcv) { @@ -1152,19 +1139,19 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ .none, .none, .none }); } -fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { +fn airFptrunc(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFptrunc for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airFpext(self: *Self, inst: Air.Inst.Index) !void { +fn airFpext(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFpext for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { +fn airIntCast(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); @@ -1293,7 +1280,7 @@ fn trunc( } } -fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { +fn airTrunc(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); @@ -1306,14 +1293,14 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airIntFromBool(self: *Self, inst: Air.Inst.Index) !void { +fn airIntFromBool(self: *Self, inst: Air.Inst.Index) InnerError!void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else operand; return self.finishAir(inst, result, .{ un_op, .none, .none }); } -fn 
airNot(self: *Self, inst: Air.Inst.Index) !void { +fn airNot(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const pt = self.pt; const zcu = pt.zcu; @@ -1484,7 +1471,7 @@ fn minMax( } } -fn airMinMax(self: *Self, inst: Air.Inst.Index) !void { +fn airMinMax(self: *Self, inst: Air.Inst.Index) InnerError!void { const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs_ty = self.typeOf(bin_op.lhs); @@ -1502,7 +1489,7 @@ fn airMinMax(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airSlice(self: *Self, inst: Air.Inst.Index) !void { +fn airSlice(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { @@ -2440,7 +2427,7 @@ fn ptrArithmetic( } } -fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { +fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) InnerError!void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs_ty = self.typeOf(bin_op.lhs); const rhs_ty = self.typeOf(bin_op.rhs); @@ -2490,7 +2477,7 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { +fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) InnerError!void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const lhs_ty = self.typeOf(bin_op.lhs); @@ -2505,25 +2492,25 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { +fn airAddSat(self: *Self, inst: Air.Inst.Index) InnerError!void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement add_sat for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { +fn airSubSat(self: *Self, inst: Air.Inst.Index) InnerError!void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement sub_sat for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { +fn airMulSat(self: *Self, inst: Air.Inst.Index) InnerError!void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement mul_sat for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { +fn airOverflow(self: *Self, inst: Air.Inst.Index) InnerError!void { const tag = 
self.air.instructions.items(.tag)[@intFromEnum(inst)]; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -2536,9 +2523,9 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(extra.rhs); const tuple_ty = self.typeOfIndex(inst); - const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(zcu))); + const tuple_size: u32 = @intCast(tuple_ty.abiSize(zcu)); const tuple_align = tuple_ty.abiAlignment(zcu); - const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, zcu))); + const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, zcu)); switch (lhs_ty.zigTypeTag(zcu)) { .vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}), @@ -2652,7 +2639,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } -fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { +fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none }); @@ -2876,7 +2863,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } -fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { +fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none }); @@ -3012,13 +2999,13 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } -fn airShlSat(self: *Self, inst: Air.Inst.Index) !void { +fn airShlSat(self: *Self, inst: Air.Inst.Index) InnerError!void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void { +fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const optional_ty = self.typeOf(ty_op.operand); @@ -3055,13 +3042,13 @@ fn optionalPayload(self: *Self, inst: Air.Inst.Index, mcv: MCValue, optional_ty: } } -fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { +fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement .optional_payload_ptr for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { +fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!void 
{ const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement .optional_payload_ptr_set for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -3137,7 +3124,7 @@ fn errUnionErr( } } -fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { +fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; @@ -3218,7 +3205,7 @@ fn errUnionPayload( } } -fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { +fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; @@ -3230,26 +3217,26 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { } // *(E!T) -> E -fn airUnwrapErrErrPtr(self: *Self, inst: Air.Inst.Index) !void { +fn airUnwrapErrErrPtr(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement unwrap error union error ptr for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } // *(E!T) -> *T -fn airUnwrapErrPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { +fn airUnwrapErrPayloadPtr(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement unwrap error union payload ptr for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { +fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement .errunion_payload_ptr_set for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void { +fn airErrReturnTrace(self: *Self, inst: Air.Inst.Index) InnerError!void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else @@ -3257,17 +3244,17 @@ fn airErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ .none, .none, .none }); } -fn airSetErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void { +fn airSetErrReturnTrace(self: *Self, inst: Air.Inst.Index) InnerError!void { _ = inst; return self.fail("TODO implement airSetErrReturnTrace for {}", .{self.target.cpu.arch}); } -fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { +fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) InnerError!void { _ = inst; return self.fail("TODO implement airSaveErrReturnTraceIndex for {}", .{self.target.cpu.arch}); } -fn airWrapOptional(self: *Self, inst: 
Air.Inst.Index) !void { +fn airWrapOptional(self: *Self, inst: Air.Inst.Index) InnerError!void { const pt = self.pt; const zcu = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -3313,7 +3300,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { } /// T to E!T -fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { +fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!void { const pt = self.pt; const zcu = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -3338,7 +3325,7 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { } /// E to E!T -fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { +fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const pt = self.pt; @@ -3379,7 +3366,7 @@ fn slicePtr(mcv: MCValue) MCValue { } } -fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void { +fn airSlicePtr(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const mcv = try self.resolveInst(ty_op.operand); @@ -3388,7 +3375,7 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { +fn airSliceLen(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const ptr_bits = 64; @@ -3412,7 +3399,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { +fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const ptr_bits = 64; @@ -3429,7 +3416,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { +fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const mcv = try self.resolveInst(ty_op.operand); @@ -3444,7 +3431,7 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { +fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) InnerError!void { const pt = self.pt; const zcu = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -3487,7 +3474,7 @@ fn ptrElemVal( } } -fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void { +fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; 
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { @@ -3506,13 +3493,13 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } -fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { +fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) InnerError!void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement array_elem_val for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { +fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) InnerError!void { const pt = self.pt; const zcu = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -3526,7 +3513,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { +fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { @@ -3542,55 +3529,55 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } -fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { +fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) InnerError!void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; _ = bin_op; return self.fail("TODO implement airSetUnionTag for {}", .{self.target.cpu.arch}); } -fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { +fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airGetUnionTag for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airClz(self: *Self, inst: Air.Inst.Index) !void { +fn airClz(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airClz for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airCtz(self: *Self, inst: Air.Inst.Index) !void { +fn airCtz(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airCtz for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { +fn airPopcount(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airPopcount for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ 
ty_op.operand, .none, .none }); } -fn airAbs(self: *Self, inst: Air.Inst.Index) !void { +fn airAbs(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airAbs for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { +fn airByteSwap(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airByteSwap for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { +fn airBitReverse(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airBitReverse for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void { +fn airUnaryMath(self: *Self, inst: Air.Inst.Index) InnerError!void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead @@ -3885,7 +3872,7 @@ fn genInlineMemsetCode( // end: } -fn airLoad(self: *Self, inst: Air.Inst.Index) !void { +fn airLoad(self: *Self, inst: Air.Inst.Index) InnerError!void { const pt = self.pt; const zcu = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -4086,7 +4073,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type } } -fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { +fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) InnerError!void { if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { @@ -4103,14 +4090,14 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) !void { +fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; const result = try self.structFieldPtr(inst, extra.struct_operand, extra.field_index); return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); } -fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { +fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result = try self.structFieldPtr(inst, ty_op.operand, index); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -4138,7 +4125,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde }; } -fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { +fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = 
self.air.extraData(Air.StructField, ty_pl.payload).data; const operand = extra.struct_operand; @@ -4194,7 +4181,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); } -fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { +fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) InnerError!void { const pt = self.pt; const zcu = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -4218,7 +4205,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ extra.field_ptr, .none, .none }); } -fn airArg(self: *Self, inst: Air.Inst.Index) !void { +fn airArg(self: *Self, inst: Air.Inst.Index) InnerError!void { // skip zero-bit arguments as they don't have a corresponding arg instruction var arg_index = self.arg_index; while (self.args[arg_index] == .none) arg_index += 1; @@ -4238,7 +4225,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ .none, .none, .none }); } -fn airTrap(self: *Self) !void { +fn airTrap(self: *Self) InnerError!void { _ = try self.addInst(.{ .tag = .brk, .data = .{ .imm16 = 0x0001 }, @@ -4246,7 +4233,7 @@ fn airTrap(self: *Self) !void { return self.finishAirBookkeeping(); } -fn airBreakpoint(self: *Self) !void { +fn airBreakpoint(self: *Self) InnerError!void { _ = try self.addInst(.{ .tag = .brk, .data = .{ .imm16 = 0xf000 }, @@ -4254,17 +4241,17 @@ fn airBreakpoint(self: *Self) !void { return self.finishAirBookkeeping(); } -fn airRetAddr(self: *Self, inst: Air.Inst.Index) !void { +fn airRetAddr(self: *Self, inst: Air.Inst.Index) InnerError!void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airRetAddr for aarch64", .{}); return self.finishAir(inst, result, .{ .none, .none, .none }); } -fn airFrameAddress(self: *Self, inst: Air.Inst.Index) !void { +fn airFrameAddress(self: *Self, inst: Air.Inst.Index) InnerError!void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFrameAddress for aarch64", .{}); return self.finishAir(inst, result, .{ .none, .none, .none }); } -fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void { +fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) InnerError!void { if (modifier == .always_tail) return self.fail("TODO implement tail calls for aarch64", .{}); const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const callee = pl_op.operand; @@ -4422,7 +4409,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier return bt.finishAir(result); } -fn airRet(self: *Self, inst: Air.Inst.Index) !void { +fn airRet(self: *Self, inst: Air.Inst.Index) InnerError!void { const pt = self.pt; const zcu = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; @@ -4455,7 +4442,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, .dead, .{ un_op, .none, .none }); } -fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { +fn airRetLoad(self: *Self, inst: Air.Inst.Index) InnerError!void { const pt = self.pt; const zcu = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; @@ -4499,7 +4486,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, .dead, .{ un_op, .none, .none }); } -fn airCmp(self: 
*Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { +fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) InnerError!void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs_ty = self.typeOf(bin_op.lhs); @@ -4597,12 +4584,12 @@ fn cmp( } } -fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void { +fn airCmpVector(self: *Self, inst: Air.Inst.Index) InnerError!void { _ = inst; return self.fail("TODO implement airCmpVector for {}", .{self.target.cpu.arch}); } -fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { +fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) InnerError!void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); _ = operand; @@ -4610,7 +4597,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { +fn airDbgStmt(self: *Self, inst: Air.Inst.Index) InnerError!void { const dbg_stmt = self.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt; _ = try self.addInst(.{ @@ -4624,7 +4611,7 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { return self.finishAirBookkeeping(); } -fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void { +fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) InnerError!void { const pt = self.pt; const zcu = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -4635,7 +4622,7 @@ fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void { try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len])); } -fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { +fn airDbgVar(self: *Self, inst: Air.Inst.Index) InnerError!void { const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const operand = pl_op.operand; const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; @@ -4686,7 +4673,7 @@ fn condBr(self: *Self, condition: MCValue) !Mir.Inst.Index { } } -fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { +fn airCondBr(self: *Self, inst: Air.Inst.Index) InnerError!void { const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const cond = try self.resolveInst(pl_op.operand); const extra = self.air.extraData(Air.CondBr, pl_op.payload); @@ -4919,7 +4906,7 @@ fn isNonErr( } } -fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { +fn airIsNull(self: *Self, inst: Air.Inst.Index) InnerError!void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(un_op); @@ -4930,7 +4917,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { +fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) InnerError!void { const pt = self.pt; const zcu = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; @@ -4947,7 +4934,7 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { +fn airIsNonNull(self: *Self, inst: Air.Inst.Index) InnerError!void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: 
MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(un_op); @@ -4958,7 +4945,7 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { +fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) InnerError!void { const pt = self.pt; const zcu = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; @@ -4975,7 +4962,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { +fn airIsErr(self: *Self, inst: Air.Inst.Index) InnerError!void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = un_op }; @@ -4986,7 +4973,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { +fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) InnerError!void { const pt = self.pt; const zcu = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; @@ -5003,7 +4990,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { +fn airIsNonErr(self: *Self, inst: Air.Inst.Index) InnerError!void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = un_op }; @@ -5014,7 +5001,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { +fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) InnerError!void { const pt = self.pt; const zcu = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; @@ -5031,7 +5018,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airLoop(self: *Self, inst: Air.Inst.Index) !void { +fn airLoop(self: *Self, inst: Air.Inst.Index) InnerError!void { // A loop is a setup to be able to jump back to the beginning. const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); @@ -5052,7 +5039,7 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void { }); } -fn airBlock(self: *Self, inst: Air.Inst.Index) !void { +fn airBlock(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Block, ty_pl.payload); try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len])); @@ -5090,7 +5077,7 @@ fn lowerBlock(self: *Self, inst: Air.Inst.Index, body: []const Air.Inst.Index) ! 
return self.finishAir(inst, result, .{ .none, .none, .none }); } -fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { +fn airSwitch(self: *Self, inst: Air.Inst.Index) InnerError!void { const switch_br = self.air.unwrapSwitch(inst); const condition_ty = self.typeOf(switch_br.operand); const liveness = try self.liveness.getSwitchBr( @@ -5224,7 +5211,7 @@ fn performReloc(self: *Self, inst: Mir.Inst.Index) !void { } } -fn airBr(self: *Self, inst: Air.Inst.Index) !void { +fn airBr(self: *Self, inst: Air.Inst.Index) InnerError!void { const branch = self.air.instructions.items(.data)[@intFromEnum(inst)].br; try self.br(branch.block_inst, branch.operand); return self.finishAir(inst, .dead, .{ branch.operand, .none, .none }); @@ -5268,7 +5255,7 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void { })); } -fn airAsm(self: *Self, inst: Air.Inst.Index) !void { +fn airAsm(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; @@ -5601,7 +5588,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .tag = .ldr_ptr_stack, .data = .{ .load_store_stack = .{ .rt = reg, - .offset = @as(u32, @intCast(off)), + .offset = @intCast(off), } }, }); }, @@ -5617,13 +5604,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .immediate => |x| { _ = try self.addInst(.{ .tag = .movz, - .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x)) } }, + .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(x) } }, }); if (x & 0x0000_0000_ffff_0000 != 0) { _ = try self.addInst(.{ .tag = .movk, - .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 16)), .hw = 1 } }, + .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(x >> 16), .hw = 1 } }, }); } @@ -5631,13 +5618,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void if (x & 0x0000_ffff_0000_0000 != 0) { _ = try self.addInst(.{ .tag = .movk, - .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 32)), .hw = 2 } }, + .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(x >> 32), .hw = 2 } }, }); } if (x & 0xffff_0000_0000_0000 != 0) { _ = try self.addInst(.{ .tag = .movk, - .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 48)), .hw = 3 } }, + .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(x >> 48), .hw = 3 } }, }); } } @@ -5709,7 +5696,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .tag = tag, .data = .{ .load_store_stack = .{ .rt = reg, - .offset = @as(u32, @intCast(off)), + .offset = @intCast(off), } }, }); }, @@ -5733,7 +5720,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .tag = tag, .data = .{ .load_store_stack = .{ .rt = reg, - .offset = @as(u32, @intCast(off)), + .offset = @intCast(off), } }, }); }, @@ -5918,13 +5905,13 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I } } -fn airIntFromPtr(self: *Self, inst: Air.Inst.Index) !void { +fn airIntFromPtr(self: *Self, inst: Air.Inst.Index) InnerError!void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result = try self.resolveInst(un_op); return self.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { +fn airBitCast(self: 
*Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); @@ -5945,7 +5932,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { +fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) InnerError!void { const pt = self.pt; const zcu = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -5963,7 +5950,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void { +fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFloatFromInt for {}", .{ self.target.cpu.arch, @@ -5971,7 +5958,7 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void { +fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airIntFromFloat for {}", .{ self.target.cpu.arch, @@ -5979,7 +5966,7 @@ fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { +fn airCmpxchg(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Block, ty_pl.payload); _ = extra; @@ -5989,23 +5976,23 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { }); } -fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void { +fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) InnerError!void { _ = inst; return self.fail("TODO implement airCmpxchg for {}", .{self.target.cpu.arch}); } -fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void { +fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) InnerError!void { _ = inst; return self.fail("TODO implement airAtomicLoad for {}", .{self.target.cpu.arch}); } -fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void { +fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) InnerError!void { _ = inst; _ = order; return self.fail("TODO implement airAtomicStore for {}", .{self.target.cpu.arch}); } -fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { +fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) InnerError!void { _ = inst; if (safety) { // TODO if the value is undef, write 0xaa bytes to dest @@ -6015,12 +6002,12 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { return self.fail("TODO implement airMemset for {}", .{self.target.cpu.arch}); } -fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { +fn airMemcpy(self: *Self, inst: Air.Inst.Index) InnerError!void { _ = inst; return self.fail("TODO implement airMemcpy for {}", 
.{self.target.cpu.arch}); } -fn airTagName(self: *Self, inst: Air.Inst.Index) !void { +fn airTagName(self: *Self, inst: Air.Inst.Index) InnerError!void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else { @@ -6030,7 +6017,7 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { +fn airErrorName(self: *Self, inst: Air.Inst.Index) InnerError!void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else { @@ -6040,33 +6027,33 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airSplat(self: *Self, inst: Air.Inst.Index) !void { +fn airSplat(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airSplat for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airSelect(self: *Self, inst: Air.Inst.Index) !void { +fn airSelect(self: *Self, inst: Air.Inst.Index) InnerError!void { const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airSelect for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs }); } -fn airShuffle(self: *Self, inst: Air.Inst.Index) !void { +fn airShuffle(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airShuffle for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ extra.a, extra.b, .none }); } -fn airReduce(self: *Self, inst: Air.Inst.Index) !void { +fn airReduce(self: *Self, inst: Air.Inst.Index) InnerError!void { const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airReduce for aarch64", .{}); return self.finishAir(inst, result, .{ reduce.operand, .none, .none }); } -fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { +fn airAggregateInit(self: *Self, inst: Air.Inst.Index) InnerError!void { const pt = self.pt; const zcu = pt.zcu; const vector_ty = self.typeOfIndex(inst); @@ -6090,19 +6077,19 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { return bt.finishAir(result); } -fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { +fn airUnionInit(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; _ = extra; return self.fail("TODO implement airUnionInit for aarch64", .{}); } -fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void { +fn airPrefetch(self: *Self, inst: 
Air.Inst.Index) InnerError!void { const prefetch = self.air.instructions.items(.data)[@intFromEnum(inst)].prefetch; return self.finishAir(inst, MCValue.dead, .{ prefetch.ptr, .none, .none }); } -fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { +fn airMulAdd(self: *Self, inst: Air.Inst.Index) InnerError!void { const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else { @@ -6111,7 +6098,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, pl_op.operand }); } -fn airTry(self: *Self, inst: Air.Inst.Index) !void { +fn airTry(self: *Self, inst: Air.Inst.Index) InnerError!void { const pt = self.pt; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = self.air.extraData(Air.Try, pl_op.payload); @@ -6139,7 +6126,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ pl_op.operand, .none, .none }); } -fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void { +fn airTryPtr(self: *Self, inst: Air.Inst.Index) InnerError!void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.TryPtr, ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; @@ -6191,10 +6178,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue { .load_direct => |sym_index| .{ .linker_load = .{ .type = .direct, .sym_index = sym_index } }, .load_symbol, .load_tlv, .lea_symbol, .lea_direct => unreachable, // TODO }, - .fail => |msg| { - self.err_msg = msg; - return error.CodegenFail; - }, + .fail => |msg| return self.failMsg(msg), }; return mcv; } @@ -6355,18 +6339,14 @@ fn wantSafety(self: *Self) bool { }; } -fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError { +fn fail(self: *Self, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } { @branchHint(.cold); - assert(self.err_msg == null); - self.err_msg = try ErrorMsg.create(self.gpa, self.src_loc, format, args); - return error.CodegenFail; + return self.pt.zcu.codegenFail(self.owner_nav, format, args); } -fn failSymbol(self: *Self, comptime format: []const u8, args: anytype) InnerError { +fn failMsg(self: *Self, msg: *ErrorMsg) error{ OutOfMemory, CodegenFail } { @branchHint(.cold); - assert(self.err_msg == null); - self.err_msg = try ErrorMsg.create(self.gpa, self.src_loc, format, args); - return error.CodegenFail; + return self.pt.zcu.codegenFailMsg(self.owner_nav, msg); } fn parseRegName(name: []const u8) ?Register { diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig index e053b42f410d..f76732125bbb 100644 --- a/src/arch/aarch64/Emit.zig +++ b/src/arch/aarch64/Emit.zig @@ -20,7 +20,7 @@ debug_output: link.File.DebugInfoOutput, target: *const std.Target, err_msg: ?*ErrorMsg = null, src_loc: Zcu.LazySrcLoc, -code: *std.ArrayList(u8), +code: *std.ArrayListUnmanaged(u8), prev_di_line: u32, prev_di_column: u32, @@ -424,8 +424,10 @@ fn lowerBranches(emit: *Emit) !void { } fn writeInstruction(emit: *Emit, instruction: Instruction) !void { + const comp = emit.bin_file.comp; + const gpa = comp.gpa; const endian = emit.target.cpu.arch.endian(); - std.mem.writeInt(u32, try emit.code.addManyAsArray(4), instruction.toU32(), endian); + std.mem.writeInt(u32, try emit.code.addManyAsArray(gpa, 4), instruction.toU32(), endian); } fn 
fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 065f4a047de6..65a202803dfe 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -23,7 +23,6 @@ const log = std.log.scoped(.codegen); const build_options = @import("build_options"); const Alignment = InternPool.Alignment; -const Result = codegen.Result; const CodeGenError = codegen.CodeGenError; const bits = @import("bits.zig"); @@ -333,9 +332,9 @@ pub fn generate( func_index: InternPool.Index, air: Air, liveness: Liveness, - code: *std.ArrayList(u8), + code: *std.ArrayListUnmanaged(u8), debug_output: link.File.DebugInfoOutput, -) CodeGenError!Result { +) CodeGenError!void { const zcu = pt.zcu; const gpa = zcu.gpa; const func = zcu.funcInfo(func_index); @@ -377,10 +376,7 @@ pub fn generate( defer function.dbg_info_relocs.deinit(gpa); var call_info = function.resolveCallingConventionValues(func_ty) catch |err| switch (err) { - error.CodegenFail => return Result{ .fail = function.err_msg.? }, - error.OutOfRegisters => return Result{ - .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), - }, + error.CodegenFail => return error.CodegenFail, else => |e| return e, }; defer call_info.deinit(&function); @@ -391,15 +387,14 @@ pub fn generate( function.max_end_stack = call_info.stack_byte_count; function.gen() catch |err| switch (err) { - error.CodegenFail => return Result{ .fail = function.err_msg.? }, - error.OutOfRegisters => return Result{ - .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), - }, + error.CodegenFail => return error.CodegenFail, + error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}), else => |e| return e, }; for (function.dbg_info_relocs.items) |reloc| { - try reloc.genDbgInfo(function); + reloc.genDbgInfo(function) catch |err| + return function.fail("failed to generate debug info: {s}", .{@errorName(err)}); } var mir = Mir{ @@ -424,15 +419,9 @@ pub fn generate( defer emit.deinit(); emit.emitMir() catch |err| switch (err) { - error.EmitFail => return Result{ .fail = emit.err_msg.? 
}, + error.EmitFail => return function.failMsg(emit.err_msg.?), else => |e| return e, }; - - if (function.err_msg) |em| { - return Result{ .fail = em }; - } else { - return Result.ok; - } } fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { @@ -6310,20 +6299,19 @@ fn wantSafety(self: *Self) bool { }; } -fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError { +fn fail(self: *Self, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } { @branchHint(.cold); - assert(self.err_msg == null); - const gpa = self.gpa; - self.err_msg = try ErrorMsg.create(gpa, self.src_loc, format, args); - return error.CodegenFail; + const zcu = self.pt.zcu; + const func = zcu.funcInfo(self.func_index); + const msg = try ErrorMsg.create(zcu.gpa, self.src_loc, format, args); + return zcu.codegenFailMsg(func.owner_nav, msg); } -fn failSymbol(self: *Self, comptime format: []const u8, args: anytype) InnerError { +fn failMsg(self: *Self, msg: *ErrorMsg) error{ OutOfMemory, CodegenFail } { @branchHint(.cold); - assert(self.err_msg == null); - const gpa = self.gpa; - self.err_msg = try ErrorMsg.create(gpa, self.src_loc, format, args); - return error.CodegenFail; + const zcu = self.pt.zcu; + const func = zcu.funcInfo(self.func_index); + return zcu.codegenFailMsg(func.owner_nav, msg); } fn parseRegName(name: []const u8) ?Register { diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig index 03940dfc3cb9..4ec6d3867acf 100644 --- a/src/arch/arm/Emit.zig +++ b/src/arch/arm/Emit.zig @@ -24,7 +24,7 @@ debug_output: link.File.DebugInfoOutput, target: *const std.Target, err_msg: ?*ErrorMsg = null, src_loc: Zcu.LazySrcLoc, -code: *std.ArrayList(u8), +code: *std.ArrayListUnmanaged(u8), prev_di_line: u32, prev_di_column: u32, @@ -342,8 +342,10 @@ fn lowerBranches(emit: *Emit) !void { } fn writeInstruction(emit: *Emit, instruction: Instruction) !void { + const comp = emit.bin_file.comp; + const gpa = comp.gpa; const endian = emit.target.cpu.arch.endian(); - std.mem.writeInt(u32, try emit.code.addManyAsArray(4), instruction.toU32(), endian); + std.mem.writeInt(u32, try emit.code.addManyAsArray(gpa, 4), instruction.toU32(), endian); } fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index a836d02d711d..820188e188df 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -32,7 +32,6 @@ const wip_mir_log = std.log.scoped(.wip_mir); const Alignment = InternPool.Alignment; const CodeGenError = codegen.CodeGenError; -const Result = codegen.Result; const bits = @import("bits.zig"); const abi = @import("abi.zig"); @@ -62,7 +61,6 @@ gpa: Allocator, mod: *Package.Module, target: *const std.Target, debug_output: link.File.DebugInfoOutput, -err_msg: ?*ErrorMsg, args: []MCValue, ret_mcv: InstTracking, fn_type: Type, @@ -759,9 +757,9 @@ pub fn generate( func_index: InternPool.Index, air: Air, liveness: Liveness, - code: *std.ArrayList(u8), + code: *std.ArrayListUnmanaged(u8), debug_output: link.File.DebugInfoOutput, -) CodeGenError!Result { +) CodeGenError!void { const zcu = pt.zcu; const comp = zcu.comp; const gpa = zcu.gpa; @@ -788,7 +786,6 @@ pub fn generate( .target = &mod.resolved_target.result, .debug_output = debug_output, .owner = .{ .nav_index = func.owner_nav }, - .err_msg = null, .args = undefined, // populated after `resolveCallingConventionValues` .ret_mcv = undefined, // populated after `resolveCallingConventionValues` .fn_type = 
fn_type, @@ -829,10 +826,7 @@ pub fn generate( const fn_info = zcu.typeToFunc(fn_type).?; var call_info = function.resolveCallingConventionValues(fn_info, &.{}) catch |err| switch (err) { - error.CodegenFail => return Result{ .fail = function.err_msg.? }, - error.OutOfRegisters => return Result{ - .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), - }, + error.CodegenFail => return error.CodegenFail, else => |e| return e, }; @@ -861,10 +855,8 @@ pub fn generate( })); function.gen() catch |err| switch (err) { - error.CodegenFail => return Result{ .fail = function.err_msg.? }, - error.OutOfRegisters => return Result{ - .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), - }, + error.CodegenFail => return error.CodegenFail, + error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}), else => |e| return e, }; @@ -895,28 +887,10 @@ pub fn generate( defer emit.deinit(); emit.emitMir() catch |err| switch (err) { - error.LowerFail, error.EmitFail => return Result{ .fail = emit.lower.err_msg.? }, - error.InvalidInstruction => |e| { - const msg = switch (e) { - error.InvalidInstruction => "CodeGen failed to find a viable instruction.", - }; - return Result{ - .fail = try ErrorMsg.create( - gpa, - src_loc, - "{s} This is a bug in the Zig compiler.", - .{msg}, - ), - }; - }, + error.LowerFail, error.EmitFail => return function.failMsg(emit.lower.err_msg.?), + error.InvalidInstruction => |e| return function.fail("emit MIR failed: {s} (Zig compiler bug)", .{@errorName(e)}), else => |e| return e, }; - - if (function.err_msg) |em| { - return Result{ .fail = em }; - } else { - return Result.ok; - } } pub fn generateLazy( @@ -924,9 +898,9 @@ pub fn generateLazy( pt: Zcu.PerThread, src_loc: Zcu.LazySrcLoc, lazy_sym: link.File.LazySymbol, - code: *std.ArrayList(u8), + code: *std.ArrayListUnmanaged(u8), debug_output: link.File.DebugInfoOutput, -) CodeGenError!Result { +) CodeGenError!void { const comp = bin_file.comp; const gpa = comp.gpa; const mod = comp.root_mod; @@ -941,7 +915,6 @@ pub fn generateLazy( .target = &mod.resolved_target.result, .debug_output = debug_output, .owner = .{ .lazy_sym = lazy_sym }, - .err_msg = null, .args = undefined, // populated after `resolveCallingConventionValues` .ret_mcv = undefined, // populated after `resolveCallingConventionValues` .fn_type = undefined, @@ -957,10 +930,8 @@ pub fn generateLazy( defer function.mir_instructions.deinit(gpa); function.genLazy(lazy_sym) catch |err| switch (err) { - error.CodegenFail => return Result{ .fail = function.err_msg.? }, - error.OutOfRegisters => return Result{ - .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), - }, + error.CodegenFail => return error.CodegenFail, + error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}), else => |e| return e, }; @@ -991,28 +962,10 @@ pub fn generateLazy( defer emit.deinit(); emit.emitMir() catch |err| switch (err) { - error.LowerFail, error.EmitFail => return Result{ .fail = emit.lower.err_msg.? 
}, - error.InvalidInstruction => |e| { - const msg = switch (e) { - error.InvalidInstruction => "CodeGen failed to find a viable instruction.", - }; - return Result{ - .fail = try ErrorMsg.create( - gpa, - src_loc, - "{s} This is a bug in the Zig compiler.", - .{msg}, - ), - }; - }, + error.LowerFail, error.EmitFail => return function.failMsg(emit.lower.err_msg.?), + error.InvalidInstruction => |e| return function.fail("emit MIR failed: {s} (Zig compiler bug)", .{@errorName(e)}), else => |e| return e, }; - - if (function.err_msg) |em| { - return Result{ .fail = em }; - } else { - return Result.ok; - } } const FormatWipMirData = struct { @@ -4758,19 +4711,19 @@ fn airFieldParentPtr(func: *Func, inst: Air.Inst.Index) !void { return func.fail("TODO implement codegen airFieldParentPtr", .{}); } -fn genArgDbgInfo(func: Func, inst: Air.Inst.Index, mcv: MCValue) !void { +fn genArgDbgInfo(func: *const Func, inst: Air.Inst.Index, mcv: MCValue) InnerError!void { const arg = func.air.instructions.items(.data)[@intFromEnum(inst)].arg; const ty = arg.ty.toType(); if (arg.name == .none) return; switch (func.debug_output) { .dwarf => |dw| switch (mcv) { - .register => |reg| try dw.genLocalDebugInfo( + .register => |reg| dw.genLocalDebugInfo( .local_arg, arg.name.toSlice(func.air), ty, .{ .reg = reg.dwarfNum() }, - ), + ) catch |err| return func.fail("failed to generate debug info: {s}", .{@errorName(err)}), .load_frame => {}, else => {}, }, @@ -4779,7 +4732,7 @@ fn genArgDbgInfo(func: Func, inst: Air.Inst.Index, mcv: MCValue) !void { } } -fn airArg(func: *Func, inst: Air.Inst.Index) !void { +fn airArg(func: *Func, inst: Air.Inst.Index) InnerError!void { var arg_index = func.arg_index; // we skip over args that have no bits @@ -5255,7 +5208,7 @@ fn airDbgInlineBlock(func: *Func, inst: Air.Inst.Index) !void { try func.lowerBlock(inst, @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len])); } -fn airDbgVar(func: *Func, inst: Air.Inst.Index) !void { +fn airDbgVar(func: *Func, inst: Air.Inst.Index) InnerError!void { const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const operand = pl_op.operand; const ty = func.typeOf(operand); @@ -5263,7 +5216,8 @@ fn airDbgVar(func: *Func, inst: Air.Inst.Index) !void { const name: Air.NullTerminatedString = @enumFromInt(pl_op.payload); const tag = func.air.instructions.items(.tag)[@intFromEnum(inst)]; - try func.genVarDbgInfo(tag, ty, mcv, name.toSlice(func.air)); + func.genVarDbgInfo(tag, ty, mcv, name.toSlice(func.air)) catch |err| + return func.fail("failed to generate variable debug info: {s}", .{@errorName(err)}); return func.finishAir(inst, .unreach, .{ operand, .none, .none }); } @@ -8236,10 +8190,7 @@ fn genTypedValue(func: *Func, val: Value) InnerError!MCValue { return func.fail("TODO: genTypedValue {s}", .{@tagName(mcv)}); }, }, - .fail => |msg| { - func.err_msg = msg; - return error.CodegenFail; - }, + .fail => |msg| return func.failMsg(msg), }; return mcv; } @@ -8427,17 +8378,23 @@ fn wantSafety(func: *Func) bool { }; } -fn fail(func: *Func, comptime format: []const u8, args: anytype) InnerError { +fn fail(func: *const Func, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } { @branchHint(.cold); - assert(func.err_msg == null); - func.err_msg = try ErrorMsg.create(func.gpa, func.src_loc, format, args); + const zcu = func.pt.zcu; + switch (func.owner) { + .nav_index => |i| return zcu.codegenFail(i, format, args), + .lazy_sym => |s| return zcu.codegenFailType(s.ty, format, args), + } return 
error.CodegenFail; } -fn failSymbol(func: *Func, comptime format: []const u8, args: anytype) InnerError { +fn failMsg(func: *const Func, msg: *ErrorMsg) error{ OutOfMemory, CodegenFail } { @branchHint(.cold); - assert(func.err_msg == null); - func.err_msg = try ErrorMsg.create(func.gpa, func.src_loc, format, args); + const zcu = func.pt.zcu; + switch (func.owner) { + .nav_index => |i| return zcu.codegenFailMsg(i, msg), + .lazy_sym => |s| return zcu.codegenFailTypeMsg(s.ty, msg), + } return error.CodegenFail; } diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index 2c4c04d5d339..095cfc278b40 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -3,7 +3,7 @@ bin_file: *link.File, lower: Lower, debug_output: link.File.DebugInfoOutput, -code: *std.ArrayList(u8), +code: *std.ArrayListUnmanaged(u8), prev_di_line: u32, prev_di_column: u32, @@ -18,6 +18,7 @@ pub const Error = Lower.Error || error{ }; pub fn emitMir(emit: *Emit) Error!void { + const gpa = emit.bin_file.comp.gpa; log.debug("mir instruction len: {}", .{emit.lower.mir.instructions.len}); for (0..emit.lower.mir.instructions.len) |mir_i| { const mir_index: Mir.Inst.Index = @intCast(mir_i); @@ -30,7 +31,7 @@ pub fn emitMir(emit: *Emit) Error!void { var lowered_relocs = lowered.relocs; for (lowered.insts, 0..) |lowered_inst, lowered_index| { const start_offset: u32 = @intCast(emit.code.items.len); - try lowered_inst.encode(emit.code.writer()); + try lowered_inst.encode(emit.code.writer(gpa)); while (lowered_relocs.len > 0 and lowered_relocs[0].lowered_inst_index == lowered_index) : ({ @@ -56,13 +57,13 @@ pub fn emitMir(emit: *Emit) Error!void { const hi_r_type: u32 = @intFromEnum(std.elf.R_RISCV.HI20); const lo_r_type: u32 = @intFromEnum(std.elf.R_RISCV.LO12_I); - try atom_ptr.addReloc(elf_file.base.comp.gpa, .{ + try atom_ptr.addReloc(gpa, .{ .r_offset = start_offset, .r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | hi_r_type, .r_addend = 0, }, zo); - try atom_ptr.addReloc(elf_file.base.comp.gpa, .{ + try atom_ptr.addReloc(gpa, .{ .r_offset = start_offset + 4, .r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | lo_r_type, .r_addend = 0, @@ -76,19 +77,19 @@ pub fn emitMir(emit: *Emit) Error!void { const R_RISCV = std.elf.R_RISCV; - try atom_ptr.addReloc(elf_file.base.comp.gpa, .{ + try atom_ptr.addReloc(gpa, .{ .r_offset = start_offset, .r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | @intFromEnum(R_RISCV.TPREL_HI20), .r_addend = 0, }, zo); - try atom_ptr.addReloc(elf_file.base.comp.gpa, .{ + try atom_ptr.addReloc(gpa, .{ .r_offset = start_offset + 4, .r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | @intFromEnum(R_RISCV.TPREL_ADD), .r_addend = 0, }, zo); - try atom_ptr.addReloc(elf_file.base.comp.gpa, .{ + try atom_ptr.addReloc(gpa, .{ .r_offset = start_offset + 8, .r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | @intFromEnum(R_RISCV.TPREL_LO12_I), .r_addend = 0, @@ -101,7 +102,7 @@ pub fn emitMir(emit: *Emit) Error!void { const r_type: u32 = @intFromEnum(std.elf.R_RISCV.CALL_PLT); - try atom_ptr.addReloc(elf_file.base.comp.gpa, .{ + try atom_ptr.addReloc(gpa, .{ .r_offset = start_offset, .r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | r_type, .r_addend = 0, diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 7bbed29d8fab..32bca3bc9004 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -21,7 +21,6 @@ const Emit = @import("Emit.zig"); const Liveness = @import("../../Liveness.zig"); const Type = 
@import("../../Type.zig"); const CodeGenError = codegen.CodeGenError; -const Result = @import("../../codegen.zig").Result; const Endian = std.builtin.Endian; const Alignment = InternPool.Alignment; @@ -55,7 +54,7 @@ liveness: Liveness, bin_file: *link.File, target: *const std.Target, func_index: InternPool.Index, -code: *std.ArrayList(u8), +code: *std.ArrayListUnmanaged(u8), debug_output: link.File.DebugInfoOutput, err_msg: ?*ErrorMsg, args: []MCValue, @@ -266,9 +265,9 @@ pub fn generate( func_index: InternPool.Index, air: Air, liveness: Liveness, - code: *std.ArrayList(u8), + code: *std.ArrayListUnmanaged(u8), debug_output: link.File.DebugInfoOutput, -) CodeGenError!Result { +) CodeGenError!void { const zcu = pt.zcu; const gpa = zcu.gpa; const func = zcu.funcInfo(func_index); @@ -284,7 +283,7 @@ pub fn generate( } try branch_stack.append(.{}); - var function = Self{ + var function: Self = .{ .gpa = gpa, .pt = pt, .air = air, @@ -310,10 +309,7 @@ pub fn generate( defer function.exitlude_jump_relocs.deinit(gpa); var call_info = function.resolveCallingConventionValues(func_ty, .callee) catch |err| switch (err) { - error.CodegenFail => return Result{ .fail = function.err_msg.? }, - error.OutOfRegisters => return Result{ - .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), - }, + error.CodegenFail => return error.CodegenFail, else => |e| return e, }; defer call_info.deinit(&function); @@ -324,10 +320,8 @@ pub fn generate( function.max_end_stack = call_info.stack_byte_count; function.gen() catch |err| switch (err) { - error.CodegenFail => return Result{ .fail = function.err_msg.? }, - error.OutOfRegisters => return Result{ - .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), - }, + error.CodegenFail => return error.CodegenFail, + error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}), else => |e| return e, }; @@ -337,7 +331,7 @@ pub fn generate( }; defer mir.deinit(gpa); - var emit = Emit{ + var emit: Emit = .{ .mir = mir, .bin_file = lf, .debug_output = debug_output, @@ -351,15 +345,9 @@ pub fn generate( defer emit.deinit(); emit.emitMir() catch |err| switch (err) { - error.EmitFail => return Result{ .fail = emit.err_msg.? 
}, + error.EmitFail => return function.failMsg(emit.err_msg.?), else => |e| return e, }; - - if (function.err_msg) |em| { - return Result{ .fail = em }; - } else { - return Result.ok; - } } fn gen(self: *Self) !void { @@ -1014,7 +1002,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { return bt.finishAir(result); } -fn airArg(self: *Self, inst: Air.Inst.Index) !void { +fn airArg(self: *Self, inst: Air.Inst.Index) InnerError!void { const pt = self.pt; const zcu = pt.zcu; const arg_index = self.arg_index; @@ -1036,7 +1024,8 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { } }; - try self.genArgDbgInfo(inst, mcv); + self.genArgDbgInfo(inst, mcv) catch |err| + return self.fail("failed to generate debug info for parameter: {s}", .{@errorName(err)}); if (self.liveness.isUnused(inst)) return self.finishAirBookkeeping(); @@ -3511,12 +3500,19 @@ fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) } } -fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError { +fn fail(self: *Self, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } { @branchHint(.cold); - assert(self.err_msg == null); - const gpa = self.gpa; - self.err_msg = try ErrorMsg.create(gpa, self.src_loc, format, args); - return error.CodegenFail; + const zcu = self.pt.zcu; + const func = zcu.funcInfo(self.func_index); + const msg = try ErrorMsg.create(zcu.gpa, self.src_loc, format, args); + return zcu.codegenFailMsg(func.owner_nav, msg); +} + +fn failMsg(self: *Self, msg: *ErrorMsg) error{ OutOfMemory, CodegenFail } { + @branchHint(.cold); + const zcu = self.pt.zcu; + const func = zcu.funcInfo(self.func_index); + return zcu.codegenFailMsg(func.owner_nav, msg); } /// Called when there are no operands, and the instruction is always unreferenced. diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig index ca50aa50c637..74537f023112 100644 --- a/src/arch/sparc64/Emit.zig +++ b/src/arch/sparc64/Emit.zig @@ -22,7 +22,7 @@ debug_output: link.File.DebugInfoOutput, target: *const std.Target, err_msg: ?*ErrorMsg = null, src_loc: Zcu.LazySrcLoc, -code: *std.ArrayList(u8), +code: *std.ArrayListUnmanaged(u8), prev_di_line: u32, prev_di_column: u32, @@ -678,10 +678,13 @@ fn optimalBranchType(emit: *Emit, tag: Mir.Inst.Tag, offset: i64) !BranchType { } fn writeInstruction(emit: *Emit, instruction: Instruction) !void { + const comp = emit.bin_file.comp; + const gpa = comp.gpa; + // SPARCv9 instructions are always arranged in BE regardless of the // endianness mode the CPU is running in (Section 3.1 of the ISA specification). // This is to ease porting in case someone wants to do a LE SPARCv9 backend. 
- const endian = Endian.big; + const endian: Endian = .big; - std.mem.writeInt(u32, try emit.code.addManyAsArray(4), instruction.toU32(), endian); + std.mem.writeInt(u32, try emit.code.addManyAsArray(gpa, 4), instruction.toU32(), endian); } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 49961042bc82..eeaf9988cbd1 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1,14 +1,13 @@ const std = @import("std"); const builtin = @import("builtin"); const Allocator = std.mem.Allocator; -const ArrayList = std.ArrayList; const assert = std.debug.assert; const testing = std.testing; const leb = std.leb; const mem = std.mem; -const wasm = std.wasm; const log = std.log.scoped(.codegen); +const CodeGen = @This(); const codegen = @import("../../codegen.zig"); const Zcu = @import("../../Zcu.zig"); const InternPool = @import("../../InternPool.zig"); @@ -19,13 +18,113 @@ const Compilation = @import("../../Compilation.zig"); const link = @import("../../link.zig"); const Air = @import("../../Air.zig"); const Liveness = @import("../../Liveness.zig"); -const target_util = @import("../../target.zig"); const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); const abi = @import("abi.zig"); const Alignment = InternPool.Alignment; const errUnionPayloadOffset = codegen.errUnionPayloadOffset; const errUnionErrorOffset = codegen.errUnionErrorOffset; +const Wasm = link.File.Wasm; + +const target_util = @import("../../target.zig"); +const libcFloatPrefix = target_util.libcFloatPrefix; +const libcFloatSuffix = target_util.libcFloatSuffix; +const compilerRtFloatAbbrev = target_util.compilerRtFloatAbbrev; +const compilerRtIntAbbrev = target_util.compilerRtIntAbbrev; + +/// Reference to the function declaration the code +/// section belongs to +owner_nav: InternPool.Nav.Index, +/// Current block depth. Used to calculate the relative difference between a break +/// and block +block_depth: u32 = 0, +air: Air, +liveness: Liveness, +gpa: mem.Allocator, +func_index: InternPool.Index, +/// Contains a list of current branches. +/// When we return from a branch, the branch will be popped from this list, +/// which means branches can only contain references from within its own branch, +/// or a branch higher (lower index) in the tree. +branches: std.ArrayListUnmanaged(Branch) = .empty, +/// Table to save `WValue`'s generated by an `Air.Inst` +// values: ValueTable, +/// Mapping from Air.Inst.Index to block ids +blocks: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, struct { + label: u32, + value: WValue, +}) = .{}, +/// Maps `loop` instructions to their label. `br` to here repeats the loop. +loops: std.AutoHashMapUnmanaged(Air.Inst.Index, u32) = .empty, +/// The index the next local generated will have +/// NOTE: arguments share the index with locals therefore the first variable +/// will have the index that comes after the last argument's index +local_index: u32, +/// The index of the current argument. +/// Used to track which argument is being referenced in `airArg`. +arg_index: u32 = 0, +/// List of simd128 immediates. Each value is stored as an array of bytes. +/// This list will only be populated for 128bit-simd values when the target features +/// are enabled also. 
+simd_immediates: std.ArrayListUnmanaged([16]u8) = .empty, +/// The Target we're emitting (used to call intInfo) +target: *const std.Target, +ptr_size: enum { wasm32, wasm64 }, +wasm: *link.File.Wasm, +pt: Zcu.PerThread, +/// List of MIR Instructions +mir_instructions: *std.MultiArrayList(Mir.Inst), +/// Contains extra data for MIR +mir_extra: *std.ArrayListUnmanaged(u32), +start_mir_extra_off: u32, +start_locals_off: u32, +/// List of all locals' types generated throughout this declaration +/// used to emit locals count at start of 'code' section. +locals: *std.ArrayListUnmanaged(std.wasm.Valtype), +/// When a function is executing, we store the current stack pointer's value within this local. +/// This value is then used to restore the stack pointer to the original value at the return of the function. +initial_stack_value: WValue = .none, +/// The current stack pointer subtracted with the stack size. From this value, we will calculate +/// all offsets of the stack values. +bottom_stack_value: WValue = .none, +/// Arguments of this function declaration +/// This will be set after `resolveCallingConventionValues` +args: []WValue, +/// This will only be `.none` if the function returns void, or returns an immediate. +/// When it returns a pointer to the stack, the `.local` tag will be active and must be populated +/// before this function returns its execution to the caller. +return_value: WValue, +/// The size of the stack this function occupies. In the function prologue +/// we will move the stack pointer by this number, forward aligned with the `stack_alignment`. +stack_size: u32 = 0, +/// The stack alignment, which is 16 bytes by default. This is specified by the +/// tool-conventions: https://github.com/WebAssembly/tool-conventions/blob/main/BasicCABI.md +/// and also what the llvm backend will emit. +/// However, local variables or the usage of `incoming_stack_alignment` in a `CallingConvention` can overwrite this default. +stack_alignment: Alignment = .@"16", + +// For each individual Wasm valtype we store a separate free list which +// allows us to re-use locals that are no longer used. e.g. a temporary local. +/// A list of indexes which represents a local of valtype `i32`. +/// It is illegal to store a non-i32 valtype in this list. +free_locals_i32: std.ArrayListUnmanaged(u32) = .empty, +/// A list of indexes which represents a local of valtype `i64`. +/// It is illegal to store a non-i64 valtype in this list. +free_locals_i64: std.ArrayListUnmanaged(u32) = .empty, +/// A list of indexes which represents a local of valtype `f32`. +/// It is illegal to store a non-f32 valtype in this list. +free_locals_f32: std.ArrayListUnmanaged(u32) = .empty, +/// A list of indexes which represents a local of valtype `f64`. +/// It is illegal to store a non-f64 valtype in this list. +free_locals_f64: std.ArrayListUnmanaged(u32) = .empty, +/// A list of indexes which represents a local of valtype `v128`. +/// It is illegal to store a non-v128 valtype in this list. +free_locals_v128: std.ArrayListUnmanaged(u32) = .empty, + +/// When in debug mode, this tracks whether a `finishAir` call was missed. +/// Forgetting to call `finishAir` will cause the result to not be +/// stored in our `values` map and therefore cause bugs. 
+air_bookkeeping: @TypeOf(bookkeeping_init) = bookkeeping_init, /// Wasm Value, created when generating an instruction const WValue = union(enum) { @@ -55,22 +154,15 @@ const WValue = union(enum) { float32: f32, /// A constant 64bit float value float64: f64, - /// A value that represents a pointer to the data section - /// Note: The value contains the symbol index, rather than the actual address - /// as we use this to perform the relocation. - memory: u32, - /// A value that represents a parent pointer and an offset - /// from that pointer. i.e. when slicing with constant values. - memory_offset: struct { - /// The symbol of the parent pointer - pointer: u32, - /// Offset will be set as addend when relocating - offset: u32, + nav_ref: struct { + nav_index: InternPool.Nav.Index, + offset: i32 = 0, + }, + uav_ref: struct { + ip_index: InternPool.Index, + offset: i32 = 0, + orig_ptr_ty: InternPool.Index = .none, }, - /// Represents a function pointer - /// In wasm function pointers are indexes into a function table, - /// rather than an address in the data section. - function_index: u32, /// Offset from the bottom of the virtual stack, with the offset /// pointing to where the value lives. stack_offset: struct { @@ -101,7 +193,7 @@ const WValue = union(enum) { switch (value) { .stack => { const new_local = try gen.allocLocal(ty); - try gen.addLabel(.local_set, new_local.local.value); + try gen.addLocal(.local_set, new_local.local.value); return new_local; }, .local, .stack_offset => return value, @@ -119,7 +211,7 @@ const WValue = union(enum) { if (local_value < reserved + 2) return; // reserved locals may never be re-used. Also accounts for 2 stack locals. const index = local_value - reserved; - const valtype = @as(wasm.Valtype, @enumFromInt(gen.locals.items[index])); + const valtype = gen.locals.items[gen.start_locals_off + index]; switch (valtype) { .i32 => gen.free_locals_i32.append(gen.gpa, local_value) catch return, // It's ok to fail any of those, a new local can be allocated instead .i64 => gen.free_locals_i64.append(gen.gpa, local_value) catch return, @@ -132,8 +224,6 @@ const WValue = union(enum) { } }; -/// Wasm ops, but without input/output/signedness information -/// Used for `buildOpcode` const Op = enum { @"unreachable", nop, @@ -147,12 +237,8 @@ const Op = enum { br_table, @"return", call, - call_indirect, drop, select, - local_get, - local_set, - local_tee, global_get, global_set, load, @@ -200,70 +286,38 @@ const Op = enum { extend, }; -/// Contains the settings needed to create an `Opcode` using `buildOpcode`. -/// -/// The fields correspond to the opcode name. Here is an example -/// i32_trunc_f32_s -/// ^ ^ ^ ^ -/// | | | | -/// valtype1 | | | -/// = .i32 | | | -/// | | | -/// op | | -/// = .trunc | | -/// | | -/// valtype2 | -/// = .f32 | -/// | -/// width | -/// = null | -/// | -/// signed -/// = true -/// -/// There can be missing fields, here are some more examples: -/// i64_load8_u -/// --> .{ .valtype1 = .i64, .op = .load, .width = 8, signed = false } -/// i32_mul -/// --> .{ .valtype1 = .i32, .op = .trunc } -/// nop -/// --> .{ .op = .nop } const OpcodeBuildArguments = struct { /// First valtype in the opcode (usually represents the type of the output) - valtype1: ?wasm.Valtype = null, + valtype1: ?std.wasm.Valtype = null, /// The operation (e.g. call, unreachable, div, min, sqrt, etc.) op: Op, /// Width of the operation (e.g. 
8 for i32_load8_s, 16 for i64_extend16_i32_s) width: ?u8 = null, /// Second valtype in the opcode name (usually represents the type of the input) - valtype2: ?wasm.Valtype = null, + valtype2: ?std.wasm.Valtype = null, /// Signedness of the op signedness: ?std.builtin.Signedness = null, }; -/// Helper function that builds an Opcode given the arguments needed -fn buildOpcode(args: OpcodeBuildArguments) wasm.Opcode { +/// TODO: deprecated, should be split up per tag. +fn buildOpcode(args: OpcodeBuildArguments) std.wasm.Opcode { switch (args.op) { - .@"unreachable" => return .@"unreachable", - .nop => return .nop, - .block => return .block, - .loop => return .loop, - .@"if" => return .@"if", - .@"else" => return .@"else", - .end => return .end, - .br => return .br, - .br_if => return .br_if, - .br_table => return .br_table, - .@"return" => return .@"return", - .call => return .call, - .call_indirect => return .call_indirect, - .drop => return .drop, - .select => return .select, - .local_get => return .local_get, - .local_set => return .local_set, - .local_tee => return .local_tee, - .global_get => return .global_get, - .global_set => return .global_set, + .@"unreachable" => unreachable, + .nop => unreachable, + .block => unreachable, + .loop => unreachable, + .@"if" => unreachable, + .@"else" => unreachable, + .end => unreachable, + .br => unreachable, + .br_if => unreachable, + .br_table => unreachable, + .@"return" => unreachable, + .call => unreachable, + .drop => unreachable, + .select => unreachable, + .global_get => unreachable, + .global_set => unreachable, .load => if (args.width) |width| switch (width) { 8 => switch (args.valtype1.?) { @@ -621,121 +675,17 @@ fn buildOpcode(args: OpcodeBuildArguments) wasm.Opcode { test "Wasm - buildOpcode" { // Make sure buildOpcode is referenced, and test some examples const i32_const = buildOpcode(.{ .op = .@"const", .valtype1 = .i32 }); - const end = buildOpcode(.{ .op = .end }); - const local_get = buildOpcode(.{ .op = .local_get }); const i64_extend32_s = buildOpcode(.{ .op = .extend, .valtype1 = .i64, .width = 32, .signedness = .signed }); const f64_reinterpret_i64 = buildOpcode(.{ .op = .reinterpret, .valtype1 = .f64, .valtype2 = .i64 }); - try testing.expectEqual(@as(wasm.Opcode, .i32_const), i32_const); - try testing.expectEqual(@as(wasm.Opcode, .end), end); - try testing.expectEqual(@as(wasm.Opcode, .local_get), local_get); - try testing.expectEqual(@as(wasm.Opcode, .i64_extend32_s), i64_extend32_s); - try testing.expectEqual(@as(wasm.Opcode, .f64_reinterpret_i64), f64_reinterpret_i64); + try testing.expectEqual(@as(std.wasm.Opcode, .i32_const), i32_const); + try testing.expectEqual(@as(std.wasm.Opcode, .i64_extend32_s), i64_extend32_s); + try testing.expectEqual(@as(std.wasm.Opcode, .f64_reinterpret_i64), f64_reinterpret_i64); } /// Hashmap to store generated `WValue` for each `Air.Inst.Ref` pub const ValueTable = std.AutoArrayHashMapUnmanaged(Air.Inst.Ref, WValue); -const CodeGen = @This(); - -/// Reference to the function declaration the code -/// section belongs to -owner_nav: InternPool.Nav.Index, -src_loc: Zcu.LazySrcLoc, -/// Current block depth. Used to calculate the relative difference between a break -/// and block -block_depth: u32 = 0, -air: Air, -liveness: Liveness, -gpa: mem.Allocator, -debug_output: link.File.DebugInfoOutput, -func_index: InternPool.Index, -/// Contains a list of current branches. 
-/// When we return from a branch, the branch will be popped from this list, -/// which means branches can only contain references from within its own branch, -/// or a branch higher (lower index) in the tree. -branches: std.ArrayListUnmanaged(Branch) = .empty, -/// Table to save `WValue`'s generated by an `Air.Inst` -// values: ValueTable, -/// Mapping from Air.Inst.Index to block ids -blocks: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, struct { - label: u32, - value: WValue, -}) = .{}, -/// Maps `loop` instructions to their label. `br` to here repeats the loop. -loops: std.AutoHashMapUnmanaged(Air.Inst.Index, u32) = .empty, -/// `bytes` contains the wasm bytecode belonging to the 'code' section. -code: *ArrayList(u8), -/// The index the next local generated will have -/// NOTE: arguments share the index with locals therefore the first variable -/// will have the index that comes after the last argument's index -local_index: u32 = 0, -/// The index of the current argument. -/// Used to track which argument is being referenced in `airArg`. -arg_index: u32 = 0, -/// If codegen fails, an error messages will be allocated and saved in `err_msg` -err_msg: *Zcu.ErrorMsg, -/// List of all locals' types generated throughout this declaration -/// used to emit locals count at start of 'code' section. -locals: std.ArrayListUnmanaged(u8), -/// List of simd128 immediates. Each value is stored as an array of bytes. -/// This list will only be populated for 128bit-simd values when the target features -/// are enabled also. -simd_immediates: std.ArrayListUnmanaged([16]u8) = .empty, -/// The Target we're emitting (used to call intInfo) -target: *const std.Target, -/// Represents the wasm binary file that is being linked. -bin_file: *link.File.Wasm, -pt: Zcu.PerThread, -/// List of MIR Instructions -mir_instructions: std.MultiArrayList(Mir.Inst) = .{}, -/// Contains extra data for MIR -mir_extra: std.ArrayListUnmanaged(u32) = .empty, -/// When a function is executing, we store the the current stack pointer's value within this local. -/// This value is then used to restore the stack pointer to the original value at the return of the function. -initial_stack_value: WValue = .none, -/// The current stack pointer subtracted with the stack size. From this value, we will calculate -/// all offsets of the stack values. -bottom_stack_value: WValue = .none, -/// Arguments of this function declaration -/// This will be set after `resolveCallingConventionValues` -args: []WValue = &.{}, -/// This will only be `.none` if the function returns void, or returns an immediate. -/// When it returns a pointer to the stack, the `.local` tag will be active and must be populated -/// before this function returns its execution to the caller. -return_value: WValue = .none, -/// The size of the stack this function occupies. In the function prologue -/// we will move the stack pointer by this number, forward aligned with the `stack_alignment`. -stack_size: u32 = 0, -/// The stack alignment, which is 16 bytes by default. This is specified by the -/// tool-conventions: https://github.com/WebAssembly/tool-conventions/blob/main/BasicCABI.md -/// and also what the llvm backend will emit. -/// However, local variables or the usage of `incoming_stack_alignment` in a `CallingConvention` can overwrite this default. -stack_alignment: Alignment = .@"16", - -// For each individual Wasm valtype we store a seperate free list which -// allows us to re-use locals that are no longer used. e.g. a temporary local. 
-/// A list of indexes which represents a local of valtype `i32`. -/// It is illegal to store a non-i32 valtype in this list. -free_locals_i32: std.ArrayListUnmanaged(u32) = .empty, -/// A list of indexes which represents a local of valtype `i64`. -/// It is illegal to store a non-i64 valtype in this list. -free_locals_i64: std.ArrayListUnmanaged(u32) = .empty, -/// A list of indexes which represents a local of valtype `f32`. -/// It is illegal to store a non-f32 valtype in this list. -free_locals_f32: std.ArrayListUnmanaged(u32) = .empty, -/// A list of indexes which represents a local of valtype `f64`. -/// It is illegal to store a non-f64 valtype in this list. -free_locals_f64: std.ArrayListUnmanaged(u32) = .empty, -/// A list of indexes which represents a local of valtype `v127`. -/// It is illegal to store a non-v128 valtype in this list. -free_locals_v128: std.ArrayListUnmanaged(u32) = .empty, - -/// When in debug mode, this tracks if no `finishAir` was missed. -/// Forgetting to call `finishAir` will cause the result to not be -/// stored in our `values` map and therefore cause bugs. -air_bookkeeping: @TypeOf(bookkeeping_init) = bookkeeping_init, - const bookkeeping_init = if (std.debug.runtime_safety) @as(usize, 0) else {}; const InnerError = error{ @@ -746,38 +696,33 @@ const InnerError = error{ Overflow, } || link.File.UpdateDebugInfoError; -pub fn deinit(func: *CodeGen) void { - // in case of an error and we still have branches - for (func.branches.items) |*branch| { - branch.deinit(func.gpa); - } - func.branches.deinit(func.gpa); - func.blocks.deinit(func.gpa); - func.loops.deinit(func.gpa); - func.locals.deinit(func.gpa); - func.simd_immediates.deinit(func.gpa); - func.mir_instructions.deinit(func.gpa); - func.mir_extra.deinit(func.gpa); - func.free_locals_i32.deinit(func.gpa); - func.free_locals_i64.deinit(func.gpa); - func.free_locals_f32.deinit(func.gpa); - func.free_locals_f64.deinit(func.gpa); - func.free_locals_v128.deinit(func.gpa); - func.* = undefined; +pub fn deinit(cg: *CodeGen) void { + const gpa = cg.gpa; + for (cg.branches.items) |*branch| branch.deinit(gpa); + cg.branches.deinit(gpa); + cg.blocks.deinit(gpa); + cg.loops.deinit(gpa); + cg.simd_immediates.deinit(gpa); + cg.free_locals_i32.deinit(gpa); + cg.free_locals_i64.deinit(gpa); + cg.free_locals_f32.deinit(gpa); + cg.free_locals_f64.deinit(gpa); + cg.free_locals_v128.deinit(gpa); + cg.* = undefined; } -/// Sets `err_msg` on `CodeGen` and returns `error.CodegenFail` which is caught in link/Wasm.zig -fn fail(func: *CodeGen, comptime fmt: []const u8, args: anytype) InnerError { - func.err_msg = try Zcu.ErrorMsg.create(func.gpa, func.src_loc, fmt, args); - return error.CodegenFail; +fn fail(cg: *CodeGen, comptime fmt: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } { + const zcu = cg.pt.zcu; + const func = zcu.funcInfo(cg.func_index); + return zcu.codegenFail(func.owner_nav, fmt, args); } /// Resolves the `WValue` for the given instruction `inst` /// When the given instruction has a `Value`, it returns a constant instead -fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { - var branch_index = func.branches.items.len; +fn resolveInst(cg: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { + var branch_index = cg.branches.items.len; while (branch_index > 0) : (branch_index -= 1) { - const branch = func.branches.items[branch_index - 1]; + const branch = cg.branches.items[branch_index - 1]; if (branch.values.get(ref)) |value| { return value; } @@ -787,16 +732,16 @@ fn resolveInst(func: 
*CodeGen, ref: Air.Inst.Ref) InnerError!WValue { // means we must generate it from a constant. // We always store constants in the most outer branch as they must never // be removed. The most outer branch is always at index 0. - const gop = try func.branches.items[0].values.getOrPut(func.gpa, ref); + const gop = try cg.branches.items[0].values.getOrPut(cg.gpa, ref); assert(!gop.found_existing); - const pt = func.pt; + const pt = cg.pt; const zcu = pt.zcu; - const val = (try func.air.value(ref, pt)).?; - const ty = func.typeOf(ref); + const val = (try cg.air.value(ref, pt)).?; + const ty = cg.typeOf(ref); if (!ty.hasRuntimeBitsIgnoreComptime(zcu) and !ty.isInt(zcu) and !ty.isError(zcu)) { gop.value_ptr.* = .none; - return gop.value_ptr.*; + return .none; } // When we need to pass the value by reference (such as a struct), we will @@ -805,30 +750,24 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { // // In the other cases, we will simply lower the constant to a value that fits // into a single local (such as a pointer, integer, bool, etc). - const result: WValue = if (isByRef(ty, pt, func.target.*)) - switch (try func.bin_file.lowerUav(pt, val.toIntern(), .none, func.src_loc)) { - .mcv => |mcv| .{ .memory = mcv.load_symbol }, - .fail => |err_msg| { - func.err_msg = err_msg; - return error.CodegenFail; - }, - } + const result: WValue = if (isByRef(ty, zcu, cg.target)) + .{ .uav_ref = .{ .ip_index = val.toIntern() } } else - try func.lowerConstant(val, ty); + try cg.lowerConstant(val, ty); gop.value_ptr.* = result; return result; } /// NOTE: if result == .stack, it will be stored in .local -fn finishAir(func: *CodeGen, inst: Air.Inst.Index, result: WValue, operands: []const Air.Inst.Ref) InnerError!void { +fn finishAir(cg: *CodeGen, inst: Air.Inst.Index, result: WValue, operands: []const Air.Inst.Ref) InnerError!void { assert(operands.len <= Liveness.bpi - 1); - var tomb_bits = func.liveness.getTombBits(inst); + var tomb_bits = cg.liveness.getTombBits(inst); for (operands) |operand| { const dies = @as(u1, @truncate(tomb_bits)) != 0; tomb_bits >>= 1; if (!dies) continue; - processDeath(func, operand); + processDeath(cg, operand); } // results of `none` can never be referenced. 
@@ -836,13 +775,13 @@ fn finishAir(func: *CodeGen, inst: Air.Inst.Index, result: WValue, operands: []c const trackable_result = if (result != .stack) result else - try result.toLocal(func, func.typeOfIndex(inst)); - const branch = func.currentBranch(); + try result.toLocal(cg, cg.typeOfIndex(inst)); + const branch = cg.currentBranch(); branch.values.putAssumeCapacityNoClobber(inst.toRef(), trackable_result); } if (std.debug.runtime_safety) { - func.air_bookkeeping += 1; + cg.air_bookkeeping += 1; } } @@ -855,8 +794,8 @@ const Branch = struct { } }; -inline fn currentBranch(func: *CodeGen) *Branch { - return &func.branches.items[func.branches.items.len - 1]; +inline fn currentBranch(cg: *CodeGen) *Branch { + return &cg.branches.items[cg.branches.items.len - 1]; } const BigTomb = struct { @@ -883,131 +822,143 @@ const BigTomb = struct { } }; -fn iterateBigTomb(func: *CodeGen, inst: Air.Inst.Index, operand_count: usize) !BigTomb { - try func.currentBranch().values.ensureUnusedCapacity(func.gpa, operand_count + 1); +fn iterateBigTomb(cg: *CodeGen, inst: Air.Inst.Index, operand_count: usize) !BigTomb { + try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, operand_count + 1); return BigTomb{ - .gen = func, + .gen = cg, .inst = inst, - .lbt = func.liveness.iterateBigTomb(inst), + .lbt = cg.liveness.iterateBigTomb(inst), }; } -fn processDeath(func: *CodeGen, ref: Air.Inst.Ref) void { +fn processDeath(cg: *CodeGen, ref: Air.Inst.Ref) void { if (ref.toIndex() == null) return; // Branches are currently only allowed to free locals allocated // within their own branch. // TODO: Upon branch consolidation free any locals if needed. - const value = func.currentBranch().values.getPtr(ref) orelse return; + const value = cg.currentBranch().values.getPtr(ref) orelse return; if (value.* != .local) return; - const reserved_indexes = func.args.len + @intFromBool(func.return_value != .none); + const reserved_indexes = cg.args.len + @intFromBool(cg.return_value != .none); if (value.local.value < reserved_indexes) { return; // function arguments can never be re-used } log.debug("Decreasing reference for ref: %{d}, using local '{d}'", .{ @intFromEnum(ref.toIndex().?), value.local.value }); value.local.references -= 1; // if this panics, a call to `reuseOperand` was forgotten by the developer if (value.local.references == 0) { - value.free(func); + value.free(cg); } } -/// Appends a MIR instruction and returns its index within the list of instructions -fn addInst(func: *CodeGen, inst: Mir.Inst) error{OutOfMemory}!void { - try func.mir_instructions.append(func.gpa, inst); +fn addInst(cg: *CodeGen, inst: Mir.Inst) error{OutOfMemory}!void { + try cg.mir_instructions.append(cg.gpa, inst); +} + +fn addTag(cg: *CodeGen, tag: Mir.Inst.Tag) error{OutOfMemory}!void { + try cg.addInst(.{ .tag = tag, .data = .{ .tag = {} } }); } -fn addTag(func: *CodeGen, tag: Mir.Inst.Tag) error{OutOfMemory}!void { - try func.addInst(.{ .tag = tag, .data = .{ .tag = {} } }); +fn addExtended(cg: *CodeGen, opcode: std.wasm.MiscOpcode) error{OutOfMemory}!void { + const extra_index = cg.extraLen(); + try cg.mir_extra.append(cg.gpa, @intFromEnum(opcode)); + try cg.addInst(.{ .tag = .misc_prefix, .data = .{ .payload = extra_index } }); } -fn addExtended(func: *CodeGen, opcode: wasm.MiscOpcode) error{OutOfMemory}!void { - const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); - try func.mir_extra.append(func.gpa, @intFromEnum(opcode)); - try func.addInst(.{ .tag = .misc_prefix, .data = .{ .payload = extra_index } }); +fn 
addLabel(cg: *CodeGen, tag: Mir.Inst.Tag, label: u32) error{OutOfMemory}!void { + try cg.addInst(.{ .tag = tag, .data = .{ .label = label } }); } -fn addLabel(func: *CodeGen, tag: Mir.Inst.Tag, label: u32) error{OutOfMemory}!void { - try func.addInst(.{ .tag = tag, .data = .{ .label = label } }); +fn addLocal(cg: *CodeGen, tag: Mir.Inst.Tag, local: u32) error{OutOfMemory}!void { + try cg.addInst(.{ .tag = tag, .data = .{ .local = local } }); +} + +fn addFuncTy(cg: *CodeGen, tag: Mir.Inst.Tag, i: Wasm.FunctionType.Index) error{OutOfMemory}!void { + try cg.addInst(.{ .tag = tag, .data = .{ .func_ty = i } }); } /// Accepts an unsigned 32bit integer rather than a signed integer to /// prevent us from having to bitcast multiple times as most values /// within codegen are represented as unsigned rather than signed. -fn addImm32(func: *CodeGen, imm: u32) error{OutOfMemory}!void { - try func.addInst(.{ .tag = .i32_const, .data = .{ .imm32 = @bitCast(imm) } }); +fn addImm32(cg: *CodeGen, imm: u32) error{OutOfMemory}!void { + try cg.addInst(.{ .tag = .i32_const, .data = .{ .imm32 = @bitCast(imm) } }); } /// Accepts an unsigned 64bit integer rather than a signed integer to /// prevent us from having to bitcast multiple times as most values /// within codegen are represented as unsigned rather than signed. -fn addImm64(func: *CodeGen, imm: u64) error{OutOfMemory}!void { - const extra_index = try func.addExtra(Mir.Imm64.fromU64(imm)); - try func.addInst(.{ .tag = .i64_const, .data = .{ .payload = extra_index } }); +fn addImm64(cg: *CodeGen, imm: u64) error{OutOfMemory}!void { + const extra_index = try cg.addExtra(Mir.Imm64.init(imm)); + try cg.addInst(.{ .tag = .i64_const, .data = .{ .payload = extra_index } }); } /// Accepts the index into the list of 128bit-immediates -fn addImm128(func: *CodeGen, index: u32) error{OutOfMemory}!void { - const simd_values = func.simd_immediates.items[index]; - const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); +fn addImm128(cg: *CodeGen, index: u32) error{OutOfMemory}!void { + const simd_values = cg.simd_immediates.items[index]; + const extra_index = cg.extraLen(); // tag + 128bit value - try func.mir_extra.ensureUnusedCapacity(func.gpa, 5); - func.mir_extra.appendAssumeCapacity(std.wasm.simdOpcode(.v128_const)); - func.mir_extra.appendSliceAssumeCapacity(@alignCast(mem.bytesAsSlice(u32, &simd_values))); - try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); + try cg.mir_extra.ensureUnusedCapacity(cg.gpa, 5); + cg.mir_extra.appendAssumeCapacity(@intFromEnum(std.wasm.SimdOpcode.v128_const)); + cg.mir_extra.appendSliceAssumeCapacity(@alignCast(mem.bytesAsSlice(u32, &simd_values))); + try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); } -fn addFloat64(func: *CodeGen, float: f64) error{OutOfMemory}!void { - const extra_index = try func.addExtra(Mir.Float64.fromFloat64(float)); - try func.addInst(.{ .tag = .f64_const, .data = .{ .payload = extra_index } }); +fn addFloat64(cg: *CodeGen, float: f64) error{OutOfMemory}!void { + const extra_index = try cg.addExtra(Mir.Float64.init(float)); + try cg.addInst(.{ .tag = .f64_const, .data = .{ .payload = extra_index } }); } /// Inserts an instruction to load/store from/to wasm's linear memory dependent on the given `tag`. 
-fn addMemArg(func: *CodeGen, tag: Mir.Inst.Tag, mem_arg: Mir.MemArg) error{OutOfMemory}!void { - const extra_index = try func.addExtra(mem_arg); - try func.addInst(.{ .tag = tag, .data = .{ .payload = extra_index } }); +fn addMemArg(cg: *CodeGen, tag: Mir.Inst.Tag, mem_arg: Mir.MemArg) error{OutOfMemory}!void { + const extra_index = try cg.addExtra(mem_arg); + try cg.addInst(.{ .tag = tag, .data = .{ .payload = extra_index } }); } /// Inserts an instruction from the 'atomics' feature which accesses wasm's linear memory dependent on the /// given `tag`. -fn addAtomicMemArg(func: *CodeGen, tag: wasm.AtomicsOpcode, mem_arg: Mir.MemArg) error{OutOfMemory}!void { - const extra_index = try func.addExtra(@as(struct { val: u32 }, .{ .val = wasm.atomicsOpcode(tag) })); - _ = try func.addExtra(mem_arg); - try func.addInst(.{ .tag = .atomics_prefix, .data = .{ .payload = extra_index } }); +fn addAtomicMemArg(cg: *CodeGen, tag: std.wasm.AtomicsOpcode, mem_arg: Mir.MemArg) error{OutOfMemory}!void { + const extra_index = try cg.addExtra(@as(struct { val: u32 }, .{ .val = @intFromEnum(tag) })); + _ = try cg.addExtra(mem_arg); + try cg.addInst(.{ .tag = .atomics_prefix, .data = .{ .payload = extra_index } }); } /// Helper function to emit atomic mir opcodes. -fn addAtomicTag(func: *CodeGen, tag: wasm.AtomicsOpcode) error{OutOfMemory}!void { - const extra_index = try func.addExtra(@as(struct { val: u32 }, .{ .val = wasm.atomicsOpcode(tag) })); - try func.addInst(.{ .tag = .atomics_prefix, .data = .{ .payload = extra_index } }); +fn addAtomicTag(cg: *CodeGen, tag: std.wasm.AtomicsOpcode) error{OutOfMemory}!void { + const extra_index = try cg.addExtra(@as(struct { val: u32 }, .{ .val = @intFromEnum(tag) })); + try cg.addInst(.{ .tag = .atomics_prefix, .data = .{ .payload = extra_index } }); } /// Appends entries to `mir_extra` based on the type of `extra`. /// Returns the index into `mir_extra` -fn addExtra(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 { +fn addExtra(cg: *CodeGen, extra: anytype) error{OutOfMemory}!u32 { const fields = std.meta.fields(@TypeOf(extra)); - try func.mir_extra.ensureUnusedCapacity(func.gpa, fields.len); - return func.addExtraAssumeCapacity(extra); + try cg.mir_extra.ensureUnusedCapacity(cg.gpa, fields.len); + return cg.addExtraAssumeCapacity(extra); } /// Appends entries to `mir_extra` based on the type of `extra`. /// Returns the index into `mir_extra` -fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 { +fn addExtraAssumeCapacity(cg: *CodeGen, extra: anytype) error{OutOfMemory}!u32 { const fields = std.meta.fields(@TypeOf(extra)); - const result = @as(u32, @intCast(func.mir_extra.items.len)); + const result = cg.extraLen(); inline for (fields) |field| { - func.mir_extra.appendAssumeCapacity(switch (field.type) { + cg.mir_extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), + i32 => @bitCast(@field(extra, field.name)), + InternPool.Index, + InternPool.Nav.Index, + Wasm.UavsObjIndex, + Wasm.UavsExeIndex, + => @intFromEnum(@field(extra, field.name)), else => |field_type| @compileError("Unsupported field type " ++ @typeName(field_type)), }); } return result; } -/// Using a given `Type`, returns the corresponding valtype for .auto callconv -fn typeToValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) wasm.Valtype { - const zcu = pt.zcu; +/// For `std.builtin.CallingConvention.auto`. 
+pub fn typeToValtype(ty: Type, zcu: *const Zcu, target: *const std.Target) std.wasm.Valtype { const ip = &zcu.intern_pool; return switch (ty.zigTypeTag(zcu)) { - .float => switch (ty.floatBits(target)) { + .float => switch (ty.floatBits(target.*)) { 16 => .i32, // stored/loaded as u16 32 => .f32, 64 => .f64, @@ -1022,19 +973,20 @@ fn typeToValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) wasm.Valtype { .@"struct" => blk: { if (zcu.typeToPackedStruct(ty)) |packed_struct| { const backing_int_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)); - break :blk typeToValtype(backing_int_ty, pt, target); + break :blk typeToValtype(backing_int_ty, zcu, target); } else { break :blk .i32; } }, - .vector => switch (determineSimdStoreStrategy(ty, zcu, target)) { + .vector => switch (CodeGen.determineSimdStoreStrategy(ty, zcu, target)) { .direct => .v128, .unrolled => .i32, }, .@"union" => switch (ty.containerLayout(zcu)) { - .@"packed" => blk: { - const int_ty = pt.intType(.unsigned, @as(u16, @intCast(ty.bitSize(zcu)))) catch @panic("out of memory"); - break :blk typeToValtype(int_ty, pt, target); + .@"packed" => switch (ty.bitSize(zcu)) { + 0...32 => .i32, + 33...64 => .i64, + else => .i32, }, else => .i32, }, @@ -1042,42 +994,94 @@ fn typeToValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) wasm.Valtype { }; } -/// Using a given `Type`, returns the byte representation of its wasm value type -fn genValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) u8 { - return wasm.valtype(typeToValtype(ty, pt, target)); -} - /// Using a given `Type`, returns the corresponding wasm value type -/// Differently from `genValtype` this also allows `void` to create a block +/// Differently from `typeToValtype` this also allows `void` to create a block /// with no return type -fn genBlockType(ty: Type, pt: Zcu.PerThread, target: std.Target) u8 { +fn genBlockType(ty: Type, zcu: *const Zcu, target: *const std.Target) std.wasm.BlockType { return switch (ty.ip_index) { - .void_type, .noreturn_type => wasm.block_empty, - else => genValtype(ty, pt, target), + .void_type, .noreturn_type => .empty, + else => .fromValtype(typeToValtype(ty, zcu, target)), }; } /// Writes the bytecode depending on the given `WValue` in `val` -fn emitWValue(func: *CodeGen, value: WValue) InnerError!void { +fn emitWValue(cg: *CodeGen, value: WValue) InnerError!void { switch (value) { .dead => unreachable, // reference to free'd `WValue` (missing reuseOperand?) 
.none, .stack => {}, // no-op - .local => |idx| try func.addLabel(.local_get, idx.value), - .imm32 => |val| try func.addImm32(val), - .imm64 => |val| try func.addImm64(val), - .imm128 => |val| try func.addImm128(val), - .float32 => |val| try func.addInst(.{ .tag = .f32_const, .data = .{ .float32 = val } }), - .float64 => |val| try func.addFloat64(val), - .memory => |ptr| { - const extra_index = try func.addExtra(Mir.Memory{ .pointer = ptr, .offset = 0 }); - try func.addInst(.{ .tag = .memory_address, .data = .{ .payload = extra_index } }); + .local => |idx| try cg.addLocal(.local_get, idx.value), + .imm32 => |val| try cg.addImm32(val), + .imm64 => |val| try cg.addImm64(val), + .imm128 => |val| try cg.addImm128(val), + .float32 => |val| try cg.addInst(.{ .tag = .f32_const, .data = .{ .float32 = val } }), + .float64 => |val| try cg.addFloat64(val), + .nav_ref => |nav_ref| { + const wasm = cg.wasm; + const comp = wasm.base.comp; + const zcu = comp.zcu.?; + const ip = &zcu.intern_pool; + if (ip.getNav(nav_ref.nav_index).isFn(ip)) { + assert(nav_ref.offset == 0); + const gop = try wasm.zcu_indirect_function_set.getOrPut(comp.gpa, nav_ref.nav_index); + if (!gop.found_existing) gop.value_ptr.* = {}; + try cg.addInst(.{ + .tag = .func_ref, + .data = .{ .indirect_function_table_index = @enumFromInt(gop.index) }, + }); + } else if (nav_ref.offset == 0) { + try cg.addInst(.{ .tag = .nav_ref, .data = .{ .nav_index = nav_ref.nav_index } }); + } else { + try cg.addInst(.{ + .tag = .nav_ref_off, + .data = .{ + .payload = try cg.addExtra(Mir.NavRefOff{ + .nav_index = nav_ref.nav_index, + .offset = nav_ref.offset, + }), + }, + }); + } }, - .memory_offset => |mem_off| { - const extra_index = try func.addExtra(Mir.Memory{ .pointer = mem_off.pointer, .offset = mem_off.offset }); - try func.addInst(.{ .tag = .memory_address, .data = .{ .payload = extra_index } }); + .uav_ref => |uav| { + const wasm = cg.wasm; + const comp = wasm.base.comp; + const is_obj = comp.config.output_mode == .Obj; + const zcu = comp.zcu.?; + const ip = &zcu.intern_pool; + if (ip.isFunctionType(ip.typeOf(uav.ip_index))) { + assert(uav.offset == 0); + const owner_nav = ip.toFunc(uav.ip_index).owner_nav; + const gop = try wasm.zcu_indirect_function_set.getOrPut(comp.gpa, owner_nav); + if (!gop.found_existing) gop.value_ptr.* = {}; + try cg.addInst(.{ + .tag = .func_ref, + .data = .{ .indirect_function_table_index = @enumFromInt(gop.index) }, + }); + } else if (uav.offset == 0) { + try cg.addInst(.{ + .tag = .uav_ref, + .data = if (is_obj) .{ + .uav_obj = try wasm.refUavObj(uav.ip_index, uav.orig_ptr_ty), + } else .{ + .uav_exe = try wasm.refUavExe(uav.ip_index, uav.orig_ptr_ty), + }, + }); + } else { + try cg.addInst(.{ + .tag = .uav_ref_off, + .data = .{ + .payload = if (is_obj) try cg.addExtra(Mir.UavRefOffObj{ + .uav_obj = try wasm.refUavObj(uav.ip_index, uav.orig_ptr_ty), + .offset = uav.offset, + }) else try cg.addExtra(Mir.UavRefOffExe{ + .uav_exe = try wasm.refUavExe(uav.ip_index, uav.orig_ptr_ty), + .offset = uav.offset, + }), + }, + }); + } }, - .function_index => |index| try func.addLabel(.function_index, index), // write function index and generate relocation - .stack_offset => try func.addLabel(.local_get, func.bottom_stack_value.local.value), // caller must ensure to address the offset + .stack_offset => try cg.addLocal(.local_get, cg.bottom_stack_value.local.value), // caller must ensure to address the offset } } @@ -1085,7 +1089,7 @@ fn emitWValue(func: *CodeGen, value: WValue) InnerError!void { /// The old `WValue` found 
at instruction `ref` is then replaced by the
 /// modified `WValue` and returned. When given a non-local or non-stack-offset,
 /// returns the given `operand` itself instead.
-fn reuseOperand(func: *CodeGen, ref: Air.Inst.Ref, operand: WValue) WValue {
+fn reuseOperand(cg: *CodeGen, ref: Air.Inst.Ref, operand: WValue) WValue {
     if (operand != .local and operand != .stack_offset) return operand;
     var new_value = operand;
     switch (new_value) {
@@ -1093,17 +1097,17 @@ fn reuseOperand(func: *CodeGen, ref: Air.Inst.Ref, operand: WValue) WValue {
         .stack_offset => |*stack_offset| stack_offset.references += 1,
         else => unreachable,
     }
-    const old_value = func.getResolvedInst(ref);
+    const old_value = cg.getResolvedInst(ref);
     old_value.* = new_value;
     return new_value;
 }

 /// From a reference, returns its resolved `WValue`.
 /// It's illegal to provide an `Air.Inst.Ref` that hasn't been resolved yet.
-fn getResolvedInst(func: *CodeGen, ref: Air.Inst.Ref) *WValue {
-    var index = func.branches.items.len;
+fn getResolvedInst(cg: *CodeGen, ref: Air.Inst.Ref) *WValue {
+    var index = cg.branches.items.len;
     while (index > 0) : (index -= 1) {
-        const branch = func.branches.items[index - 1];
+        const branch = cg.branches.items[index - 1];
         if (branch.values.getPtr(ref)) |value| {
             return value;
         }
@@ -1113,243 +1117,238 @@ fn getResolvedInst(func: *CodeGen, ref: Air.Inst.Ref) *WValue {
 /// Creates one local for a given `Type`.
 /// Returns a corresponding `WValue` with `local` as active tag
-fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
-    const pt = func.pt;
-    const valtype = typeToValtype(ty, pt, func.target.*);
+fn allocLocal(cg: *CodeGen, ty: Type) InnerError!WValue {
+    const zcu = cg.pt.zcu;
+    const valtype = typeToValtype(ty, zcu, cg.target);
     const index_or_null = switch (valtype) {
-        .i32 => func.free_locals_i32.popOrNull(),
-        .i64 => func.free_locals_i64.popOrNull(),
-        .f32 => func.free_locals_f32.popOrNull(),
-        .f64 => func.free_locals_f64.popOrNull(),
-        .v128 => func.free_locals_v128.popOrNull(),
+        .i32 => cg.free_locals_i32.popOrNull(),
+        .i64 => cg.free_locals_i64.popOrNull(),
+        .f32 => cg.free_locals_f32.popOrNull(),
+        .f64 => cg.free_locals_f64.popOrNull(),
+        .v128 => cg.free_locals_v128.popOrNull(),
     };
     if (index_or_null) |index| {
         log.debug("reusing local ({d}) of type {}", .{ index, valtype });
         return .{ .local = .{ .value = index, .references = 1 } };
     }
     log.debug("new local of type {}", .{valtype});
-    return func.ensureAllocLocal(ty);
+    return cg.ensureAllocLocal(ty);
 }

 /// Ensures a new local will be created. This is useful when a
 /// zero-initialized local is needed.
-fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
-    const pt = func.pt;
-    try func.locals.append(func.gpa, genValtype(ty, pt, func.target.*));
-    const initial_index = func.local_index;
-    func.local_index += 1;
+fn ensureAllocLocal(cg: *CodeGen, ty: Type) InnerError!WValue {
+    const zcu = cg.pt.zcu;
+    try cg.locals.append(cg.gpa, typeToValtype(ty, zcu, cg.target));
+    const initial_index = cg.local_index;
+    cg.local_index += 1;
     return .{ .local = .{ .value = initial_index, .references = 1 } };
 }

-/// Generates a `wasm.Type` from a given function type.
-/// Memory is owned by the caller.
-fn genFunctype(
-    gpa: Allocator,
-    cc: std.builtin.CallingConvention,
-    params: []const InternPool.Index,
-    return_type: Type,
-    pt: Zcu.PerThread,
-    target: std.Target,
-) !wasm.Type {
-    const zcu = pt.zcu;
-    var temp_params = std.ArrayList(wasm.Valtype).init(gpa);
-    defer temp_params.deinit();
-    var returns = std.ArrayList(wasm.Valtype).init(gpa);
-    defer returns.deinit();
-
-    if (firstParamSRet(cc, return_type, pt, target)) {
-        try temp_params.append(.i32); // memory address is always a 32-bit handle
-    } else if (return_type.hasRuntimeBitsIgnoreComptime(zcu)) {
-        if (cc == .wasm_watc) {
-            const res_classes = abi.classifyType(return_type, zcu);
-            assert(res_classes[0] == .direct and res_classes[1] == .none);
-            const scalar_type = abi.scalarType(return_type, zcu);
-            try returns.append(typeToValtype(scalar_type, pt, target));
-        } else {
-            try returns.append(typeToValtype(return_type, pt, target));
+pub const Function = extern struct {
+    /// Index into `Wasm.mir_instructions`.
+    mir_off: u32,
+    /// This is unused except as a safety slice bound and could be removed.
+    mir_len: u32,
+    /// Index into `Wasm.mir_extra`.
+    mir_extra_off: u32,
+    /// This is unused except as a safety slice bound and could be removed.
+    mir_extra_len: u32,
+    locals_off: u32,
+    locals_len: u32,
+    prologue: Prologue,
+
+    pub const Prologue = extern struct {
+        flags: Flags,
+        sp_local: u32,
+        stack_size: u32,
+        bottom_stack_local: u32,
+
+        pub const Flags = packed struct(u32) {
+            stack_alignment: Alignment,
+            padding: u26 = 0,
+        };
+
+        pub const none: Prologue = .{
+            .sp_local = 0,
+            .flags = .{ .stack_alignment = .none },
+            .stack_size = 0,
+            .bottom_stack_local = 0,
+        };
+
+        pub fn isNone(p: *const Prologue) bool {
+            return p.flags.stack_alignment == .none;
         }
-    } else if (return_type.isError(zcu)) {
-        try returns.append(.i32);
-    }
-
-    // param types
-    for (params) |param_type_ip| {
-        const param_type = Type.fromInterned(param_type_ip);
-        if (!param_type.hasRuntimeBitsIgnoreComptime(zcu)) continue;
-
-        switch (cc) {
-            .wasm_watc => {
-                const param_classes = abi.classifyType(param_type, zcu);
-                if (param_classes[1] == .none) {
-                    if (param_classes[0] == .direct) {
-                        const scalar_type = abi.scalarType(param_type, zcu);
-                        try temp_params.append(typeToValtype(scalar_type, pt, target));
-                    } else {
-                        try temp_params.append(typeToValtype(param_type, pt, target));
-                    }
-                } else {
-                    // i128/f128
-                    try temp_params.append(.i64);
-                    try temp_params.append(.i64);
-                }
-            },
-            else => try temp_params.append(typeToValtype(param_type, pt, target)),
+    };
+
+    pub fn lower(f: *Function, wasm: *Wasm, code: *std.ArrayListUnmanaged(u8)) Allocator.Error!void {
+        const gpa = wasm.base.comp.gpa;
+
+        // Write the locals in the prologue of the function body.
+        const locals = wasm.all_zcu_locals.items[f.locals_off..][0..f.locals_len];
+        try code.ensureUnusedCapacity(gpa, 5 + locals.len * 6 + 38);
+
+        std.leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(locals.len))) catch unreachable;
+        for (locals) |local| {
+            std.leb.writeUleb128(code.fixedWriter(), @as(u32, 1)) catch unreachable;
+            code.appendAssumeCapacity(@intFromEnum(local));
+        }
+
+        // Stack management section of function prologue.
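+        // Roughly the following sequence is emitted here (an illustrative sketch only;
+        // the actual global and local indices are encoded below as LEB128 immediates):
+        //   global.get <stack pointer global>   ;; load the current stack pointer
+        //   local.tee  <sp_local>               ;; save it so it can be restored on return
+        //   i32.const  <aligned stack size>
+        //   i32.sub                             ;; carve out this function's frame
+        //   i32.const  -<stack alignment>
+        //   i32.and                             ;; round the frame base down to the alignment
+        //   local.tee  <bottom_stack_local>     ;; base used for all stack offsets below
+        //   global.set <stack pointer global>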
+        const stack_alignment = f.prologue.flags.stack_alignment;
+        if (stack_alignment.toByteUnits()) |align_bytes| {
+            const sp_global: Wasm.GlobalIndex = .stack_pointer;
+            // load stack pointer
+            code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_get));
+            std.leb.writeUleb128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
+            // store stack pointer so we can restore it when we return from the function
+            code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_tee));
+            std.leb.writeUleb128(code.fixedWriter(), f.prologue.sp_local) catch unreachable;
+            // get the total stack size
+            const aligned_stack: i32 = @intCast(stack_alignment.forward(f.prologue.stack_size));
+            code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
+            std.leb.writeIleb128(code.fixedWriter(), aligned_stack) catch unreachable;
+            // subtract it from the current stack pointer
+            code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_sub));
+            // Get negative stack alignment
+            const neg_stack_align = @as(i32, @intCast(align_bytes)) * -1;
+            code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
+            std.leb.writeIleb128(code.fixedWriter(), neg_stack_align) catch unreachable;
+            // Bitwise-and the value to get the new stack pointer to ensure the
+            // pointers are aligned with the ABI alignment.
+            code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_and));
+            // The bottom will be used to calculate all stack pointer offsets.
+            code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_tee));
+            std.leb.writeUleb128(code.fixedWriter(), f.prologue.bottom_stack_local) catch unreachable;
+            // Store the current stack pointer value into the global stack pointer so other function calls will
+            // start from this value instead and not overwrite the current stack.
+            code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_set));
+            std.leb.writeUleb128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
         }
+
+        var emit: Emit = .{
+            .mir = .{
+                .instruction_tags = wasm.mir_instructions.items(.tag)[f.mir_off..][0..f.mir_len],
+                .instruction_datas = wasm.mir_instructions.items(.data)[f.mir_off..][0..f.mir_len],
+                .extra = wasm.mir_extra.items[f.mir_extra_off..][0..f.mir_extra_len],
+            },
+            .wasm = wasm,
+            .code = code,
+        };
+        try emit.lowerToCode();
     }
+};

-    return wasm.Type{
-        .params = try temp_params.toOwnedSlice(),
-        .returns = try returns.toOwnedSlice(),
-    };
-}
+pub const Error = error{
+    OutOfMemory,
+    /// Compiler was asked to operate on a number larger than supported.
+    Overflow,
+    /// Indicates the error is already stored in Zcu `failed_codegen`.
+ CodegenFail, +}; -pub fn generate( - bin_file: *link.File, +pub fn function( + wasm: *Wasm, pt: Zcu.PerThread, - src_loc: Zcu.LazySrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, - code: *std.ArrayList(u8), - debug_output: link.File.DebugInfoOutput, -) codegen.CodeGenError!codegen.Result { +) Error!Function { const zcu = pt.zcu; const gpa = zcu.gpa; - const func = zcu.funcInfo(func_index); - const file_scope = zcu.navFileScope(func.owner_nav); + const cg = zcu.funcInfo(func_index); + const file_scope = zcu.navFileScope(cg.owner_nav); const target = &file_scope.mod.resolved_target.result; + const fn_ty = zcu.navValue(cg.owner_nav).typeOf(zcu); + const fn_info = zcu.typeToFunc(fn_ty).?; + const ip = &zcu.intern_pool; + const fn_ty_index = try wasm.internFunctionType(fn_info.cc, fn_info.param_types.get(ip), .fromInterned(fn_info.return_type), target); + const returns = fn_ty_index.ptr(wasm).returns.slice(wasm); + const any_returns = returns.len != 0; + + var cc_result = try resolveCallingConventionValues(zcu, fn_ty, target); + defer cc_result.deinit(gpa); + var code_gen: CodeGen = .{ .gpa = gpa, .pt = pt, .air = air, .liveness = liveness, - .code = code, - .owner_nav = func.owner_nav, - .src_loc = src_loc, - .err_msg = undefined, - .locals = .{}, + .owner_nav = cg.owner_nav, .target = target, - .bin_file = bin_file.cast(.wasm).?, - .debug_output = debug_output, + .ptr_size = switch (target.cpu.arch) { + .wasm32 => .wasm32, + .wasm64 => .wasm64, + else => unreachable, + }, + .wasm = wasm, .func_index = func_index, + .args = cc_result.args, + .return_value = cc_result.return_value, + .local_index = cc_result.local_index, + .mir_instructions = &wasm.mir_instructions, + .mir_extra = &wasm.mir_extra, + .locals = &wasm.all_zcu_locals, + .start_mir_extra_off = @intCast(wasm.mir_extra.items.len), + .start_locals_off = @intCast(wasm.all_zcu_locals.items.len), }; defer code_gen.deinit(); - genFunc(&code_gen) catch |err| switch (err) { - error.CodegenFail => return codegen.Result{ .fail = code_gen.err_msg }, - else => |e| return e, + return functionInner(&code_gen, any_returns) catch |err| switch (err) { + error.CodegenFail => return error.CodegenFail, + else => |e| return code_gen.fail("failed to generate function: {s}", .{@errorName(e)}), }; - - return codegen.Result.ok; } -fn genFunc(func: *CodeGen) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ip = &zcu.intern_pool; - const fn_ty = zcu.navValue(func.owner_nav).typeOf(zcu); - const fn_info = zcu.typeToFunc(fn_ty).?; - var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, func.target.*); - defer func_type.deinit(func.gpa); - _ = try func.bin_file.storeNavType(func.owner_nav, func_type); - - var cc_result = try func.resolveCallingConventionValues(fn_ty); - defer cc_result.deinit(func.gpa); - - func.args = cc_result.args; - func.return_value = cc_result.return_value; +fn functionInner(cg: *CodeGen, any_returns: bool) InnerError!Function { + const wasm = cg.wasm; + const zcu = cg.pt.zcu; - try func.addTag(.dbg_prologue_end); + const start_mir_off: u32 = @intCast(wasm.mir_instructions.len); - try func.branches.append(func.gpa, .{}); + try cg.branches.append(cg.gpa, .{}); // clean up outer branch defer { - var outer_branch = func.branches.pop(); - outer_branch.deinit(func.gpa); - assert(func.branches.items.len == 0); // missing branch merge + var outer_branch = cg.branches.pop(); + outer_branch.deinit(cg.gpa); + assert(cg.branches.items.len 
== 0); // missing branch merge } // Generate MIR for function body - try func.genBody(func.air.getMainBody()); + try cg.genBody(cg.air.getMainBody()); // In case we have a return value, but the last instruction is a noreturn (such as a while loop) // we emit an unreachable instruction to tell the stack validator that part will never be reached. - if (func_type.returns.len != 0 and func.air.instructions.len > 0) { - const inst: Air.Inst.Index = @enumFromInt(func.air.instructions.len - 1); - const last_inst_ty = func.typeOfIndex(inst); + if (any_returns and cg.air.instructions.len > 0) { + const inst: Air.Inst.Index = @enumFromInt(cg.air.instructions.len - 1); + const last_inst_ty = cg.typeOfIndex(inst); if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(zcu) or last_inst_ty.isNoReturn(zcu)) { - try func.addTag(.@"unreachable"); + try cg.addTag(.@"unreachable"); } } // End of function body - try func.addTag(.end); - - try func.addTag(.dbg_epilogue_begin); - - // check if we have to initialize and allocate anything into the stack frame. - // If so, create enough stack space and insert the instructions at the front of the list. - if (func.initial_stack_value != .none) { - var prologue = std.ArrayList(Mir.Inst).init(func.gpa); - defer prologue.deinit(); - - const sp = @intFromEnum(func.bin_file.zig_object.?.stack_pointer_sym); - // load stack pointer - try prologue.append(.{ .tag = .global_get, .data = .{ .label = sp } }); - // store stack pointer so we can restore it when we return from the function - try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.initial_stack_value.local.value } }); - // get the total stack size - const aligned_stack = func.stack_alignment.forward(func.stack_size); - try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(aligned_stack) } }); - // subtract it from the current stack pointer - try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } }); - // Get negative stack alignment - try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment.toByteUnits().?)) * -1 } }); - // Bitwise-and the value to get the new stack pointer to ensure the pointers are aligned with the abi alignment - try prologue.append(.{ .tag = .i32_and, .data = .{ .tag = {} } }); - // store the current stack pointer as the bottom, which will be used to calculate all stack pointer offsets - try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.bottom_stack_value.local.value } }); - // Store the current stack pointer value into the global stack pointer so other function calls will - // start from this value instead and not overwrite the current stack. - try prologue.append(.{ .tag = .global_set, .data = .{ .label = sp } }); - - // reserve space and insert all prologue instructions at the front of the instruction list - // We insert them in reserve order as there is no insertSlice in multiArrayList. - try func.mir_instructions.ensureUnusedCapacity(func.gpa, prologue.items.len); - for (prologue.items, 0..) 
|_, index| { - const inst = prologue.items[prologue.items.len - 1 - index]; - func.mir_instructions.insertAssumeCapacity(0, inst); - } - } - - var mir: Mir = .{ - .instructions = func.mir_instructions.toOwnedSlice(), - .extra = try func.mir_extra.toOwnedSlice(func.gpa), - }; - defer mir.deinit(func.gpa); - - var emit: Emit = .{ - .mir = mir, - .bin_file = func.bin_file, - .code = func.code, - .locals = func.locals.items, - .owner_nav = func.owner_nav, - .dbg_output = func.debug_output, - .prev_di_line = 0, - .prev_di_column = 0, - .prev_di_offset = 0, - }; - - emit.emitMir() catch |err| switch (err) { - error.EmitFail => { - func.err_msg = emit.error_msg.?; - return error.CodegenFail; + try cg.addTag(.end); + try cg.addTag(.dbg_epilogue_begin); + + return .{ + .mir_off = start_mir_off, + .mir_len = @intCast(wasm.mir_instructions.len - start_mir_off), + .mir_extra_off = cg.start_mir_extra_off, + .mir_extra_len = cg.extraLen(), + .locals_off = cg.start_locals_off, + .locals_len = @intCast(wasm.all_zcu_locals.items.len - cg.start_locals_off), + .prologue = if (cg.initial_stack_value == .none) .none else .{ + .sp_local = cg.initial_stack_value.local.value, + .flags = .{ .stack_alignment = cg.stack_alignment }, + .stack_size = cg.stack_size, + .bottom_stack_local = cg.bottom_stack_value.local.value, }, - else => |e| return e, }; } const CallWValues = struct { args: []WValue, return_value: WValue, + local_index: u32, fn deinit(values: *CallWValues, gpa: Allocator) void { gpa.free(values.args); @@ -1357,28 +1356,33 @@ const CallWValues = struct { } }; -fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues { - const pt = func.pt; - const zcu = pt.zcu; +fn resolveCallingConventionValues( + zcu: *const Zcu, + fn_ty: Type, + target: *const std.Target, +) Allocator.Error!CallWValues { + const gpa = zcu.gpa; const ip = &zcu.intern_pool; const fn_info = zcu.typeToFunc(fn_ty).?; const cc = fn_info.cc; + var result: CallWValues = .{ .args = &.{}, .return_value = .none, + .local_index = 0, }; if (cc == .naked) return result; - var args = std.ArrayList(WValue).init(func.gpa); + var args = std.ArrayList(WValue).init(gpa); defer args.deinit(); // Check if we store the result as a pointer to the stack rather than // by value - if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*)) { + if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), zcu, target)) { // the sret arg will be passed as first argument, therefore we // set the `return_value` before allocating locals for regular args. 
- result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } }; - func.local_index += 1; + result.return_value = .{ .local = .{ .value = result.local_index, .references = 1 } }; + result.local_index += 1; } switch (cc) { @@ -1388,8 +1392,8 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV continue; } - try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } }); - func.local_index += 1; + try args.append(.{ .local = .{ .value = result.local_index, .references = 1 } }); + result.local_index += 1; } }, .wasm_watc => { @@ -1397,23 +1401,28 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV const ty_classes = abi.classifyType(Type.fromInterned(ty), zcu); for (ty_classes) |class| { if (class == .none) continue; - try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } }); - func.local_index += 1; + try args.append(.{ .local = .{ .value = result.local_index, .references = 1 } }); + result.local_index += 1; } } }, - else => return func.fail("calling convention '{s}' not supported for Wasm", .{@tagName(cc)}), + else => unreachable, // Frontend is responsible for emitting an error earlier. } result.args = try args.toOwnedSlice(); return result; } -fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, pt: Zcu.PerThread, target: std.Target) bool { +pub fn firstParamSRet( + cc: std.builtin.CallingConvention, + return_type: Type, + zcu: *const Zcu, + target: *const std.Target, +) bool { switch (cc) { .@"inline" => unreachable, - .auto => return isByRef(return_type, pt, target), + .auto => return isByRef(return_type, zcu, target), .wasm_watc => { - const ty_classes = abi.classifyType(return_type, pt.zcu); + const ty_classes = abi.classifyType(return_type, zcu); if (ty_classes[0] == .indirect) return true; if (ty_classes[0] == .direct and ty_classes[1] == .direct) return true; return false; @@ -1424,94 +1433,88 @@ fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, pt: Zcu. /// Lowers a Zig type and its value based on a given calling convention to ensure /// it matches the ABI. -fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: WValue) !void { +fn lowerArg(cg: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: WValue) !void { if (cc != .wasm_watc) { - return func.lowerToStack(value); + return cg.lowerToStack(value); } - const pt = func.pt; - const zcu = pt.zcu; + const zcu = cg.pt.zcu; const ty_classes = abi.classifyType(ty, zcu); assert(ty_classes[0] != .none); switch (ty.zigTypeTag(zcu)) { .@"struct", .@"union" => { if (ty_classes[0] == .indirect) { - return func.lowerToStack(value); + return cg.lowerToStack(value); } assert(ty_classes[0] == .direct); const scalar_type = abi.scalarType(ty, zcu); switch (value) { - .memory, - .memory_offset, - .stack_offset, - => _ = try func.load(value, scalar_type, 0), + .nav_ref, .stack_offset => _ = try cg.load(value, scalar_type, 0), .dead => unreachable, - else => try func.emitWValue(value), + else => try cg.emitWValue(value), } }, .int, .float => { if (ty_classes[1] == .none) { - return func.lowerToStack(value); + return cg.lowerToStack(value); } assert(ty_classes[0] == .direct and ty_classes[1] == .direct); assert(ty.abiSize(zcu) == 16); // in this case we have an integer or float that must be lowered as 2 i64's. 
- try func.emitWValue(value); - try func.addMemArg(.i64_load, .{ .offset = value.offset(), .alignment = 8 }); - try func.emitWValue(value); - try func.addMemArg(.i64_load, .{ .offset = value.offset() + 8, .alignment = 8 }); + try cg.emitWValue(value); + try cg.addMemArg(.i64_load, .{ .offset = value.offset(), .alignment = 8 }); + try cg.emitWValue(value); + try cg.addMemArg(.i64_load, .{ .offset = value.offset() + 8, .alignment = 8 }); }, - else => return func.lowerToStack(value), + else => return cg.lowerToStack(value), } } /// Lowers a `WValue` to the stack. This means when the `value` results in /// `.stack_offset` we calculate the pointer of this offset and use that. /// The value is left on the stack, and not stored in any temporary. -fn lowerToStack(func: *CodeGen, value: WValue) !void { +fn lowerToStack(cg: *CodeGen, value: WValue) !void { switch (value) { .stack_offset => |offset| { - try func.emitWValue(value); + try cg.emitWValue(value); if (offset.value > 0) { - switch (func.arch()) { + switch (cg.ptr_size) { .wasm32 => { - try func.addImm32(offset.value); - try func.addTag(.i32_add); + try cg.addImm32(offset.value); + try cg.addTag(.i32_add); }, .wasm64 => { - try func.addImm64(offset.value); - try func.addTag(.i64_add); + try cg.addImm64(offset.value); + try cg.addTag(.i64_add); }, - else => unreachable, } } }, - else => try func.emitWValue(value), + else => try cg.emitWValue(value), } } /// Creates a local for the initial stack value /// Asserts `initial_stack_value` is `.none` -fn initializeStack(func: *CodeGen) !void { - assert(func.initial_stack_value == .none); +fn initializeStack(cg: *CodeGen) !void { + assert(cg.initial_stack_value == .none); // Reserve a local to store the current stack pointer // We can later use this local to set the stack pointer back to the value // we have stored here. - func.initial_stack_value = try func.ensureAllocLocal(Type.usize); + cg.initial_stack_value = try cg.ensureAllocLocal(Type.usize); // Also reserve a local to store the bottom stack value - func.bottom_stack_value = try func.ensureAllocLocal(Type.usize); + cg.bottom_stack_value = try cg.ensureAllocLocal(Type.usize); } /// Reads the stack pointer from `Context.initial_stack_value` and writes it /// to the global stack pointer variable -fn restoreStackPointer(func: *CodeGen) !void { +fn restoreStackPointer(cg: *CodeGen) !void { // only restore the pointer if it was initialized - if (func.initial_stack_value == .none) return; + if (cg.initial_stack_value == .none) return; // Get the original stack pointer's value - try func.emitWValue(func.initial_stack_value); + try cg.emitWValue(cg.initial_stack_value); - // save its value in the global stack pointer - try func.addLabel(.global_set, @intFromEnum(func.bin_file.zig_object.?.stack_pointer_sym)); + try cg.addTag(.global_set_sp); } /// From a given type, will create space on the virtual stack to store the value of such type. @@ -1520,24 +1523,25 @@ fn restoreStackPointer(func: *CodeGen) !void { /// moveStack unless a local was already created to store the pointer. 
/// /// Asserts Type has codegenbits -fn allocStack(func: *CodeGen, ty: Type) !WValue { - const zcu = func.pt.zcu; +fn allocStack(cg: *CodeGen, ty: Type) !WValue { + const pt = cg.pt; + const zcu = pt.zcu; assert(ty.hasRuntimeBitsIgnoreComptime(zcu)); - if (func.initial_stack_value == .none) { - try func.initializeStack(); + if (cg.initial_stack_value == .none) { + try cg.initializeStack(); } const abi_size = std.math.cast(u32, ty.abiSize(zcu)) orelse { - return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ - ty.fmt(func.pt), ty.abiSize(zcu), + return cg.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ + ty.fmt(pt), ty.abiSize(zcu), }); }; const abi_align = ty.abiAlignment(zcu); - func.stack_alignment = func.stack_alignment.max(abi_align); + cg.stack_alignment = cg.stack_alignment.max(abi_align); - const offset: u32 = @intCast(abi_align.forward(func.stack_size)); - defer func.stack_size = offset + abi_size; + const offset: u32 = @intCast(abi_align.forward(cg.stack_size)); + defer cg.stack_size = offset + abi_size; return .{ .stack_offset = .{ .value = offset, .references = 1 } }; } @@ -1546,30 +1550,30 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue { /// the value of its type will live. /// This is different from allocStack where this will use the pointer's alignment /// if it is set, to ensure the stack alignment will be set correctly. -fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue { - const pt = func.pt; +fn allocStackPtr(cg: *CodeGen, inst: Air.Inst.Index) !WValue { + const pt = cg.pt; const zcu = pt.zcu; - const ptr_ty = func.typeOfIndex(inst); + const ptr_ty = cg.typeOfIndex(inst); const pointee_ty = ptr_ty.childType(zcu); - if (func.initial_stack_value == .none) { - try func.initializeStack(); + if (cg.initial_stack_value == .none) { + try cg.initializeStack(); } if (!pointee_ty.hasRuntimeBitsIgnoreComptime(zcu)) { - return func.allocStack(Type.usize); // create a value containing just the stack pointer. + return cg.allocStack(Type.usize); // create a value containing just the stack pointer. } const abi_alignment = ptr_ty.ptrAlignment(zcu); const abi_size = std.math.cast(u32, pointee_ty.abiSize(zcu)) orelse { - return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ + return cg.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ pointee_ty.fmt(pt), pointee_ty.abiSize(zcu), }); }; - func.stack_alignment = func.stack_alignment.max(abi_alignment); + cg.stack_alignment = cg.stack_alignment.max(abi_alignment); - const offset: u32 = @intCast(abi_alignment.forward(func.stack_size)); - defer func.stack_size = offset + abi_size; + const offset: u32 = @intCast(abi_alignment.forward(cg.stack_size)); + defer cg.stack_size = offset + abi_size; return .{ .stack_offset = .{ .value = offset, .references = 1 } }; } @@ -1583,14 +1587,14 @@ fn toWasmBits(bits: u16) ?u16 { /// Performs a copy of bytes for a given type. Copying all bytes /// from rhs to lhs. -fn memcpy(func: *CodeGen, dst: WValue, src: WValue, len: WValue) !void { +fn memcpy(cg: *CodeGen, dst: WValue, src: WValue, len: WValue) !void { // When bulk_memory is enabled, we lower it to wasm's memcpy instruction. 
// If not, we lower it ourselves manually - if (std.Target.wasm.featureSetHas(func.target.cpu.features, .bulk_memory)) { - try func.lowerToStack(dst); - try func.lowerToStack(src); - try func.emitWValue(len); - try func.addExtended(.memory_copy); + if (std.Target.wasm.featureSetHas(cg.target.cpu.features, .bulk_memory)) { + try cg.lowerToStack(dst); + try cg.lowerToStack(src); + try cg.emitWValue(len); + try cg.addExtended(.memory_copy); return; } @@ -1611,19 +1615,18 @@ fn memcpy(func: *CodeGen, dst: WValue, src: WValue, len: WValue) !void { const rhs_base = src.offset(); while (offset < length) : (offset += 1) { // get dst's address to store the result - try func.emitWValue(dst); + try cg.emitWValue(dst); // load byte from src's address - try func.emitWValue(src); - switch (func.arch()) { + try cg.emitWValue(src); + switch (cg.ptr_size) { .wasm32 => { - try func.addMemArg(.i32_load8_u, .{ .offset = rhs_base + offset, .alignment = 1 }); - try func.addMemArg(.i32_store8, .{ .offset = lhs_base + offset, .alignment = 1 }); + try cg.addMemArg(.i32_load8_u, .{ .offset = rhs_base + offset, .alignment = 1 }); + try cg.addMemArg(.i32_store8, .{ .offset = lhs_base + offset, .alignment = 1 }); }, .wasm64 => { - try func.addMemArg(.i64_load8_u, .{ .offset = rhs_base + offset, .alignment = 1 }); - try func.addMemArg(.i64_store8, .{ .offset = lhs_base + offset, .alignment = 1 }); + try cg.addMemArg(.i64_load8_u, .{ .offset = rhs_base + offset, .alignment = 1 }); + try cg.addMemArg(.i64_store8, .{ .offset = lhs_base + offset, .alignment = 1 }); }, - else => unreachable, } } return; @@ -1633,94 +1636,84 @@ fn memcpy(func: *CodeGen, dst: WValue, src: WValue, len: WValue) !void { // allocate a local for the offset, and set it to 0. // This to ensure that inside loops we correctly re-set the counter. 
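+    // The emitted loop is roughly equivalent to this sketch (illustrative names only):
+    //   var offset: usize = 0;
+    //   while (offset != len) : (offset += 1) dst_bytes[offset] = src_bytes[offset];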
- var offset = try func.allocLocal(Type.usize); // local for counter - defer offset.free(func); - switch (func.arch()) { - .wasm32 => try func.addImm32(0), - .wasm64 => try func.addImm64(0), - else => unreachable, + var offset = try cg.allocLocal(Type.usize); // local for counter + defer offset.free(cg); + switch (cg.ptr_size) { + .wasm32 => try cg.addImm32(0), + .wasm64 => try cg.addImm64(0), } - try func.addLabel(.local_set, offset.local.value); + try cg.addLocal(.local_set, offset.local.value); // outer block to jump to when loop is done - try func.startBlock(.block, wasm.block_empty); - try func.startBlock(.loop, wasm.block_empty); + try cg.startBlock(.block, .empty); + try cg.startBlock(.loop, .empty); // loop condition (offset == length -> break) { - try func.emitWValue(offset); - try func.emitWValue(len); - switch (func.arch()) { - .wasm32 => try func.addTag(.i32_eq), - .wasm64 => try func.addTag(.i64_eq), - else => unreachable, + try cg.emitWValue(offset); + try cg.emitWValue(len); + switch (cg.ptr_size) { + .wasm32 => try cg.addTag(.i32_eq), + .wasm64 => try cg.addTag(.i64_eq), } - try func.addLabel(.br_if, 1); // jump out of loop into outer block (finished) + try cg.addLabel(.br_if, 1); // jump out of loop into outer block (finished) } // get dst ptr { - try func.emitWValue(dst); - try func.emitWValue(offset); - switch (func.arch()) { - .wasm32 => try func.addTag(.i32_add), - .wasm64 => try func.addTag(.i64_add), - else => unreachable, + try cg.emitWValue(dst); + try cg.emitWValue(offset); + switch (cg.ptr_size) { + .wasm32 => try cg.addTag(.i32_add), + .wasm64 => try cg.addTag(.i64_add), } } // get src value and also store in dst { - try func.emitWValue(src); - try func.emitWValue(offset); - switch (func.arch()) { + try cg.emitWValue(src); + try cg.emitWValue(offset); + switch (cg.ptr_size) { .wasm32 => { - try func.addTag(.i32_add); - try func.addMemArg(.i32_load8_u, .{ .offset = src.offset(), .alignment = 1 }); - try func.addMemArg(.i32_store8, .{ .offset = dst.offset(), .alignment = 1 }); + try cg.addTag(.i32_add); + try cg.addMemArg(.i32_load8_u, .{ .offset = src.offset(), .alignment = 1 }); + try cg.addMemArg(.i32_store8, .{ .offset = dst.offset(), .alignment = 1 }); }, .wasm64 => { - try func.addTag(.i64_add); - try func.addMemArg(.i64_load8_u, .{ .offset = src.offset(), .alignment = 1 }); - try func.addMemArg(.i64_store8, .{ .offset = dst.offset(), .alignment = 1 }); + try cg.addTag(.i64_add); + try cg.addMemArg(.i64_load8_u, .{ .offset = src.offset(), .alignment = 1 }); + try cg.addMemArg(.i64_store8, .{ .offset = dst.offset(), .alignment = 1 }); }, - else => unreachable, } } // increment loop counter { - try func.emitWValue(offset); - switch (func.arch()) { + try cg.emitWValue(offset); + switch (cg.ptr_size) { .wasm32 => { - try func.addImm32(1); - try func.addTag(.i32_add); + try cg.addImm32(1); + try cg.addTag(.i32_add); }, .wasm64 => { - try func.addImm64(1); - try func.addTag(.i64_add); + try cg.addImm64(1); + try cg.addTag(.i64_add); }, - else => unreachable, } - try func.addLabel(.local_set, offset.local.value); - try func.addLabel(.br, 0); // jump to start of loop + try cg.addLocal(.local_set, offset.local.value); + try cg.addLabel(.br, 0); // jump to start of loop } - try func.endBlock(); // close off loop block - try func.endBlock(); // close off outer block + try cg.endBlock(); // close off loop block + try cg.endBlock(); // close off outer block } -fn ptrSize(func: *const CodeGen) u16 { - return @divExact(func.target.ptrBitWidth(), 8); -} - -fn arch(func: 
*const CodeGen) std.Target.Cpu.Arch {
-    return func.target.cpu.arch;
+fn ptrSize(cg: *const CodeGen) u16 {
+    return @divExact(cg.target.ptrBitWidth(), 8);
 }

 /// For a given `Type`, returns true when the type will be passed
 /// by reference rather than by value.
-fn isByRef(ty: Type, pt: Zcu.PerThread, target: std.Target) bool {
-    const zcu = pt.zcu;
+fn isByRef(ty: Type, zcu: *const Zcu, target: *const std.Target) bool {
     const ip = &zcu.intern_pool;
     switch (ty.zigTypeTag(zcu)) {
         .type,
@@ -1753,14 +1746,14 @@ fn isByRef(ty: Type, pt: Zcu.PerThread, target: std.Target) bool {
         },
         .@"struct" => {
             if (zcu.typeToPackedStruct(ty)) |packed_struct| {
-                return isByRef(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt, target);
+                return isByRef(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), zcu, target);
             }
             return ty.hasRuntimeBitsIgnoreComptime(zcu);
         },
         .vector => return determineSimdStoreStrategy(ty, zcu, target) == .unrolled,
         .int => return ty.intInfo(zcu).bits > 64,
         .@"enum" => return ty.intInfo(zcu).bits > 64,
-        .float => return ty.floatBits(target) > 64,
+        .float => return ty.floatBits(target.*) > 64,
         .error_union => {
             const pl_ty = ty.errorUnionPayload(zcu);
             if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
@@ -1791,8 +1784,8 @@ const SimdStoreStrategy = enum {
 /// This means when a given type is 128 bits and either the simd128 or relaxed-simd
 /// features are enabled, the function will return `.direct`. This allows the value to be
 /// stored with a single instruction, rather than as an unrolled sequence of stores.
-fn determineSimdStoreStrategy(ty: Type, zcu: *Zcu, target: std.Target) SimdStoreStrategy {
-    std.debug.assert(ty.zigTypeTag(zcu) == .vector);
+pub fn determineSimdStoreStrategy(ty: Type, zcu: *const Zcu, target: *const std.Target) SimdStoreStrategy {
+    assert(ty.zigTypeTag(zcu) == .vector);
     if (ty.bitSize(zcu) != 128) return .unrolled;
     const hasFeature = std.Target.wasm.featureSetHas;
     const features = target.cpu.features;
@@ -1806,215 +1799,214 @@ fn determineSimdStoreStrategy(ty: Type, zcu: *Zcu, target: std.Target) SimdStore
 /// This can be used to get a pointer to a struct field, error payload, etc.
 /// By providing `modify` as action, it will modify the given `ptr_value` instead of making a new
 /// local value to store the pointer. This allows for local re-use and improves binary size.
-fn buildPointerOffset(func: *CodeGen, ptr_value: WValue, offset: u64, action: enum { modify, new }) InnerError!WValue {
+fn buildPointerOffset(cg: *CodeGen, ptr_value: WValue, offset: u64, action: enum { modify, new }) InnerError!WValue {
     // do not perform arithmetic when offset is 0.
if (offset == 0 and ptr_value.offset() == 0 and action == .modify) return ptr_value; const result_ptr: WValue = switch (action) { - .new => try func.ensureAllocLocal(Type.usize), + .new => try cg.ensureAllocLocal(Type.usize), .modify => ptr_value, }; - try func.emitWValue(ptr_value); + try cg.emitWValue(ptr_value); if (offset + ptr_value.offset() > 0) { - switch (func.arch()) { + switch (cg.ptr_size) { .wasm32 => { - try func.addImm32(@intCast(offset + ptr_value.offset())); - try func.addTag(.i32_add); + try cg.addImm32(@intCast(offset + ptr_value.offset())); + try cg.addTag(.i32_add); }, .wasm64 => { - try func.addImm64(offset + ptr_value.offset()); - try func.addTag(.i64_add); + try cg.addImm64(offset + ptr_value.offset()); + try cg.addTag(.i64_add); }, - else => unreachable, } } - try func.addLabel(.local_set, result_ptr.local.value); + try cg.addLocal(.local_set, result_ptr.local.value); return result_ptr; } -fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const air_tags = func.air.instructions.items(.tag); +fn genInst(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const air_tags = cg.air.instructions.items(.tag); return switch (air_tags[@intFromEnum(inst)]) { .inferred_alloc, .inferred_alloc_comptime => unreachable, - .add => func.airBinOp(inst, .add), - .add_sat => func.airSatBinOp(inst, .add), - .add_wrap => func.airWrapBinOp(inst, .add), - .sub => func.airBinOp(inst, .sub), - .sub_sat => func.airSatBinOp(inst, .sub), - .sub_wrap => func.airWrapBinOp(inst, .sub), - .mul => func.airBinOp(inst, .mul), - .mul_sat => func.airSatMul(inst), - .mul_wrap => func.airWrapBinOp(inst, .mul), - .div_float, .div_exact => func.airDiv(inst), - .div_trunc => func.airDivTrunc(inst), - .div_floor => func.airDivFloor(inst), - .bit_and => func.airBinOp(inst, .@"and"), - .bit_or => func.airBinOp(inst, .@"or"), - .bool_and => func.airBinOp(inst, .@"and"), - .bool_or => func.airBinOp(inst, .@"or"), - .rem => func.airRem(inst), - .mod => func.airMod(inst), - .shl => func.airWrapBinOp(inst, .shl), - .shl_exact => func.airBinOp(inst, .shl), - .shl_sat => func.airShlSat(inst), - .shr, .shr_exact => func.airBinOp(inst, .shr), - .xor => func.airBinOp(inst, .xor), - .max => func.airMaxMin(inst, .max), - .min => func.airMaxMin(inst, .min), - .mul_add => func.airMulAdd(inst), - - .sqrt => func.airUnaryFloatOp(inst, .sqrt), - .sin => func.airUnaryFloatOp(inst, .sin), - .cos => func.airUnaryFloatOp(inst, .cos), - .tan => func.airUnaryFloatOp(inst, .tan), - .exp => func.airUnaryFloatOp(inst, .exp), - .exp2 => func.airUnaryFloatOp(inst, .exp2), - .log => func.airUnaryFloatOp(inst, .log), - .log2 => func.airUnaryFloatOp(inst, .log2), - .log10 => func.airUnaryFloatOp(inst, .log10), - .floor => func.airUnaryFloatOp(inst, .floor), - .ceil => func.airUnaryFloatOp(inst, .ceil), - .round => func.airUnaryFloatOp(inst, .round), - .trunc_float => func.airUnaryFloatOp(inst, .trunc), - .neg => func.airUnaryFloatOp(inst, .neg), - - .abs => func.airAbs(inst), - - .add_with_overflow => func.airAddSubWithOverflow(inst, .add), - .sub_with_overflow => func.airAddSubWithOverflow(inst, .sub), - .shl_with_overflow => func.airShlWithOverflow(inst), - .mul_with_overflow => func.airMulWithOverflow(inst), - - .clz => func.airClz(inst), - .ctz => func.airCtz(inst), - - .cmp_eq => func.airCmp(inst, .eq), - .cmp_gte => func.airCmp(inst, .gte), - .cmp_gt => func.airCmp(inst, .gt), - .cmp_lte => func.airCmp(inst, .lte), - .cmp_lt => func.airCmp(inst, .lt), - .cmp_neq => func.airCmp(inst, .neq), - - .cmp_vector => 
func.airCmpVector(inst), - .cmp_lt_errors_len => func.airCmpLtErrorsLen(inst), - - .array_elem_val => func.airArrayElemVal(inst), - .array_to_slice => func.airArrayToSlice(inst), - .alloc => func.airAlloc(inst), - .arg => func.airArg(inst), - .bitcast => func.airBitcast(inst), - .block => func.airBlock(inst), - .trap => func.airTrap(inst), - .breakpoint => func.airBreakpoint(inst), - .br => func.airBr(inst), - .repeat => func.airRepeat(inst), - .switch_dispatch => return func.fail("TODO implement `switch_dispatch`", .{}), - .int_from_bool => func.airIntFromBool(inst), - .cond_br => func.airCondBr(inst), - .intcast => func.airIntcast(inst), - .fptrunc => func.airFptrunc(inst), - .fpext => func.airFpext(inst), - .int_from_float => func.airIntFromFloat(inst), - .float_from_int => func.airFloatFromInt(inst), - .get_union_tag => func.airGetUnionTag(inst), - - .@"try" => func.airTry(inst), - .try_cold => func.airTry(inst), - .try_ptr => func.airTryPtr(inst), - .try_ptr_cold => func.airTryPtr(inst), - - .dbg_stmt => func.airDbgStmt(inst), - .dbg_empty_stmt => try func.finishAir(inst, .none, &.{}), - .dbg_inline_block => func.airDbgInlineBlock(inst), - .dbg_var_ptr => func.airDbgVar(inst, .local_var, true), - .dbg_var_val => func.airDbgVar(inst, .local_var, false), - .dbg_arg_inline => func.airDbgVar(inst, .local_arg, false), - - .call => func.airCall(inst, .auto), - .call_always_tail => func.airCall(inst, .always_tail), - .call_never_tail => func.airCall(inst, .never_tail), - .call_never_inline => func.airCall(inst, .never_inline), - - .is_err => func.airIsErr(inst, .i32_ne), - .is_non_err => func.airIsErr(inst, .i32_eq), - - .is_null => func.airIsNull(inst, .i32_eq, .value), - .is_non_null => func.airIsNull(inst, .i32_ne, .value), - .is_null_ptr => func.airIsNull(inst, .i32_eq, .ptr), - .is_non_null_ptr => func.airIsNull(inst, .i32_ne, .ptr), - - .load => func.airLoad(inst), - .loop => func.airLoop(inst), - .memset => func.airMemset(inst, false), - .memset_safe => func.airMemset(inst, true), - .not => func.airNot(inst), - .optional_payload => func.airOptionalPayload(inst), - .optional_payload_ptr => func.airOptionalPayloadPtr(inst), - .optional_payload_ptr_set => func.airOptionalPayloadPtrSet(inst), - .ptr_add => func.airPtrBinOp(inst, .add), - .ptr_sub => func.airPtrBinOp(inst, .sub), - .ptr_elem_ptr => func.airPtrElemPtr(inst), - .ptr_elem_val => func.airPtrElemVal(inst), - .int_from_ptr => func.airIntFromPtr(inst), - .ret => func.airRet(inst), - .ret_safe => func.airRet(inst), // TODO - .ret_ptr => func.airRetPtr(inst), - .ret_load => func.airRetLoad(inst), - .splat => func.airSplat(inst), - .select => func.airSelect(inst), - .shuffle => func.airShuffle(inst), - .reduce => func.airReduce(inst), - .aggregate_init => func.airAggregateInit(inst), - .union_init => func.airUnionInit(inst), - .prefetch => func.airPrefetch(inst), - .popcount => func.airPopcount(inst), - .byte_swap => func.airByteSwap(inst), - .bit_reverse => func.airBitReverse(inst), - - .slice => func.airSlice(inst), - .slice_len => func.airSliceLen(inst), - .slice_elem_val => func.airSliceElemVal(inst), - .slice_elem_ptr => func.airSliceElemPtr(inst), - .slice_ptr => func.airSlicePtr(inst), - .ptr_slice_len_ptr => func.airPtrSliceFieldPtr(inst, func.ptrSize()), - .ptr_slice_ptr_ptr => func.airPtrSliceFieldPtr(inst, 0), - .store => func.airStore(inst, false), - .store_safe => func.airStore(inst, true), - - .set_union_tag => func.airSetUnionTag(inst), - .struct_field_ptr => func.airStructFieldPtr(inst), - .struct_field_ptr_index_0 
=> func.airStructFieldPtrIndex(inst, 0), - .struct_field_ptr_index_1 => func.airStructFieldPtrIndex(inst, 1), - .struct_field_ptr_index_2 => func.airStructFieldPtrIndex(inst, 2), - .struct_field_ptr_index_3 => func.airStructFieldPtrIndex(inst, 3), - .struct_field_val => func.airStructFieldVal(inst), - .field_parent_ptr => func.airFieldParentPtr(inst), - - .switch_br => func.airSwitchBr(inst), - .loop_switch_br => return func.fail("TODO implement `loop_switch_br`", .{}), - .trunc => func.airTrunc(inst), - .unreach => func.airUnreachable(inst), - - .wrap_optional => func.airWrapOptional(inst), - .unwrap_errunion_payload => func.airUnwrapErrUnionPayload(inst, false), - .unwrap_errunion_payload_ptr => func.airUnwrapErrUnionPayload(inst, true), - .unwrap_errunion_err => func.airUnwrapErrUnionError(inst, false), - .unwrap_errunion_err_ptr => func.airUnwrapErrUnionError(inst, true), - .wrap_errunion_payload => func.airWrapErrUnionPayload(inst), - .wrap_errunion_err => func.airWrapErrUnionErr(inst), - .errunion_payload_ptr_set => func.airErrUnionPayloadPtrSet(inst), - .error_name => func.airErrorName(inst), - - .wasm_memory_size => func.airWasmMemorySize(inst), - .wasm_memory_grow => func.airWasmMemoryGrow(inst), - - .memcpy => func.airMemcpy(inst), - - .ret_addr => func.airRetAddr(inst), - .tag_name => func.airTagName(inst), - - .error_set_has_value => func.airErrorSetHasValue(inst), - .frame_addr => func.airFrameAddress(inst), + .add => cg.airBinOp(inst, .add), + .add_sat => cg.airSatBinOp(inst, .add), + .add_wrap => cg.airWrapBinOp(inst, .add), + .sub => cg.airBinOp(inst, .sub), + .sub_sat => cg.airSatBinOp(inst, .sub), + .sub_wrap => cg.airWrapBinOp(inst, .sub), + .mul => cg.airBinOp(inst, .mul), + .mul_sat => cg.airSatMul(inst), + .mul_wrap => cg.airWrapBinOp(inst, .mul), + .div_float, .div_exact => cg.airDiv(inst), + .div_trunc => cg.airDivTrunc(inst), + .div_floor => cg.airDivFloor(inst), + .bit_and => cg.airBinOp(inst, .@"and"), + .bit_or => cg.airBinOp(inst, .@"or"), + .bool_and => cg.airBinOp(inst, .@"and"), + .bool_or => cg.airBinOp(inst, .@"or"), + .rem => cg.airRem(inst), + .mod => cg.airMod(inst), + .shl => cg.airWrapBinOp(inst, .shl), + .shl_exact => cg.airBinOp(inst, .shl), + .shl_sat => cg.airShlSat(inst), + .shr, .shr_exact => cg.airBinOp(inst, .shr), + .xor => cg.airBinOp(inst, .xor), + .max => cg.airMaxMin(inst, .fmax, .gt), + .min => cg.airMaxMin(inst, .fmin, .lt), + .mul_add => cg.airMulAdd(inst), + + .sqrt => cg.airUnaryFloatOp(inst, .sqrt), + .sin => cg.airUnaryFloatOp(inst, .sin), + .cos => cg.airUnaryFloatOp(inst, .cos), + .tan => cg.airUnaryFloatOp(inst, .tan), + .exp => cg.airUnaryFloatOp(inst, .exp), + .exp2 => cg.airUnaryFloatOp(inst, .exp2), + .log => cg.airUnaryFloatOp(inst, .log), + .log2 => cg.airUnaryFloatOp(inst, .log2), + .log10 => cg.airUnaryFloatOp(inst, .log10), + .floor => cg.airUnaryFloatOp(inst, .floor), + .ceil => cg.airUnaryFloatOp(inst, .ceil), + .round => cg.airUnaryFloatOp(inst, .round), + .trunc_float => cg.airUnaryFloatOp(inst, .trunc), + .neg => cg.airUnaryFloatOp(inst, .neg), + + .abs => cg.airAbs(inst), + + .add_with_overflow => cg.airAddSubWithOverflow(inst, .add), + .sub_with_overflow => cg.airAddSubWithOverflow(inst, .sub), + .shl_with_overflow => cg.airShlWithOverflow(inst), + .mul_with_overflow => cg.airMulWithOverflow(inst), + + .clz => cg.airClz(inst), + .ctz => cg.airCtz(inst), + + .cmp_eq => cg.airCmp(inst, .eq), + .cmp_gte => cg.airCmp(inst, .gte), + .cmp_gt => cg.airCmp(inst, .gt), + .cmp_lte => cg.airCmp(inst, .lte), + .cmp_lt => 
cg.airCmp(inst, .lt), + .cmp_neq => cg.airCmp(inst, .neq), + + .cmp_vector => cg.airCmpVector(inst), + .cmp_lt_errors_len => cg.airCmpLtErrorsLen(inst), + + .array_elem_val => cg.airArrayElemVal(inst), + .array_to_slice => cg.airArrayToSlice(inst), + .alloc => cg.airAlloc(inst), + .arg => cg.airArg(inst), + .bitcast => cg.airBitcast(inst), + .block => cg.airBlock(inst), + .trap => cg.airTrap(inst), + .breakpoint => cg.airBreakpoint(inst), + .br => cg.airBr(inst), + .repeat => cg.airRepeat(inst), + .switch_dispatch => return cg.fail("TODO implement `switch_dispatch`", .{}), + .int_from_bool => cg.airIntFromBool(inst), + .cond_br => cg.airCondBr(inst), + .intcast => cg.airIntcast(inst), + .fptrunc => cg.airFptrunc(inst), + .fpext => cg.airFpext(inst), + .int_from_float => cg.airIntFromFloat(inst), + .float_from_int => cg.airFloatFromInt(inst), + .get_union_tag => cg.airGetUnionTag(inst), + + .@"try" => cg.airTry(inst), + .try_cold => cg.airTry(inst), + .try_ptr => cg.airTryPtr(inst), + .try_ptr_cold => cg.airTryPtr(inst), + + .dbg_stmt => cg.airDbgStmt(inst), + .dbg_empty_stmt => try cg.finishAir(inst, .none, &.{}), + .dbg_inline_block => cg.airDbgInlineBlock(inst), + .dbg_var_ptr => cg.airDbgVar(inst, .local_var, true), + .dbg_var_val => cg.airDbgVar(inst, .local_var, false), + .dbg_arg_inline => cg.airDbgVar(inst, .local_arg, false), + + .call => cg.airCall(inst, .auto), + .call_always_tail => cg.airCall(inst, .always_tail), + .call_never_tail => cg.airCall(inst, .never_tail), + .call_never_inline => cg.airCall(inst, .never_inline), + + .is_err => cg.airIsErr(inst, .i32_ne), + .is_non_err => cg.airIsErr(inst, .i32_eq), + + .is_null => cg.airIsNull(inst, .i32_eq, .value), + .is_non_null => cg.airIsNull(inst, .i32_ne, .value), + .is_null_ptr => cg.airIsNull(inst, .i32_eq, .ptr), + .is_non_null_ptr => cg.airIsNull(inst, .i32_ne, .ptr), + + .load => cg.airLoad(inst), + .loop => cg.airLoop(inst), + .memset => cg.airMemset(inst, false), + .memset_safe => cg.airMemset(inst, true), + .not => cg.airNot(inst), + .optional_payload => cg.airOptionalPayload(inst), + .optional_payload_ptr => cg.airOptionalPayloadPtr(inst), + .optional_payload_ptr_set => cg.airOptionalPayloadPtrSet(inst), + .ptr_add => cg.airPtrBinOp(inst, .add), + .ptr_sub => cg.airPtrBinOp(inst, .sub), + .ptr_elem_ptr => cg.airPtrElemPtr(inst), + .ptr_elem_val => cg.airPtrElemVal(inst), + .int_from_ptr => cg.airIntFromPtr(inst), + .ret => cg.airRet(inst), + .ret_safe => cg.airRet(inst), // TODO + .ret_ptr => cg.airRetPtr(inst), + .ret_load => cg.airRetLoad(inst), + .splat => cg.airSplat(inst), + .select => cg.airSelect(inst), + .shuffle => cg.airShuffle(inst), + .reduce => cg.airReduce(inst), + .aggregate_init => cg.airAggregateInit(inst), + .union_init => cg.airUnionInit(inst), + .prefetch => cg.airPrefetch(inst), + .popcount => cg.airPopcount(inst), + .byte_swap => cg.airByteSwap(inst), + .bit_reverse => cg.airBitReverse(inst), + + .slice => cg.airSlice(inst), + .slice_len => cg.airSliceLen(inst), + .slice_elem_val => cg.airSliceElemVal(inst), + .slice_elem_ptr => cg.airSliceElemPtr(inst), + .slice_ptr => cg.airSlicePtr(inst), + .ptr_slice_len_ptr => cg.airPtrSliceFieldPtr(inst, cg.ptrSize()), + .ptr_slice_ptr_ptr => cg.airPtrSliceFieldPtr(inst, 0), + .store => cg.airStore(inst, false), + .store_safe => cg.airStore(inst, true), + + .set_union_tag => cg.airSetUnionTag(inst), + .struct_field_ptr => cg.airStructFieldPtr(inst), + .struct_field_ptr_index_0 => cg.airStructFieldPtrIndex(inst, 0), + .struct_field_ptr_index_1 => 
cg.airStructFieldPtrIndex(inst, 1), + .struct_field_ptr_index_2 => cg.airStructFieldPtrIndex(inst, 2), + .struct_field_ptr_index_3 => cg.airStructFieldPtrIndex(inst, 3), + .struct_field_val => cg.airStructFieldVal(inst), + .field_parent_ptr => cg.airFieldParentPtr(inst), + + .switch_br => cg.airSwitchBr(inst), + .loop_switch_br => return cg.fail("TODO implement `loop_switch_br`", .{}), + .trunc => cg.airTrunc(inst), + .unreach => cg.airUnreachable(inst), + + .wrap_optional => cg.airWrapOptional(inst), + .unwrap_errunion_payload => cg.airUnwrapErrUnionPayload(inst, false), + .unwrap_errunion_payload_ptr => cg.airUnwrapErrUnionPayload(inst, true), + .unwrap_errunion_err => cg.airUnwrapErrUnionError(inst, false), + .unwrap_errunion_err_ptr => cg.airUnwrapErrUnionError(inst, true), + .wrap_errunion_payload => cg.airWrapErrUnionPayload(inst), + .wrap_errunion_err => cg.airWrapErrUnionErr(inst), + .errunion_payload_ptr_set => cg.airErrUnionPayloadPtrSet(inst), + .error_name => cg.airErrorName(inst), + + .wasm_memory_size => cg.airWasmMemorySize(inst), + .wasm_memory_grow => cg.airWasmMemoryGrow(inst), + + .memcpy => cg.airMemcpy(inst), + + .ret_addr => cg.airRetAddr(inst), + .tag_name => cg.airTagName(inst), + + .error_set_has_value => cg.airErrorSetHasValue(inst), + .frame_addr => cg.airFrameAddress(inst), .assembly, .is_err_ptr, @@ -2030,18 +2022,18 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { .c_va_copy, .c_va_end, .c_va_start, - => |tag| return func.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}), + => |tag| return cg.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}), - .atomic_load => func.airAtomicLoad(inst), + .atomic_load => cg.airAtomicLoad(inst), .atomic_store_unordered, .atomic_store_monotonic, .atomic_store_release, .atomic_store_seq_cst, // in WebAssembly, all atomic instructions are sequentially ordered. 
- => func.airAtomicStore(inst), - .atomic_rmw => func.airAtomicRmw(inst), - .cmpxchg_weak => func.airCmpxchg(inst), - .cmpxchg_strong => func.airCmpxchg(inst), + => cg.airAtomicStore(inst), + .atomic_rmw => cg.airAtomicRmw(inst), + .cmpxchg_weak => cg.airCmpxchg(inst), + .cmpxchg_strong => cg.airCmpxchg(inst), .add_optimized, .sub_optimized, @@ -2062,12 +2054,12 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { .cmp_vector_optimized, .reduce_optimized, .int_from_float_optimized, - => return func.fail("TODO implement optimized float mode", .{}), + => return cg.fail("TODO implement optimized float mode", .{}), .add_safe, .sub_safe, .mul_safe, - => return func.fail("TODO implement safety_checked_instructions", .{}), + => return cg.fail("TODO implement safety_checked_instructions", .{}), .work_item_id, .work_group_size, @@ -2076,123 +2068,120 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }; } -fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; +fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; const ip = &zcu.intern_pool; for (body) |inst| { - if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip)) { + if (cg.liveness.isUnused(inst) and !cg.air.mustLower(inst, ip)) { continue; } - const old_bookkeeping_value = func.air_bookkeeping; - try func.currentBranch().values.ensureUnusedCapacity(func.gpa, Liveness.bpi); - try func.genInst(inst); + const old_bookkeeping_value = cg.air_bookkeeping; + try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, Liveness.bpi); + try cg.genInst(inst); - if (std.debug.runtime_safety and func.air_bookkeeping < old_bookkeeping_value + 1) { + if (std.debug.runtime_safety and cg.air_bookkeeping < old_bookkeeping_value + 1) { std.debug.panic("Missing call to `finishAir` in AIR instruction %{d} ('{}')", .{ inst, - func.air.instructions.items(.tag)[@intFromEnum(inst)], + cg.air.instructions.items(.tag)[@intFromEnum(inst)], }); } } } -fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const operand = try func.resolveInst(un_op); - const fn_info = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?; +fn airRet(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const operand = try cg.resolveInst(un_op); + const fn_info = zcu.typeToFunc(zcu.navValue(cg.owner_nav).typeOf(zcu)).?; const ret_ty = Type.fromInterned(fn_info.return_type); // result must be stored in the stack and we return a pointer // to the stack instead - if (func.return_value != .none) { - try func.store(func.return_value, operand, ret_ty, 0); + if (cg.return_value != .none) { + try cg.store(cg.return_value, operand, ret_ty, 0); } else if (fn_info.cc == .wasm_watc and ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) { switch (ret_ty.zigTypeTag(zcu)) { // Aggregate types can be lowered as a singular value .@"struct", .@"union" => { const scalar_type = abi.scalarType(ret_ty, zcu); - try func.emitWValue(operand); + try cg.emitWValue(operand); const opcode = buildOpcode(.{ .op = .load, .width = @as(u8, @intCast(scalar_type.abiSize(zcu) * 8)), .signedness = if (scalar_type.isSignedInt(zcu)) .signed else .unsigned, - .valtype1 = typeToValtype(scalar_type, pt, func.target.*), + .valtype1 = 
typeToValtype(scalar_type, zcu, cg.target), }); - try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{ + try cg.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{ .offset = operand.offset(), .alignment = @intCast(scalar_type.abiAlignment(zcu).toByteUnits().?), }); }, - else => try func.emitWValue(operand), + else => try cg.emitWValue(operand), } } else { if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and ret_ty.isError(zcu)) { - try func.addImm32(0); + try cg.addImm32(0); } else { - try func.emitWValue(operand); + try cg.emitWValue(operand); } } - try func.restoreStackPointer(); - try func.addTag(.@"return"); + try cg.restoreStackPointer(); + try cg.addTag(.@"return"); - return func.finishAir(inst, .none, &.{un_op}); + return cg.finishAir(inst, .none, &.{un_op}); } -fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const child_type = func.typeOfIndex(inst).childType(zcu); +fn airRetPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const child_type = cg.typeOfIndex(inst).childType(zcu); const result = result: { if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) { - break :result try func.allocStack(Type.usize); // create pointer to void + break :result try cg.allocStack(Type.usize); // create pointer to void } - const fn_info = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?; - if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*)) { - break :result func.return_value; + const fn_info = zcu.typeToFunc(zcu.navValue(cg.owner_nav).typeOf(zcu)).?; + if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), zcu, cg.target)) { + break :result cg.return_value; } - break :result try func.allocStackPtr(inst); + break :result try cg.allocStackPtr(inst); }; - return func.finishAir(inst, result, &.{}); + return cg.finishAir(inst, result, &.{}); } -fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const operand = try func.resolveInst(un_op); - const ret_ty = func.typeOf(un_op).childType(zcu); +fn airRetLoad(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const operand = try cg.resolveInst(un_op); + const ret_ty = cg.typeOf(un_op).childType(zcu); - const fn_info = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?; + const fn_info = zcu.typeToFunc(zcu.navValue(cg.owner_nav).typeOf(zcu)).?; if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) { if (ret_ty.isError(zcu)) { - try func.addImm32(0); + try cg.addImm32(0); } - } else if (!firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*)) { + } else if (!firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), zcu, cg.target)) { // leave on the stack - _ = try func.load(operand, ret_ty, 0); + _ = try cg.load(operand, ret_ty, 0); } - try func.restoreStackPointer(); - try func.addTag(.@"return"); - return func.finishAir(inst, .none, &.{un_op}); + try cg.restoreStackPointer(); + try cg.addTag(.@"return"); + return cg.finishAir(inst, .none, &.{un_op}); } -fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) InnerError!void { - if (modifier == .always_tail) return func.fail("TODO implement tail calls for wasm", .{}); - const pl_op = 
func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const extra = func.air.extraData(Air.Call, pl_op.payload); - const args = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[extra.end..][0..extra.data.args_len])); - const ty = func.typeOf(pl_op.operand); +fn airCall(cg: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) InnerError!void { + const wasm = cg.wasm; + if (modifier == .always_tail) return cg.fail("TODO implement tail calls for wasm", .{}); + const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; + const extra = cg.air.extraData(Air.Call, pl_op.payload); + const args: []const Air.Inst.Ref = @ptrCast(cg.air.extra[extra.end..][0..extra.data.args_len]); + const ty = cg.typeOf(pl_op.operand); - const pt = func.pt; + const pt = cg.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const fn_ty = switch (ty.zigTypeTag(zcu)) { @@ -2202,142 +2191,109 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif }; const ret_ty = fn_ty.fnReturnType(zcu); const fn_info = zcu.typeToFunc(fn_ty).?; - const first_param_sret = firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*); + const first_param_sret = firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), zcu, cg.target); const callee: ?InternPool.Nav.Index = blk: { - const func_val = (try func.air.value(pl_op.operand, pt)) orelse break :blk null; + const func_val = (try cg.air.value(pl_op.operand, pt)) orelse break :blk null; switch (ip.indexToKey(func_val.toIntern())) { - .func => |function| { - _ = try func.bin_file.getOrCreateAtomForNav(pt, function.owner_nav); - break :blk function.owner_nav; - }, - .@"extern" => |@"extern"| { - const ext_nav = ip.getNav(@"extern".owner_nav); - const ext_info = zcu.typeToFunc(Type.fromInterned(@"extern".ty)).?; - var func_type = try genFunctype( - func.gpa, - ext_info.cc, - ext_info.param_types.get(ip), - Type.fromInterned(ext_info.return_type), - pt, - func.target.*, - ); - defer func_type.deinit(func.gpa); - const atom_index = try func.bin_file.getOrCreateAtomForNav(pt, @"extern".owner_nav); - const atom = func.bin_file.getAtomPtr(atom_index); - const type_index = try func.bin_file.storeNavType(@"extern".owner_nav, func_type); - try func.bin_file.addOrUpdateImport( - ext_nav.name.toSlice(ip), - atom.sym_index, - @"extern".lib_name.toSlice(ip), - type_index, - ); - break :blk @"extern".owner_nav; - }, + inline .func, .@"extern" => |x| break :blk x.owner_nav, .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { - .nav => |nav| { - _ = try func.bin_file.getOrCreateAtomForNav(pt, nav); - break :blk nav; - }, + .nav => |nav| break :blk nav, else => {}, }, else => {}, } - return func.fail("Expected a function, but instead found '{s}'", .{@tagName(ip.indexToKey(func_val.toIntern()))}); + return cg.fail("unable to lower callee to a function index", .{}); }; const sret: WValue = if (first_param_sret) blk: { - const sret_local = try func.allocStack(ret_ty); - try func.lowerToStack(sret_local); + const sret_local = try cg.allocStack(ret_ty); + try cg.lowerToStack(sret_local); break :blk sret_local; } else .none; for (args) |arg| { - const arg_val = try func.resolveInst(arg); + const arg_val = try cg.resolveInst(arg); - const arg_ty = func.typeOf(arg); + const arg_ty = cg.typeOf(arg); if (!arg_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; - try func.lowerArg(zcu.typeToFunc(fn_ty).?.cc, arg_ty, arg_val); + try cg.lowerArg(zcu.typeToFunc(fn_ty).?.cc, arg_ty, arg_val); } - if (callee) 
|direct| { - const atom_index = func.bin_file.zig_object.?.navs.get(direct).?.atom; - try func.addLabel(.call, @intFromEnum(func.bin_file.getAtom(atom_index).sym_index)); + if (callee) |nav_index| { + try cg.addInst(.{ .tag = .call_nav, .data = .{ .nav_index = nav_index } }); } else { // in this case we call a function pointer // so load its value onto the stack - std.debug.assert(ty.zigTypeTag(zcu) == .pointer); - const operand = try func.resolveInst(pl_op.operand); - try func.emitWValue(operand); - - var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, func.target.*); - defer fn_type.deinit(func.gpa); + assert(ty.zigTypeTag(zcu) == .pointer); + const operand = try cg.resolveInst(pl_op.operand); + try cg.emitWValue(operand); - const fn_type_index = try func.bin_file.zig_object.?.putOrGetFuncType(func.gpa, fn_type); - try func.addLabel(.call_indirect, fn_type_index); + const fn_type_index = try wasm.internFunctionType(fn_info.cc, fn_info.param_types.get(ip), .fromInterned(fn_info.return_type), cg.target); + try cg.addFuncTy(.call_indirect, fn_type_index); } const result_value = result_value: { if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and !ret_ty.isError(zcu)) { break :result_value .none; } else if (ret_ty.isNoReturn(zcu)) { - try func.addTag(.@"unreachable"); + try cg.addTag(.@"unreachable"); break :result_value .none; } else if (first_param_sret) { break :result_value sret; // TODO: Make this less fragile and optimize } else if (zcu.typeToFunc(fn_ty).?.cc == .wasm_watc and ret_ty.zigTypeTag(zcu) == .@"struct" or ret_ty.zigTypeTag(zcu) == .@"union") { - const result_local = try func.allocLocal(ret_ty); - try func.addLabel(.local_set, result_local.local.value); + const result_local = try cg.allocLocal(ret_ty); + try cg.addLocal(.local_set, result_local.local.value); const scalar_type = abi.scalarType(ret_ty, zcu); - const result = try func.allocStack(scalar_type); - try func.store(result, result_local, scalar_type, 0); + const result = try cg.allocStack(scalar_type); + try cg.store(result, result_local, scalar_type, 0); break :result_value result; } else { - const result_local = try func.allocLocal(ret_ty); - try func.addLabel(.local_set, result_local.local.value); + const result_local = try cg.allocLocal(ret_ty); + try cg.addLocal(.local_set, result_local.local.value); break :result_value result_local; } }; - var bt = try func.iterateBigTomb(inst, 1 + args.len); + var bt = try cg.iterateBigTomb(inst, 1 + args.len); bt.feed(pl_op.operand); for (args) |arg| bt.feed(arg); return bt.finishAir(result_value); } -fn airAlloc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const value = try func.allocStackPtr(inst); - return func.finishAir(inst, value, &.{}); +fn airAlloc(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const value = try cg.allocStackPtr(inst); + return cg.finishAir(inst, value, &.{}); } -fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void { - const pt = func.pt; +fn airStore(cg: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void { + const pt = cg.pt; const zcu = pt.zcu; if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { // TODO if the value is undef, don't lower this instruction } - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try 
func.resolveInst(bin_op.rhs); - const ptr_ty = func.typeOf(bin_op.lhs); + const lhs = try cg.resolveInst(bin_op.lhs); + const rhs = try cg.resolveInst(bin_op.rhs); + const ptr_ty = cg.typeOf(bin_op.lhs); const ptr_info = ptr_ty.ptrInfo(zcu); const ty = ptr_ty.childType(zcu); if (ptr_info.packed_offset.host_size == 0) { - try func.store(lhs, rhs, ty, 0); + try cg.store(lhs, rhs, ty, 0); } else { // at this point we have a non-natural alignment, we must // load the value, and then shift+or the rhs into the result location. const int_elem_ty = try pt.intType(.unsigned, ptr_info.packed_offset.host_size * 8); - if (isByRef(int_elem_ty, pt, func.target.*)) { - return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{}); + if (isByRef(int_elem_ty, zcu, cg.target)) { + return cg.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{}); } var mask = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(ty.bitSize(zcu)))) - 1)); @@ -2356,115 +2312,115 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void else .{ .imm64 = ~@as(u64, 0) >> @intCast(64 - ty.bitSize(zcu)) }; - try func.emitWValue(lhs); - const loaded = try func.load(lhs, int_elem_ty, 0); - const anded = try func.binOp(loaded, mask_val, int_elem_ty, .@"and"); - const extended_value = try func.intcast(rhs, ty, int_elem_ty); - const masked_value = try func.binOp(extended_value, wrap_mask_val, int_elem_ty, .@"and"); + try cg.emitWValue(lhs); + const loaded = try cg.load(lhs, int_elem_ty, 0); + const anded = try cg.binOp(loaded, mask_val, int_elem_ty, .@"and"); + const extended_value = try cg.intcast(rhs, ty, int_elem_ty); + const masked_value = try cg.binOp(extended_value, wrap_mask_val, int_elem_ty, .@"and"); const shifted_value = if (ptr_info.packed_offset.bit_offset > 0) shifted: { - break :shifted try func.binOp(masked_value, shift_val, int_elem_ty, .shl); + break :shifted try cg.binOp(masked_value, shift_val, int_elem_ty, .shl); } else masked_value; - const result = try func.binOp(anded, shifted_value, int_elem_ty, .@"or"); + const result = try cg.binOp(anded, shifted_value, int_elem_ty, .@"or"); // lhs is still on the stack - try func.store(.stack, result, int_elem_ty, lhs.offset()); + try cg.store(.stack, result, int_elem_ty, lhs.offset()); } - return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); } -fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void { +fn store(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void { assert(!(lhs != .stack and rhs == .stack)); - const pt = func.pt; + const pt = cg.pt; const zcu = pt.zcu; const abi_size = ty.abiSize(zcu); switch (ty.zigTypeTag(zcu)) { .error_union => { const pl_ty = ty.errorUnionPayload(zcu); if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) { - return func.store(lhs, rhs, Type.anyerror, 0); + return cg.store(lhs, rhs, Type.anyerror, 0); } const len = @as(u32, @intCast(abi_size)); - return func.memcpy(lhs, rhs, .{ .imm32 = len }); + return cg.memcpy(lhs, rhs, .{ .imm32 = len }); }, .optional => { if (ty.isPtrLikeOptional(zcu)) { - return func.store(lhs, rhs, Type.usize, 0); + return cg.store(lhs, rhs, Type.usize, 0); } const pl_ty = ty.optionalChild(zcu); if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) { - return func.store(lhs, rhs, Type.u8, 0); + return cg.store(lhs, rhs, Type.u8, 0); } if (pl_ty.zigTypeTag(zcu) == .error_set) { - return 
func.store(lhs, rhs, Type.anyerror, 0); + return cg.store(lhs, rhs, Type.anyerror, 0); } const len = @as(u32, @intCast(abi_size)); - return func.memcpy(lhs, rhs, .{ .imm32 = len }); + return cg.memcpy(lhs, rhs, .{ .imm32 = len }); }, - .@"struct", .array, .@"union" => if (isByRef(ty, pt, func.target.*)) { + .@"struct", .array, .@"union" => if (isByRef(ty, zcu, cg.target)) { const len = @as(u32, @intCast(abi_size)); - return func.memcpy(lhs, rhs, .{ .imm32 = len }); + return cg.memcpy(lhs, rhs, .{ .imm32 = len }); }, - .vector => switch (determineSimdStoreStrategy(ty, zcu, func.target.*)) { + .vector => switch (determineSimdStoreStrategy(ty, zcu, cg.target)) { .unrolled => { const len: u32 = @intCast(abi_size); - return func.memcpy(lhs, rhs, .{ .imm32 = len }); + return cg.memcpy(lhs, rhs, .{ .imm32 = len }); }, .direct => { - try func.emitWValue(lhs); - try func.lowerToStack(rhs); + try cg.emitWValue(lhs); + try cg.lowerToStack(rhs); // TODO: Add helper functions for simd opcodes - const extra_index: u32 = @intCast(func.mir_extra.items.len); + const extra_index = cg.extraLen(); // stores as := opcode, offset, alignment (opcode::memarg) - try func.mir_extra.appendSlice(func.gpa, &[_]u32{ - std.wasm.simdOpcode(.v128_store), + try cg.mir_extra.appendSlice(cg.gpa, &[_]u32{ + @intFromEnum(std.wasm.SimdOpcode.v128_store), offset + lhs.offset(), @intCast(ty.abiAlignment(zcu).toByteUnits() orelse 0), }); - return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); + return cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); }, }, .pointer => { if (ty.isSlice(zcu)) { // store pointer first // lower it to the stack so we do not have to store rhs into a local first - try func.emitWValue(lhs); - const ptr_local = try func.load(rhs, Type.usize, 0); - try func.store(.stack, ptr_local, Type.usize, 0 + lhs.offset()); + try cg.emitWValue(lhs); + const ptr_local = try cg.load(rhs, Type.usize, 0); + try cg.store(.stack, ptr_local, Type.usize, 0 + lhs.offset()); // retrieve length from rhs, and store that alongside lhs as well - try func.emitWValue(lhs); - const len_local = try func.load(rhs, Type.usize, func.ptrSize()); - try func.store(.stack, len_local, Type.usize, func.ptrSize() + lhs.offset()); + try cg.emitWValue(lhs); + const len_local = try cg.load(rhs, Type.usize, cg.ptrSize()); + try cg.store(.stack, len_local, Type.usize, cg.ptrSize() + lhs.offset()); return; } }, .int, .@"enum", .float => if (abi_size > 8 and abi_size <= 16) { - try func.emitWValue(lhs); - const lsb = try func.load(rhs, Type.u64, 0); - try func.store(.stack, lsb, Type.u64, 0 + lhs.offset()); + try cg.emitWValue(lhs); + const lsb = try cg.load(rhs, Type.u64, 0); + try cg.store(.stack, lsb, Type.u64, 0 + lhs.offset()); - try func.emitWValue(lhs); - const msb = try func.load(rhs, Type.u64, 8); - try func.store(.stack, msb, Type.u64, 8 + lhs.offset()); + try cg.emitWValue(lhs); + const msb = try cg.load(rhs, Type.u64, 8); + try cg.store(.stack, msb, Type.u64, 8 + lhs.offset()); return; } else if (abi_size > 16) { - try func.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(zcu))) }); + try cg.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(zcu))) }); }, else => if (abi_size > 8) { - return func.fail("TODO: `store` for type `{}` with abisize `{d}`", .{ + return cg.fail("TODO: `store` for type `{}` with abisize `{d}`", .{ ty.fmt(pt), abi_size, }); }, } - try func.emitWValue(lhs); + try cg.emitWValue(lhs); // In this case we're actually interested in storing the stack 
position // into lhs, so we calculate that and emit that instead - try func.lowerToStack(rhs); + try cg.lowerToStack(rhs); - const valtype = typeToValtype(ty, pt, func.target.*); + const valtype = typeToValtype(ty, zcu, cg.target); const opcode = buildOpcode(.{ .valtype1 = valtype, .width = @as(u8, @intCast(abi_size * 8)), @@ -2472,7 +2428,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE }); // store rhs value at stack pointer's location in memory - try func.addMemArg( + try cg.addMemArg( Mir.Inst.Tag.fromOpcode(opcode), .{ .offset = offset + lhs.offset(), @@ -2481,26 +2437,26 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE ); } -fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; +fn airLoad(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const pt = cg.pt; const zcu = pt.zcu; - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand = try func.resolveInst(ty_op.operand); + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const operand = try cg.resolveInst(ty_op.operand); const ty = ty_op.ty.toType(); - const ptr_ty = func.typeOf(ty_op.operand); + const ptr_ty = cg.typeOf(ty_op.operand); const ptr_info = ptr_ty.ptrInfo(zcu); - if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return func.finishAir(inst, .none, &.{ty_op.operand}); + if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return cg.finishAir(inst, .none, &.{ty_op.operand}); const result = result: { - if (isByRef(ty, pt, func.target.*)) { - const new_local = try func.allocStack(ty); - try func.store(new_local, operand, ty, 0); + if (isByRef(ty, zcu, cg.target)) { + const new_local = try cg.allocStack(ty); + try cg.store(new_local, operand, ty, 0); break :result new_local; } if (ptr_info.packed_offset.host_size == 0) { - break :result try func.load(operand, ty, 0); + break :result try cg.load(operand, ty, 0); } // at this point we have a non-natural alignment, we must @@ -2511,45 +2467,44 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else if (ptr_info.packed_offset.host_size <= 8) .{ .imm64 = ptr_info.packed_offset.bit_offset } else - return func.fail("TODO: airLoad where ptr to bitfield exceeds 64 bits", .{}); + return cg.fail("TODO: airLoad where ptr to bitfield exceeds 64 bits", .{}); - const stack_loaded = try func.load(operand, int_elem_ty, 0); - const shifted = try func.binOp(stack_loaded, shift_val, int_elem_ty, .shr); - break :result try func.trunc(shifted, ty, int_elem_ty); + const stack_loaded = try cg.load(operand, int_elem_ty, 0); + const shifted = try cg.binOp(stack_loaded, shift_val, int_elem_ty, .shr); + break :result try cg.trunc(shifted, ty, int_elem_ty); }; - return func.finishAir(inst, result, &.{ty_op.operand}); + return cg.finishAir(inst, result, &.{ty_op.operand}); } /// Loads an operand from the linear memory section. /// NOTE: Leaves the value on the stack. 
-fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue { - const pt = func.pt; - const zcu = pt.zcu; +fn load(cg: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue { + const zcu = cg.pt.zcu; // load local's value from memory by its stack position - try func.emitWValue(operand); + try cg.emitWValue(operand); if (ty.zigTypeTag(zcu) == .vector) { // TODO: Add helper functions for simd opcodes - const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); + const extra_index = cg.extraLen(); // stores as := opcode, offset, alignment (opcode::memarg) - try func.mir_extra.appendSlice(func.gpa, &[_]u32{ - std.wasm.simdOpcode(.v128_load), + try cg.mir_extra.appendSlice(cg.gpa, &[_]u32{ + @intFromEnum(std.wasm.SimdOpcode.v128_load), offset + operand.offset(), @intCast(ty.abiAlignment(zcu).toByteUnits().?), }); - try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); + try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); return .stack; } const abi_size: u8 = @intCast(ty.abiSize(zcu)); const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(ty, pt, func.target.*), + .valtype1 = typeToValtype(ty, zcu, cg.target), .width = abi_size * 8, .op = .load, .signedness = if (ty.isSignedInt(zcu)) .signed else .unsigned, }); - try func.addMemArg( + try cg.addMemArg( Mir.Inst.Tag.fromOpcode(opcode), .{ .offset = offset + operand.offset(), @@ -2560,18 +2515,18 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu return .stack; } -fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; +fn airArg(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const pt = cg.pt; const zcu = pt.zcu; - const arg_index = func.arg_index; - const arg = func.args[arg_index]; - const cc = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?.cc; - const arg_ty = func.typeOfIndex(inst); + const arg_index = cg.arg_index; + const arg = cg.args[arg_index]; + const cc = zcu.typeToFunc(zcu.navValue(cg.owner_nav).typeOf(zcu)).?.cc; + const arg_ty = cg.typeOfIndex(inst); if (cc == .wasm_watc) { const arg_classes = abi.classifyType(arg_ty, zcu); for (arg_classes) |class| { if (class != .none) { - func.arg_index += 1; + cg.arg_index += 1; } } @@ -2579,44 +2534,30 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // we combine them into a single stack value if (arg_classes[0] == .direct and arg_classes[1] == .direct) { if (arg_ty.zigTypeTag(zcu) != .int and arg_ty.zigTypeTag(zcu) != .float) { - return func.fail( + return cg.fail( "TODO: Implement C-ABI argument for type '{}'", .{arg_ty.fmt(pt)}, ); } - const result = try func.allocStack(arg_ty); - try func.store(result, arg, Type.u64, 0); - try func.store(result, func.args[arg_index + 1], Type.u64, 8); - return func.finishAir(inst, result, &.{}); + const result = try cg.allocStack(arg_ty); + try cg.store(result, arg, Type.u64, 0); + try cg.store(result, cg.args[arg_index + 1], Type.u64, 8); + return cg.finishAir(inst, result, &.{}); } } else { - func.arg_index += 1; - } - - switch (func.debug_output) { - .dwarf => |dwarf| { - const name = func.air.instructions.items(.data)[@intFromEnum(inst)].arg.name; - if (name != .none) try dwarf.genLocalDebugInfo( - .local_arg, - name.toSlice(func.air), - arg_ty, - .{ .wasm_ext = .{ .local = arg.local.value } }, - ); - }, - else => {}, + cg.arg_index += 1; } - return func.finishAir(inst, arg, &.{}); + return cg.finishAir(inst, arg, &.{}); } -fn airBinOp(func: 
*CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); - const lhs_ty = func.typeOf(bin_op.lhs); - const rhs_ty = func.typeOf(bin_op.rhs); +fn airBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { + const zcu = cg.pt.zcu; + const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const lhs = try cg.resolveInst(bin_op.lhs); + const rhs = try cg.resolveInst(bin_op.rhs); + const lhs_ty = cg.typeOf(bin_op.lhs); + const rhs_ty = cg.typeOf(bin_op.rhs); // For certain operations, such as shifting, the types are different. // When converting this to a WebAssembly type, they *must* match to perform @@ -2626,122 +2567,121 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { const result = switch (op) { .shr, .shl => result: { const lhs_wasm_bits = toWasmBits(@intCast(lhs_ty.bitSize(zcu))) orelse { - return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)}); + return cg.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)}); }; const rhs_wasm_bits = toWasmBits(@intCast(rhs_ty.bitSize(zcu))).?; const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) - try (try func.intcast(rhs, rhs_ty, lhs_ty)).toLocal(func, lhs_ty) + try (try cg.intcast(rhs, rhs_ty, lhs_ty)).toLocal(cg, lhs_ty) else rhs; - break :result try func.binOp(lhs, new_rhs, lhs_ty, op); + break :result try cg.binOp(lhs, new_rhs, lhs_ty, op); }, - else => try func.binOp(lhs, rhs, lhs_ty, op), + else => try cg.binOp(lhs, rhs, lhs_ty, op), }; - return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } /// Performs a binary operation on the given `WValue`s /// NOTE: This leaves the value on top of the stack.
-fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue { - const pt = func.pt; +fn binOp(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue { + const pt = cg.pt; const zcu = pt.zcu; assert(!(lhs != .stack and rhs == .stack)); if (ty.isAnyFloat()) { const float_op = FloatOp.fromOp(op); - return func.floatOp(float_op, ty, &.{ lhs, rhs }); + return cg.floatOp(float_op, ty, &.{ lhs, rhs }); } - if (isByRef(ty, pt, func.target.*)) { + if (isByRef(ty, zcu, cg.target)) { if (ty.zigTypeTag(zcu) == .int) { - return func.binOpBigInt(lhs, rhs, ty, op); + return cg.binOpBigInt(lhs, rhs, ty, op); } else { - return func.fail( + return cg.fail( "TODO: Implement binary operation for type: {}", .{ty.fmt(pt)}, ); } } - const opcode: wasm.Opcode = buildOpcode(.{ + const opcode: std.wasm.Opcode = buildOpcode(.{ .op = op, - .valtype1 = typeToValtype(ty, pt, func.target.*), + .valtype1 = typeToValtype(ty, zcu, cg.target), .signedness = if (ty.isSignedInt(zcu)) .signed else .unsigned, }); - try func.emitWValue(lhs); - try func.emitWValue(rhs); + try cg.emitWValue(lhs); + try cg.emitWValue(rhs); - try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); + try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode)); return .stack; } -fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue { - const pt = func.pt; - const zcu = pt.zcu; +fn binOpBigInt(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue { + const zcu = cg.pt.zcu; const int_info = ty.intInfo(zcu); if (int_info.bits > 128) { - return func.fail("TODO: Implement binary operation for big integers larger than 128 bits", .{}); + return cg.fail("TODO: Implement binary operation for big integers larger than 128 bits", .{}); } switch (op) { - .mul => return func.callIntrinsic("__multi3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }), + .mul => return cg.callIntrinsic(.__multi3, &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }), .div => switch (int_info.signedness) { - .signed => return func.callIntrinsic("__divti3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }), - .unsigned => return func.callIntrinsic("__udivti3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }), + .signed => return cg.callIntrinsic(.__divti3, &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }), + .unsigned => return cg.callIntrinsic(.__udivti3, &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }), }, .rem => switch (int_info.signedness) { - .signed => return func.callIntrinsic("__modti3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }), - .unsigned => return func.callIntrinsic("__umodti3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }), + .signed => return cg.callIntrinsic(.__modti3, &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }), + .unsigned => return cg.callIntrinsic(.__umodti3, &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }), }, .shr => switch (int_info.signedness) { - .signed => return func.callIntrinsic("__ashrti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }), - .unsigned => return func.callIntrinsic("__lshrti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }), + .signed => return cg.callIntrinsic(.__ashrti3, &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }), + .unsigned => return cg.callIntrinsic(.__lshrti3, &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }), }, - .shl => return func.callIntrinsic("__ashlti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }), + .shl => return 
cg.callIntrinsic(.__ashlti3, &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }), .@"and", .@"or", .xor => { - const result = try func.allocStack(ty); - try func.emitWValue(result); - const lhs_lsb = try func.load(lhs, Type.u64, 0); - const rhs_lsb = try func.load(rhs, Type.u64, 0); - const op_lsb = try func.binOp(lhs_lsb, rhs_lsb, Type.u64, op); - try func.store(.stack, op_lsb, Type.u64, result.offset()); - - try func.emitWValue(result); - const lhs_msb = try func.load(lhs, Type.u64, 8); - const rhs_msb = try func.load(rhs, Type.u64, 8); - const op_msb = try func.binOp(lhs_msb, rhs_msb, Type.u64, op); - try func.store(.stack, op_msb, Type.u64, result.offset() + 8); + const result = try cg.allocStack(ty); + try cg.emitWValue(result); + const lhs_lsb = try cg.load(lhs, Type.u64, 0); + const rhs_lsb = try cg.load(rhs, Type.u64, 0); + const op_lsb = try cg.binOp(lhs_lsb, rhs_lsb, Type.u64, op); + try cg.store(.stack, op_lsb, Type.u64, result.offset()); + + try cg.emitWValue(result); + const lhs_msb = try cg.load(lhs, Type.u64, 8); + const rhs_msb = try cg.load(rhs, Type.u64, 8); + const op_msb = try cg.binOp(lhs_msb, rhs_msb, Type.u64, op); + try cg.store(.stack, op_msb, Type.u64, result.offset() + 8); return result; }, .add, .sub => { - const result = try func.allocStack(ty); - var lhs_lsb = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64); - defer lhs_lsb.free(func); - var rhs_lsb = try (try func.load(rhs, Type.u64, 0)).toLocal(func, Type.u64); - defer rhs_lsb.free(func); - var op_lsb = try (try func.binOp(lhs_lsb, rhs_lsb, Type.u64, op)).toLocal(func, Type.u64); - defer op_lsb.free(func); - - const lhs_msb = try func.load(lhs, Type.u64, 8); - const rhs_msb = try func.load(rhs, Type.u64, 8); - const op_msb = try func.binOp(lhs_msb, rhs_msb, Type.u64, op); + const result = try cg.allocStack(ty); + var lhs_lsb = try (try cg.load(lhs, Type.u64, 0)).toLocal(cg, Type.u64); + defer lhs_lsb.free(cg); + var rhs_lsb = try (try cg.load(rhs, Type.u64, 0)).toLocal(cg, Type.u64); + defer rhs_lsb.free(cg); + var op_lsb = try (try cg.binOp(lhs_lsb, rhs_lsb, Type.u64, op)).toLocal(cg, Type.u64); + defer op_lsb.free(cg); + + const lhs_msb = try cg.load(lhs, Type.u64, 8); + const rhs_msb = try cg.load(rhs, Type.u64, 8); + const op_msb = try cg.binOp(lhs_msb, rhs_msb, Type.u64, op); const lt = if (op == .add) blk: { - break :blk try func.cmp(op_lsb, rhs_lsb, Type.u64, .lt); + break :blk try cg.cmp(op_lsb, rhs_lsb, Type.u64, .lt); } else if (op == .sub) blk: { - break :blk try func.cmp(lhs_lsb, rhs_lsb, Type.u64, .lt); + break :blk try cg.cmp(lhs_lsb, rhs_lsb, Type.u64, .lt); } else unreachable; - const tmp = try func.intcast(lt, Type.u32, Type.u64); - var tmp_op = try (try func.binOp(op_msb, tmp, Type.u64, op)).toLocal(func, Type.u64); - defer tmp_op.free(func); + const tmp = try cg.intcast(lt, Type.u32, Type.u64); + var tmp_op = try (try cg.binOp(op_msb, tmp, Type.u64, op)).toLocal(cg, Type.u64); + defer tmp_op.free(cg); - try func.store(result, op_lsb, Type.u64, 0); - try func.store(result, tmp_op, Type.u64, 8); + try cg.store(result, op_lsb, Type.u64, 0); + try cg.store(result, tmp_op, Type.u64, 8); return result; }, - else => return func.fail("TODO: Implement binary operation for big integers: '{s}'", .{@tagName(op)}), + else => return cg.fail("TODO: Implement binary operation for big integers: '{s}'", .{@tagName(op)}), } } @@ -2819,199 +2759,214 @@ const FloatOp = enum { => null, }; } + + fn intrinsic(op: FloatOp, bits: u16) Mir.Intrinsic { + return switch (op) { + inline .add, .sub, .div, 
.mul => |ct_op| switch (bits) { + inline 16, 80, 128 => |ct_bits| @field( + Mir.Intrinsic, + "__" ++ @tagName(ct_op) ++ compilerRtFloatAbbrev(ct_bits) ++ "f3", + ), + else => unreachable, + }, + + inline .ceil, + .fabs, + .floor, + .fmax, + .fmin, + .round, + .sqrt, + .trunc, + => |ct_op| switch (bits) { + inline 16, 80, 128 => |ct_bits| @field( + Mir.Intrinsic, + libcFloatPrefix(ct_bits) ++ @tagName(ct_op) ++ libcFloatSuffix(ct_bits), + ), + else => unreachable, + }, + + inline .cos, + .exp, + .exp2, + .fma, + .fmod, + .log, + .log10, + .log2, + .sin, + .tan, + => |ct_op| switch (bits) { + inline 16, 32, 64, 80, 128 => |ct_bits| @field( + Mir.Intrinsic, + libcFloatPrefix(ct_bits) ++ @tagName(ct_op) ++ libcFloatSuffix(ct_bits), + ), + else => unreachable, + }, + + .neg => unreachable, + }; + } }; -fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; +fn airAbs(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const pt = cg.pt; const zcu = pt.zcu; - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand = try func.resolveInst(ty_op.operand); - const ty = func.typeOf(ty_op.operand); + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const operand = try cg.resolveInst(ty_op.operand); + const ty = cg.typeOf(ty_op.operand); const scalar_ty = ty.scalarType(zcu); switch (scalar_ty.zigTypeTag(zcu)) { .int => if (ty.zigTypeTag(zcu) == .vector) { - return func.fail("TODO implement airAbs for {}", .{ty.fmt(pt)}); + return cg.fail("TODO implement airAbs for {}", .{ty.fmt(pt)}); } else { const int_bits = ty.intInfo(zcu).bits; const wasm_bits = toWasmBits(int_bits) orelse { - return func.fail("TODO: airAbs for signed integers larger than '{d}' bits", .{int_bits}); + return cg.fail("TODO: airAbs for signed integers larger than '{d}' bits", .{int_bits}); }; switch (wasm_bits) { 32 => { - try func.emitWValue(operand); + try cg.emitWValue(operand); - try func.addImm32(31); - try func.addTag(.i32_shr_s); + try cg.addImm32(31); + try cg.addTag(.i32_shr_s); - var tmp = try func.allocLocal(ty); - defer tmp.free(func); - try func.addLabel(.local_tee, tmp.local.value); + var tmp = try cg.allocLocal(ty); + defer tmp.free(cg); + try cg.addLocal(.local_tee, tmp.local.value); - try func.emitWValue(operand); - try func.addTag(.i32_xor); - try func.emitWValue(tmp); - try func.addTag(.i32_sub); - return func.finishAir(inst, .stack, &.{ty_op.operand}); + try cg.emitWValue(operand); + try cg.addTag(.i32_xor); + try cg.emitWValue(tmp); + try cg.addTag(.i32_sub); + return cg.finishAir(inst, .stack, &.{ty_op.operand}); }, 64 => { - try func.emitWValue(operand); + try cg.emitWValue(operand); - try func.addImm64(63); - try func.addTag(.i64_shr_s); + try cg.addImm64(63); + try cg.addTag(.i64_shr_s); - var tmp = try func.allocLocal(ty); - defer tmp.free(func); - try func.addLabel(.local_tee, tmp.local.value); + var tmp = try cg.allocLocal(ty); + defer tmp.free(cg); + try cg.addLocal(.local_tee, tmp.local.value); - try func.emitWValue(operand); - try func.addTag(.i64_xor); - try func.emitWValue(tmp); - try func.addTag(.i64_sub); - return func.finishAir(inst, .stack, &.{ty_op.operand}); + try cg.emitWValue(operand); + try cg.addTag(.i64_xor); + try cg.emitWValue(tmp); + try cg.addTag(.i64_sub); + return cg.finishAir(inst, .stack, &.{ty_op.operand}); }, 128 => { - const mask = try func.allocStack(Type.u128); - try func.emitWValue(mask); - try func.emitWValue(mask); + const mask = try cg.allocStack(Type.u128); + try 
cg.emitWValue(mask); + try cg.emitWValue(mask); - _ = try func.load(operand, Type.u64, 8); - try func.addImm64(63); - try func.addTag(.i64_shr_s); + _ = try cg.load(operand, Type.u64, 8); + try cg.addImm64(63); + try cg.addTag(.i64_shr_s); - var tmp = try func.allocLocal(Type.u64); - defer tmp.free(func); - try func.addLabel(.local_tee, tmp.local.value); - try func.store(.stack, .stack, Type.u64, mask.offset() + 0); - try func.emitWValue(tmp); - try func.store(.stack, .stack, Type.u64, mask.offset() + 8); + var tmp = try cg.allocLocal(Type.u64); + defer tmp.free(cg); + try cg.addLocal(.local_tee, tmp.local.value); + try cg.store(.stack, .stack, Type.u64, mask.offset() + 0); + try cg.emitWValue(tmp); + try cg.store(.stack, .stack, Type.u64, mask.offset() + 8); - const a = try func.binOpBigInt(operand, mask, Type.u128, .xor); - const b = try func.binOpBigInt(a, mask, Type.u128, .sub); + const a = try cg.binOpBigInt(operand, mask, Type.u128, .xor); + const b = try cg.binOpBigInt(a, mask, Type.u128, .sub); - return func.finishAir(inst, b, &.{ty_op.operand}); + return cg.finishAir(inst, b, &.{ty_op.operand}); }, else => unreachable, } }, .float => { - const result = try func.floatOp(.fabs, ty, &.{operand}); - return func.finishAir(inst, result, &.{ty_op.operand}); + const result = try cg.floatOp(.fabs, ty, &.{operand}); + return cg.finishAir(inst, result, &.{ty_op.operand}); }, else => unreachable, } } -fn airUnaryFloatOp(func: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError!void { - const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const operand = try func.resolveInst(un_op); - const ty = func.typeOf(un_op); +fn airUnaryFloatOp(cg: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError!void { + const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const operand = try cg.resolveInst(un_op); + const ty = cg.typeOf(un_op); - const result = try func.floatOp(op, ty, &.{operand}); - return func.finishAir(inst, result, &.{un_op}); + const result = try cg.floatOp(op, ty, &.{operand}); + return cg.finishAir(inst, result, &.{un_op}); } -fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) InnerError!WValue { - const pt = func.pt; - const zcu = pt.zcu; +fn floatOp(cg: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) InnerError!WValue { + const zcu = cg.pt.zcu; if (ty.zigTypeTag(zcu) == .vector) { - return func.fail("TODO: Implement floatOps for vectors", .{}); + return cg.fail("TODO: Implement floatOps for vectors", .{}); } - const float_bits = ty.floatBits(func.target.*); + const float_bits = ty.floatBits(cg.target.*); if (float_op == .neg) { - return func.floatNeg(ty, args[0]); + return cg.floatNeg(ty, args[0]); } if (float_bits == 32 or float_bits == 64) { if (float_op.toOp()) |op| { for (args) |operand| { - try func.emitWValue(operand); + try cg.emitWValue(operand); } - const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, pt, func.target.*) }); - try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); + const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, zcu, cg.target) }); + try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode)); return .stack; } } - var fn_name_buf: [64]u8 = undefined; - const fn_name = switch (float_op) { - .add, - .sub, - .div, - .mul, - => std.fmt.bufPrint(&fn_name_buf, "__{s}{s}f3", .{ - @tagName(float_op), target_util.compilerRtFloatAbbrev(float_bits), - }) catch unreachable, - - .ceil, - .cos, - .exp, - .exp2, - .fabs, - .floor, - .fma, - .fmax, - .fmin, - 
.fmod, - .log, - .log10, - .log2, - .round, - .sin, - .sqrt, - .tan, - .trunc, - => std.fmt.bufPrint(&fn_name_buf, "{s}{s}{s}", .{ - target_util.libcFloatPrefix(float_bits), @tagName(float_op), target_util.libcFloatSuffix(float_bits), - }) catch unreachable, - .neg => unreachable, // handled above - }; + const intrinsic = float_op.intrinsic(float_bits); // fma requires three operands var param_types_buffer: [3]InternPool.Index = .{ ty.ip_index, ty.ip_index, ty.ip_index }; const param_types = param_types_buffer[0..args.len]; - return func.callIntrinsic(fn_name, param_types, ty, args); + return cg.callIntrinsic(intrinsic, param_types, ty, args); } /// NOTE: The result value remains on top of the stack. -fn floatNeg(func: *CodeGen, ty: Type, arg: WValue) InnerError!WValue { - const float_bits = ty.floatBits(func.target.*); +fn floatNeg(cg: *CodeGen, ty: Type, arg: WValue) InnerError!WValue { + const float_bits = ty.floatBits(cg.target.*); switch (float_bits) { 16 => { - try func.emitWValue(arg); - try func.addImm32(0x8000); - try func.addTag(.i32_xor); + try cg.emitWValue(arg); + try cg.addImm32(0x8000); + try cg.addTag(.i32_xor); return .stack; }, 32, 64 => { - try func.emitWValue(arg); - const val_type: wasm.Valtype = if (float_bits == 32) .f32 else .f64; + try cg.emitWValue(arg); + const val_type: std.wasm.Valtype = if (float_bits == 32) .f32 else .f64; const opcode = buildOpcode(.{ .op = .neg, .valtype1 = val_type }); - try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); + try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode)); return .stack; }, 80, 128 => { - const result = try func.allocStack(ty); - try func.emitWValue(result); - try func.emitWValue(arg); - try func.addMemArg(.i64_load, .{ .offset = 0 + arg.offset(), .alignment = 2 }); - try func.addMemArg(.i64_store, .{ .offset = 0 + result.offset(), .alignment = 2 }); + const result = try cg.allocStack(ty); + try cg.emitWValue(result); + try cg.emitWValue(arg); + try cg.addMemArg(.i64_load, .{ .offset = 0 + arg.offset(), .alignment = 2 }); + try cg.addMemArg(.i64_store, .{ .offset = 0 + result.offset(), .alignment = 2 }); - try func.emitWValue(result); - try func.emitWValue(arg); - try func.addMemArg(.i64_load, .{ .offset = 8 + arg.offset(), .alignment = 2 }); + try cg.emitWValue(result); + try cg.emitWValue(arg); + try cg.addMemArg(.i64_load, .{ .offset = 8 + arg.offset(), .alignment = 2 }); if (float_bits == 80) { - try func.addImm64(0x8000); - try func.addTag(.i64_xor); - try func.addMemArg(.i64_store16, .{ .offset = 8 + result.offset(), .alignment = 2 }); + try cg.addImm64(0x8000); + try cg.addTag(.i64_xor); + try cg.addMemArg(.i64_store16, .{ .offset = 8 + result.offset(), .alignment = 2 }); } else { - try func.addImm64(0x8000000000000000); - try func.addTag(.i64_xor); - try func.addMemArg(.i64_store, .{ .offset = 8 + result.offset(), .alignment = 2 }); + try cg.addImm64(0x8000000000000000); + try cg.addTag(.i64_xor); + try cg.addMemArg(.i64_store, .{ .offset = 8 + result.offset(), .alignment = 2 }); } return result; }, @@ -3019,18 +2974,17 @@ fn floatNeg(func: *CodeGen, ty: Type, arg: WValue) InnerError!WValue { } } -fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; +fn airWrapBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { + const zcu = cg.pt.zcu; + const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const lhs = try 
func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); - const lhs_ty = func.typeOf(bin_op.lhs); - const rhs_ty = func.typeOf(bin_op.rhs); + const lhs = try cg.resolveInst(bin_op.lhs); + const rhs = try cg.resolveInst(bin_op.rhs); + const lhs_ty = cg.typeOf(bin_op.lhs); + const rhs_ty = cg.typeOf(bin_op.rhs); if (lhs_ty.zigTypeTag(zcu) == .vector or rhs_ty.zigTypeTag(zcu) == .vector) { - return func.fail("TODO: Implement wrapping arithmetic for vectors", .{}); + return cg.fail("TODO: Implement wrapping arithmetic for vectors", .{}); } // For certain operations, such as shifting, the types are different. @@ -3041,90 +2995,89 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { const result = switch (op) { .shr, .shl => result: { const lhs_wasm_bits = toWasmBits(@intCast(lhs_ty.bitSize(zcu))) orelse { - return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)}); + return cg.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)}); }; const rhs_wasm_bits = toWasmBits(@intCast(rhs_ty.bitSize(zcu))).?; const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) - try (try func.intcast(rhs, rhs_ty, lhs_ty)).toLocal(func, lhs_ty) + try (try cg.intcast(rhs, rhs_ty, lhs_ty)).toLocal(cg, lhs_ty) else rhs; - break :result try func.wrapBinOp(lhs, new_rhs, lhs_ty, op); + break :result try cg.wrapBinOp(lhs, new_rhs, lhs_ty, op); }, - else => try func.wrapBinOp(lhs, rhs, lhs_ty, op), + else => try cg.wrapBinOp(lhs, rhs, lhs_ty, op), }; - return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } /// Performs a wrapping binary operation. /// Asserts rhs is not a stack value when lhs also isn't. /// NOTE: Leaves the result on the stack when its Type is <= 64 bits -fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue { - const bin_local = try func.binOp(lhs, rhs, ty, op); - return func.wrapOperand(bin_local, ty); +fn wrapBinOp(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue { + const bin_local = try cg.binOp(lhs, rhs, ty, op); + return cg.wrapOperand(bin_local, ty); } /// Wraps an operand based on a given type's bitsize. /// Asserts `Type` is <= 128 bits. /// NOTE: When the Type is <= 64 bits, leaves the value on top of the stack, if wrapping was needed. 
-fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { - const pt = func.pt; - const zcu = pt.zcu; +fn wrapOperand(cg: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { + const zcu = cg.pt.zcu; assert(ty.abiSize(zcu) <= 16); const int_bits: u16 = @intCast(ty.bitSize(zcu)); // TODO use ty.intInfo(zcu).bits const wasm_bits = toWasmBits(int_bits) orelse { - return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{int_bits}); + return cg.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{int_bits}); }; if (wasm_bits == int_bits) return operand; switch (wasm_bits) { 32 => { - try func.emitWValue(operand); + try cg.emitWValue(operand); if (ty.isSignedInt(zcu)) { - try func.addImm32(32 - int_bits); - try func.addTag(.i32_shl); - try func.addImm32(32 - int_bits); - try func.addTag(.i32_shr_s); + try cg.addImm32(32 - int_bits); + try cg.addTag(.i32_shl); + try cg.addImm32(32 - int_bits); + try cg.addTag(.i32_shr_s); } else { - try func.addImm32(~@as(u32, 0) >> @intCast(32 - int_bits)); - try func.addTag(.i32_and); + try cg.addImm32(~@as(u32, 0) >> @intCast(32 - int_bits)); + try cg.addTag(.i32_and); } return .stack; }, 64 => { - try func.emitWValue(operand); + try cg.emitWValue(operand); if (ty.isSignedInt(zcu)) { - try func.addImm64(64 - int_bits); - try func.addTag(.i64_shl); - try func.addImm64(64 - int_bits); - try func.addTag(.i64_shr_s); + try cg.addImm64(64 - int_bits); + try cg.addTag(.i64_shl); + try cg.addImm64(64 - int_bits); + try cg.addTag(.i64_shr_s); } else { - try func.addImm64(~@as(u64, 0) >> @intCast(64 - int_bits)); - try func.addTag(.i64_and); + try cg.addImm64(~@as(u64, 0) >> @intCast(64 - int_bits)); + try cg.addTag(.i64_and); } return .stack; }, 128 => { assert(operand != .stack); - const result = try func.allocStack(ty); + const result = try cg.allocStack(ty); - try func.emitWValue(result); - _ = try func.load(operand, Type.u64, 0); - try func.store(.stack, .stack, Type.u64, result.offset()); + try cg.emitWValue(result); + _ = try cg.load(operand, Type.u64, 0); + try cg.store(.stack, .stack, Type.u64, result.offset()); - try func.emitWValue(result); - _ = try func.load(operand, Type.u64, 8); + try cg.emitWValue(result); + _ = try cg.load(operand, Type.u64, 8); if (ty.isSignedInt(zcu)) { - try func.addImm64(128 - int_bits); - try func.addTag(.i64_shl); - try func.addImm64(128 - int_bits); - try func.addTag(.i64_shr_s); + try cg.addImm64(128 - int_bits); + try cg.addTag(.i64_shl); + try cg.addImm64(128 - int_bits); + try cg.addTag(.i64_shr_s); } else { - try func.addImm64(~@as(u64, 0) >> @intCast(128 - int_bits)); - try func.addTag(.i64_and); + try cg.addImm64(~@as(u64, 0) >> @intCast(128 - int_bits)); + try cg.addTag(.i64_and); } - try func.store(.stack, .stack, Type.u64, result.offset() + 8); + try cg.store(.stack, .stack, Type.u64, result.offset() + 8); return result; }, @@ -3132,17 +3085,17 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { } } -fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerError!WValue { - const pt = func.pt; +fn lowerPtr(cg: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerError!WValue { + const pt = cg.pt; const zcu = pt.zcu; const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr; const offset: u64 = prev_offset + ptr.byte_offset; return switch (ptr.base_addr) { - .nav => |nav| return func.lowerNavRef(nav, @intCast(offset)), - .uav => |uav| return func.lowerUavRef(uav, @intCast(offset)), - .int => return func.lowerConstant(try 
pt.intValue(Type.usize, offset), Type.usize), - .eu_payload => return func.fail("Wasm TODO: lower error union payload pointer", .{}), - .opt_payload => |opt_ptr| return func.lowerPtr(opt_ptr, offset), + .nav => |nav| return .{ .nav_ref = .{ .nav_index = nav, .offset = @intCast(offset) } }, + .uav => |uav| return .{ .uav_ref = .{ .ip_index = uav.val, .offset = @intCast(offset), .orig_ptr_ty = uav.orig_ty } }, + .int => return cg.lowerConstant(try pt.intValue(Type.usize, offset), Type.usize), + .eu_payload => return cg.fail("Wasm TODO: lower error union payload pointer", .{}), + .opt_payload => |opt_ptr| return cg.lowerPtr(opt_ptr, offset), .field => |field| { const base_ptr = Value.fromInterned(field.base); const base_ty = base_ptr.typeOf(zcu).childType(zcu); @@ -3151,7 +3104,7 @@ fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerEr assert(base_ty.isSlice(zcu)); break :off switch (field.index) { Value.slice_ptr_index => 0, - Value.slice_len_index => @divExact(func.target.ptrBitWidth(), 8), + Value.slice_len_index => @divExact(cg.target.ptrBitWidth(), 8), else => unreachable, }; }, @@ -3177,70 +3130,19 @@ fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerEr }, else => unreachable, }; - return func.lowerPtr(field.base, offset + field_off); + return cg.lowerPtr(field.base, offset + field_off); }, .arr_elem, .comptime_field, .comptime_alloc => unreachable, }; } -fn lowerUavRef( - func: *CodeGen, - uav: InternPool.Key.Ptr.BaseAddr.Uav, - offset: u32, -) InnerError!WValue { - const pt = func.pt; - const zcu = pt.zcu; - const ty = Type.fromInterned(zcu.intern_pool.typeOf(uav.val)); - - const is_fn_body = ty.zigTypeTag(zcu) == .@"fn"; - if (!is_fn_body and !ty.hasRuntimeBitsIgnoreComptime(zcu)) { - return .{ .imm32 = 0xaaaaaaaa }; - } - - const decl_align = zcu.intern_pool.indexToKey(uav.orig_ty).ptr_type.flags.alignment; - const res = try func.bin_file.lowerUav(pt, uav.val, decl_align, func.src_loc); - const target_sym_index = switch (res) { - .mcv => |mcv| mcv.load_symbol, - .fail => |err_msg| { - func.err_msg = err_msg; - return error.CodegenFail; - }, - }; - if (is_fn_body) { - return .{ .function_index = target_sym_index }; - } else if (offset == 0) { - return .{ .memory = target_sym_index }; - } else return .{ .memory_offset = .{ .pointer = target_sym_index, .offset = offset } }; -} - -fn lowerNavRef(func: *CodeGen, nav_index: InternPool.Nav.Index, offset: u32) InnerError!WValue { - const pt = func.pt; - const zcu = pt.zcu; - const ip = &zcu.intern_pool; - - const nav_ty = ip.getNav(nav_index).typeOf(ip); - if (!ip.isFunctionType(nav_ty) and !Type.fromInterned(nav_ty).hasRuntimeBitsIgnoreComptime(zcu)) { - return .{ .imm32 = 0xaaaaaaaa }; - } - - const atom_index = try func.bin_file.getOrCreateAtomForNav(pt, nav_index); - const atom = func.bin_file.getAtom(atom_index); - - const target_sym_index = @intFromEnum(atom.sym_index); - if (ip.isFunctionType(nav_ty)) { - return .{ .function_index = target_sym_index }; - } else if (offset == 0) { - return .{ .memory = target_sym_index }; - } else return .{ .memory_offset = .{ .pointer = target_sym_index, .offset = offset } }; -} - /// Asserts that `isByRef` returns `false` for `ty`. 
-fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { - const pt = func.pt; +fn lowerConstant(cg: *CodeGen, val: Value, ty: Type) InnerError!WValue { + const pt = cg.pt; const zcu = pt.zcu; - assert(!isByRef(ty, pt, func.target.*)); + assert(!isByRef(ty, zcu, cg.target)); const ip = &zcu.intern_pool; - if (val.isUndefDeep(zcu)) return func.emitUndefined(ty); + if (val.isUndefDeep(zcu)) return cg.emitUndefined(ty); switch (ip.indexToKey(val.ip_index)) { .int_type, @@ -3319,14 +3221,14 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { const payload_type = ty.errorUnionPayload(zcu); if (!payload_type.hasRuntimeBitsIgnoreComptime(zcu)) { // We use the error type directly as the type. - return func.lowerConstant(err_val, err_ty); + return cg.lowerConstant(err_val, err_ty); } - return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{}); + return cg.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{}); }, .enum_tag => |enum_tag| { const int_tag_ty = ip.typeOf(enum_tag.int); - return func.lowerConstant(Value.fromInterned(enum_tag.int), Type.fromInterned(int_tag_ty)); + return cg.lowerConstant(Value.fromInterned(enum_tag.int), Type.fromInterned(int_tag_ty)); }, .float => |float| switch (float.storage) { .f16 => |f16_val| return .{ .imm32 = @as(u16, @bitCast(f16_val)) }, @@ -3334,18 +3236,12 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { .f64 => |f64_val| return .{ .float64 = f64_val }, else => unreachable, }, - .slice => switch (try func.bin_file.lowerUav(pt, val.toIntern(), .none, func.src_loc)) { - .mcv => |mcv| return .{ .memory = mcv.load_symbol }, - .fail => |err_msg| { - func.err_msg = err_msg; - return error.CodegenFail; - }, - }, - .ptr => return func.lowerPtr(val.toIntern(), 0), + .slice => unreachable, // isByRef == true + .ptr => return cg.lowerPtr(val.toIntern(), 0), .opt => if (ty.optionalReprIsPayload(zcu)) { const pl_ty = ty.optionalChild(zcu); if (val.optionalValue(zcu)) |payload| { - return func.lowerConstant(payload, pl_ty); + return cg.lowerConstant(payload, pl_ty); } else { return .{ .imm32 = 0 }; } @@ -3353,12 +3249,12 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { return .{ .imm32 = @intFromBool(!val.isNull(zcu)) }; }, .aggregate => switch (ip.indexToKey(ty.ip_index)) { - .array_type => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(pt)}), + .array_type => return cg.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(pt)}), .vector_type => { - assert(determineSimdStoreStrategy(ty, zcu, func.target.*) == .direct); + assert(determineSimdStoreStrategy(ty, zcu, cg.target) == .direct); var buf: [16]u8 = undefined; val.writeToMemory(pt, &buf) catch unreachable; - return func.storeSimdImmd(buf); + return cg.storeSimdImmd(buf); }, .struct_type => { const struct_type = ip.loadStructType(ty.toIntern()); @@ -3372,7 +3268,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { backing_int_ty, mem.readInt(u64, &buf, .little), ); - return func.lowerConstant(int_val, backing_int_ty); + return cg.lowerConstant(int_val, backing_int_ty); }, else => unreachable, }, @@ -3385,7 +3281,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { const field_index = zcu.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?; break :field_ty Type.fromInterned(union_obj.field_types.get(ip)[field_index]); }; - return func.lowerConstant(Value.fromInterned(un.val), 
constant_ty); + return cg.lowerConstant(Value.fromInterned(un.val), constant_ty); }, .memoized_call => unreachable, } @@ -3393,15 +3289,14 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { /// Stores the value as a 128bit-immediate value by storing it inside /// the list and returning the index into this list as `WValue`. -fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue { - const index = @as(u32, @intCast(func.simd_immediates.items.len)); - try func.simd_immediates.append(func.gpa, value); +fn storeSimdImmd(cg: *CodeGen, value: [16]u8) !WValue { + const index = @as(u32, @intCast(cg.simd_immediates.items.len)); + try cg.simd_immediates.append(cg.gpa, value); return .{ .imm128 = index }; } -fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { - const pt = func.pt; - const zcu = pt.zcu; +fn emitUndefined(cg: *CodeGen, ty: Type) InnerError!WValue { + const zcu = cg.pt.zcu; const ip = &zcu.intern_pool; switch (ty.zigTypeTag(zcu)) { .bool, .error_set => return .{ .imm32 = 0xaaaaaaaa }, @@ -3410,21 +3305,20 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { 33...64 => return .{ .imm64 = 0xaaaaaaaaaaaaaaaa }, else => unreachable, }, - .float => switch (ty.floatBits(func.target.*)) { + .float => switch (ty.floatBits(cg.target.*)) { 16 => return .{ .imm32 = 0xaaaaaaaa }, 32 => return .{ .float32 = @as(f32, @bitCast(@as(u32, 0xaaaaaaaa))) }, 64 => return .{ .float64 = @as(f64, @bitCast(@as(u64, 0xaaaaaaaaaaaaaaaa))) }, else => unreachable, }, - .pointer => switch (func.arch()) { + .pointer => switch (cg.ptr_size) { .wasm32 => return .{ .imm32 = 0xaaaaaaaa }, .wasm64 => return .{ .imm64 = 0xaaaaaaaaaaaaaaaa }, - else => unreachable, }, .optional => { const pl_ty = ty.optionalChild(zcu); if (ty.optionalReprIsPayload(zcu)) { - return func.emitUndefined(pl_ty); + return cg.emitUndefined(pl_ty); } return .{ .imm32 = 0xaaaaaaaa }; }, @@ -3433,26 +3327,25 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { }, .@"struct" => { const packed_struct = zcu.typeToPackedStruct(ty).?; - return func.emitUndefined(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip))); + return cg.emitUndefined(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip))); }, - else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(zcu)}), + else => return cg.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(zcu)}), } } /// Returns a `Value` as a signed 32 bit value. /// It's illegal to provide a value with a type that cannot be represented /// as an integer value. 
-fn valueAsI32(func: *const CodeGen, val: Value) i32 { - const pt = func.pt; - const zcu = pt.zcu; +fn valueAsI32(cg: *const CodeGen, val: Value) i32 { + const zcu = cg.pt.zcu; const ip = &zcu.intern_pool; switch (val.toIntern()) { .bool_true => return 1, .bool_false => return 0, else => return switch (ip.indexToKey(val.ip_index)) { - .enum_tag => |enum_tag| intIndexAsI32(ip, enum_tag.int, pt), - .int => |int| intStorageAsI32(int.storage, pt), + .enum_tag => |enum_tag| intIndexAsI32(ip, enum_tag.int, zcu), + .int => |int| intStorageAsI32(int.storage, zcu), .ptr => |ptr| { assert(ptr.base_addr == .int); return @intCast(ptr.byte_offset); @@ -3463,12 +3356,11 @@ fn valueAsI32(func: *const CodeGen, val: Value) i32 { } } -fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, pt: Zcu.PerThread) i32 { - return intStorageAsI32(ip.indexToKey(int).int.storage, pt); +fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, zcu: *const Zcu) i32 { + return intStorageAsI32(ip.indexToKey(int).int.storage, zcu); } -fn intStorageAsI32(storage: InternPool.Key.Int.Storage, pt: Zcu.PerThread) i32 { - const zcu = pt.zcu; +fn intStorageAsI32(storage: InternPool.Key.Int.Storage, zcu: *const Zcu) i32 { return switch (storage) { .i64 => |x| @as(i32, @intCast(x)), .u64 => |x| @as(i32, @bitCast(@as(u32, @intCast(x)))), @@ -3478,145 +3370,144 @@ fn intStorageAsI32(storage: InternPool.Key.Int.Storage, pt: Zcu.PerThread) i32 { }; } -fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = func.air.extraData(Air.Block, ty_pl.payload); - try func.lowerBlock(inst, ty_pl.ty.toType(), @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len])); +fn airBlock(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = cg.air.extraData(Air.Block, ty_pl.payload); + try cg.lowerBlock(inst, ty_pl.ty.toType(), @ptrCast(cg.air.extra[extra.end..][0..extra.data.body_len])); } -fn lowerBlock(func: *CodeGen, inst: Air.Inst.Index, block_ty: Type, body: []const Air.Inst.Index) InnerError!void { - const pt = func.pt; - const wasm_block_ty = genBlockType(block_ty, pt, func.target.*); +fn lowerBlock(cg: *CodeGen, inst: Air.Inst.Index, block_ty: Type, body: []const Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const wasm_block_ty = genBlockType(block_ty, zcu, cg.target); // if wasm_block_ty is non-empty, we create a register to store the temporary value - const block_result: WValue = if (wasm_block_ty != wasm.block_empty) blk: { - const ty: Type = if (isByRef(block_ty, pt, func.target.*)) Type.u32 else block_ty; - break :blk try func.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten + const block_result: WValue = if (wasm_block_ty != .empty) blk: { + const ty: Type = if (isByRef(block_ty, zcu, cg.target)) Type.u32 else block_ty; + break :blk try cg.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten } else .none; - try func.startBlock(.block, wasm.block_empty); + try cg.startBlock(.block, .empty); // Here we set the current block idx, so breaks know the depth to jump // to when breaking out. 
- try func.blocks.putNoClobber(func.gpa, inst, .{ - .label = func.block_depth, + try cg.blocks.putNoClobber(cg.gpa, inst, .{ + .label = cg.block_depth, .value = block_result, }); - try func.genBody(body); - try func.endBlock(); + try cg.genBody(body); + try cg.endBlock(); - const liveness = func.liveness.getBlock(inst); - try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.deaths.len); + const liveness = cg.liveness.getBlock(inst); + try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, liveness.deaths.len); - return func.finishAir(inst, block_result, &.{}); + return cg.finishAir(inst, block_result, &.{}); } /// appends a new wasm block to the code section and increases the `block_depth` by 1 -fn startBlock(func: *CodeGen, block_tag: wasm.Opcode, valtype: u8) !void { - func.block_depth += 1; - try func.addInst(.{ +fn startBlock(cg: *CodeGen, block_tag: std.wasm.Opcode, block_type: std.wasm.BlockType) !void { + cg.block_depth += 1; + try cg.addInst(.{ .tag = Mir.Inst.Tag.fromOpcode(block_tag), - .data = .{ .block_type = valtype }, + .data = .{ .block_type = block_type }, }); } /// Ends the current wasm block and decreases the `block_depth` by 1 -fn endBlock(func: *CodeGen) !void { - try func.addTag(.end); - func.block_depth -= 1; +fn endBlock(cg: *CodeGen) !void { + try cg.addTag(.end); + cg.block_depth -= 1; } -fn airLoop(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const loop = func.air.extraData(Air.Block, ty_pl.payload); - const body: []const Air.Inst.Index = @ptrCast(func.air.extra[loop.end..][0..loop.data.body_len]); +fn airLoop(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const loop = cg.air.extraData(Air.Block, ty_pl.payload); + const body: []const Air.Inst.Index = @ptrCast(cg.air.extra[loop.end..][0..loop.data.body_len]); // result type of loop is always 'noreturn', meaning we can always // emit the wasm type 'block_empty'. 
- try func.startBlock(.loop, wasm.block_empty); + try cg.startBlock(.loop, .empty); - try func.loops.putNoClobber(func.gpa, inst, func.block_depth); - defer assert(func.loops.remove(inst)); + try cg.loops.putNoClobber(cg.gpa, inst, cg.block_depth); + defer assert(cg.loops.remove(inst)); - try func.genBody(body); - try func.endBlock(); + try cg.genBody(body); + try cg.endBlock(); - return func.finishAir(inst, .none, &.{}); + return cg.finishAir(inst, .none, &.{}); } -fn airCondBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const condition = try func.resolveInst(pl_op.operand); - const extra = func.air.extraData(Air.CondBr, pl_op.payload); - const then_body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end..][0..extra.data.then_body_len]); - const else_body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]); - const liveness_condbr = func.liveness.getCondBr(inst); +fn airCondBr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; + const condition = try cg.resolveInst(pl_op.operand); + const extra = cg.air.extraData(Air.CondBr, pl_op.payload); + const then_body: []const Air.Inst.Index = @ptrCast(cg.air.extra[extra.end..][0..extra.data.then_body_len]); + const else_body: []const Air.Inst.Index = @ptrCast(cg.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]); + const liveness_condbr = cg.liveness.getCondBr(inst); // result type is always noreturn, so use `block_empty` as type. - try func.startBlock(.block, wasm.block_empty); + try cg.startBlock(.block, .empty); // emit the conditional value - try func.emitWValue(condition); + try cg.emitWValue(condition); // we inserted the block in front of the condition // so now check if condition matches. 
If not, break outside this block // and continue with the then codepath - try func.addLabel(.br_if, 0); + try cg.addLabel(.br_if, 0); - try func.branches.ensureUnusedCapacity(func.gpa, 2); + try cg.branches.ensureUnusedCapacity(cg.gpa, 2); { - func.branches.appendAssumeCapacity(.{}); - try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @as(u32, @intCast(liveness_condbr.else_deaths.len))); + cg.branches.appendAssumeCapacity(.{}); + try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, @as(u32, @intCast(liveness_condbr.else_deaths.len))); defer { - var else_stack = func.branches.pop(); - else_stack.deinit(func.gpa); + var else_stack = cg.branches.pop(); + else_stack.deinit(cg.gpa); } - try func.genBody(else_body); - try func.endBlock(); + try cg.genBody(else_body); + try cg.endBlock(); } // Outer block that matches the condition { - func.branches.appendAssumeCapacity(.{}); - try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @as(u32, @intCast(liveness_condbr.then_deaths.len))); + cg.branches.appendAssumeCapacity(.{}); + try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, @as(u32, @intCast(liveness_condbr.then_deaths.len))); defer { - var then_stack = func.branches.pop(); - then_stack.deinit(func.gpa); + var then_stack = cg.branches.pop(); + then_stack.deinit(cg.gpa); } - try func.genBody(then_body); + try cg.genBody(then_body); } - return func.finishAir(inst, .none, &.{}); + return cg.finishAir(inst, .none, &.{}); } -fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) InnerError!void { - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; +fn airCmp(cg: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) InnerError!void { + const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); - const operand_ty = func.typeOf(bin_op.lhs); - const result = try func.cmp(lhs, rhs, operand_ty, op); - return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); + const lhs = try cg.resolveInst(bin_op.lhs); + const rhs = try cg.resolveInst(bin_op.rhs); + const operand_ty = cg.typeOf(bin_op.lhs); + const result = try cg.cmp(lhs, rhs, operand_ty, op); + return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } /// Compares two operands. /// Asserts rhs is not a stack value when the lhs isn't a stack value either /// NOTE: This leaves the result on top of the stack, rather than a new local. -fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue { +fn cmp(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue { assert(!(lhs != .stack and rhs == .stack)); - const pt = func.pt; - const zcu = pt.zcu; + const zcu = cg.pt.zcu; if (ty.zigTypeTag(zcu) == .optional and !ty.optionalReprIsPayload(zcu)) { const payload_ty = ty.optionalChild(zcu); if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { // When we hit this case, we must check the value of optionals // that are not pointers. 
This means first checking against non-null for // both lhs and rhs, as well as checking the payload are matching of lhs and rhs - return func.cmpOptionals(lhs, rhs, ty, op); + return cg.cmpOptionals(lhs, rhs, ty, op); } } else if (ty.isAnyFloat()) { - return func.cmpFloat(ty, lhs, rhs, op); - } else if (isByRef(ty, pt, func.target.*)) { - return func.cmpBigInt(lhs, rhs, ty, op); + return cg.cmpFloat(ty, lhs, rhs, op); + } else if (isByRef(ty, zcu, cg.target)) { + return cg.cmpBigInt(lhs, rhs, ty, op); } const signedness: std.builtin.Signedness = blk: { @@ -3629,11 +3520,11 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO // ensure that when we compare pointers, we emit // the true pointer of a stack value, rather than the stack pointer. - try func.lowerToStack(lhs); - try func.lowerToStack(rhs); + try cg.lowerToStack(lhs); + try cg.lowerToStack(rhs); - const opcode: wasm.Opcode = buildOpcode(.{ - .valtype1 = typeToValtype(ty, pt, func.target.*), + const opcode: std.wasm.Opcode = buildOpcode(.{ + .valtype1 = typeToValtype(ty, zcu, cg.target), .op = switch (op) { .lt => .lt, .lte => .le, @@ -3644,15 +3535,15 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO }, .signedness = signedness, }); - try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); + try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode)); return .stack; } /// Compares two floats. /// NOTE: Leaves the result of the comparison on top of the stack. -fn cmpFloat(func: *CodeGen, ty: Type, lhs: WValue, rhs: WValue, cmp_op: std.math.CompareOperator) InnerError!WValue { - const float_bits = ty.floatBits(func.target.*); +fn cmpFloat(cg: *CodeGen, ty: Type, lhs: WValue, rhs: WValue, cmp_op: std.math.CompareOperator) InnerError!WValue { + const float_bits = ty.floatBits(cg.target.*); const op: Op = switch (cmp_op) { .lt => .lt, @@ -3665,143 +3556,137 @@ fn cmpFloat(func: *CodeGen, ty: Type, lhs: WValue, rhs: WValue, cmp_op: std.math switch (float_bits) { 16 => { - _ = try func.fpext(lhs, Type.f16, Type.f32); - _ = try func.fpext(rhs, Type.f16, Type.f32); + _ = try cg.fpext(lhs, Type.f16, Type.f32); + _ = try cg.fpext(rhs, Type.f16, Type.f32); const opcode = buildOpcode(.{ .op = op, .valtype1 = .f32 }); - try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); + try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode)); return .stack; }, 32, 64 => { - try func.emitWValue(lhs); - try func.emitWValue(rhs); - const val_type: wasm.Valtype = if (float_bits == 32) .f32 else .f64; + try cg.emitWValue(lhs); + try cg.emitWValue(rhs); + const val_type: std.wasm.Valtype = if (float_bits == 32) .f32 else .f64; const opcode = buildOpcode(.{ .op = op, .valtype1 = val_type }); - try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); + try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode)); return .stack; }, 80, 128 => { - var fn_name_buf: [32]u8 = undefined; - const fn_name = std.fmt.bufPrint(&fn_name_buf, "__{s}{s}f2", .{ - @tagName(op), target_util.compilerRtFloatAbbrev(float_bits), - }) catch unreachable; - - const result = try func.callIntrinsic(fn_name, &.{ ty.ip_index, ty.ip_index }, Type.bool, &.{ lhs, rhs }); - return func.cmp(result, .{ .imm32 = 0 }, Type.i32, cmp_op); + const intrinsic = floatCmpIntrinsic(cmp_op, float_bits); + const result = try cg.callIntrinsic(intrinsic, &.{ ty.ip_index, ty.ip_index }, Type.bool, &.{ lhs, rhs }); + return cg.cmp(result, .{ .imm32 = 0 }, Type.i32, cmp_op); }, else => unreachable, } } -fn airCmpVector(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { +fn airCmpVector(cg: 
*CodeGen, inst: Air.Inst.Index) InnerError!void { _ = inst; - return func.fail("TODO implement airCmpVector for wasm", .{}); + return cg.fail("TODO implement airCmpVector for wasm", .{}); } -fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const operand = try func.resolveInst(un_op); - const sym_index = try func.bin_file.getGlobalSymbol("__zig_errors_len", null); - const errors_len: WValue = .{ .memory = @intFromEnum(sym_index) }; +fn airCmpLtErrorsLen(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const operand = try cg.resolveInst(un_op); - try func.emitWValue(operand); - const pt = func.pt; + try cg.emitWValue(operand); + const pt = cg.pt; const err_int_ty = try pt.errorIntType(); - const errors_len_val = try func.load(errors_len, err_int_ty, 0); - const result = try func.cmp(.stack, errors_len_val, err_int_ty, .lt); + try cg.addTag(.errors_len); + const result = try cg.cmp(.stack, .stack, err_int_ty, .lt); - return func.finishAir(inst, result, &.{un_op}); + return cg.finishAir(inst, result, &.{un_op}); } -fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const zcu = func.pt.zcu; - const br = func.air.instructions.items(.data)[@intFromEnum(inst)].br; - const block = func.blocks.get(br.block_inst).?; +fn airBr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const br = cg.air.instructions.items(.data)[@intFromEnum(inst)].br; + const block = cg.blocks.get(br.block_inst).?; // if operand has codegen bits we should break with a value - if (func.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(zcu)) { - const operand = try func.resolveInst(br.operand); - try func.lowerToStack(operand); + if (cg.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(zcu)) { + const operand = try cg.resolveInst(br.operand); + try cg.lowerToStack(operand); if (block.value != .none) { - try func.addLabel(.local_set, block.value.local.value); + try cg.addLocal(.local_set, block.value.local.value); } } // We map every block to its block index. 
// We then determine how far we have to jump to it by subtracting it from current block depth - const idx: u32 = func.block_depth - block.label; - try func.addLabel(.br, idx); + const idx: u32 = cg.block_depth - block.label; + try cg.addLabel(.br, idx); - return func.finishAir(inst, .none, &.{br.operand}); + return cg.finishAir(inst, .none, &.{br.operand}); } -fn airRepeat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const repeat = func.air.instructions.items(.data)[@intFromEnum(inst)].repeat; - const loop_label = func.loops.get(repeat.loop_inst).?; +fn airRepeat(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const repeat = cg.air.instructions.items(.data)[@intFromEnum(inst)].repeat; + const loop_label = cg.loops.get(repeat.loop_inst).?; - const idx: u32 = func.block_depth - loop_label; - try func.addLabel(.br, idx); + const idx: u32 = cg.block_depth - loop_label; + try cg.addLabel(.br, idx); - return func.finishAir(inst, .none, &.{}); + return cg.finishAir(inst, .none, &.{}); } -fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airNot(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand = try func.resolveInst(ty_op.operand); - const operand_ty = func.typeOf(ty_op.operand); - const pt = func.pt; + const operand = try cg.resolveInst(ty_op.operand); + const operand_ty = cg.typeOf(ty_op.operand); + const pt = cg.pt; const zcu = pt.zcu; const result = result: { if (operand_ty.zigTypeTag(zcu) == .bool) { - try func.emitWValue(operand); - try func.addTag(.i32_eqz); - const not_tmp = try func.allocLocal(operand_ty); - try func.addLabel(.local_set, not_tmp.local.value); + try cg.emitWValue(operand); + try cg.addTag(.i32_eqz); + const not_tmp = try cg.allocLocal(operand_ty); + try cg.addLocal(.local_set, not_tmp.local.value); break :result not_tmp; } else { const int_info = operand_ty.intInfo(zcu); const wasm_bits = toWasmBits(int_info.bits) orelse { - return func.fail("TODO: Implement binary NOT for {}", .{operand_ty.fmt(pt)}); + return cg.fail("TODO: Implement binary NOT for {}", .{operand_ty.fmt(pt)}); }; switch (wasm_bits) { 32 => { - try func.emitWValue(operand); - try func.addImm32(switch (int_info.signedness) { + try cg.emitWValue(operand); + try cg.addImm32(switch (int_info.signedness) { .unsigned => ~@as(u32, 0) >> @intCast(32 - int_info.bits), .signed => ~@as(u32, 0), }); - try func.addTag(.i32_xor); + try cg.addTag(.i32_xor); break :result .stack; }, 64 => { - try func.emitWValue(operand); - try func.addImm64(switch (int_info.signedness) { + try cg.emitWValue(operand); + try cg.addImm64(switch (int_info.signedness) { .unsigned => ~@as(u64, 0) >> @intCast(64 - int_info.bits), .signed => ~@as(u64, 0), }); - try func.addTag(.i64_xor); + try cg.addTag(.i64_xor); break :result .stack; }, 128 => { - const ptr = try func.allocStack(operand_ty); + const ptr = try cg.allocStack(operand_ty); - try func.emitWValue(ptr); - _ = try func.load(operand, Type.u64, 0); - try func.addImm64(~@as(u64, 0)); - try func.addTag(.i64_xor); - try func.store(.stack, .stack, Type.u64, ptr.offset()); + try cg.emitWValue(ptr); + _ = try cg.load(operand, Type.u64, 0); + try cg.addImm64(~@as(u64, 0)); + try cg.addTag(.i64_xor); + try cg.store(.stack, .stack, Type.u64, ptr.offset()); - try func.emitWValue(ptr); - _ = try func.load(operand, Type.u64, 8); - try func.addImm64(switch (int_info.signedness) { + try 
cg.emitWValue(ptr); + _ = try cg.load(operand, Type.u64, 8); + try cg.addImm64(switch (int_info.signedness) { .unsigned => ~@as(u64, 0) >> @intCast(128 - int_info.bits), .signed => ~@as(u64, 0), }); - try func.addTag(.i64_xor); - try func.store(.stack, .stack, Type.u64, ptr.offset() + 8); + try cg.addTag(.i64_xor); + try cg.store(.stack, .stack, Type.u64, ptr.offset() + 8); break :result ptr; }, @@ -3809,33 +3694,32 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } } }; - return func.finishAir(inst, result, &.{ty_op.operand}); + return cg.finishAir(inst, result, &.{ty_op.operand}); } -fn airTrap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - try func.addTag(.@"unreachable"); - return func.finishAir(inst, .none, &.{}); +fn airTrap(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + try cg.addTag(.@"unreachable"); + return cg.finishAir(inst, .none, &.{}); } -fn airBreakpoint(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { +fn airBreakpoint(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { // unsupported by wasm itfunc. Can be implemented once we support DWARF // for wasm - try func.addTag(.@"unreachable"); - return func.finishAir(inst, .none, &.{}); + try cg.addTag(.@"unreachable"); + return cg.finishAir(inst, .none, &.{}); } -fn airUnreachable(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - try func.addTag(.@"unreachable"); - return func.finishAir(inst, .none, &.{}); +fn airUnreachable(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + try cg.addTag(.@"unreachable"); + return cg.finishAir(inst, .none, &.{}); } -fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand = try func.resolveInst(ty_op.operand); - const wanted_ty = func.typeOfIndex(inst); - const given_ty = func.typeOf(ty_op.operand); +fn airBitcast(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const operand = try cg.resolveInst(ty_op.operand); + const wanted_ty = cg.typeOfIndex(inst); + const given_ty = cg.typeOf(ty_op.operand); const bit_size = given_ty.bitSize(zcu); const needs_wrapping = (given_ty.isSignedInt(zcu) != wanted_ty.isSignedInt(zcu)) and @@ -3843,39 +3727,38 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = result: { if (given_ty.isAnyFloat() or wanted_ty.isAnyFloat()) { - break :result try func.bitcast(wanted_ty, given_ty, operand); + break :result try cg.bitcast(wanted_ty, given_ty, operand); } - if (isByRef(given_ty, pt, func.target.*) and !isByRef(wanted_ty, pt, func.target.*)) { - const loaded_memory = try func.load(operand, wanted_ty, 0); + if (isByRef(given_ty, zcu, cg.target) and !isByRef(wanted_ty, zcu, cg.target)) { + const loaded_memory = try cg.load(operand, wanted_ty, 0); if (needs_wrapping) { - break :result try func.wrapOperand(loaded_memory, wanted_ty); + break :result try cg.wrapOperand(loaded_memory, wanted_ty); } else { break :result loaded_memory; } } - if (!isByRef(given_ty, pt, func.target.*) and isByRef(wanted_ty, pt, func.target.*)) { - const stack_memory = try func.allocStack(wanted_ty); - try func.store(stack_memory, operand, given_ty, 0); + if (!isByRef(given_ty, zcu, cg.target) and isByRef(wanted_ty, zcu, cg.target)) { + const stack_memory = try cg.allocStack(wanted_ty); + try cg.store(stack_memory, operand, given_ty, 0); if 
(needs_wrapping) { - break :result try func.wrapOperand(stack_memory, wanted_ty); + break :result try cg.wrapOperand(stack_memory, wanted_ty); } else { break :result stack_memory; } } if (needs_wrapping) { - break :result try func.wrapOperand(operand, wanted_ty); + break :result try cg.wrapOperand(operand, wanted_ty); } - break :result func.reuseOperand(ty_op.operand, operand); + break :result cg.reuseOperand(ty_op.operand, operand); }; - return func.finishAir(inst, result, &.{ty_op.operand}); + return cg.finishAir(inst, result, &.{ty_op.operand}); } -fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) InnerError!WValue { - const pt = func.pt; - const zcu = pt.zcu; +fn bitcast(cg: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) InnerError!WValue { + const zcu = cg.pt.zcu; // if we bitcast a float to or from an integer we must use the 'reinterpret' instruction if (!(wanted_ty.isAnyFloat() or given_ty.isAnyFloat())) return operand; if (wanted_ty.ip_index == .f16_type or given_ty.ip_index == .f16_type) return operand; @@ -3884,41 +3767,39 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn const opcode = buildOpcode(.{ .op = .reinterpret, - .valtype1 = typeToValtype(wanted_ty, pt, func.target.*), - .valtype2 = typeToValtype(given_ty, pt, func.target.*), + .valtype1 = typeToValtype(wanted_ty, zcu, cg.target), + .valtype2 = typeToValtype(given_ty, zcu, cg.target), }); - try func.emitWValue(operand); - try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); + try cg.emitWValue(operand); + try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode)); return .stack; } -fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = func.air.extraData(Air.StructField, ty_pl.payload); +fn airStructFieldPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = cg.air.extraData(Air.StructField, ty_pl.payload); - const struct_ptr = try func.resolveInst(extra.data.struct_operand); - const struct_ptr_ty = func.typeOf(extra.data.struct_operand); + const struct_ptr = try cg.resolveInst(extra.data.struct_operand); + const struct_ptr_ty = cg.typeOf(extra.data.struct_operand); const struct_ty = struct_ptr_ty.childType(zcu); - const result = try func.structFieldPtr(inst, extra.data.struct_operand, struct_ptr, struct_ptr_ty, struct_ty, extra.data.field_index); - return func.finishAir(inst, result, &.{extra.data.struct_operand}); + const result = try cg.structFieldPtr(inst, extra.data.struct_operand, struct_ptr, struct_ptr_ty, struct_ty, extra.data.field_index); + return cg.finishAir(inst, result, &.{extra.data.struct_operand}); } -fn airStructFieldPtrIndex(func: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const struct_ptr = try func.resolveInst(ty_op.operand); - const struct_ptr_ty = func.typeOf(ty_op.operand); +fn airStructFieldPtrIndex(cg: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void { + const zcu = cg.pt.zcu; + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const struct_ptr = try cg.resolveInst(ty_op.operand); + const struct_ptr_ty = cg.typeOf(ty_op.operand); const struct_ty = struct_ptr_ty.childType(zcu); - const 
result = try func.structFieldPtr(inst, ty_op.operand, struct_ptr, struct_ptr_ty, struct_ty, index); - return func.finishAir(inst, result, &.{ty_op.operand}); + const result = try cg.structFieldPtr(inst, ty_op.operand, struct_ptr, struct_ptr_ty, struct_ty, index); + return cg.finishAir(inst, result, &.{ty_op.operand}); } fn structFieldPtr( - func: *CodeGen, + cg: *CodeGen, inst: Air.Inst.Index, ref: Air.Inst.Ref, struct_ptr: WValue, @@ -3926,9 +3807,9 @@ fn structFieldPtr( struct_ty: Type, index: u32, ) InnerError!WValue { - const pt = func.pt; + const pt = cg.pt; const zcu = pt.zcu; - const result_ty = func.typeOfIndex(inst); + const result_ty = cg.typeOfIndex(inst); const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(zcu); const offset = switch (struct_ty.containerLayout(zcu)) { @@ -3947,28 +3828,28 @@ fn structFieldPtr( }; // save a load and store when we can simply reuse the operand if (offset == 0) { - return func.reuseOperand(ref, struct_ptr); + return cg.reuseOperand(ref, struct_ptr); } switch (struct_ptr) { .stack_offset => |stack_offset| { return .{ .stack_offset = .{ .value = stack_offset.value + @as(u32, @intCast(offset)), .references = 1 } }; }, - else => return func.buildPointerOffset(struct_ptr, offset, .new), + else => return cg.buildPointerOffset(struct_ptr, offset, .new), } } -fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; +fn airStructFieldVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const pt = cg.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; - const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data; + const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const struct_field = cg.air.extraData(Air.StructField, ty_pl.payload).data; - const struct_ty = func.typeOf(struct_field.struct_operand); - const operand = try func.resolveInst(struct_field.struct_operand); + const struct_ty = cg.typeOf(struct_field.struct_operand); + const operand = try cg.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; const field_ty = struct_ty.fieldType(field_index, zcu); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return func.finishAir(inst, .none, &.{struct_field.struct_operand}); + if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return cg.finishAir(inst, .none, &.{struct_field.struct_operand}); const result: WValue = switch (struct_ty.containerLayout(zcu)) { .@"packed" => switch (struct_ty.zigTypeTag(zcu)) { @@ -3977,42 +3858,42 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const offset = pt.structPackedFieldBitOffset(packed_struct, field_index); const backing_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)); const wasm_bits = toWasmBits(backing_ty.intInfo(zcu).bits) orelse { - return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{}); + return cg.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{}); }; const const_wvalue: WValue = if (wasm_bits == 32) .{ .imm32 = offset } else if (wasm_bits == 64) .{ .imm64 = offset } else - return func.fail("TODO: airStructFieldVal for packed structs larger than 64 bits", .{}); + return cg.fail("TODO: airStructFieldVal for packed structs larger than 64 bits", .{}); // for first field we don't require any shifting const shifted_value = if (offset == 0) operand else - try func.binOp(operand, const_wvalue, backing_ty, .shr); + 
try cg.binOp(operand, const_wvalue, backing_ty, .shr); if (field_ty.zigTypeTag(zcu) == .float) { const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu)))); - const truncated = try func.trunc(shifted_value, int_type, backing_ty); - break :result try func.bitcast(field_ty, int_type, truncated); + const truncated = try cg.trunc(shifted_value, int_type, backing_ty); + break :result try cg.bitcast(field_ty, int_type, truncated); } else if (field_ty.isPtrAtRuntime(zcu) and packed_struct.field_types.len == 1) { // In this case we do not have to perform any transformations, // we can simply reuse the operand. - break :result func.reuseOperand(struct_field.struct_operand, operand); + break :result cg.reuseOperand(struct_field.struct_operand, operand); } else if (field_ty.isPtrAtRuntime(zcu)) { const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu)))); - break :result try func.trunc(shifted_value, int_type, backing_ty); + break :result try cg.trunc(shifted_value, int_type, backing_ty); } - break :result try func.trunc(shifted_value, field_ty, backing_ty); + break :result try cg.trunc(shifted_value, field_ty, backing_ty); }, .@"union" => result: { - if (isByRef(struct_ty, pt, func.target.*)) { - if (!isByRef(field_ty, pt, func.target.*)) { - break :result try func.load(operand, field_ty, 0); + if (isByRef(struct_ty, zcu, cg.target)) { + if (!isByRef(field_ty, zcu, cg.target)) { + break :result try cg.load(operand, field_ty, 0); } else { - const new_stack_val = try func.allocStack(field_ty); - try func.store(new_stack_val, operand, field_ty, 0); + const new_stack_val = try cg.allocStack(field_ty); + try cg.store(new_stack_val, operand, field_ty, 0); break :result new_stack_val; } } @@ -4020,45 +3901,45 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const union_int_type = try pt.intType(.unsigned, @as(u16, @intCast(struct_ty.bitSize(zcu)))); if (field_ty.zigTypeTag(zcu) == .float) { const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu)))); - const truncated = try func.trunc(operand, int_type, union_int_type); - break :result try func.bitcast(field_ty, int_type, truncated); + const truncated = try cg.trunc(operand, int_type, union_int_type); + break :result try cg.bitcast(field_ty, int_type, truncated); } else if (field_ty.isPtrAtRuntime(zcu)) { const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu)))); - break :result try func.trunc(operand, int_type, union_int_type); + break :result try cg.trunc(operand, int_type, union_int_type); } - break :result try func.trunc(operand, field_ty, union_int_type); + break :result try cg.trunc(operand, field_ty, union_int_type); }, else => unreachable, }, else => result: { const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, zcu)) orelse { - return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(pt)}); + return cg.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(pt)}); }; - if (isByRef(field_ty, pt, func.target.*)) { + if (isByRef(field_ty, zcu, cg.target)) { switch (operand) { .stack_offset => |stack_offset| { break :result .{ .stack_offset = .{ .value = stack_offset.value + offset, .references = 1 } }; }, - else => break :result try func.buildPointerOffset(operand, offset, .new), + else => break :result try cg.buildPointerOffset(operand, offset, .new), } } - break :result try func.load(operand, field_ty, offset); + break :result try cg.load(operand, 
field_ty, offset); }, }; - return func.finishAir(inst, result, &.{struct_field.struct_operand}); + return cg.finishAir(inst, result, &.{struct_field.struct_operand}); } -fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; +fn airSwitchBr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const pt = cg.pt; const zcu = pt.zcu; // result type is always 'noreturn' - const blocktype = wasm.block_empty; - const switch_br = func.air.unwrapSwitch(inst); - const target = try func.resolveInst(switch_br.operand); - const target_ty = func.typeOf(switch_br.operand); - const liveness = try func.liveness.getSwitchBr(func.gpa, inst, switch_br.cases_len + 1); - defer func.gpa.free(liveness.deaths); + const blocktype: std.wasm.BlockType = .empty; + const switch_br = cg.air.unwrapSwitch(inst); + const target = try cg.resolveInst(switch_br.operand); + const target_ty = cg.typeOf(switch_br.operand); + const liveness = try cg.liveness.getSwitchBr(cg.gpa, inst, switch_br.cases_len + 1); + defer cg.gpa.free(liveness.deaths); // a list that maps each value with its value and body based on the order inside the list. const CaseValue = union(enum) { @@ -4068,21 +3949,21 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var case_list = try std.ArrayList(struct { values: []const CaseValue, body: []const Air.Inst.Index, - }).initCapacity(func.gpa, switch_br.cases_len); + }).initCapacity(cg.gpa, switch_br.cases_len); defer for (case_list.items) |case| { - func.gpa.free(case.values); + cg.gpa.free(case.values); } else case_list.deinit(); var lowest_maybe: ?i32 = null; var highest_maybe: ?i32 = null; var it = switch_br.iterateCases(); while (it.next()) |case| { - const values = try func.gpa.alloc(CaseValue, case.items.len + case.ranges.len); - errdefer func.gpa.free(values); + const values = try cg.gpa.alloc(CaseValue, case.items.len + case.ranges.len); + errdefer cg.gpa.free(values); for (case.items, 0..) |ref, i| { - const item_val = (try func.air.value(ref, pt)).?; - const int_val = func.valueAsI32(item_val); + const item_val = (try cg.air.value(ref, pt)).?; + const int_val = cg.valueAsI32(item_val); if (lowest_maybe == null or int_val < lowest_maybe.?) { lowest_maybe = int_val; } @@ -4093,15 +3974,15 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } for (case.ranges, 0..) |range, i| { - const min_val = (try func.air.value(range[0], pt)).?; - const int_min_val = func.valueAsI32(min_val); + const min_val = (try cg.air.value(range[0], pt)).?; + const int_min_val = cg.valueAsI32(min_val); if (lowest_maybe == null or int_min_val < lowest_maybe.?) { lowest_maybe = int_min_val; } - const max_val = (try func.air.value(range[1], pt)).?; - const int_max_val = func.valueAsI32(max_val); + const max_val = (try cg.air.value(range[1], pt)).?; + const int_max_val = cg.valueAsI32(max_val); if (highest_maybe == null or int_max_val > highest_maybe.?) 
{ highest_maybe = int_max_val; @@ -4116,7 +3997,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } case_list.appendAssumeCapacity(.{ .values = values, .body = case.body }); - try func.startBlock(.block, blocktype); + try cg.startBlock(.block, blocktype); } // When highest and lowest are null, we have no cases and can use a jump table @@ -4132,7 +4013,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const else_body = it.elseBody(); const has_else_body = else_body.len != 0; if (has_else_body) { - try func.startBlock(.block, blocktype); + try cg.startBlock(.block, blocktype); } if (!is_sparse) { @@ -4140,25 +4021,25 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // The value 'target' represents the index into the table. // Each index in the table represents a label to the branch // to jump to. - try func.startBlock(.block, blocktype); - try func.emitWValue(target); + try cg.startBlock(.block, blocktype); + try cg.emitWValue(target); if (lowest < 0) { // since br_table works using indexes, starting from '0', we must ensure all values // we put inside, are atleast 0. - try func.addImm32(@bitCast(lowest * -1)); - try func.addTag(.i32_add); + try cg.addImm32(@bitCast(lowest * -1)); + try cg.addTag(.i32_add); } else if (lowest > 0) { // make the index start from 0 by substracting the lowest value - try func.addImm32(@bitCast(lowest)); - try func.addTag(.i32_sub); + try cg.addImm32(@bitCast(lowest)); + try cg.addTag(.i32_sub); } // Account for default branch so always add '1' const depth = @as(u32, @intCast(highest - lowest + @intFromBool(has_else_body))) + 1; const jump_table: Mir.JumpTable = .{ .length = depth }; - const table_extra_index = try func.addExtra(jump_table); - try func.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } }); - try func.mir_extra.ensureUnusedCapacity(func.gpa, depth); + const table_extra_index = try cg.addExtra(jump_table); + try cg.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } }); + try cg.mir_extra.ensureUnusedCapacity(cg.gpa, depth); var value = lowest; while (value <= highest) : (value += 1) { // idx represents the branch we jump to @@ -4179,78 +4060,77 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // by using a jump table for this instead of if-else chains. break :blk if (has_else_body or target_ty.zigTypeTag(zcu) == .error_set) switch_br.cases_len else unreachable; }; - func.mir_extra.appendAssumeCapacity(idx); + cg.mir_extra.appendAssumeCapacity(idx); } else if (has_else_body) { - func.mir_extra.appendAssumeCapacity(switch_br.cases_len); // default branch + cg.mir_extra.appendAssumeCapacity(switch_br.cases_len); // default branch } - try func.endBlock(); + try cg.endBlock(); } - try func.branches.ensureUnusedCapacity(func.gpa, case_list.items.len + @intFromBool(has_else_body)); + try cg.branches.ensureUnusedCapacity(cg.gpa, case_list.items.len + @intFromBool(has_else_body)); for (case_list.items, 0..) |case, index| { // when sparse, we use if/else-chain, so emit conditional checks if (is_sparse) { // for single value prong we can emit a simple condition if (case.values.len == 1 and case.values[0] == .singular) { - const val = try func.lowerConstant(case.values[0].singular.value, target_ty); + const val = try cg.lowerConstant(case.values[0].singular.value, target_ty); // not equal, because we want to jump out of this block if it does not match the condition. 
- _ = try func.cmp(target, val, target_ty, .neq); - try func.addLabel(.br_if, 0); + _ = try cg.cmp(target, val, target_ty, .neq); + try cg.addLabel(.br_if, 0); } else { // in multi-value prongs we must check if any prongs match the target value. - try func.startBlock(.block, blocktype); + try cg.startBlock(.block, blocktype); for (case.values) |value| { switch (value) { .singular => |single_val| { - const val = try func.lowerConstant(single_val.value, target_ty); - _ = try func.cmp(target, val, target_ty, .eq); + const val = try cg.lowerConstant(single_val.value, target_ty); + _ = try cg.cmp(target, val, target_ty, .eq); }, .range => |range| { - const min_val = try func.lowerConstant(range.min_value, target_ty); - const max_val = try func.lowerConstant(range.max_value, target_ty); + const min_val = try cg.lowerConstant(range.min_value, target_ty); + const max_val = try cg.lowerConstant(range.max_value, target_ty); - const gte = try func.cmp(target, min_val, target_ty, .gte); - const lte = try func.cmp(target, max_val, target_ty, .lte); - _ = try func.binOp(gte, lte, Type.bool, .@"and"); + const gte = try cg.cmp(target, min_val, target_ty, .gte); + const lte = try cg.cmp(target, max_val, target_ty, .lte); + _ = try cg.binOp(gte, lte, Type.bool, .@"and"); }, } - try func.addLabel(.br_if, 0); + try cg.addLabel(.br_if, 0); } // value did not match any of the prong values - try func.addLabel(.br, 1); - try func.endBlock(); + try cg.addLabel(.br, 1); + try cg.endBlock(); } } - func.branches.appendAssumeCapacity(.{}); - try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.deaths[index].len); + cg.branches.appendAssumeCapacity(.{}); + try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, liveness.deaths[index].len); defer { - var case_branch = func.branches.pop(); - case_branch.deinit(func.gpa); + var case_branch = cg.branches.pop(); + case_branch.deinit(cg.gpa); } - try func.genBody(case.body); - try func.endBlock(); + try cg.genBody(case.body); + try cg.endBlock(); } if (has_else_body) { - func.branches.appendAssumeCapacity(.{}); + cg.branches.appendAssumeCapacity(.{}); const else_deaths = liveness.deaths.len - 1; - try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.deaths[else_deaths].len); + try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, liveness.deaths[else_deaths].len); defer { - var else_branch = func.branches.pop(); - else_branch.deinit(func.gpa); + var else_branch = cg.branches.pop(); + else_branch.deinit(cg.gpa); } - try func.genBody(else_body); - try func.endBlock(); + try cg.genBody(else_body); + try cg.endBlock(); } - return func.finishAir(inst, .none, &.{}); + return cg.finishAir(inst, .none, &.{}); } -fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const operand = try func.resolveInst(un_op); - const err_union_ty = func.typeOf(un_op); +fn airIsErr(cg: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode) InnerError!void { + const zcu = cg.pt.zcu; + const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const operand = try cg.resolveInst(un_op); + const err_union_ty = cg.typeOf(un_op); const pl_ty = err_union_ty.errorUnionPayload(zcu); const result: WValue = result: { @@ -4262,57 +4142,55 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro } } - try func.emitWValue(operand); + try cg.emitWValue(operand); if 
(pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) { - try func.addMemArg(.i32_load16_u, .{ + try cg.addMemArg(.i32_load16_u, .{ .offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, zcu))), .alignment = @intCast(Type.anyerror.abiAlignment(zcu).toByteUnits().?), }); } // Compare the error value with '0' - try func.addImm32(0); - try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); + try cg.addImm32(0); + try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode)); break :result .stack; }; - return func.finishAir(inst, result, &.{un_op}); + return cg.finishAir(inst, result, &.{un_op}); } -fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airUnwrapErrUnionPayload(cg: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void { + const zcu = cg.pt.zcu; + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.typeOf(ty_op.operand); + const operand = try cg.resolveInst(ty_op.operand); + const op_ty = cg.typeOf(ty_op.operand); const err_ty = if (op_is_ptr) op_ty.childType(zcu) else op_ty; const payload_ty = err_ty.errorUnionPayload(zcu); const result: WValue = result: { if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { if (op_is_ptr) { - break :result func.reuseOperand(ty_op.operand, operand); + break :result cg.reuseOperand(ty_op.operand, operand); } break :result .none; } const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, zcu))); - if (op_is_ptr or isByRef(payload_ty, pt, func.target.*)) { - break :result try func.buildPointerOffset(operand, pl_offset, .new); + if (op_is_ptr or isByRef(payload_ty, zcu, cg.target)) { + break :result try cg.buildPointerOffset(operand, pl_offset, .new); } - break :result try func.load(operand, payload_ty, pl_offset); + break :result try cg.load(operand, payload_ty, pl_offset); }; - return func.finishAir(inst, result, &.{ty_op.operand}); + return cg.finishAir(inst, result, &.{ty_op.operand}); } -fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airUnwrapErrUnionError(cg: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void { + const zcu = cg.pt.zcu; + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.typeOf(ty_op.operand); + const operand = try cg.resolveInst(ty_op.operand); + const op_ty = cg.typeOf(ty_op.operand); const err_ty = if (op_is_ptr) op_ty.childType(zcu) else op_ty; const payload_ty = err_ty.errorUnionPayload(zcu); @@ -4322,104 +4200,101 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) } if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { - break :result func.reuseOperand(ty_op.operand, operand); + break :result cg.reuseOperand(ty_op.operand, operand); } - break :result try func.load(operand, Type.anyerror, @intCast(errUnionErrorOffset(payload_ty, zcu))); + break :result try cg.load(operand, Type.anyerror, @intCast(errUnionErrorOffset(payload_ty, zcu))); }; - return func.finishAir(inst, result, &.{ty_op.operand}); + return cg.finishAir(inst, result, &.{ty_op.operand}); } -fn airWrapErrUnionPayload(func: *CodeGen, inst: 
Air.Inst.Index) InnerError!void { - const zcu = func.pt.zcu; - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airWrapErrUnionPayload(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand = try func.resolveInst(ty_op.operand); - const err_ty = func.typeOfIndex(inst); + const operand = try cg.resolveInst(ty_op.operand); + const err_ty = cg.typeOfIndex(inst); - const pl_ty = func.typeOf(ty_op.operand); + const pl_ty = cg.typeOf(ty_op.operand); const result = result: { if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) { - break :result func.reuseOperand(ty_op.operand, operand); + break :result cg.reuseOperand(ty_op.operand, operand); } - const err_union = try func.allocStack(err_ty); - const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, zcu))), .new); - try func.store(payload_ptr, operand, pl_ty, 0); + const err_union = try cg.allocStack(err_ty); + const payload_ptr = try cg.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, zcu))), .new); + try cg.store(payload_ptr, operand, pl_ty, 0); // ensure we also write '0' to the error part, so any present stack value gets overwritten by it. - try func.emitWValue(err_union); - try func.addImm32(0); + try cg.emitWValue(err_union); + try cg.addImm32(0); const err_val_offset: u32 = @intCast(errUnionErrorOffset(pl_ty, zcu)); - try func.addMemArg(.i32_store16, .{ + try cg.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2, }); break :result err_union; }; - return func.finishAir(inst, result, &.{ty_op.operand}); + return cg.finishAir(inst, result, &.{ty_op.operand}); } -fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airWrapErrUnionErr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand = try func.resolveInst(ty_op.operand); + const operand = try cg.resolveInst(ty_op.operand); const err_ty = ty_op.ty.toType(); const pl_ty = err_ty.errorUnionPayload(zcu); const result = result: { if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) { - break :result func.reuseOperand(ty_op.operand, operand); + break :result cg.reuseOperand(ty_op.operand, operand); } - const err_union = try func.allocStack(err_ty); + const err_union = try cg.allocStack(err_ty); // store error value - try func.store(err_union, operand, Type.anyerror, @intCast(errUnionErrorOffset(pl_ty, zcu))); + try cg.store(err_union, operand, Type.anyerror, @intCast(errUnionErrorOffset(pl_ty, zcu))); // write 'undefined' to the payload - const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, zcu))), .new); + const payload_ptr = try cg.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, zcu))), .new); const len = @as(u32, @intCast(err_ty.errorUnionPayload(zcu).abiSize(zcu))); - try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa }); + try cg.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa }); break :result err_union; }; - return func.finishAir(inst, result, &.{ty_op.operand}); + return cg.finishAir(inst, result, &.{ty_op.operand}); } -fn airIntcast(func: *CodeGen, inst: 
Air.Inst.Index) InnerError!void { - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airIntcast(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const ty = ty_op.ty.toType(); - const operand = try func.resolveInst(ty_op.operand); - const operand_ty = func.typeOf(ty_op.operand); - const pt = func.pt; - const zcu = pt.zcu; + const operand = try cg.resolveInst(ty_op.operand); + const operand_ty = cg.typeOf(ty_op.operand); + const zcu = cg.pt.zcu; if (ty.zigTypeTag(zcu) == .vector or operand_ty.zigTypeTag(zcu) == .vector) { - return func.fail("todo Wasm intcast for vectors", .{}); + return cg.fail("todo Wasm intcast for vectors", .{}); } if (ty.abiSize(zcu) > 16 or operand_ty.abiSize(zcu) > 16) { - return func.fail("todo Wasm intcast for bitsize > 128", .{}); + return cg.fail("todo Wasm intcast for bitsize > 128", .{}); } const op_bits = toWasmBits(@intCast(operand_ty.bitSize(zcu))).?; const wanted_bits = toWasmBits(@intCast(ty.bitSize(zcu))).?; const result = if (op_bits == wanted_bits) - func.reuseOperand(ty_op.operand, operand) + cg.reuseOperand(ty_op.operand, operand) else - try func.intcast(operand, operand_ty, ty); + try cg.intcast(operand, operand_ty, ty); - return func.finishAir(inst, result, &.{ty_op.operand}); + return cg.finishAir(inst, result, &.{ty_op.operand}); } /// Upcasts or downcasts an integer based on the given and wanted types, /// and stores the result in a new operand. /// Asserts type's bitsize <= 128 /// NOTE: May leave the result on the top of the stack. -fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue { - const pt = func.pt; - const zcu = pt.zcu; +fn intcast(cg: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue { + const zcu = cg.pt.zcu; const given_bitsize = @as(u16, @intCast(given.bitSize(zcu))); const wanted_bitsize = @as(u16, @intCast(wanted.bitSize(zcu))); assert(given_bitsize <= 128); @@ -4432,470 +4307,456 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro } if (op_bits == 64 and wanted_bits == 32) { - try func.emitWValue(operand); - try func.addTag(.i32_wrap_i64); + try cg.emitWValue(operand); + try cg.addTag(.i32_wrap_i64); return .stack; } else if (op_bits == 32 and wanted_bits == 64) { - try func.emitWValue(operand); - try func.addTag(if (wanted.isSignedInt(zcu)) .i64_extend_i32_s else .i64_extend_i32_u); + try cg.emitWValue(operand); + try cg.addTag(if (wanted.isSignedInt(zcu)) .i64_extend_i32_s else .i64_extend_i32_u); return .stack; } else if (wanted_bits == 128) { // for 128-bit integers we store the integer in the virtual stack, rather than a local - const stack_ptr = try func.allocStack(wanted); - try func.emitWValue(stack_ptr); + const stack_ptr = try cg.allocStack(wanted); + try cg.emitWValue(stack_ptr); // for 32-bit integers, we first coerce the value into a 64-bit integer before storing it // meaning fewer store operations are required. 
const lhs = if (op_bits == 32) blk: { const sign_ty = if (wanted.isSignedInt(zcu)) Type.i64 else Type.u64; - break :blk try (try func.intcast(operand, given, sign_ty)).toLocal(func, sign_ty); + break :blk try (try cg.intcast(operand, given, sign_ty)).toLocal(cg, sign_ty); } else operand; // store lsb first - try func.store(.stack, lhs, Type.u64, 0 + stack_ptr.offset()); + try cg.store(.stack, lhs, Type.u64, 0 + stack_ptr.offset()); // For signed integers we shift lsb by 63 (64bit integer - 1 sign bit) and store remaining value if (wanted.isSignedInt(zcu)) { - try func.emitWValue(stack_ptr); - const shr = try func.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr); - try func.store(.stack, shr, Type.u64, 8 + stack_ptr.offset()); + try cg.emitWValue(stack_ptr); + const shr = try cg.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr); + try cg.store(.stack, shr, Type.u64, 8 + stack_ptr.offset()); } else { // Ensure memory of msb is zero'd - try func.store(stack_ptr, .{ .imm64 = 0 }, Type.u64, 8); + try cg.store(stack_ptr, .{ .imm64 = 0 }, Type.u64, 8); } return stack_ptr; - } else return func.load(operand, wanted, 0); + } else return cg.load(operand, wanted, 0); } -fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const operand = try func.resolveInst(un_op); +fn airIsNull(cg: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void { + const zcu = cg.pt.zcu; + const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const operand = try cg.resolveInst(un_op); - const op_ty = func.typeOf(un_op); + const op_ty = cg.typeOf(un_op); const optional_ty = if (op_kind == .ptr) op_ty.childType(zcu) else op_ty; - const result = try func.isNull(operand, optional_ty, opcode); - return func.finishAir(inst, result, &.{un_op}); + const result = try cg.isNull(operand, optional_ty, opcode); + return cg.finishAir(inst, result, &.{un_op}); } /// For a given type and operand, checks if it's considered `null`. 
/// NOTE: Leaves the result on the stack -fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue { - const pt = func.pt; +fn isNull(cg: *CodeGen, operand: WValue, optional_ty: Type, opcode: std.wasm.Opcode) InnerError!WValue { + const pt = cg.pt; const zcu = pt.zcu; - try func.emitWValue(operand); + try cg.emitWValue(operand); const payload_ty = optional_ty.optionalChild(zcu); if (!optional_ty.optionalReprIsPayload(zcu)) { // When payload is zero-bits, we can treat operand as a value, rather than // a pointer to the stack value if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse { - return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(pt)}); + return cg.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(pt)}); }; - try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 }); + try cg.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 }); } } else if (payload_ty.isSlice(zcu)) { - switch (func.arch()) { - .wasm32 => try func.addMemArg(.i32_load, .{ .offset = operand.offset(), .alignment = 4 }), - .wasm64 => try func.addMemArg(.i64_load, .{ .offset = operand.offset(), .alignment = 8 }), - else => unreachable, + switch (cg.ptr_size) { + .wasm32 => try cg.addMemArg(.i32_load, .{ .offset = operand.offset(), .alignment = 4 }), + .wasm64 => try cg.addMemArg(.i64_load, .{ .offset = operand.offset(), .alignment = 8 }), } } // Compare the null value with '0' - try func.addImm32(0); - try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); + try cg.addImm32(0); + try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode)); return .stack; } -fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const opt_ty = func.typeOf(ty_op.operand); - const payload_ty = func.typeOfIndex(inst); +fn airOptionalPayload(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const opt_ty = cg.typeOf(ty_op.operand); + const payload_ty = cg.typeOfIndex(inst); if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { - return func.finishAir(inst, .none, &.{ty_op.operand}); + return cg.finishAir(inst, .none, &.{ty_op.operand}); } const result = result: { - const operand = try func.resolveInst(ty_op.operand); - if (opt_ty.optionalReprIsPayload(zcu)) break :result func.reuseOperand(ty_op.operand, operand); + const operand = try cg.resolveInst(ty_op.operand); + if (opt_ty.optionalReprIsPayload(zcu)) break :result cg.reuseOperand(ty_op.operand, operand); - if (isByRef(payload_ty, pt, func.target.*)) { - break :result try func.buildPointerOffset(operand, 0, .new); + if (isByRef(payload_ty, zcu, cg.target)) { + break :result try cg.buildPointerOffset(operand, 0, .new); } - break :result try func.load(operand, payload_ty, 0); + break :result try cg.load(operand, payload_ty, 0); }; - return func.finishAir(inst, result, &.{ty_op.operand}); + return cg.finishAir(inst, result, &.{ty_op.operand}); } -fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand = try func.resolveInst(ty_op.operand); - const opt_ty = 
func.typeOf(ty_op.operand).childType(zcu); +fn airOptionalPayloadPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const operand = try cg.resolveInst(ty_op.operand); + const opt_ty = cg.typeOf(ty_op.operand).childType(zcu); const result = result: { const payload_ty = opt_ty.optionalChild(zcu); if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu) or opt_ty.optionalReprIsPayload(zcu)) { - break :result func.reuseOperand(ty_op.operand, operand); + break :result cg.reuseOperand(ty_op.operand, operand); } - break :result try func.buildPointerOffset(operand, 0, .new); + break :result try cg.buildPointerOffset(operand, 0, .new); }; - return func.finishAir(inst, result, &.{ty_op.operand}); + return cg.finishAir(inst, result, &.{ty_op.operand}); } -fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; +fn airOptionalPayloadPtrSet(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const pt = cg.pt; const zcu = pt.zcu; - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand = try func.resolveInst(ty_op.operand); - const opt_ty = func.typeOf(ty_op.operand).childType(zcu); + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const operand = try cg.resolveInst(ty_op.operand); + const opt_ty = cg.typeOf(ty_op.operand).childType(zcu); const payload_ty = opt_ty.optionalChild(zcu); if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { - return func.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()}); + return cg.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()}); } if (opt_ty.optionalReprIsPayload(zcu)) { - return func.finishAir(inst, operand, &.{ty_op.operand}); + return cg.finishAir(inst, operand, &.{ty_op.operand}); } const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse { - return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(pt)}); + return cg.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(pt)}); }; - try func.emitWValue(operand); - try func.addImm32(1); - try func.addMemArg(.i32_store8, .{ .offset = operand.offset() + offset, .alignment = 1 }); + try cg.emitWValue(operand); + try cg.addImm32(1); + try cg.addMemArg(.i32_store8, .{ .offset = operand.offset() + offset, .alignment = 1 }); - const result = try func.buildPointerOffset(operand, 0, .new); - return func.finishAir(inst, result, &.{ty_op.operand}); + const result = try cg.buildPointerOffset(operand, 0, .new); + return cg.finishAir(inst, result, &.{ty_op.operand}); } -fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const payload_ty = func.typeOf(ty_op.operand); - const pt = func.pt; +fn airWrapOptional(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const payload_ty = cg.typeOf(ty_op.operand); + const pt = cg.pt; const zcu = pt.zcu; const result = result: { if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { - const non_null_bit = try func.allocStack(Type.u1); - try func.emitWValue(non_null_bit); - try func.addImm32(1); - try func.addMemArg(.i32_store8, .{ .offset = non_null_bit.offset(), .alignment = 1 }); + const non_null_bit = try cg.allocStack(Type.u1); + 
try cg.emitWValue(non_null_bit); + try cg.addImm32(1); + try cg.addMemArg(.i32_store8, .{ .offset = non_null_bit.offset(), .alignment = 1 }); break :result non_null_bit; } - const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.typeOfIndex(inst); + const operand = try cg.resolveInst(ty_op.operand); + const op_ty = cg.typeOfIndex(inst); if (op_ty.optionalReprIsPayload(zcu)) { - break :result func.reuseOperand(ty_op.operand, operand); + break :result cg.reuseOperand(ty_op.operand, operand); } const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse { - return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(pt)}); + return cg.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(pt)}); }; // Create optional type, set the non-null bit, and store the operand inside the optional type - const result_ptr = try func.allocStack(op_ty); - try func.emitWValue(result_ptr); - try func.addImm32(1); - try func.addMemArg(.i32_store8, .{ .offset = result_ptr.offset() + offset, .alignment = 1 }); + const result_ptr = try cg.allocStack(op_ty); + try cg.emitWValue(result_ptr); + try cg.addImm32(1); + try cg.addMemArg(.i32_store8, .{ .offset = result_ptr.offset() + offset, .alignment = 1 }); - const payload_ptr = try func.buildPointerOffset(result_ptr, 0, .new); - try func.store(payload_ptr, operand, payload_ty, 0); + const payload_ptr = try cg.buildPointerOffset(result_ptr, 0, .new); + try cg.store(payload_ptr, operand, payload_ty, 0); break :result result_ptr; }; - return func.finishAir(inst, result, &.{ty_op.operand}); + return cg.finishAir(inst, result, &.{ty_op.operand}); } -fn airSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; +fn airSlice(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data; - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); - const slice_ty = func.typeOfIndex(inst); + const lhs = try cg.resolveInst(bin_op.lhs); + const rhs = try cg.resolveInst(bin_op.rhs); + const slice_ty = cg.typeOfIndex(inst); - const slice = try func.allocStack(slice_ty); - try func.store(slice, lhs, Type.usize, 0); - try func.store(slice, rhs, Type.usize, func.ptrSize()); + const slice = try cg.allocStack(slice_ty); + try cg.store(slice, lhs, Type.usize, 0); + try cg.store(slice, rhs, Type.usize, cg.ptrSize()); - return func.finishAir(inst, slice, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, slice, &.{ bin_op.lhs, bin_op.rhs }); } -fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airSliceLen(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand = try func.resolveInst(ty_op.operand); - return func.finishAir(inst, try func.sliceLen(operand), &.{ty_op.operand}); + const operand = try cg.resolveInst(ty_op.operand); + return cg.finishAir(inst, try cg.sliceLen(operand), &.{ty_op.operand}); } -fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; +fn 
airSliceElemVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const slice_ty = func.typeOf(bin_op.lhs); - const slice = try func.resolveInst(bin_op.lhs); - const index = try func.resolveInst(bin_op.rhs); + const slice_ty = cg.typeOf(bin_op.lhs); + const slice = try cg.resolveInst(bin_op.lhs); + const index = try cg.resolveInst(bin_op.rhs); const elem_ty = slice_ty.childType(zcu); const elem_size = elem_ty.abiSize(zcu); // load pointer onto stack - _ = try func.load(slice, Type.usize, 0); + _ = try cg.load(slice, Type.usize, 0); // calculate index into slice - try func.emitWValue(index); - try func.addImm32(@intCast(elem_size)); - try func.addTag(.i32_mul); - try func.addTag(.i32_add); + try cg.emitWValue(index); + try cg.addImm32(@intCast(elem_size)); + try cg.addTag(.i32_mul); + try cg.addTag(.i32_add); - const elem_result = if (isByRef(elem_ty, pt, func.target.*)) + const elem_result = if (isByRef(elem_ty, zcu, cg.target)) .stack else - try func.load(.stack, elem_ty, 0); + try cg.load(.stack, elem_ty, 0); - return func.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs }); } -fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; +fn airSliceElemPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data; const elem_ty = ty_pl.ty.toType().childType(zcu); const elem_size = elem_ty.abiSize(zcu); - const slice = try func.resolveInst(bin_op.lhs); - const index = try func.resolveInst(bin_op.rhs); + const slice = try cg.resolveInst(bin_op.lhs); + const index = try cg.resolveInst(bin_op.rhs); - _ = try func.load(slice, Type.usize, 0); + _ = try cg.load(slice, Type.usize, 0); // calculate index into slice - try func.emitWValue(index); - try func.addImm32(@intCast(elem_size)); - try func.addTag(.i32_mul); - try func.addTag(.i32_add); + try cg.emitWValue(index); + try cg.addImm32(@intCast(elem_size)); + try cg.addTag(.i32_mul); + try cg.addTag(.i32_add); - return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs }); } -fn airSlicePtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand = try func.resolveInst(ty_op.operand); - return func.finishAir(inst, try func.slicePtr(operand), &.{ty_op.operand}); +fn airSlicePtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const operand = try cg.resolveInst(ty_op.operand); + return cg.finishAir(inst, try cg.slicePtr(operand), &.{ty_op.operand}); } -fn slicePtr(func: *CodeGen, operand: WValue) InnerError!WValue { - const ptr = try func.load(operand, Type.usize, 0); - return ptr.toLocal(func, Type.usize); +fn slicePtr(cg: *CodeGen, operand: WValue) InnerError!WValue { + const ptr = try cg.load(operand, Type.usize, 0); + return ptr.toLocal(cg, Type.usize); } -fn sliceLen(func: *CodeGen, operand: WValue) InnerError!WValue { - const len = try func.load(operand, 
Type.usize, func.ptrSize()); - return len.toLocal(func, Type.usize); +fn sliceLen(cg: *CodeGen, operand: WValue) InnerError!WValue { + const len = try cg.load(operand, Type.usize, cg.ptrSize()); + return len.toLocal(cg, Type.usize); } -fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airTrunc(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand = try func.resolveInst(ty_op.operand); + const operand = try cg.resolveInst(ty_op.operand); const wanted_ty: Type = ty_op.ty.toType(); - const op_ty = func.typeOf(ty_op.operand); - const pt = func.pt; - const zcu = pt.zcu; + const op_ty = cg.typeOf(ty_op.operand); + const zcu = cg.pt.zcu; if (wanted_ty.zigTypeTag(zcu) == .vector or op_ty.zigTypeTag(zcu) == .vector) { - return func.fail("TODO: trunc for vectors", .{}); + return cg.fail("TODO: trunc for vectors", .{}); } const result = if (op_ty.bitSize(zcu) == wanted_ty.bitSize(zcu)) - func.reuseOperand(ty_op.operand, operand) + cg.reuseOperand(ty_op.operand, operand) else - try func.trunc(operand, wanted_ty, op_ty); + try cg.trunc(operand, wanted_ty, op_ty); - return func.finishAir(inst, result, &.{ty_op.operand}); + return cg.finishAir(inst, result, &.{ty_op.operand}); } /// Truncates a given operand to a given type, discarding any overflowed bits. /// NOTE: Resulting value is left on the stack. -fn trunc(func: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue { - const pt = func.pt; - const zcu = pt.zcu; +fn trunc(cg: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue { + const zcu = cg.pt.zcu; const given_bits = @as(u16, @intCast(given_ty.bitSize(zcu))); if (toWasmBits(given_bits) == null) { - return func.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits}); + return cg.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits}); } - var result = try func.intcast(operand, given_ty, wanted_ty); + var result = try cg.intcast(operand, given_ty, wanted_ty); const wanted_bits = @as(u16, @intCast(wanted_ty.bitSize(zcu))); const wasm_bits = toWasmBits(wanted_bits).?; if (wasm_bits != wanted_bits) { - result = try func.wrapOperand(result, wanted_ty); + result = try cg.wrapOperand(result, wanted_ty); } return result; } -fn airIntFromBool(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const operand = try func.resolveInst(un_op); - const result = func.reuseOperand(un_op, operand); +fn airIntFromBool(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const operand = try cg.resolveInst(un_op); + const result = cg.reuseOperand(un_op, operand); - return func.finishAir(inst, result, &.{un_op}); + return cg.finishAir(inst, result, &.{un_op}); } -fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airArrayToSlice(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand 
= try cg.resolveInst(ty_op.operand); + const array_ty = cg.typeOf(ty_op.operand).childType(zcu); const slice_ty = ty_op.ty.toType(); // create a slice on the stack - const slice_local = try func.allocStack(slice_ty); + const slice_local = try cg.allocStack(slice_ty); // store the array ptr in the slice if (array_ty.hasRuntimeBitsIgnoreComptime(zcu)) { - try func.store(slice_local, operand, Type.usize, 0); + try cg.store(slice_local, operand, Type.usize, 0); } // store the length of the array in the slice const array_len: u32 = @intCast(array_ty.arrayLen(zcu)); - try func.store(slice_local, .{ .imm32 = array_len }, Type.usize, func.ptrSize()); + try cg.store(slice_local, .{ .imm32 = array_len }, Type.usize, cg.ptrSize()); - return func.finishAir(inst, slice_local, &.{ty_op.operand}); + return cg.finishAir(inst, slice_local, &.{ty_op.operand}); } -fn airIntFromPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const operand = try func.resolveInst(un_op); - const ptr_ty = func.typeOf(un_op); +fn airIntFromPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const operand = try cg.resolveInst(un_op); + const ptr_ty = cg.typeOf(un_op); const result = if (ptr_ty.isSlice(zcu)) - try func.slicePtr(operand) + try cg.slicePtr(operand) else switch (operand) { // for stack offset, return a pointer to this offset. - .stack_offset => try func.buildPointerOffset(operand, 0, .new), - else => func.reuseOperand(un_op, operand), + .stack_offset => try cg.buildPointerOffset(operand, 0, .new), + else => cg.reuseOperand(un_op, operand), }; - return func.finishAir(inst, result, &.{un_op}); + return cg.finishAir(inst, result, &.{un_op}); } -fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; +fn airPtrElemVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const ptr_ty = func.typeOf(bin_op.lhs); - const ptr = try func.resolveInst(bin_op.lhs); - const index = try func.resolveInst(bin_op.rhs); + const ptr_ty = cg.typeOf(bin_op.lhs); + const ptr = try cg.resolveInst(bin_op.lhs); + const index = try cg.resolveInst(bin_op.rhs); const elem_ty = ptr_ty.childType(zcu); const elem_size = elem_ty.abiSize(zcu); // load pointer onto the stack if (ptr_ty.isSlice(zcu)) { - _ = try func.load(ptr, Type.usize, 0); + _ = try cg.load(ptr, Type.usize, 0); } else { - try func.lowerToStack(ptr); + try cg.lowerToStack(ptr); } // calculate index into slice - try func.emitWValue(index); - try func.addImm32(@intCast(elem_size)); - try func.addTag(.i32_mul); - try func.addTag(.i32_add); + try cg.emitWValue(index); + try cg.addImm32(@intCast(elem_size)); + try cg.addTag(.i32_mul); + try cg.addTag(.i32_add); - const elem_result = if (isByRef(elem_ty, pt, func.target.*)) + const elem_result = if (isByRef(elem_ty, zcu, cg.target)) .stack else - try func.load(.stack, elem_ty, 0); + try cg.load(.stack, elem_ty, 0); - return func.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs }); } -fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - 
const zcu = pt.zcu; - const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; +fn airPtrElemPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data; - const ptr_ty = func.typeOf(bin_op.lhs); + const ptr_ty = cg.typeOf(bin_op.lhs); const elem_ty = ty_pl.ty.toType().childType(zcu); const elem_size = elem_ty.abiSize(zcu); - const ptr = try func.resolveInst(bin_op.lhs); - const index = try func.resolveInst(bin_op.rhs); + const ptr = try cg.resolveInst(bin_op.lhs); + const index = try cg.resolveInst(bin_op.rhs); // load pointer onto the stack if (ptr_ty.isSlice(zcu)) { - _ = try func.load(ptr, Type.usize, 0); + _ = try cg.load(ptr, Type.usize, 0); } else { - try func.lowerToStack(ptr); + try cg.lowerToStack(ptr); } // calculate index into ptr - try func.emitWValue(index); - try func.addImm32(@intCast(elem_size)); - try func.addTag(.i32_mul); - try func.addTag(.i32_add); + try cg.emitWValue(index); + try cg.addImm32(@intCast(elem_size)); + try cg.addTag(.i32_mul); + try cg.addTag(.i32_add); - return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs }); } -fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; +fn airPtrBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { + const zcu = cg.pt.zcu; + const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data; - const ptr = try func.resolveInst(bin_op.lhs); - const offset = try func.resolveInst(bin_op.rhs); - const ptr_ty = func.typeOf(bin_op.lhs); + const ptr = try cg.resolveInst(bin_op.lhs); + const offset = try cg.resolveInst(bin_op.rhs); + const ptr_ty = cg.typeOf(bin_op.lhs); const pointee_ty = switch (ptr_ty.ptrSize(zcu)) { .One => ptr_ty.childType(zcu).childType(zcu), // ptr to array, so get array element type else => ptr_ty.childType(zcu), }; - const valtype = typeToValtype(Type.usize, pt, func.target.*); + const valtype = typeToValtype(Type.usize, zcu, cg.target); const mul_opcode = buildOpcode(.{ .valtype1 = valtype, .op = .mul }); const bin_opcode = buildOpcode(.{ .valtype1 = valtype, .op = op }); - try func.lowerToStack(ptr); - try func.emitWValue(offset); - try func.addImm32(@intCast(pointee_ty.abiSize(zcu))); - try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode)); - try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode)); + try cg.lowerToStack(ptr); + try cg.emitWValue(offset); + try cg.addImm32(@intCast(pointee_ty.abiSize(zcu))); + try cg.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode)); + try cg.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode)); - return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs }); } -fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; +fn airMemset(cg: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void { + const zcu = cg.pt.zcu; if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { // TODO if the value is undef, don't 
lower this instruction } - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const ptr = try func.resolveInst(bin_op.lhs); - const ptr_ty = func.typeOf(bin_op.lhs); - const value = try func.resolveInst(bin_op.rhs); + const ptr = try cg.resolveInst(bin_op.lhs); + const ptr_ty = cg.typeOf(bin_op.lhs); + const value = try cg.resolveInst(bin_op.rhs); const len = switch (ptr_ty.ptrSize(zcu)) { - .Slice => try func.sliceLen(ptr), + .Slice => try cg.sliceLen(ptr), .One => @as(WValue, .{ .imm32 = @as(u32, @intCast(ptr_ty.childType(zcu).arrayLen(zcu))) }), .C, .Many => unreachable, }; @@ -4905,27 +4766,27 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void else ptr_ty.childType(zcu); - const dst_ptr = try func.sliceOrArrayPtr(ptr, ptr_ty); - try func.memset(elem_ty, dst_ptr, len, value); + const dst_ptr = try cg.sliceOrArrayPtr(ptr, ptr_ty); + try cg.memset(elem_ty, dst_ptr, len, value); - return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); } /// Sets a region of memory at `ptr` to the value of `value` /// When the user has enabled the bulk_memory feature, we lower /// this to wasm's memset instruction. When the feature is not present, /// we implement it manually. -fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void { - const pt = func.pt; - const abi_size = @as(u32, @intCast(elem_ty.abiSize(pt.zcu))); +fn memset(cg: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void { + const zcu = cg.pt.zcu; + const abi_size = @as(u32, @intCast(elem_ty.abiSize(zcu))); // When bulk_memory is enabled, we lower it to wasm's memset instruction. // If not, we lower it ourselves. 
- if (std.Target.wasm.featureSetHas(func.target.cpu.features, .bulk_memory) and abi_size == 1) { - try func.lowerToStack(ptr); - try func.emitWValue(value); - try func.emitWValue(len); - try func.addExtended(.memory_fill); + if (std.Target.wasm.featureSetHas(cg.target.cpu.features, .bulk_memory) and abi_size == 1) { + try cg.lowerToStack(ptr); + try cg.emitWValue(value); + try cg.emitWValue(len); + try cg.addExtended(.memory_fill); return; } @@ -4933,100 +4794,95 @@ fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue .imm32 => |val| .{ .imm32 = val * abi_size }, .imm64 => |val| .{ .imm64 = val * abi_size }, else => if (abi_size != 1) blk: { - const new_len = try func.ensureAllocLocal(Type.usize); - try func.emitWValue(len); - switch (func.arch()) { + const new_len = try cg.ensureAllocLocal(Type.usize); + try cg.emitWValue(len); + switch (cg.ptr_size) { .wasm32 => { - try func.emitWValue(.{ .imm32 = abi_size }); - try func.addTag(.i32_mul); + try cg.emitWValue(.{ .imm32 = abi_size }); + try cg.addTag(.i32_mul); }, .wasm64 => { - try func.emitWValue(.{ .imm64 = abi_size }); - try func.addTag(.i64_mul); + try cg.emitWValue(.{ .imm64 = abi_size }); + try cg.addTag(.i64_mul); }, - else => unreachable, } - try func.addLabel(.local_set, new_len.local.value); + try cg.addLocal(.local_set, new_len.local.value); break :blk new_len; } else len, }; - var end_ptr = try func.allocLocal(Type.usize); - defer end_ptr.free(func); - var new_ptr = try func.buildPointerOffset(ptr, 0, .new); - defer new_ptr.free(func); + var end_ptr = try cg.allocLocal(Type.usize); + defer end_ptr.free(cg); + var new_ptr = try cg.buildPointerOffset(ptr, 0, .new); + defer new_ptr.free(cg); // get the loop conditional: if current pointer address equals final pointer's address - try func.lowerToStack(ptr); - try func.emitWValue(final_len); - switch (func.arch()) { - .wasm32 => try func.addTag(.i32_add), - .wasm64 => try func.addTag(.i64_add), - else => unreachable, + try cg.lowerToStack(ptr); + try cg.emitWValue(final_len); + switch (cg.ptr_size) { + .wasm32 => try cg.addTag(.i32_add), + .wasm64 => try cg.addTag(.i64_add), } - try func.addLabel(.local_set, end_ptr.local.value); + try cg.addLocal(.local_set, end_ptr.local.value); // outer block to jump to when loop is done - try func.startBlock(.block, wasm.block_empty); - try func.startBlock(.loop, wasm.block_empty); + try cg.startBlock(.block, .empty); + try cg.startBlock(.loop, .empty); // check for condition for loop end - try func.emitWValue(new_ptr); - try func.emitWValue(end_ptr); - switch (func.arch()) { - .wasm32 => try func.addTag(.i32_eq), - .wasm64 => try func.addTag(.i64_eq), - else => unreachable, + try cg.emitWValue(new_ptr); + try cg.emitWValue(end_ptr); + switch (cg.ptr_size) { + .wasm32 => try cg.addTag(.i32_eq), + .wasm64 => try cg.addTag(.i64_eq), } - try func.addLabel(.br_if, 1); // jump out of loop into outer block (finished) + try cg.addLabel(.br_if, 1); // jump out of loop into outer block (finished) // store the value at the current position of the pointer - try func.store(new_ptr, value, elem_ty, 0); + try cg.store(new_ptr, value, elem_ty, 0); // move the pointer to the next element - try func.emitWValue(new_ptr); - switch (func.arch()) { + try cg.emitWValue(new_ptr); + switch (cg.ptr_size) { .wasm32 => { - try func.emitWValue(.{ .imm32 = abi_size }); - try func.addTag(.i32_add); + try cg.emitWValue(.{ .imm32 = abi_size }); + try cg.addTag(.i32_add); }, .wasm64 => { - try func.emitWValue(.{ .imm64 = abi_size }); - try 
func.addTag(.i64_add); + try cg.emitWValue(.{ .imm64 = abi_size }); + try cg.addTag(.i64_add); }, - else => unreachable, } - try func.addLabel(.local_set, new_ptr.local.value); + try cg.addLocal(.local_set, new_ptr.local.value); // end of loop - try func.addLabel(.br, 0); // jump to start of loop - try func.endBlock(); - try func.endBlock(); + try cg.addLabel(.br, 0); // jump to start of loop + try cg.endBlock(); + try cg.endBlock(); } -fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; +fn airArrayElemVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const array_ty = func.typeOf(bin_op.lhs); - const array = try func.resolveInst(bin_op.lhs); - const index = try func.resolveInst(bin_op.rhs); + const array_ty = cg.typeOf(bin_op.lhs); + const array = try cg.resolveInst(bin_op.lhs); + const index = try cg.resolveInst(bin_op.rhs); const elem_ty = array_ty.childType(zcu); const elem_size = elem_ty.abiSize(zcu); - if (isByRef(array_ty, pt, func.target.*)) { - try func.lowerToStack(array); - try func.emitWValue(index); - try func.addImm32(@intCast(elem_size)); - try func.addTag(.i32_mul); - try func.addTag(.i32_add); + if (isByRef(array_ty, zcu, cg.target)) { + try cg.lowerToStack(array); + try cg.emitWValue(index); + try cg.addImm32(@intCast(elem_size)); + try cg.addTag(.i32_mul); + try cg.addTag(.i32_add); } else { - std.debug.assert(array_ty.zigTypeTag(zcu) == .vector); + assert(array_ty.zigTypeTag(zcu) == .vector); switch (index) { inline .imm32, .imm64 => |lane| { - const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(zcu)) { + const opcode: std.wasm.SimdOpcode = switch (elem_ty.bitSize(zcu)) { 8 => if (elem_ty.isSignedInt(zcu)) .i8x16_extract_lane_s else .i8x16_extract_lane_u, 16 => if (elem_ty.isSignedInt(zcu)) .i16x8_extract_lane_s else .i16x8_extract_lane_u, 32 => if (elem_ty.isInt(zcu)) .i32x4_extract_lane else .f32x4_extract_lane, @@ -5034,174 +4890,185 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => unreachable, }; - var operands = [_]u32{ std.wasm.simdOpcode(opcode), @as(u8, @intCast(lane)) }; + var operands = [_]u32{ @intFromEnum(opcode), @as(u8, @intCast(lane)) }; - try func.emitWValue(array); + try cg.emitWValue(array); - const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); - try func.mir_extra.appendSlice(func.gpa, &operands); - try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); + const extra_index = cg.extraLen(); + try cg.mir_extra.appendSlice(cg.gpa, &operands); + try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); - return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs }); }, else => { - const stack_vec = try func.allocStack(array_ty); - try func.store(stack_vec, array, array_ty, 0); + const stack_vec = try cg.allocStack(array_ty); + try cg.store(stack_vec, array, array_ty, 0); // Is a non-unrolled vector (v128) - try func.lowerToStack(stack_vec); - try func.emitWValue(index); - try func.addImm32(@intCast(elem_size)); - try func.addTag(.i32_mul); - try func.addTag(.i32_add); + try cg.lowerToStack(stack_vec); + try cg.emitWValue(index); + try cg.addImm32(@intCast(elem_size)); + try cg.addTag(.i32_mul); + try cg.addTag(.i32_add); }, } } - 
const elem_result = if (isByRef(elem_ty, pt, func.target.*)) + const elem_result = if (isByRef(elem_ty, zcu, cg.target)) .stack else - try func.load(.stack, elem_ty, 0); + try cg.load(.stack, elem_ty, 0); - return func.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs }); } -fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airIntFromFloat(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.typeOf(ty_op.operand); - const op_bits = op_ty.floatBits(func.target.*); + const operand = try cg.resolveInst(ty_op.operand); + const op_ty = cg.typeOf(ty_op.operand); + const op_bits = op_ty.floatBits(cg.target.*); - const dest_ty = func.typeOfIndex(inst); + const dest_ty = cg.typeOfIndex(inst); const dest_info = dest_ty.intInfo(zcu); if (dest_info.bits > 128) { - return func.fail("TODO: intFromFloat for integers/floats with bitsize {}", .{dest_info.bits}); + return cg.fail("TODO: intFromFloat for integers/floats with bitsize {}", .{dest_info.bits}); } if ((op_bits != 32 and op_bits != 64) or dest_info.bits > 64) { - const dest_bitsize = if (dest_info.bits <= 16) 16 else std.math.ceilPowerOfTwoAssert(u16, dest_info.bits); - - var fn_name_buf: [16]u8 = undefined; - const fn_name = std.fmt.bufPrint(&fn_name_buf, "__fix{s}{s}f{s}i", .{ - switch (dest_info.signedness) { - .signed => "", - .unsigned => "uns", + const dest_bitsize = if (dest_info.bits <= 32) 32 else std.math.ceilPowerOfTwoAssert(u16, dest_info.bits); + + const intrinsic = switch (dest_info.signedness) { + inline .signed, .unsigned => |ct_s| switch (op_bits) { + inline 16, 32, 64, 80, 128 => |ct_op_bits| switch (dest_bitsize) { + inline 32, 64, 128 => |ct_dest_bits| @field( + Mir.Intrinsic, + "__fix" ++ switch (ct_s) { + .signed => "", + .unsigned => "uns", + } ++ + compilerRtFloatAbbrev(ct_op_bits) ++ "f" ++ + compilerRtIntAbbrev(ct_dest_bits) ++ "i", + ), + else => unreachable, + }, + else => unreachable, }, - target_util.compilerRtFloatAbbrev(op_bits), - target_util.compilerRtIntAbbrev(dest_bitsize), - }) catch unreachable; - - const result = try func.callIntrinsic(fn_name, &.{op_ty.ip_index}, dest_ty, &.{operand}); - return func.finishAir(inst, result, &.{ty_op.operand}); + }; + const result = try cg.callIntrinsic(intrinsic, &.{op_ty.ip_index}, dest_ty, &.{operand}); + return cg.finishAir(inst, result, &.{ty_op.operand}); } - try func.emitWValue(operand); + try cg.emitWValue(operand); const op = buildOpcode(.{ .op = .trunc, - .valtype1 = typeToValtype(dest_ty, pt, func.target.*), - .valtype2 = typeToValtype(op_ty, pt, func.target.*), + .valtype1 = typeToValtype(dest_ty, zcu, cg.target), + .valtype2 = typeToValtype(op_ty, zcu, cg.target), .signedness = dest_info.signedness, }); - try func.addTag(Mir.Inst.Tag.fromOpcode(op)); - const result = try func.wrapOperand(.stack, dest_ty); - return func.finishAir(inst, result, &.{ty_op.operand}); + try cg.addTag(Mir.Inst.Tag.fromOpcode(op)); + const result = try cg.wrapOperand(.stack, dest_ty); + return cg.finishAir(inst, result, &.{ty_op.operand}); } -fn airFloatFromInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ty_op = 
func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airFloatFromInt(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.typeOf(ty_op.operand); + const operand = try cg.resolveInst(ty_op.operand); + const op_ty = cg.typeOf(ty_op.operand); const op_info = op_ty.intInfo(zcu); - const dest_ty = func.typeOfIndex(inst); - const dest_bits = dest_ty.floatBits(func.target.*); + const dest_ty = cg.typeOfIndex(inst); + const dest_bits = dest_ty.floatBits(cg.target.*); if (op_info.bits > 128) { - return func.fail("TODO: floatFromInt for integers/floats with bitsize {d} bits", .{op_info.bits}); + return cg.fail("TODO: floatFromInt for integers/floats with bitsize {d} bits", .{op_info.bits}); } if (op_info.bits > 64 or (dest_bits > 64 or dest_bits < 32)) { - const op_bitsize = if (op_info.bits <= 16) 16 else std.math.ceilPowerOfTwoAssert(u16, op_info.bits); - - var fn_name_buf: [16]u8 = undefined; - const fn_name = std.fmt.bufPrint(&fn_name_buf, "__float{s}{s}i{s}f", .{ - switch (op_info.signedness) { - .signed => "", - .unsigned => "un", + const op_bitsize = if (op_info.bits <= 32) 32 else std.math.ceilPowerOfTwoAssert(u16, op_info.bits); + + const intrinsic = switch (op_info.signedness) { + inline .signed, .unsigned => |ct_s| switch (op_bitsize) { + inline 32, 64, 128 => |ct_int_bits| switch (dest_bits) { + inline 16, 32, 64, 80, 128 => |ct_float_bits| @field( + Mir.Intrinsic, + "__float" ++ switch (ct_s) { + .signed => "", + .unsigned => "un", + } ++ + compilerRtIntAbbrev(ct_int_bits) ++ "i" ++ + compilerRtFloatAbbrev(ct_float_bits) ++ "f", + ), + else => unreachable, + }, + else => unreachable, }, - target_util.compilerRtIntAbbrev(op_bitsize), - target_util.compilerRtFloatAbbrev(dest_bits), - }) catch unreachable; + }; - const result = try func.callIntrinsic(fn_name, &.{op_ty.ip_index}, dest_ty, &.{operand}); - return func.finishAir(inst, result, &.{ty_op.operand}); + const result = try cg.callIntrinsic(intrinsic, &.{op_ty.ip_index}, dest_ty, &.{operand}); + return cg.finishAir(inst, result, &.{ty_op.operand}); } - try func.emitWValue(operand); + try cg.emitWValue(operand); const op = buildOpcode(.{ .op = .convert, - .valtype1 = typeToValtype(dest_ty, pt, func.target.*), - .valtype2 = typeToValtype(op_ty, pt, func.target.*), + .valtype1 = typeToValtype(dest_ty, zcu, cg.target), + .valtype2 = typeToValtype(op_ty, zcu, cg.target), .signedness = op_info.signedness, }); - try func.addTag(Mir.Inst.Tag.fromOpcode(op)); + try cg.addTag(Mir.Inst.Tag.fromOpcode(op)); - return func.finishAir(inst, .stack, &.{ty_op.operand}); + return cg.finishAir(inst, .stack, &.{ty_op.operand}); } -fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand = try func.resolveInst(ty_op.operand); - const ty = func.typeOfIndex(inst); +fn airSplat(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const operand = try cg.resolveInst(ty_op.operand); + const ty = cg.typeOfIndex(inst); const elem_ty = ty.childType(zcu); - if (determineSimdStoreStrategy(ty, zcu, func.target.*) == .direct) blk: { + if (determineSimdStoreStrategy(ty, zcu, cg.target) == .direct) blk: { switch (operand) { // when the 
operand lives in the linear memory section, we can directly // load and splat the value at once. Meaning we do not first have to load // the scalar value onto the stack. - .stack_offset, .memory, .memory_offset => { + .stack_offset, .nav_ref, .uav_ref => { const opcode = switch (elem_ty.bitSize(zcu)) { - 8 => std.wasm.simdOpcode(.v128_load8_splat), - 16 => std.wasm.simdOpcode(.v128_load16_splat), - 32 => std.wasm.simdOpcode(.v128_load32_splat), - 64 => std.wasm.simdOpcode(.v128_load64_splat), + 8 => @intFromEnum(std.wasm.SimdOpcode.v128_load8_splat), + 16 => @intFromEnum(std.wasm.SimdOpcode.v128_load16_splat), + 32 => @intFromEnum(std.wasm.SimdOpcode.v128_load32_splat), + 64 => @intFromEnum(std.wasm.SimdOpcode.v128_load64_splat), else => break :blk, // Cannot make use of simd-instructions }; - try func.emitWValue(operand); - // TODO: Add helper functions for simd opcodes - const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); + try cg.emitWValue(operand); + const extra_index: u32 = cg.extraLen(); // stores as := opcode, offset, alignment (opcode::memarg) - try func.mir_extra.appendSlice(func.gpa, &[_]u32{ + try cg.mir_extra.appendSlice(cg.gpa, &[_]u32{ opcode, operand.offset(), @intCast(elem_ty.abiAlignment(zcu).toByteUnits().?), }); - try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); - return func.finishAir(inst, .stack, &.{ty_op.operand}); + try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); + return cg.finishAir(inst, .stack, &.{ty_op.operand}); }, .local => { const opcode = switch (elem_ty.bitSize(zcu)) { - 8 => std.wasm.simdOpcode(.i8x16_splat), - 16 => std.wasm.simdOpcode(.i16x8_splat), - 32 => if (elem_ty.isInt(zcu)) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat), - 64 => if (elem_ty.isInt(zcu)) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat), + 8 => @intFromEnum(std.wasm.SimdOpcode.i8x16_splat), + 16 => @intFromEnum(std.wasm.SimdOpcode.i16x8_splat), + 32 => if (elem_ty.isInt(zcu)) @intFromEnum(std.wasm.SimdOpcode.i32x4_splat) else @intFromEnum(std.wasm.SimdOpcode.f32x4_splat), + 64 => if (elem_ty.isInt(zcu)) @intFromEnum(std.wasm.SimdOpcode.i64x2_splat) else @intFromEnum(std.wasm.SimdOpcode.f64x2_splat), else => break :blk, // Cannot make use of simd-instructions }; - try func.emitWValue(operand); - const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); - try func.mir_extra.append(func.gpa, opcode); - try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); - return func.finishAir(inst, .stack, &.{ty_op.operand}); + try cg.emitWValue(operand); + const extra_index = cg.extraLen(); + try cg.mir_extra.append(cg.gpa, opcode); + try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); + return cg.finishAir(inst, .stack, &.{ty_op.operand}); }, else => unreachable, } @@ -5209,38 +5076,38 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const elem_size = elem_ty.bitSize(zcu); const vector_len = @as(usize, @intCast(ty.vectorLen(zcu))); if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) { - return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size}); + return cg.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size}); } - const result = try func.allocStack(ty); + const result = try cg.allocStack(ty); const elem_byte_size = @as(u32, @intCast(elem_ty.abiSize(zcu))); var index: usize = 0; var offset: u32 = 
0; while (index < vector_len) : (index += 1) { - try func.store(result, operand, elem_ty, offset); + try cg.store(result, operand, elem_ty, offset); offset += elem_byte_size; } - return func.finishAir(inst, result, &.{ty_op.operand}); + return cg.finishAir(inst, result, &.{ty_op.operand}); } -fn airSelect(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const operand = try func.resolveInst(pl_op.operand); +fn airSelect(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; + const operand = try cg.resolveInst(pl_op.operand); _ = operand; - return func.fail("TODO: Implement wasm airSelect", .{}); + return cg.fail("TODO: Implement wasm airSelect", .{}); } -fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; +fn airShuffle(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const pt = cg.pt; const zcu = pt.zcu; - const inst_ty = func.typeOfIndex(inst); - const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = func.air.extraData(Air.Shuffle, ty_pl.payload).data; + const inst_ty = cg.typeOfIndex(inst); + const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = cg.air.extraData(Air.Shuffle, ty_pl.payload).data; - const a = try func.resolveInst(extra.a); - const b = try func.resolveInst(extra.b); + const a = try cg.resolveInst(extra.a); + const b = try cg.resolveInst(extra.b); const mask = Value.fromInterned(extra.mask); const mask_len = extra.mask_len; @@ -5248,26 +5115,26 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const elem_size = child_ty.abiSize(zcu); // TODO: One of them could be by ref; handle in loop - if (isByRef(func.typeOf(extra.a), pt, func.target.*) or isByRef(inst_ty, pt, func.target.*)) { - const result = try func.allocStack(inst_ty); + if (isByRef(cg.typeOf(extra.a), zcu, cg.target) or isByRef(inst_ty, zcu, cg.target)) { + const result = try cg.allocStack(inst_ty); for (0..mask_len) |index| { const value = (try mask.elemValue(pt, index)).toSignedInt(zcu); - try func.emitWValue(result); + try cg.emitWValue(result); const loaded = if (value >= 0) - try func.load(a, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * value))) + try cg.load(a, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * value))) else - try func.load(b, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * ~value))); + try cg.load(b, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * ~value))); - try func.store(.stack, loaded, child_ty, result.stack_offset.value + @as(u32, @intCast(elem_size)) * @as(u32, @intCast(index))); + try cg.store(.stack, loaded, child_ty, result.stack_offset.value + @as(u32, @intCast(elem_size)) * @as(u32, @intCast(index))); } - return func.finishAir(inst, result, &.{ extra.a, extra.b }); + return cg.finishAir(inst, result, &.{ extra.a, extra.b }); } else { var operands = [_]u32{ - std.wasm.simdOpcode(.i8x16_shuffle), + @intFromEnum(std.wasm.SimdOpcode.i8x16_shuffle), } ++ [1]u32{undefined} ** 4; var lanes = mem.asBytes(operands[1..]); @@ -5283,91 +5150,91 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } } - try func.emitWValue(a); - try func.emitWValue(b); + try cg.emitWValue(a); + try cg.emitWValue(b); - const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); - try func.mir_extra.appendSlice(func.gpa, &operands); - try 
func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); + const extra_index = cg.extraLen(); + try cg.mir_extra.appendSlice(cg.gpa, &operands); + try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); - return func.finishAir(inst, .stack, &.{ extra.a, extra.b }); + return cg.finishAir(inst, .stack, &.{ extra.a, extra.b }); } } -fn airReduce(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const reduce = func.air.instructions.items(.data)[@intFromEnum(inst)].reduce; - const operand = try func.resolveInst(reduce.operand); +fn airReduce(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const reduce = cg.air.instructions.items(.data)[@intFromEnum(inst)].reduce; + const operand = try cg.resolveInst(reduce.operand); _ = operand; - return func.fail("TODO: Implement wasm airReduce", .{}); + return cg.fail("TODO: Implement wasm airReduce", .{}); } -fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; +fn airAggregateInit(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const pt = cg.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; - const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const result_ty = func.typeOfIndex(inst); + const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const result_ty = cg.typeOfIndex(inst); const len = @as(usize, @intCast(result_ty.arrayLen(zcu))); - const elements = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[ty_pl.payload..][0..len])); + const elements = @as([]const Air.Inst.Ref, @ptrCast(cg.air.extra[ty_pl.payload..][0..len])); const result: WValue = result_value: { switch (result_ty.zigTypeTag(zcu)) { .array => { - const result = try func.allocStack(result_ty); + const result = try cg.allocStack(result_ty); const elem_ty = result_ty.childType(zcu); const elem_size = @as(u32, @intCast(elem_ty.abiSize(zcu))); const sentinel = if (result_ty.sentinel(zcu)) |sent| blk: { - break :blk try func.lowerConstant(sent, elem_ty); + break :blk try cg.lowerConstant(sent, elem_ty); } else null; // When the element type is by reference, we must copy the entire // value. It is therefore safer to move the offset pointer and store // each value individually, instead of using store offsets. - if (isByRef(elem_ty, pt, func.target.*)) { + if (isByRef(elem_ty, zcu, cg.target)) { // copy stack pointer into a temporary local, which is // moved for each element to store each value in the right position. - const offset = try func.buildPointerOffset(result, 0, .new); + const offset = try cg.buildPointerOffset(result, 0, .new); for (elements, 0..) 
|elem, elem_index| { - const elem_val = try func.resolveInst(elem); - try func.store(offset, elem_val, elem_ty, 0); + const elem_val = try cg.resolveInst(elem); + try cg.store(offset, elem_val, elem_ty, 0); if (elem_index < elements.len - 1 and sentinel == null) { - _ = try func.buildPointerOffset(offset, elem_size, .modify); + _ = try cg.buildPointerOffset(offset, elem_size, .modify); } } if (sentinel) |sent| { - try func.store(offset, sent, elem_ty, 0); + try cg.store(offset, sent, elem_ty, 0); } } else { var offset: u32 = 0; for (elements) |elem| { - const elem_val = try func.resolveInst(elem); - try func.store(result, elem_val, elem_ty, offset); + const elem_val = try cg.resolveInst(elem); + try cg.store(result, elem_val, elem_ty, offset); offset += elem_size; } if (sentinel) |sent| { - try func.store(result, sent, elem_ty, offset); + try cg.store(result, sent, elem_ty, offset); } } break :result_value result; }, .@"struct" => switch (result_ty.containerLayout(zcu)) { .@"packed" => { - if (isByRef(result_ty, pt, func.target.*)) { - return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{}); + if (isByRef(result_ty, zcu, cg.target)) { + return cg.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{}); } const packed_struct = zcu.typeToPackedStruct(result_ty).?; const field_types = packed_struct.field_types; const backing_type = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)); // ensure the result is zero'd - const result = try func.allocLocal(backing_type); + const result = try cg.allocLocal(backing_type); if (backing_type.bitSize(zcu) <= 32) - try func.addImm32(0) + try cg.addImm32(0) else - try func.addImm64(0); - try func.addLabel(.local_set, result.local.value); + try cg.addImm64(0); + try cg.addLocal(.local_set, result.local.value); var current_bit: u16 = 0; for (elements, 0..) |elem, elem_index| { @@ -5379,46 +5246,46 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else .{ .imm64 = current_bit }; - const value = try func.resolveInst(elem); + const value = try cg.resolveInst(elem); const value_bit_size: u16 = @intCast(field_ty.bitSize(zcu)); const int_ty = try pt.intType(.unsigned, value_bit_size); // load our current result on stack so we can perform all transformations // using only stack values. Saving the cost of loads and stores. 
- try func.emitWValue(result); - const bitcasted = try func.bitcast(int_ty, field_ty, value); - const extended_val = try func.intcast(bitcasted, int_ty, backing_type); + try cg.emitWValue(result); + const bitcasted = try cg.bitcast(int_ty, field_ty, value); + const extended_val = try cg.intcast(bitcasted, int_ty, backing_type); // no need to shift any values when the current offset is 0 const shifted = if (current_bit != 0) shifted: { - break :shifted try func.binOp(extended_val, shift_val, backing_type, .shl); + break :shifted try cg.binOp(extended_val, shift_val, backing_type, .shl); } else extended_val; // we ignore the result as we keep it on the stack to assign it directly to `result` - _ = try func.binOp(.stack, shifted, backing_type, .@"or"); - try func.addLabel(.local_set, result.local.value); + _ = try cg.binOp(.stack, shifted, backing_type, .@"or"); + try cg.addLocal(.local_set, result.local.value); current_bit += value_bit_size; } break :result_value result; }, else => { - const result = try func.allocStack(result_ty); - const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset + const result = try cg.allocStack(result_ty); + const offset = try cg.buildPointerOffset(result, 0, .new); // pointer to offset var prev_field_offset: u64 = 0; for (elements, 0..) |elem, elem_index| { if (try result_ty.structFieldValueComptime(pt, elem_index) != null) continue; const elem_ty = result_ty.fieldType(elem_index, zcu); const field_offset = result_ty.structFieldOffset(elem_index, zcu); - _ = try func.buildPointerOffset(offset, @intCast(field_offset - prev_field_offset), .modify); + _ = try cg.buildPointerOffset(offset, @intCast(field_offset - prev_field_offset), .modify); prev_field_offset = field_offset; - const value = try func.resolveInst(elem); - try func.store(offset, value, elem_ty, 0); + const value = try cg.resolveInst(elem); + try cg.store(offset, value, elem_ty, 0); } break :result_value result; }, }, - .vector => return func.fail("TODO: Wasm backend: implement airAggregateInit for vectors", .{}), + .vector => return cg.fail("TODO: Wasm backend: implement airAggregateInit for vectors", .{}), else => unreachable, } }; @@ -5426,22 +5293,22 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { if (elements.len <= Liveness.bpi - 1) { var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1); @memcpy(buf[0..elements.len], elements); - return func.finishAir(inst, result, &buf); + return cg.finishAir(inst, result, &buf); } - var bt = try func.iterateBigTomb(inst, elements.len); + var bt = try cg.iterateBigTomb(inst, elements.len); for (elements) |arg| bt.feed(arg); return bt.finishAir(result); } -fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; +fn airUnionInit(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const pt = cg.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; - const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data; + const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = cg.air.extraData(Air.UnionInit, ty_pl.payload).data; const result = result: { - const union_ty = func.typeOfIndex(inst); + const union_ty = cg.typeOfIndex(inst); const layout = union_ty.unionGetLayout(zcu); const union_obj = zcu.typeToUnion(union_ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]); @@ -5451,34 +5318,34 @@ fn airUnionInit(func: 
*CodeGen, inst: Air.Inst.Index) InnerError!void { const tag_ty = union_ty.unionTagTypeHypothetical(zcu); const enum_field_index = tag_ty.enumFieldIndex(field_name, zcu).?; const tag_val = try pt.enumValueFieldIndex(tag_ty, enum_field_index); - break :blk try func.lowerConstant(tag_val, tag_ty); + break :blk try cg.lowerConstant(tag_val, tag_ty); }; if (layout.payload_size == 0) { if (layout.tag_size == 0) { break :result .none; } - assert(!isByRef(union_ty, pt, func.target.*)); + assert(!isByRef(union_ty, zcu, cg.target)); break :result tag_int; } - if (isByRef(union_ty, pt, func.target.*)) { - const result_ptr = try func.allocStack(union_ty); - const payload = try func.resolveInst(extra.init); + if (isByRef(union_ty, zcu, cg.target)) { + const result_ptr = try cg.allocStack(union_ty); + const payload = try cg.resolveInst(extra.init); if (layout.tag_align.compare(.gte, layout.payload_align)) { - if (isByRef(field_ty, pt, func.target.*)) { - const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new); - try func.store(payload_ptr, payload, field_ty, 0); + if (isByRef(field_ty, zcu, cg.target)) { + const payload_ptr = try cg.buildPointerOffset(result_ptr, layout.tag_size, .new); + try cg.store(payload_ptr, payload, field_ty, 0); } else { - try func.store(result_ptr, payload, field_ty, @intCast(layout.tag_size)); + try cg.store(result_ptr, payload, field_ty, @intCast(layout.tag_size)); } if (layout.tag_size > 0) { - try func.store(result_ptr, tag_int, Type.fromInterned(union_obj.enum_tag_ty), 0); + try cg.store(result_ptr, tag_int, Type.fromInterned(union_obj.enum_tag_ty), 0); } } else { - try func.store(result_ptr, payload, field_ty, 0); + try cg.store(result_ptr, payload, field_ty, 0); if (layout.tag_size > 0) { - try func.store( + try cg.store( result_ptr, tag_int, Type.fromInterned(union_obj.enum_tag_ty), @@ -5488,138 +5355,136 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } break :result result_ptr; } else { - const operand = try func.resolveInst(extra.init); + const operand = try cg.resolveInst(extra.init); const union_int_type = try pt.intType(.unsigned, @as(u16, @intCast(union_ty.bitSize(zcu)))); if (field_ty.zigTypeTag(zcu) == .float) { const int_type = try pt.intType(.unsigned, @intCast(field_ty.bitSize(zcu))); - const bitcasted = try func.bitcast(field_ty, int_type, operand); - break :result try func.trunc(bitcasted, int_type, union_int_type); + const bitcasted = try cg.bitcast(field_ty, int_type, operand); + break :result try cg.trunc(bitcasted, int_type, union_int_type); } else if (field_ty.isPtrAtRuntime(zcu)) { const int_type = try pt.intType(.unsigned, @intCast(field_ty.bitSize(zcu))); - break :result try func.intcast(operand, int_type, union_int_type); + break :result try cg.intcast(operand, int_type, union_int_type); } - break :result try func.intcast(operand, field_ty, union_int_type); + break :result try cg.intcast(operand, field_ty, union_int_type); } }; - return func.finishAir(inst, result, &.{extra.init}); + return cg.finishAir(inst, result, &.{extra.init}); } -fn airPrefetch(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const prefetch = func.air.instructions.items(.data)[@intFromEnum(inst)].prefetch; - return func.finishAir(inst, .none, &.{prefetch.ptr}); +fn airPrefetch(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const prefetch = cg.air.instructions.items(.data)[@intFromEnum(inst)].prefetch; + return cg.finishAir(inst, .none, &.{prefetch.ptr}); } -fn airWasmMemorySize(func: *CodeGen, inst: 
Air.Inst.Index) InnerError!void { - const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; +fn airWasmMemorySize(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - try func.addLabel(.memory_size, pl_op.payload); - return func.finishAir(inst, .stack, &.{pl_op.operand}); + try cg.addLabel(.memory_size, pl_op.payload); + return cg.finishAir(inst, .stack, &.{pl_op.operand}); } -fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void { - const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; +fn airWasmMemoryGrow(cg: *CodeGen, inst: Air.Inst.Index) !void { + const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const operand = try func.resolveInst(pl_op.operand); - try func.emitWValue(operand); - try func.addLabel(.memory_grow, pl_op.payload); - return func.finishAir(inst, .stack, &.{pl_op.operand}); + const operand = try cg.resolveInst(pl_op.operand); + try cg.emitWValue(operand); + try cg.addLabel(.memory_grow, pl_op.payload); + return cg.finishAir(inst, .stack, &.{pl_op.operand}); } -fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue { - const pt = func.pt; - const zcu = pt.zcu; +fn cmpOptionals(cg: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue { + const zcu = cg.pt.zcu; assert(operand_ty.hasRuntimeBitsIgnoreComptime(zcu)); assert(op == .eq or op == .neq); const payload_ty = operand_ty.optionalChild(zcu); // We store the final result in here that will be validated // if the optional is truly equal. - var result = try func.ensureAllocLocal(Type.i32); - defer result.free(func); - - try func.startBlock(.block, wasm.block_empty); - _ = try func.isNull(lhs, operand_ty, .i32_eq); - _ = try func.isNull(rhs, operand_ty, .i32_eq); - try func.addTag(.i32_ne); // inverse so we can exit early - try func.addLabel(.br_if, 0); - - _ = try func.load(lhs, payload_ty, 0); - _ = try func.load(rhs, payload_ty, 0); - const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, pt, func.target.*) }); - try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); - try func.addLabel(.br_if, 0); - - try func.addImm32(1); - try func.addLabel(.local_set, result.local.value); - try func.endBlock(); - - try func.emitWValue(result); - try func.addImm32(0); - try func.addTag(if (op == .eq) .i32_ne else .i32_eq); + var result = try cg.ensureAllocLocal(Type.i32); + defer result.free(cg); + + try cg.startBlock(.block, .empty); + _ = try cg.isNull(lhs, operand_ty, .i32_eq); + _ = try cg.isNull(rhs, operand_ty, .i32_eq); + try cg.addTag(.i32_ne); // inverse so we can exit early + try cg.addLabel(.br_if, 0); + + _ = try cg.load(lhs, payload_ty, 0); + _ = try cg.load(rhs, payload_ty, 0); + const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, zcu, cg.target) }); + try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode)); + try cg.addLabel(.br_if, 0); + + try cg.addImm32(1); + try cg.addLocal(.local_set, result.local.value); + try cg.endBlock(); + + try cg.emitWValue(result); + try cg.addImm32(0); + try cg.addTag(if (op == .eq) .i32_ne else .i32_eq); return .stack; } /// Compares big integers by checking both its high bits and low bits. /// NOTE: Leaves the result of the comparison on top of the stack. 
/// TODO: Lower this to compiler_rt call when bitsize > 128 -fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue { - const pt = func.pt; - const zcu = pt.zcu; +fn cmpBigInt(cg: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue { + const zcu = cg.pt.zcu; assert(operand_ty.abiSize(zcu) >= 16); assert(!(lhs != .stack and rhs == .stack)); if (operand_ty.bitSize(zcu) > 128) { - return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(zcu)}); + return cg.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(zcu)}); } - var lhs_msb = try (try func.load(lhs, Type.u64, 8)).toLocal(func, Type.u64); - defer lhs_msb.free(func); - var rhs_msb = try (try func.load(rhs, Type.u64, 8)).toLocal(func, Type.u64); - defer rhs_msb.free(func); + var lhs_msb = try (try cg.load(lhs, Type.u64, 8)).toLocal(cg, Type.u64); + defer lhs_msb.free(cg); + var rhs_msb = try (try cg.load(rhs, Type.u64, 8)).toLocal(cg, Type.u64); + defer rhs_msb.free(cg); switch (op) { .eq, .neq => { - const xor_high = try func.binOp(lhs_msb, rhs_msb, Type.u64, .xor); - const lhs_lsb = try func.load(lhs, Type.u64, 0); - const rhs_lsb = try func.load(rhs, Type.u64, 0); - const xor_low = try func.binOp(lhs_lsb, rhs_lsb, Type.u64, .xor); - const or_result = try func.binOp(xor_high, xor_low, Type.u64, .@"or"); + const xor_high = try cg.binOp(lhs_msb, rhs_msb, Type.u64, .xor); + const lhs_lsb = try cg.load(lhs, Type.u64, 0); + const rhs_lsb = try cg.load(rhs, Type.u64, 0); + const xor_low = try cg.binOp(lhs_lsb, rhs_lsb, Type.u64, .xor); + const or_result = try cg.binOp(xor_high, xor_low, Type.u64, .@"or"); switch (op) { - .eq => return func.cmp(or_result, .{ .imm64 = 0 }, Type.u64, .eq), - .neq => return func.cmp(or_result, .{ .imm64 = 0 }, Type.u64, .neq), + .eq => return cg.cmp(or_result, .{ .imm64 = 0 }, Type.u64, .eq), + .neq => return cg.cmp(or_result, .{ .imm64 = 0 }, Type.u64, .neq), else => unreachable, } }, else => { const ty = if (operand_ty.isSignedInt(zcu)) Type.i64 else Type.u64; // leave those value on top of the stack for '.select' - const lhs_lsb = try func.load(lhs, Type.u64, 0); - const rhs_lsb = try func.load(rhs, Type.u64, 0); - _ = try func.cmp(lhs_lsb, rhs_lsb, Type.u64, op); - _ = try func.cmp(lhs_msb, rhs_msb, ty, op); - _ = try func.cmp(lhs_msb, rhs_msb, ty, .eq); - try func.addTag(.select); + const lhs_lsb = try cg.load(lhs, Type.u64, 0); + const rhs_lsb = try cg.load(rhs, Type.u64, 0); + _ = try cg.cmp(lhs_lsb, rhs_lsb, Type.u64, op); + _ = try cg.cmp(lhs_msb, rhs_msb, ty, op); + _ = try cg.cmp(lhs_msb, rhs_msb, ty, .eq); + try cg.addTag(.select); }, } return .stack; } -fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; +fn airSetUnionTag(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const pt = cg.pt; const zcu = pt.zcu; - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const un_ty = func.typeOf(bin_op.lhs).childType(zcu); - const tag_ty = func.typeOf(bin_op.rhs); + const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const un_ty = cg.typeOf(bin_op.lhs).childType(zcu); + const tag_ty = cg.typeOf(bin_op.rhs); const layout = un_ty.unionGetLayout(zcu); - if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); + if (layout.tag_size == 0) return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); 
- const union_ptr = try func.resolveInst(bin_op.lhs); - const new_tag = try func.resolveInst(bin_op.rhs); + const union_ptr = try cg.resolveInst(bin_op.lhs); + const new_tag = try cg.resolveInst(bin_op.rhs); if (layout.payload_size == 0) { - try func.store(union_ptr, new_tag, tag_ty, 0); - return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); + try cg.store(union_ptr, new_tag, tag_ty, 0); + return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); } // when the tag alignment is smaller than the payload, the field will be stored @@ -5627,124 +5492,147 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const offset: u32 = if (layout.tag_align.compare(.lt, layout.payload_align)) blk: { break :blk @intCast(layout.payload_size); } else 0; - try func.store(union_ptr, new_tag, tag_ty, offset); - return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); + try cg.store(union_ptr, new_tag, tag_ty, offset); + return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); } -fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const zcu = func.pt.zcu; - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airGetUnionTag(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const un_ty = func.typeOf(ty_op.operand); - const tag_ty = func.typeOfIndex(inst); + const un_ty = cg.typeOf(ty_op.operand); + const tag_ty = cg.typeOfIndex(inst); const layout = un_ty.unionGetLayout(zcu); - if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ty_op.operand}); + if (layout.tag_size == 0) return cg.finishAir(inst, .none, &.{ty_op.operand}); - const operand = try func.resolveInst(ty_op.operand); + const operand = try cg.resolveInst(ty_op.operand); // when the tag alignment is smaller than the payload, the field will be stored // after the payload. 
const offset: u32 = if (layout.tag_align.compare(.lt, layout.payload_align)) @intCast(layout.payload_size) else 0; - const result = try func.load(operand, tag_ty, offset); - return func.finishAir(inst, result, &.{ty_op.operand}); + const result = try cg.load(operand, tag_ty, offset); + return cg.finishAir(inst, result, &.{ty_op.operand}); } -fn airFpext(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airFpext(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const dest_ty = func.typeOfIndex(inst); - const operand = try func.resolveInst(ty_op.operand); - const result = try func.fpext(operand, func.typeOf(ty_op.operand), dest_ty); - return func.finishAir(inst, result, &.{ty_op.operand}); + const dest_ty = cg.typeOfIndex(inst); + const operand = try cg.resolveInst(ty_op.operand); + const result = try cg.fpext(operand, cg.typeOf(ty_op.operand), dest_ty); + return cg.finishAir(inst, result, &.{ty_op.operand}); } -/// Extends a float from a given `Type` to a larger wanted `Type` -/// NOTE: Leaves the result on the stack -fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue { - const given_bits = given.floatBits(func.target.*); - const wanted_bits = wanted.floatBits(func.target.*); - - if (wanted_bits == 64 and given_bits == 32) { - try func.emitWValue(operand); - try func.addTag(.f64_promote_f32); - return .stack; - } else if (given_bits == 16 and wanted_bits <= 64) { - // call __extendhfsf2(f16) f32 - const f32_result = try func.callIntrinsic( - "__extendhfsf2", - &.{.f16_type}, - Type.f32, - &.{operand}, - ); - std.debug.assert(f32_result == .stack); - - if (wanted_bits == 64) { - try func.addTag(.f64_promote_f32); - } - return .stack; - } - - var fn_name_buf: [13]u8 = undefined; - const fn_name = std.fmt.bufPrint(&fn_name_buf, "__extend{s}f{s}f2", .{ - target_util.compilerRtFloatAbbrev(given_bits), - target_util.compilerRtFloatAbbrev(wanted_bits), - }) catch unreachable; +/// Extends a float from a given `Type` to a larger wanted `Type`, leaving the +/// result on the stack. 
+fn fpext(cg: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
+    const given_bits = given.floatBits(cg.target.*);
+    const wanted_bits = wanted.floatBits(cg.target.*);
-    return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand});
+    const intrinsic: Mir.Intrinsic = switch (given_bits) {
+        16 => switch (wanted_bits) {
+            32 => {
+                assert(.stack == try cg.callIntrinsic(.__extendhfsf2, &.{.f16_type}, Type.f32, &.{operand}));
+                return .stack;
+            },
+            64 => {
+                assert(.stack == try cg.callIntrinsic(.__extendhfsf2, &.{.f16_type}, Type.f32, &.{operand}));
+                try cg.addTag(.f64_promote_f32);
+                return .stack;
+            },
+            80 => .__extendhfxf2,
+            128 => .__extendhftf2,
+            else => unreachable,
+        },
+        32 => switch (wanted_bits) {
+            64 => {
+                try cg.emitWValue(operand);
+                try cg.addTag(.f64_promote_f32);
+                return .stack;
+            },
+            80 => .__extendsfxf2,
+            128 => .__extendsftf2,
+            else => unreachable,
+        },
+        64 => switch (wanted_bits) {
+            80 => .__extenddfxf2,
+            128 => .__extenddftf2,
+            else => unreachable,
+        },
+        80 => switch (wanted_bits) {
+            128 => .__extendxftf2,
+            else => unreachable,
+        },
+        else => unreachable,
+    };
+    return cg.callIntrinsic(intrinsic, &.{given.ip_index}, wanted, &.{operand});
 }

-fn airFptrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airFptrunc(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;

-    const dest_ty = func.typeOfIndex(inst);
-    const operand = try func.resolveInst(ty_op.operand);
-    const result = try func.fptrunc(operand, func.typeOf(ty_op.operand), dest_ty);
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    const dest_ty = cg.typeOfIndex(inst);
+    const operand = try cg.resolveInst(ty_op.operand);
+    const result = try cg.fptrunc(operand, cg.typeOf(ty_op.operand), dest_ty);
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }

-/// Truncates a float from a given `Type` to its wanted `Type`
-/// NOTE: The result value remains on the stack
-fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
-    const given_bits = given.floatBits(func.target.*);
-    const wanted_bits = wanted.floatBits(func.target.*);
-
-    if (wanted_bits == 32 and given_bits == 64) {
-        try func.emitWValue(operand);
-        try func.addTag(.f32_demote_f64);
-        return .stack;
-    } else if (wanted_bits == 16 and given_bits <= 64) {
-        const op: WValue = if (given_bits == 64) blk: {
-            try func.emitWValue(operand);
-            try func.addTag(.f32_demote_f64);
-            break :blk .stack;
-        } else operand;
-
-        // call __truncsfhf2(f32) f16
-        return func.callIntrinsic("__truncsfhf2", &.{.f32_type}, Type.f16, &.{op});
-    }
-
-    var fn_name_buf: [12]u8 = undefined;
-    const fn_name = std.fmt.bufPrint(&fn_name_buf, "__trunc{s}f{s}f2", .{
-        target_util.compilerRtFloatAbbrev(given_bits),
-        target_util.compilerRtFloatAbbrev(wanted_bits),
-    }) catch unreachable;
+/// Truncates a float from a given `Type` to its wanted `Type`, leaving the
+/// result on the stack.
+fn fptrunc(cg: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
+    const given_bits = given.floatBits(cg.target.*);
+    const wanted_bits = wanted.floatBits(cg.target.*);
-    return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand});
+    const intrinsic: Mir.Intrinsic = switch (given_bits) {
+        32 => switch (wanted_bits) {
+            16 => {
+                return cg.callIntrinsic(.__truncsfhf2, &.{.f32_type}, Type.f16, &.{operand});
+            },
+            else => unreachable,
+        },
+        64 => switch (wanted_bits) {
+            16 => {
+                try cg.emitWValue(operand);
+                try cg.addTag(.f32_demote_f64);
+                return cg.callIntrinsic(.__truncsfhf2, &.{.f32_type}, Type.f16, &.{.stack});
+            },
+            32 => {
+                try cg.emitWValue(operand);
+                try cg.addTag(.f32_demote_f64);
+                return .stack;
+            },
+            else => unreachable,
+        },
+        80 => switch (wanted_bits) {
+            16 => .__truncxfhf2,
+            32 => .__truncxfsf2,
+            64 => .__truncxfdf2,
+            else => unreachable,
+        },
+        128 => switch (wanted_bits) {
+            16 => .__trunctfhf2,
+            32 => .__trunctfsf2,
+            64 => .__trunctfdf2,
+            80 => .__trunctfxf2,
+            else => unreachable,
+        },
+        else => unreachable,
+    };
+    return cg.callIntrinsic(intrinsic, &.{given.ip_index}, wanted, &.{operand});
 }

-fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
-    const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airErrUnionPayloadPtrSet(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const zcu = cg.pt.zcu;
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;

-    const err_set_ty = func.typeOf(ty_op.operand).childType(zcu);
+    const err_set_ty = cg.typeOf(ty_op.operand).childType(zcu);
     const payload_ty = err_set_ty.errorUnionPayload(zcu);
-    const operand = try func.resolveInst(ty_op.operand);
+    const operand = try cg.resolveInst(ty_op.operand);
     // set error-tag to '0' to annotate error union is non-error
-    try func.store(
+    try cg.store(
         operand,
         .{ .imm32 = 0 },
         Type.anyerror,
@@ -5753,63 +5641,60 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
     const result = result: {
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
-            break :result func.reuseOperand(ty_op.operand, operand);
+            break :result cg.reuseOperand(ty_op.operand, operand);
         }
-        break :result try func.buildPointerOffset(operand, @as(u32, @intCast(errUnionPayloadOffset(payload_ty, zcu))), .new);
+        break :result try cg.buildPointerOffset(operand, @as(u32, @intCast(errUnionPayloadOffset(payload_ty, zcu))), .new);
     };
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }

-fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
-    const zcu = pt.zcu;
-    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
+fn airFieldParentPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const zcu = cg.pt.zcu;
+    const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const extra = cg.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;

-    const field_ptr = try func.resolveInst(extra.field_ptr);
+    const field_ptr = try cg.resolveInst(extra.field_ptr);
     const parent_ty = ty_pl.ty.toType().childType(zcu);
     const field_offset = parent_ty.structFieldOffset(extra.field_index, zcu);

     const result = if (field_offset != 0) result: {
-        const base = try func.buildPointerOffset(field_ptr, 0, .new);
-        try
func.addLabel(.local_get, base.local.value); - try func.addImm32(@intCast(field_offset)); - try func.addTag(.i32_sub); - try func.addLabel(.local_set, base.local.value); + const base = try cg.buildPointerOffset(field_ptr, 0, .new); + try cg.addLocal(.local_get, base.local.value); + try cg.addImm32(@intCast(field_offset)); + try cg.addTag(.i32_sub); + try cg.addLocal(.local_set, base.local.value); break :result base; - } else func.reuseOperand(extra.field_ptr, field_ptr); + } else cg.reuseOperand(extra.field_ptr, field_ptr); - return func.finishAir(inst, result, &.{extra.field_ptr}); + return cg.finishAir(inst, result, &.{extra.field_ptr}); } -fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue { - const pt = func.pt; - const zcu = pt.zcu; +fn sliceOrArrayPtr(cg: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue { + const zcu = cg.pt.zcu; if (ptr_ty.isSlice(zcu)) { - return func.slicePtr(ptr); + return cg.slicePtr(ptr); } else { return ptr; } } -fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const dst = try func.resolveInst(bin_op.lhs); - const dst_ty = func.typeOf(bin_op.lhs); +fn airMemcpy(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const dst = try cg.resolveInst(bin_op.lhs); + const dst_ty = cg.typeOf(bin_op.lhs); const ptr_elem_ty = dst_ty.childType(zcu); - const src = try func.resolveInst(bin_op.rhs); - const src_ty = func.typeOf(bin_op.rhs); + const src = try cg.resolveInst(bin_op.rhs); + const src_ty = cg.typeOf(bin_op.rhs); const len = switch (dst_ty.ptrSize(zcu)) { .Slice => blk: { - const slice_len = try func.sliceLen(dst); + const slice_len = try cg.sliceLen(dst); if (ptr_elem_ty.abiSize(zcu) != 1) { - try func.emitWValue(slice_len); - try func.emitWValue(.{ .imm32 = @as(u32, @intCast(ptr_elem_ty.abiSize(zcu))) }); - try func.addTag(.i32_mul); - try func.addLabel(.local_set, slice_len.local.value); + try cg.emitWValue(slice_len); + try cg.emitWValue(.{ .imm32 = @as(u32, @intCast(ptr_elem_ty.abiSize(zcu))) }); + try cg.addTag(.i32_mul); + try cg.addLocal(.local_set, slice_len.local.value); } break :blk slice_len; }, @@ -5818,96 +5703,94 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }), .C, .Many => unreachable, }; - const dst_ptr = try func.sliceOrArrayPtr(dst, dst_ty); - const src_ptr = try func.sliceOrArrayPtr(src, src_ty); - try func.memcpy(dst_ptr, src_ptr, len); + const dst_ptr = try cg.sliceOrArrayPtr(dst, dst_ty); + const src_ptr = try cg.sliceOrArrayPtr(src, src_ty); + try cg.memcpy(dst_ptr, src_ptr, len); - return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); } -fn airRetAddr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { +fn airRetAddr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { // TODO: Implement this properly once stack serialization is solved - return func.finishAir(inst, switch (func.arch()) { + return cg.finishAir(inst, switch (cg.ptr_size) { .wasm32 => .{ .imm32 = 0 }, .wasm64 => .{ .imm64 = 0 }, - else => unreachable, }, &.{}); } -fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; +fn airPopcount(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const pt = cg.pt; const zcu = pt.zcu; - const ty_op = 
func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.typeOf(ty_op.operand); + const operand = try cg.resolveInst(ty_op.operand); + const op_ty = cg.typeOf(ty_op.operand); if (op_ty.zigTypeTag(zcu) == .vector) { - return func.fail("TODO: Implement @popCount for vectors", .{}); + return cg.fail("TODO: Implement @popCount for vectors", .{}); } const int_info = op_ty.intInfo(zcu); const bits = int_info.bits; const wasm_bits = toWasmBits(bits) orelse { - return func.fail("TODO: Implement @popCount for integers with bitsize '{d}'", .{bits}); + return cg.fail("TODO: Implement @popCount for integers with bitsize '{d}'", .{bits}); }; switch (wasm_bits) { 32 => { - try func.emitWValue(operand); + try cg.emitWValue(operand); if (op_ty.isSignedInt(zcu) and bits != wasm_bits) { - _ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits)); + _ = try cg.wrapOperand(.stack, try pt.intType(.unsigned, bits)); } - try func.addTag(.i32_popcnt); + try cg.addTag(.i32_popcnt); }, 64 => { - try func.emitWValue(operand); + try cg.emitWValue(operand); if (op_ty.isSignedInt(zcu) and bits != wasm_bits) { - _ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits)); + _ = try cg.wrapOperand(.stack, try pt.intType(.unsigned, bits)); } - try func.addTag(.i64_popcnt); - try func.addTag(.i32_wrap_i64); - try func.emitWValue(operand); + try cg.addTag(.i64_popcnt); + try cg.addTag(.i32_wrap_i64); + try cg.emitWValue(operand); }, 128 => { - _ = try func.load(operand, Type.u64, 0); - try func.addTag(.i64_popcnt); - _ = try func.load(operand, Type.u64, 8); + _ = try cg.load(operand, Type.u64, 0); + try cg.addTag(.i64_popcnt); + _ = try cg.load(operand, Type.u64, 8); if (op_ty.isSignedInt(zcu) and bits != wasm_bits) { - _ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits - 64)); + _ = try cg.wrapOperand(.stack, try pt.intType(.unsigned, bits - 64)); } - try func.addTag(.i64_popcnt); - try func.addTag(.i64_add); - try func.addTag(.i32_wrap_i64); + try cg.addTag(.i64_popcnt); + try cg.addTag(.i64_add); + try cg.addTag(.i32_wrap_i64); }, else => unreachable, } - return func.finishAir(inst, .stack, &.{ty_op.operand}); + return cg.finishAir(inst, .stack, &.{ty_op.operand}); } -fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airBitReverse(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand = try func.resolveInst(ty_op.operand); - const ty = func.typeOf(ty_op.operand); + const operand = try cg.resolveInst(ty_op.operand); + const ty = cg.typeOf(ty_op.operand); if (ty.zigTypeTag(zcu) == .vector) { - return func.fail("TODO: Implement @bitReverse for vectors", .{}); + return cg.fail("TODO: Implement @bitReverse for vectors", .{}); } const int_info = ty.intInfo(zcu); const bits = int_info.bits; const wasm_bits = toWasmBits(bits) orelse { - return func.fail("TODO: Implement @bitReverse for integers with bitsize '{d}'", .{bits}); + return cg.fail("TODO: Implement @bitReverse for integers with bitsize '{d}'", .{bits}); }; switch (wasm_bits) { 32 => { - const intrin_ret = try func.callIntrinsic( - "__bitreversesi2", + const intrin_ret = try cg.callIntrinsic( + .__bitreversesi2, &.{.u32_type}, 
Type.u32, &.{operand}, @@ -5915,12 +5798,12 @@ fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = if (bits == 32) intrin_ret else - try func.binOp(intrin_ret, .{ .imm32 = 32 - bits }, ty, .shr); - return func.finishAir(inst, result, &.{ty_op.operand}); + try cg.binOp(intrin_ret, .{ .imm32 = 32 - bits }, ty, .shr); + return cg.finishAir(inst, result, &.{ty_op.operand}); }, 64 => { - const intrin_ret = try func.callIntrinsic( - "__bitreversedi2", + const intrin_ret = try cg.callIntrinsic( + .__bitreversedi2, &.{.u64_type}, Type.u64, &.{operand}, @@ -5928,68 +5811,63 @@ fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = if (bits == 64) intrin_ret else - try func.binOp(intrin_ret, .{ .imm64 = 64 - bits }, ty, .shr); - return func.finishAir(inst, result, &.{ty_op.operand}); + try cg.binOp(intrin_ret, .{ .imm64 = 64 - bits }, ty, .shr); + return cg.finishAir(inst, result, &.{ty_op.operand}); }, 128 => { - const result = try func.allocStack(ty); + const result = try cg.allocStack(ty); - try func.emitWValue(result); - const first_half = try func.load(operand, Type.u64, 8); - const intrin_ret_first = try func.callIntrinsic( - "__bitreversedi2", + try cg.emitWValue(result); + const first_half = try cg.load(operand, Type.u64, 8); + const intrin_ret_first = try cg.callIntrinsic( + .__bitreversedi2, &.{.u64_type}, Type.u64, &.{first_half}, ); - try func.emitWValue(intrin_ret_first); + try cg.emitWValue(intrin_ret_first); if (bits < 128) { - try func.emitWValue(.{ .imm64 = 128 - bits }); - try func.addTag(.i64_shr_u); + try cg.emitWValue(.{ .imm64 = 128 - bits }); + try cg.addTag(.i64_shr_u); } - try func.emitWValue(result); - const second_half = try func.load(operand, Type.u64, 0); - const intrin_ret_second = try func.callIntrinsic( - "__bitreversedi2", + try cg.emitWValue(result); + const second_half = try cg.load(operand, Type.u64, 0); + const intrin_ret_second = try cg.callIntrinsic( + .__bitreversedi2, &.{.u64_type}, Type.u64, &.{second_half}, ); - try func.emitWValue(intrin_ret_second); + try cg.emitWValue(intrin_ret_second); if (bits == 128) { - try func.store(.stack, .stack, Type.u64, result.offset() + 8); - try func.store(.stack, .stack, Type.u64, result.offset()); + try cg.store(.stack, .stack, Type.u64, result.offset() + 8); + try cg.store(.stack, .stack, Type.u64, result.offset()); } else { - var tmp = try func.allocLocal(Type.u64); - defer tmp.free(func); - try func.addLabel(.local_tee, tmp.local.value); - try func.emitWValue(.{ .imm64 = 128 - bits }); + var tmp = try cg.allocLocal(Type.u64); + defer tmp.free(cg); + try cg.addLocal(.local_tee, tmp.local.value); + try cg.emitWValue(.{ .imm64 = 128 - bits }); if (ty.isSignedInt(zcu)) { - try func.addTag(.i64_shr_s); + try cg.addTag(.i64_shr_s); } else { - try func.addTag(.i64_shr_u); + try cg.addTag(.i64_shr_u); } - try func.store(.stack, .stack, Type.u64, result.offset() + 8); - try func.addLabel(.local_get, tmp.local.value); - try func.emitWValue(.{ .imm64 = bits - 64 }); - try func.addTag(.i64_shl); - try func.addTag(.i64_or); - try func.store(.stack, .stack, Type.u64, result.offset()); + try cg.store(.stack, .stack, Type.u64, result.offset() + 8); + try cg.addLocal(.local_get, tmp.local.value); + try cg.emitWValue(.{ .imm64 = bits - 64 }); + try cg.addTag(.i64_shl); + try cg.addTag(.i64_or); + try cg.store(.stack, .stack, Type.u64, result.offset()); } - return func.finishAir(inst, result, &.{ty_op.operand}); + return cg.finishAir(inst, result, &.{ty_op.operand}); }, 
         else => unreachable,
     }
 }

-fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
-
-    const operand = try func.resolveInst(un_op);
-    // First retrieve the symbol index to the error name table
-    // that will be used to emit a relocation for the pointer
-    // to the error name table.
-    //
+fn airErrorName(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+    const operand = try cg.resolveInst(un_op);
     // Each entry to this table is a slice (ptr+len).
     // The operand in this instruction represents the index within this table.
     // This means to get the final name, we emit the base pointer and then perform
@@ -5997,82 +5875,82 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     //
     // As the names are global and the slice elements are constant, we do not have
     // to make a copy of the ptr+value but can point towards them directly.
-    const pt = func.pt;
-    const error_table_symbol = try func.bin_file.getErrorTableSymbol(pt);
+    const pt = cg.pt;
     const name_ty = Type.slice_const_u8_sentinel_0;
     const abi_size = name_ty.abiSize(pt.zcu);

-    const error_name_value: WValue = .{ .memory = error_table_symbol }; // emitting this will create a relocation
-    try func.emitWValue(error_name_value);
-    try func.emitWValue(operand);
-    switch (func.arch()) {
+    cg.wasm.error_name_table_ref_count += 1;
+
+    // Lowers to an i32.const or i64.const with the error table memory address.
+    try cg.addTag(.error_name_table_ref);
+    try cg.emitWValue(operand);
+    switch (cg.ptr_size) {
         .wasm32 => {
-            try func.addImm32(@intCast(abi_size));
-            try func.addTag(.i32_mul);
-            try func.addTag(.i32_add);
+            try cg.addImm32(@intCast(abi_size));
+            try cg.addTag(.i32_mul);
+            try cg.addTag(.i32_add);
         },
         .wasm64 => {
-            try func.addImm64(abi_size);
-            try func.addTag(.i64_mul);
-            try func.addTag(.i64_add);
+            try cg.addImm64(abi_size);
+            try cg.addTag(.i64_mul);
+            try cg.addTag(.i64_add);
         },
-        else => unreachable,
     }
-    return func.finishAir(inst, .stack, &.{un_op});
+    return cg.finishAir(inst, .stack, &.{un_op});
 }

-fn airPtrSliceFieldPtr(func: *CodeGen, inst: Air.Inst.Index, offset: u32) InnerError!void {
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const slice_ptr = try func.resolveInst(ty_op.operand);
-    const result = try func.buildPointerOffset(slice_ptr, offset, .new);
-    return func.finishAir(inst, result, &.{ty_op.operand});
+fn airPtrSliceFieldPtr(cg: *CodeGen, inst: Air.Inst.Index, offset: u32) InnerError!void {
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const slice_ptr = try cg.resolveInst(ty_op.operand);
+    const result = try cg.buildPointerOffset(slice_ptr, offset, .new);
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }

 /// NOTE: Allocates place for result on virtual stack, when integer size > 64 bits
-fn intZeroValue(func: *CodeGen, ty: Type) InnerError!WValue {
-    const zcu = func.bin_file.base.comp.zcu.?;
+fn intZeroValue(cg: *CodeGen, ty: Type) InnerError!WValue {
+    const zcu = cg.wasm.base.comp.zcu.?;
     const int_info = ty.intInfo(zcu);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
-        return func.fail("TODO: Implement intZeroValue for integer bitsize: {d}", .{int_info.bits});
+        return cg.fail("TODO: Implement intZeroValue for integer bitsize: {d}", .{int_info.bits});
     };
     switch (wasm_bits) {
         32 => return .{ .imm32 = 0 },
         64 => return .{ .imm64 = 0 },
         128 => {
-            const
result = try func.allocStack(ty); - try func.store(result, .{ .imm64 = 0 }, Type.u64, 0); - try func.store(result, .{ .imm64 = 0 }, Type.u64, 8); + const result = try cg.allocStack(ty); + try cg.store(result, .{ .imm64 = 0 }, Type.u64, 0); + try cg.store(result, .{ .imm64 = 0 }, Type.u64, 8); return result; }, else => unreachable, } } -fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { +fn airAddSubWithOverflow(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { assert(op == .add or op == .sub); - const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; + const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = cg.air.extraData(Air.Bin, ty_pl.payload).data; - const lhs = try func.resolveInst(extra.lhs); - const rhs = try func.resolveInst(extra.rhs); - const ty = func.typeOf(extra.lhs); - const pt = func.pt; + const lhs = try cg.resolveInst(extra.lhs); + const rhs = try cg.resolveInst(extra.rhs); + const ty = cg.typeOf(extra.lhs); + const pt = cg.pt; const zcu = pt.zcu; if (ty.zigTypeTag(zcu) == .vector) { - return func.fail("TODO: Implement overflow arithmetic for vectors", .{}); + return cg.fail("TODO: Implement overflow arithmetic for vectors", .{}); } const int_info = ty.intInfo(zcu); const is_signed = int_info.signedness == .signed; if (int_info.bits > 128) { - return func.fail("TODO: Implement {{add/sub}}_with_overflow for integer bitsize: {d}", .{int_info.bits}); + return cg.fail("TODO: Implement {{add/sub}}_with_overflow for integer bitsize: {d}", .{int_info.bits}); } - const op_result = try func.wrapBinOp(lhs, rhs, ty, op); - var op_tmp = try op_result.toLocal(func, ty); - defer op_tmp.free(func); + const op_result = try cg.wrapBinOp(lhs, rhs, ty, op); + var op_tmp = try op_result.toLocal(cg, ty); + defer op_tmp.free(cg); const cmp_op: std.math.CompareOperator = switch (op) { .add => .lt, @@ -6080,40 +5958,40 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro else => unreachable, }; const overflow_bit = if (is_signed) blk: { - const zero = try intZeroValue(func, ty); - const rhs_is_neg = try func.cmp(rhs, zero, ty, .lt); - const overflow_cmp = try func.cmp(op_tmp, lhs, ty, cmp_op); - break :blk try func.cmp(rhs_is_neg, overflow_cmp, Type.u1, .neq); - } else try func.cmp(op_tmp, lhs, ty, cmp_op); - var bit_tmp = try overflow_bit.toLocal(func, Type.u1); - defer bit_tmp.free(func); - - const result = try func.allocStack(func.typeOfIndex(inst)); + const zero = try intZeroValue(cg, ty); + const rhs_is_neg = try cg.cmp(rhs, zero, ty, .lt); + const overflow_cmp = try cg.cmp(op_tmp, lhs, ty, cmp_op); + break :blk try cg.cmp(rhs_is_neg, overflow_cmp, Type.u1, .neq); + } else try cg.cmp(op_tmp, lhs, ty, cmp_op); + var bit_tmp = try overflow_bit.toLocal(cg, Type.u1); + defer bit_tmp.free(cg); + + const result = try cg.allocStack(cg.typeOfIndex(inst)); const offset: u32 = @intCast(ty.abiSize(zcu)); - try func.store(result, op_tmp, ty, 0); - try func.store(result, bit_tmp, Type.u1, offset); + try cg.store(result, op_tmp, ty, 0); + try cg.store(result, bit_tmp, Type.u1, offset); - return func.finishAir(inst, result, &.{ extra.lhs, extra.rhs }); + return cg.finishAir(inst, result, &.{ extra.lhs, extra.rhs }); } -fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; +fn airShlWithOverflow(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const pt = 
cg.pt; const zcu = pt.zcu; - const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; + const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = cg.air.extraData(Air.Bin, ty_pl.payload).data; - const lhs = try func.resolveInst(extra.lhs); - const rhs = try func.resolveInst(extra.rhs); - const ty = func.typeOf(extra.lhs); - const rhs_ty = func.typeOf(extra.rhs); + const lhs = try cg.resolveInst(extra.lhs); + const rhs = try cg.resolveInst(extra.rhs); + const ty = cg.typeOf(extra.lhs); + const rhs_ty = cg.typeOf(extra.rhs); if (ty.zigTypeTag(zcu) == .vector) { - return func.fail("TODO: Implement overflow arithmetic for vectors", .{}); + return cg.fail("TODO: Implement overflow arithmetic for vectors", .{}); } const int_info = ty.intInfo(zcu); const wasm_bits = toWasmBits(int_info.bits) orelse { - return func.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits}); + return cg.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits}); }; // Ensure rhs is coerced to lhs as they must have the same WebAssembly types @@ -6121,50 +5999,50 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(zcu).bits).?; // If wasm_bits == 128, compiler-rt expects i32 for shift const rhs_final = if (wasm_bits != rhs_wasm_bits and wasm_bits == 64) blk: { - const rhs_casted = try func.intcast(rhs, rhs_ty, ty); - break :blk try rhs_casted.toLocal(func, ty); + const rhs_casted = try cg.intcast(rhs, rhs_ty, ty); + break :blk try rhs_casted.toLocal(cg, ty); } else rhs; - var shl = try (try func.wrapBinOp(lhs, rhs_final, ty, .shl)).toLocal(func, ty); - defer shl.free(func); + var shl = try (try cg.wrapBinOp(lhs, rhs_final, ty, .shl)).toLocal(cg, ty); + defer shl.free(cg); const overflow_bit = blk: { - const shr = try func.binOp(shl, rhs_final, ty, .shr); - break :blk try func.cmp(shr, lhs, ty, .neq); + const shr = try cg.binOp(shl, rhs_final, ty, .shr); + break :blk try cg.cmp(shr, lhs, ty, .neq); }; - var overflow_local = try overflow_bit.toLocal(func, Type.u1); - defer overflow_local.free(func); + var overflow_local = try overflow_bit.toLocal(cg, Type.u1); + defer overflow_local.free(cg); - const result = try func.allocStack(func.typeOfIndex(inst)); + const result = try cg.allocStack(cg.typeOfIndex(inst)); const offset: u32 = @intCast(ty.abiSize(zcu)); - try func.store(result, shl, ty, 0); - try func.store(result, overflow_local, Type.u1, offset); + try cg.store(result, shl, ty, 0); + try cg.store(result, overflow_local, Type.u1, offset); - return func.finishAir(inst, result, &.{ extra.lhs, extra.rhs }); + return cg.finishAir(inst, result, &.{ extra.lhs, extra.rhs }); } -fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; +fn airMulWithOverflow(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = cg.air.extraData(Air.Bin, ty_pl.payload).data; - const lhs = try func.resolveInst(extra.lhs); - const rhs = try func.resolveInst(extra.rhs); - const ty = func.typeOf(extra.lhs); - const pt = func.pt; + const lhs = try cg.resolveInst(extra.lhs); + const rhs = try cg.resolveInst(extra.rhs); + const ty = cg.typeOf(extra.lhs); + 
const pt = cg.pt; const zcu = pt.zcu; if (ty.zigTypeTag(zcu) == .vector) { - return func.fail("TODO: Implement overflow arithmetic for vectors", .{}); + return cg.fail("TODO: Implement overflow arithmetic for vectors", .{}); } // We store the bit if it's overflowed or not in this. As it's zero-initialized // we only need to update it if an overflow (or underflow) occurred. - var overflow_bit = try func.ensureAllocLocal(Type.u1); - defer overflow_bit.free(func); + var overflow_bit = try cg.ensureAllocLocal(Type.u1); + defer overflow_bit.free(cg); const int_info = ty.intInfo(zcu); const wasm_bits = toWasmBits(int_info.bits) orelse { - return func.fail("TODO: Implement `@mulWithOverflow` for integer bitsize: {d}", .{int_info.bits}); + return cg.fail("TODO: Implement `@mulWithOverflow` for integer bitsize: {d}", .{int_info.bits}); }; const zero: WValue = switch (wasm_bits) { @@ -6176,248 +6054,250 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // for 32 bit integers we upcast it to a 64bit integer const mul = if (wasm_bits == 32) blk: { const new_ty = if (int_info.signedness == .signed) Type.i64 else Type.u64; - const lhs_upcast = try func.intcast(lhs, ty, new_ty); - const rhs_upcast = try func.intcast(rhs, ty, new_ty); - const bin_op = try (try func.binOp(lhs_upcast, rhs_upcast, new_ty, .mul)).toLocal(func, new_ty); - const res = try (try func.trunc(bin_op, ty, new_ty)).toLocal(func, ty); - const res_upcast = try func.intcast(res, ty, new_ty); - _ = try func.cmp(res_upcast, bin_op, new_ty, .neq); - try func.addLabel(.local_set, overflow_bit.local.value); + const lhs_upcast = try cg.intcast(lhs, ty, new_ty); + const rhs_upcast = try cg.intcast(rhs, ty, new_ty); + const bin_op = try (try cg.binOp(lhs_upcast, rhs_upcast, new_ty, .mul)).toLocal(cg, new_ty); + const res = try (try cg.trunc(bin_op, ty, new_ty)).toLocal(cg, ty); + const res_upcast = try cg.intcast(res, ty, new_ty); + _ = try cg.cmp(res_upcast, bin_op, new_ty, .neq); + try cg.addLocal(.local_set, overflow_bit.local.value); break :blk res; } else if (wasm_bits == 64) blk: { const new_ty = if (int_info.signedness == .signed) Type.i128 else Type.u128; - const lhs_upcast = try func.intcast(lhs, ty, new_ty); - const rhs_upcast = try func.intcast(rhs, ty, new_ty); - const bin_op = try (try func.binOp(lhs_upcast, rhs_upcast, new_ty, .mul)).toLocal(func, new_ty); - const res = try (try func.trunc(bin_op, ty, new_ty)).toLocal(func, ty); - const res_upcast = try func.intcast(res, ty, new_ty); - _ = try func.cmp(res_upcast, bin_op, new_ty, .neq); - try func.addLabel(.local_set, overflow_bit.local.value); + const lhs_upcast = try cg.intcast(lhs, ty, new_ty); + const rhs_upcast = try cg.intcast(rhs, ty, new_ty); + const bin_op = try (try cg.binOp(lhs_upcast, rhs_upcast, new_ty, .mul)).toLocal(cg, new_ty); + const res = try (try cg.trunc(bin_op, ty, new_ty)).toLocal(cg, ty); + const res_upcast = try cg.intcast(res, ty, new_ty); + _ = try cg.cmp(res_upcast, bin_op, new_ty, .neq); + try cg.addLocal(.local_set, overflow_bit.local.value); break :blk res; } else if (int_info.bits == 128 and int_info.signedness == .unsigned) blk: { - var lhs_lsb = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64); - defer lhs_lsb.free(func); - var lhs_msb = try (try func.load(lhs, Type.u64, 8)).toLocal(func, Type.u64); - defer lhs_msb.free(func); - var rhs_lsb = try (try func.load(rhs, Type.u64, 0)).toLocal(func, Type.u64); - defer rhs_lsb.free(func); - var rhs_msb = try (try func.load(rhs, Type.u64, 8)).toLocal(func, 
Type.u64); - defer rhs_msb.free(func); - - const cross_1 = try func.callIntrinsic( - "__multi3", + var lhs_lsb = try (try cg.load(lhs, Type.u64, 0)).toLocal(cg, Type.u64); + defer lhs_lsb.free(cg); + var lhs_msb = try (try cg.load(lhs, Type.u64, 8)).toLocal(cg, Type.u64); + defer lhs_msb.free(cg); + var rhs_lsb = try (try cg.load(rhs, Type.u64, 0)).toLocal(cg, Type.u64); + defer rhs_lsb.free(cg); + var rhs_msb = try (try cg.load(rhs, Type.u64, 8)).toLocal(cg, Type.u64); + defer rhs_msb.free(cg); + + const cross_1 = try cg.callIntrinsic( + .__multi3, &[_]InternPool.Index{.i64_type} ** 4, Type.i128, &.{ lhs_msb, zero, rhs_lsb, zero }, ); - const cross_2 = try func.callIntrinsic( - "__multi3", + const cross_2 = try cg.callIntrinsic( + .__multi3, &[_]InternPool.Index{.i64_type} ** 4, Type.i128, &.{ rhs_msb, zero, lhs_lsb, zero }, ); - const mul_lsb = try func.callIntrinsic( - "__multi3", + const mul_lsb = try cg.callIntrinsic( + .__multi3, &[_]InternPool.Index{.i64_type} ** 4, Type.i128, &.{ rhs_lsb, zero, lhs_lsb, zero }, ); - const rhs_msb_not_zero = try func.cmp(rhs_msb, zero, Type.u64, .neq); - const lhs_msb_not_zero = try func.cmp(lhs_msb, zero, Type.u64, .neq); - const both_msb_not_zero = try func.binOp(rhs_msb_not_zero, lhs_msb_not_zero, Type.bool, .@"and"); - const cross_1_msb = try func.load(cross_1, Type.u64, 8); - const cross_1_msb_not_zero = try func.cmp(cross_1_msb, zero, Type.u64, .neq); - const cond_1 = try func.binOp(both_msb_not_zero, cross_1_msb_not_zero, Type.bool, .@"or"); - const cross_2_msb = try func.load(cross_2, Type.u64, 8); - const cross_2_msb_not_zero = try func.cmp(cross_2_msb, zero, Type.u64, .neq); - const cond_2 = try func.binOp(cond_1, cross_2_msb_not_zero, Type.bool, .@"or"); - - const cross_1_lsb = try func.load(cross_1, Type.u64, 0); - const cross_2_lsb = try func.load(cross_2, Type.u64, 0); - const cross_add = try func.binOp(cross_1_lsb, cross_2_lsb, Type.u64, .add); - - var mul_lsb_msb = try (try func.load(mul_lsb, Type.u64, 8)).toLocal(func, Type.u64); - defer mul_lsb_msb.free(func); - var all_add = try (try func.binOp(cross_add, mul_lsb_msb, Type.u64, .add)).toLocal(func, Type.u64); - defer all_add.free(func); - const add_overflow = try func.cmp(all_add, mul_lsb_msb, Type.u64, .lt); + const rhs_msb_not_zero = try cg.cmp(rhs_msb, zero, Type.u64, .neq); + const lhs_msb_not_zero = try cg.cmp(lhs_msb, zero, Type.u64, .neq); + const both_msb_not_zero = try cg.binOp(rhs_msb_not_zero, lhs_msb_not_zero, Type.bool, .@"and"); + const cross_1_msb = try cg.load(cross_1, Type.u64, 8); + const cross_1_msb_not_zero = try cg.cmp(cross_1_msb, zero, Type.u64, .neq); + const cond_1 = try cg.binOp(both_msb_not_zero, cross_1_msb_not_zero, Type.bool, .@"or"); + const cross_2_msb = try cg.load(cross_2, Type.u64, 8); + const cross_2_msb_not_zero = try cg.cmp(cross_2_msb, zero, Type.u64, .neq); + const cond_2 = try cg.binOp(cond_1, cross_2_msb_not_zero, Type.bool, .@"or"); + + const cross_1_lsb = try cg.load(cross_1, Type.u64, 0); + const cross_2_lsb = try cg.load(cross_2, Type.u64, 0); + const cross_add = try cg.binOp(cross_1_lsb, cross_2_lsb, Type.u64, .add); + + var mul_lsb_msb = try (try cg.load(mul_lsb, Type.u64, 8)).toLocal(cg, Type.u64); + defer mul_lsb_msb.free(cg); + var all_add = try (try cg.binOp(cross_add, mul_lsb_msb, Type.u64, .add)).toLocal(cg, Type.u64); + defer all_add.free(cg); + const add_overflow = try cg.cmp(all_add, mul_lsb_msb, Type.u64, .lt); // result for overflow bit - _ = try func.binOp(cond_2, add_overflow, Type.bool, .@"or"); - try 
func.addLabel(.local_set, overflow_bit.local.value); - - const tmp_result = try func.allocStack(Type.u128); - try func.emitWValue(tmp_result); - const mul_lsb_lsb = try func.load(mul_lsb, Type.u64, 0); - try func.store(.stack, mul_lsb_lsb, Type.u64, tmp_result.offset()); - try func.store(tmp_result, all_add, Type.u64, 8); + _ = try cg.binOp(cond_2, add_overflow, Type.bool, .@"or"); + try cg.addLocal(.local_set, overflow_bit.local.value); + + const tmp_result = try cg.allocStack(Type.u128); + try cg.emitWValue(tmp_result); + const mul_lsb_lsb = try cg.load(mul_lsb, Type.u64, 0); + try cg.store(.stack, mul_lsb_lsb, Type.u64, tmp_result.offset()); + try cg.store(tmp_result, all_add, Type.u64, 8); break :blk tmp_result; } else if (int_info.bits == 128 and int_info.signedness == .signed) blk: { - const overflow_ret = try func.allocStack(Type.i32); - const res = try func.callIntrinsic( - "__muloti4", + const overflow_ret = try cg.allocStack(Type.i32); + const res = try cg.callIntrinsic( + .__muloti4, &[_]InternPool.Index{ .i128_type, .i128_type, .usize_type }, Type.i128, &.{ lhs, rhs, overflow_ret }, ); - _ = try func.load(overflow_ret, Type.i32, 0); - try func.addLabel(.local_set, overflow_bit.local.value); + _ = try cg.load(overflow_ret, Type.i32, 0); + try cg.addLocal(.local_set, overflow_bit.local.value); break :blk res; - } else return func.fail("TODO: @mulWithOverflow for {}", .{ty.fmt(pt)}); - var bin_op_local = try mul.toLocal(func, ty); - defer bin_op_local.free(func); + } else return cg.fail("TODO: @mulWithOverflow for {}", .{ty.fmt(pt)}); + var bin_op_local = try mul.toLocal(cg, ty); + defer bin_op_local.free(cg); - const result = try func.allocStack(func.typeOfIndex(inst)); + const result = try cg.allocStack(cg.typeOfIndex(inst)); const offset: u32 = @intCast(ty.abiSize(zcu)); - try func.store(result, bin_op_local, ty, 0); - try func.store(result, overflow_bit, Type.u1, offset); + try cg.store(result, bin_op_local, ty, 0); + try cg.store(result, overflow_bit, Type.u1, offset); - return func.finishAir(inst, result, &.{ extra.lhs, extra.rhs }); + return cg.finishAir(inst, result, &.{ extra.lhs, extra.rhs }); } -fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { - assert(op == .max or op == .min); - const pt = func.pt; - const zcu = pt.zcu; - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; +fn airMaxMin( + cg: *CodeGen, + inst: Air.Inst.Index, + op: enum { fmax, fmin }, + cmp_op: std.math.CompareOperator, +) InnerError!void { + const zcu = cg.pt.zcu; + const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const ty = func.typeOfIndex(inst); + const ty = cg.typeOfIndex(inst); if (ty.zigTypeTag(zcu) == .vector) { - return func.fail("TODO: `@maximum` and `@minimum` for vectors", .{}); + return cg.fail("TODO: `@maximum` and `@minimum` for vectors", .{}); } if (ty.abiSize(zcu) > 16) { - return func.fail("TODO: `@maximum` and `@minimum` for types larger than 16 bytes", .{}); + return cg.fail("TODO: `@maximum` and `@minimum` for types larger than 16 bytes", .{}); } - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); + const lhs = try cg.resolveInst(bin_op.lhs); + const rhs = try cg.resolveInst(bin_op.rhs); if (ty.zigTypeTag(zcu) == .float) { - var fn_name_buf: [64]u8 = undefined; - const float_bits = ty.floatBits(func.target.*); - const fn_name = std.fmt.bufPrint(&fn_name_buf, "{s}f{s}{s}", .{ - target_util.libcFloatPrefix(float_bits), - @tagName(op), - 
target_util.libcFloatSuffix(float_bits), - }) catch unreachable; - const result = try func.callIntrinsic(fn_name, &.{ ty.ip_index, ty.ip_index }, ty, &.{ lhs, rhs }); - try func.lowerToStack(result); + const intrinsic = switch (op) { + inline .fmin, .fmax => |ct_op| switch (ty.floatBits(cg.target.*)) { + inline 16, 32, 64, 80, 128 => |bits| @field( + Mir.Intrinsic, + libcFloatPrefix(bits) ++ @tagName(ct_op) ++ libcFloatSuffix(bits), + ), + else => unreachable, + }, + }; + const result = try cg.callIntrinsic(intrinsic, &.{ ty.ip_index, ty.ip_index }, ty, &.{ lhs, rhs }); + try cg.lowerToStack(result); } else { // operands to select from - try func.lowerToStack(lhs); - try func.lowerToStack(rhs); - _ = try func.cmp(lhs, rhs, ty, if (op == .max) .gt else .lt); + try cg.lowerToStack(lhs); + try cg.lowerToStack(rhs); + _ = try cg.cmp(lhs, rhs, ty, cmp_op); // based on the result from comparison, return operand 0 or 1. - try func.addTag(.select); + try cg.addTag(.select); } - return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs }); } -fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data; +fn airMulAdd(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; + const bin_op = cg.air.extraData(Air.Bin, pl_op.payload).data; - const ty = func.typeOfIndex(inst); + const ty = cg.typeOfIndex(inst); if (ty.zigTypeTag(zcu) == .vector) { - return func.fail("TODO: `@mulAdd` for vectors", .{}); + return cg.fail("TODO: `@mulAdd` for vectors", .{}); } - const addend = try func.resolveInst(pl_op.operand); - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); + const addend = try cg.resolveInst(pl_op.operand); + const lhs = try cg.resolveInst(bin_op.lhs); + const rhs = try cg.resolveInst(bin_op.rhs); - const result = if (ty.floatBits(func.target.*) == 16) fl_result: { - const rhs_ext = try func.fpext(rhs, ty, Type.f32); - const lhs_ext = try func.fpext(lhs, ty, Type.f32); - const addend_ext = try func.fpext(addend, ty, Type.f32); + const result = if (ty.floatBits(cg.target.*) == 16) fl_result: { + const rhs_ext = try cg.fpext(rhs, ty, Type.f32); + const lhs_ext = try cg.fpext(lhs, ty, Type.f32); + const addend_ext = try cg.fpext(addend, ty, Type.f32); // call to compiler-rt `fn fmaf(f32, f32, f32) f32` - const result = try func.callIntrinsic( - "fmaf", + const result = try cg.callIntrinsic( + .fmaf, &.{ .f32_type, .f32_type, .f32_type }, Type.f32, &.{ rhs_ext, lhs_ext, addend_ext }, ); - break :fl_result try func.fptrunc(result, Type.f32, ty); + break :fl_result try cg.fptrunc(result, Type.f32, ty); } else result: { - const mul_result = try func.binOp(lhs, rhs, ty, .mul); - break :result try func.binOp(mul_result, addend, ty, .add); + const mul_result = try cg.binOp(lhs, rhs, ty, .mul); + break :result try cg.binOp(mul_result, addend, ty, .add); }; - return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand }); + return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand }); } -fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; 
+fn airClz(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const ty = func.typeOf(ty_op.operand); + const ty = cg.typeOf(ty_op.operand); if (ty.zigTypeTag(zcu) == .vector) { - return func.fail("TODO: `@clz` for vectors", .{}); + return cg.fail("TODO: `@clz` for vectors", .{}); } - const operand = try func.resolveInst(ty_op.operand); + const operand = try cg.resolveInst(ty_op.operand); const int_info = ty.intInfo(zcu); const wasm_bits = toWasmBits(int_info.bits) orelse { - return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits}); + return cg.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits}); }; switch (wasm_bits) { 32 => { - try func.emitWValue(operand); - try func.addTag(.i32_clz); + try cg.emitWValue(operand); + try cg.addTag(.i32_clz); }, 64 => { - try func.emitWValue(operand); - try func.addTag(.i64_clz); - try func.addTag(.i32_wrap_i64); + try cg.emitWValue(operand); + try cg.addTag(.i64_clz); + try cg.addTag(.i32_wrap_i64); }, 128 => { - var msb = try (try func.load(operand, Type.u64, 8)).toLocal(func, Type.u64); - defer msb.free(func); - - try func.emitWValue(msb); - try func.addTag(.i64_clz); - _ = try func.load(operand, Type.u64, 0); - try func.addTag(.i64_clz); - try func.emitWValue(.{ .imm64 = 64 }); - try func.addTag(.i64_add); - _ = try func.cmp(msb, .{ .imm64 = 0 }, Type.u64, .neq); - try func.addTag(.select); - try func.addTag(.i32_wrap_i64); + var msb = try (try cg.load(operand, Type.u64, 8)).toLocal(cg, Type.u64); + defer msb.free(cg); + + try cg.emitWValue(msb); + try cg.addTag(.i64_clz); + _ = try cg.load(operand, Type.u64, 0); + try cg.addTag(.i64_clz); + try cg.emitWValue(.{ .imm64 = 64 }); + try cg.addTag(.i64_add); + _ = try cg.cmp(msb, .{ .imm64 = 0 }, Type.u64, .neq); + try cg.addTag(.select); + try cg.addTag(.i32_wrap_i64); }, else => unreachable, } if (wasm_bits != int_info.bits) { - try func.emitWValue(.{ .imm32 = wasm_bits - int_info.bits }); - try func.addTag(.i32_sub); + try cg.emitWValue(.{ .imm32 = wasm_bits - int_info.bits }); + try cg.addTag(.i32_sub); } - return func.finishAir(inst, .stack, &.{ty_op.operand}); + return cg.finishAir(inst, .stack, &.{ty_op.operand}); } -fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airCtz(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const ty = func.typeOf(ty_op.operand); + const ty = cg.typeOf(ty_op.operand); if (ty.zigTypeTag(zcu) == .vector) { - return func.fail("TODO: `@ctz` for vectors", .{}); + return cg.fail("TODO: `@ctz` for vectors", .{}); } - const operand = try func.resolveInst(ty_op.operand); + const operand = try cg.resolveInst(ty_op.operand); const int_info = ty.intInfo(zcu); const wasm_bits = toWasmBits(int_info.bits) orelse { - return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits}); + return cg.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits}); }; switch (wasm_bits) { @@ -6425,131 +6305,108 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { if (wasm_bits != int_info.bits) { const val: u32 = @as(u32, 1) << @as(u5, @intCast(int_info.bits)); // leave value on the stack - _ = try func.binOp(operand, .{ .imm32 = val }, ty, .@"or"); - } else 
try func.emitWValue(operand); - try func.addTag(.i32_ctz); + _ = try cg.binOp(operand, .{ .imm32 = val }, ty, .@"or"); + } else try cg.emitWValue(operand); + try cg.addTag(.i32_ctz); }, 64 => { if (wasm_bits != int_info.bits) { const val: u64 = @as(u64, 1) << @as(u6, @intCast(int_info.bits)); // leave value on the stack - _ = try func.binOp(operand, .{ .imm64 = val }, ty, .@"or"); - } else try func.emitWValue(operand); - try func.addTag(.i64_ctz); - try func.addTag(.i32_wrap_i64); + _ = try cg.binOp(operand, .{ .imm64 = val }, ty, .@"or"); + } else try cg.emitWValue(operand); + try cg.addTag(.i64_ctz); + try cg.addTag(.i32_wrap_i64); }, 128 => { - var lsb = try (try func.load(operand, Type.u64, 0)).toLocal(func, Type.u64); - defer lsb.free(func); + var lsb = try (try cg.load(operand, Type.u64, 0)).toLocal(cg, Type.u64); + defer lsb.free(cg); - try func.emitWValue(lsb); - try func.addTag(.i64_ctz); - _ = try func.load(operand, Type.u64, 8); + try cg.emitWValue(lsb); + try cg.addTag(.i64_ctz); + _ = try cg.load(operand, Type.u64, 8); if (wasm_bits != int_info.bits) { - try func.addImm64(@as(u64, 1) << @as(u6, @intCast(int_info.bits - 64))); - try func.addTag(.i64_or); + try cg.addImm64(@as(u64, 1) << @as(u6, @intCast(int_info.bits - 64))); + try cg.addTag(.i64_or); } - try func.addTag(.i64_ctz); - try func.addImm64(64); + try cg.addTag(.i64_ctz); + try cg.addImm64(64); if (wasm_bits != int_info.bits) { - try func.addTag(.i64_or); + try cg.addTag(.i64_or); } else { - try func.addTag(.i64_add); + try cg.addTag(.i64_add); } - _ = try func.cmp(lsb, .{ .imm64 = 0 }, Type.u64, .neq); - try func.addTag(.select); - try func.addTag(.i32_wrap_i64); + _ = try cg.cmp(lsb, .{ .imm64 = 0 }, Type.u64, .neq); + try cg.addTag(.select); + try cg.addTag(.i32_wrap_i64); }, else => unreachable, } - return func.finishAir(inst, .stack, &.{ty_op.operand}); + return cg.finishAir(inst, .stack, &.{ty_op.operand}); } -fn airDbgStmt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{}); - - const dbg_stmt = func.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt; - try func.addInst(.{ .tag = .dbg_line, .data = .{ - .payload = try func.addExtra(Mir.DbgLineColumn{ +fn airDbgStmt(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const dbg_stmt = cg.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt; + try cg.addInst(.{ .tag = .dbg_line, .data = .{ + .payload = try cg.addExtra(Mir.DbgLineColumn{ .line = dbg_stmt.line, .column = dbg_stmt.column, }), } }); - return func.finishAir(inst, .none, &.{}); + return cg.finishAir(inst, .none, &.{}); } -fn airDbgInlineBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = func.air.extraData(Air.DbgInlineBlock, ty_pl.payload); +fn airDbgInlineBlock(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = cg.air.extraData(Air.DbgInlineBlock, ty_pl.payload); // TODO - try func.lowerBlock(inst, ty_pl.ty.toType(), @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len])); + try cg.lowerBlock(inst, ty_pl.ty.toType(), @ptrCast(cg.air.extra[extra.end..][0..extra.data.body_len])); } fn airDbgVar( - func: *CodeGen, + cg: *CodeGen, inst: Air.Inst.Index, local_tag: link.File.Dwarf.WipNav.LocalTag, is_ptr: bool, ) InnerError!void { _ = is_ptr; - if (func.debug_output != .dwarf) return func.finishAir(inst, 
.none, &.{}); - - const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const ty = func.typeOf(pl_op.operand); - const operand = try func.resolveInst(pl_op.operand); - - log.debug("airDbgVar: %{d}: {}, {}", .{ inst, ty.fmtDebug(), operand }); - - const name: Air.NullTerminatedString = @enumFromInt(pl_op.payload); - log.debug(" var name = ({s})", .{name.toSlice(func.air)}); - - const loc: link.File.Dwarf.Loc = switch (operand) { - .local => |local| .{ .wasm_ext = .{ .local = local.value } }, - else => blk: { - log.debug("TODO generate debug info for {}", .{operand}); - break :blk .empty; - }, - }; - try func.debug_output.dwarf.genLocalDebugInfo(local_tag, name.toSlice(func.air), ty, loc); - - return func.finishAir(inst, .none, &.{}); + _ = local_tag; + return cg.finishAir(inst, .none, &.{}); } -fn airTry(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const err_union = try func.resolveInst(pl_op.operand); - const extra = func.air.extraData(Air.Try, pl_op.payload); - const body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len]); - const err_union_ty = func.typeOf(pl_op.operand); - const result = try lowerTry(func, inst, err_union, body, err_union_ty, false); - return func.finishAir(inst, result, &.{pl_op.operand}); +fn airTry(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; + const err_union = try cg.resolveInst(pl_op.operand); + const extra = cg.air.extraData(Air.Try, pl_op.payload); + const body: []const Air.Inst.Index = @ptrCast(cg.air.extra[extra.end..][0..extra.data.body_len]); + const err_union_ty = cg.typeOf(pl_op.operand); + const result = try lowerTry(cg, inst, err_union, body, err_union_ty, false); + return cg.finishAir(inst, result, &.{pl_op.operand}); } -fn airTryPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = func.air.extraData(Air.TryPtr, ty_pl.payload); - const err_union_ptr = try func.resolveInst(extra.data.ptr); - const body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len]); - const err_union_ty = func.typeOf(extra.data.ptr).childType(zcu); - const result = try lowerTry(func, inst, err_union_ptr, body, err_union_ty, true); - return func.finishAir(inst, result, &.{extra.data.ptr}); +fn airTryPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = cg.air.extraData(Air.TryPtr, ty_pl.payload); + const err_union_ptr = try cg.resolveInst(extra.data.ptr); + const body: []const Air.Inst.Index = @ptrCast(cg.air.extra[extra.end..][0..extra.data.body_len]); + const err_union_ty = cg.typeOf(extra.data.ptr).childType(zcu); + const result = try lowerTry(cg, inst, err_union_ptr, body, err_union_ty, true); + return cg.finishAir(inst, result, &.{extra.data.ptr}); } fn lowerTry( - func: *CodeGen, + cg: *CodeGen, inst: Air.Inst.Index, err_union: WValue, body: []const Air.Inst.Index, err_union_ty: Type, operand_is_ptr: bool, ) InnerError!WValue { - const pt = func.pt; - const zcu = pt.zcu; + const zcu = cg.pt.zcu; if (operand_is_ptr) { - return func.fail("TODO: lowerTry for pointers", .{}); + return cg.fail("TODO: lowerTry for pointers", .{}); } const pl_ty = 
err_union_ty.errorUnionPayload(zcu); @@ -6557,29 +6414,29 @@ fn lowerTry( if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) { // Block we can jump out of when error is not set - try func.startBlock(.block, wasm.block_empty); + try cg.startBlock(.block, .empty); // check if the error tag is set for the error union. - try func.emitWValue(err_union); + try cg.emitWValue(err_union); if (pl_has_bits) { const err_offset: u32 = @intCast(errUnionErrorOffset(pl_ty, zcu)); - try func.addMemArg(.i32_load16_u, .{ + try cg.addMemArg(.i32_load16_u, .{ .offset = err_union.offset() + err_offset, .alignment = @intCast(Type.anyerror.abiAlignment(zcu).toByteUnits().?), }); } - try func.addTag(.i32_eqz); - try func.addLabel(.br_if, 0); // jump out of block when error is '0' + try cg.addTag(.i32_eqz); + try cg.addLabel(.br_if, 0); // jump out of block when error is '0' - const liveness = func.liveness.getCondBr(inst); - try func.branches.append(func.gpa, .{}); - try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.else_deaths.len + liveness.then_deaths.len); + const liveness = cg.liveness.getCondBr(inst); + try cg.branches.append(cg.gpa, .{}); + try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, liveness.else_deaths.len + liveness.then_deaths.len); defer { - var branch = func.branches.pop(); - branch.deinit(func.gpa); + var branch = cg.branches.pop(); + branch.deinit(cg.gpa); } - try func.genBody(body); - try func.endBlock(); + try cg.genBody(body); + try cg.endBlock(); } // if we reach here it means error was not set, and we want the payload @@ -6588,39 +6445,38 @@ fn lowerTry( } const pl_offset: u32 = @intCast(errUnionPayloadOffset(pl_ty, zcu)); - if (isByRef(pl_ty, pt, func.target.*)) { - return buildPointerOffset(func, err_union, pl_offset, .new); + if (isByRef(pl_ty, zcu, cg.target)) { + return buildPointerOffset(cg, err_union, pl_offset, .new); } - const payload = try func.load(err_union, pl_ty, pl_offset); - return payload.toLocal(func, pl_ty); + const payload = try cg.load(err_union, pl_ty, pl_offset); + return payload.toLocal(cg, pl_ty); } -fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airByteSwap(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const ty = func.typeOfIndex(inst); - const operand = try func.resolveInst(ty_op.operand); + const ty = cg.typeOfIndex(inst); + const operand = try cg.resolveInst(ty_op.operand); if (ty.zigTypeTag(zcu) == .vector) { - return func.fail("TODO: @byteSwap for vectors", .{}); + return cg.fail("TODO: @byteSwap for vectors", .{}); } const int_info = ty.intInfo(zcu); const wasm_bits = toWasmBits(int_info.bits) orelse { - return func.fail("TODO: @byteSwap for integers with bitsize {d}", .{int_info.bits}); + return cg.fail("TODO: @byteSwap for integers with bitsize {d}", .{int_info.bits}); }; // bytes are no-op if (int_info.bits == 8) { - return func.finishAir(inst, func.reuseOperand(ty_op.operand, operand), &.{ty_op.operand}); + return cg.finishAir(inst, cg.reuseOperand(ty_op.operand, operand), &.{ty_op.operand}); } const result = result: { switch (wasm_bits) { 32 => { - const intrin_ret = try func.callIntrinsic( - "__bswapsi2", + const intrin_ret = try cg.callIntrinsic( + .__bswapsi2, &.{.u32_type}, Type.u32, &.{operand}, @@ -6628,11 +6484,11 @@ fn airByteSwap(func: *CodeGen, 
inst: Air.Inst.Index) InnerError!void { break :result if (int_info.bits == 32) intrin_ret else - try func.binOp(intrin_ret, .{ .imm32 = 32 - int_info.bits }, ty, .shr); + try cg.binOp(intrin_ret, .{ .imm32 = 32 - int_info.bits }, ty, .shr); }, 64 => { - const intrin_ret = try func.callIntrinsic( - "__bswapdi2", + const intrin_ret = try cg.callIntrinsic( + .__bswapdi2, &.{.u64_type}, Type.u64, &.{operand}, @@ -6640,61 +6496,60 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result if (int_info.bits == 64) intrin_ret else - try func.binOp(intrin_ret, .{ .imm64 = 64 - int_info.bits }, ty, .shr); + try cg.binOp(intrin_ret, .{ .imm64 = 64 - int_info.bits }, ty, .shr); }, - else => return func.fail("TODO: @byteSwap for integers with bitsize {d}", .{int_info.bits}), + else => return cg.fail("TODO: @byteSwap for integers with bitsize {d}", .{int_info.bits}), } }; - return func.finishAir(inst, result, &.{ty_op.operand}); + return cg.finishAir(inst, result, &.{ty_op.operand}); } -fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; +fn airDiv(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const ty = func.typeOfIndex(inst); - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); + const ty = cg.typeOfIndex(inst); + const lhs = try cg.resolveInst(bin_op.lhs); + const rhs = try cg.resolveInst(bin_op.rhs); - const result = try func.binOp(lhs, rhs, ty, .div); - return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); + const result = try cg.binOp(lhs, rhs, ty, .div); + return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } -fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; +fn airDivTrunc(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const ty = func.typeOfIndex(inst); - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); + const ty = cg.typeOfIndex(inst); + const lhs = try cg.resolveInst(bin_op.lhs); + const rhs = try cg.resolveInst(bin_op.rhs); - const div_result = try func.binOp(lhs, rhs, ty, .div); + const div_result = try cg.binOp(lhs, rhs, ty, .div); if (ty.isAnyFloat()) { - const trunc_result = try func.floatOp(.trunc, ty, &.{div_result}); - return func.finishAir(inst, trunc_result, &.{ bin_op.lhs, bin_op.rhs }); + const trunc_result = try cg.floatOp(.trunc, ty, &.{div_result}); + return cg.finishAir(inst, trunc_result, &.{ bin_op.lhs, bin_op.rhs }); } - return func.finishAir(inst, div_result, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, div_result, &.{ bin_op.lhs, bin_op.rhs }); } -fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; +fn airDivFloor(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const pt = func.pt; - const zcu = pt.zcu; - const ty = func.typeOfIndex(inst); - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); + const zcu = cg.pt.zcu; + const ty = cg.typeOfIndex(inst); + const lhs = try cg.resolveInst(bin_op.lhs); + const rhs = try 
cg.resolveInst(bin_op.rhs); if (ty.isUnsignedInt(zcu)) { - _ = try func.binOp(lhs, rhs, ty, .div); + _ = try cg.binOp(lhs, rhs, ty, .div); } else if (ty.isSignedInt(zcu)) { const int_bits = ty.intInfo(zcu).bits; const wasm_bits = toWasmBits(int_bits) orelse { - return func.fail("TODO: `@divFloor` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits}); + return cg.fail("TODO: `@divFloor` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits}); }; if (wasm_bits > 64) { - return func.fail("TODO: `@divFloor` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits}); + return cg.fail("TODO: `@divFloor` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits}); } const zero: WValue = switch (wasm_bits) { @@ -6704,108 +6559,108 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }; // tee leaves the value on the stack and stores it in a local. - const quotient = try func.allocLocal(ty); - _ = try func.binOp(lhs, rhs, ty, .div); - try func.addLabel(.local_tee, quotient.local.value); + const quotient = try cg.allocLocal(ty); + _ = try cg.binOp(lhs, rhs, ty, .div); + try cg.addLocal(.local_tee, quotient.local.value); // select takes a 32 bit value as the condition, so in the 64 bit case we use eqz to narrow // the 64 bit value we want to use as the condition to 32 bits. // This also inverts the condition (non 0 => 0, 0 => 1), so we put the adjusted and // non-adjusted quotients on the stack in the opposite order for 32 vs 64 bits. if (wasm_bits == 64) { - try func.emitWValue(quotient); + try cg.emitWValue(quotient); } // 0 if the signs of rhs_wasm and lhs_wasm are the same, 1 otherwise. - _ = try func.binOp(lhs, rhs, ty, .xor); - _ = try func.cmp(.stack, zero, ty, .lt); + _ = try cg.binOp(lhs, rhs, ty, .xor); + _ = try cg.cmp(.stack, zero, ty, .lt); switch (wasm_bits) { 32 => { - try func.addTag(.i32_sub); - try func.emitWValue(quotient); + try cg.addTag(.i32_sub); + try cg.emitWValue(quotient); }, 64 => { - try func.addTag(.i64_extend_i32_u); - try func.addTag(.i64_sub); + try cg.addTag(.i64_extend_i32_u); + try cg.addTag(.i64_sub); }, else => unreachable, } - _ = try func.binOp(lhs, rhs, ty, .rem); + _ = try cg.binOp(lhs, rhs, ty, .rem); if (wasm_bits == 64) { - try func.addTag(.i64_eqz); + try cg.addTag(.i64_eqz); } - try func.addTag(.select); + try cg.addTag(.select); // We need to zero the high bits because N bit comparisons consider all 32 or 64 bits, and // expect all but the lowest N bits to be 0. // TODO: Should we be zeroing the high bits here or should we be ignoring the high bits // when performing comparisons? 
if (int_bits != wasm_bits) { - _ = try func.wrapOperand(.stack, ty); + _ = try cg.wrapOperand(.stack, ty); } } else { - const float_bits = ty.floatBits(func.target.*); + const float_bits = ty.floatBits(cg.target.*); if (float_bits > 64) { - return func.fail("TODO: `@divFloor` for floats with bitsize: {d}", .{float_bits}); + return cg.fail("TODO: `@divFloor` for floats with bitsize: {d}", .{float_bits}); } const is_f16 = float_bits == 16; - const lhs_wasm = if (is_f16) try func.fpext(lhs, Type.f16, Type.f32) else lhs; - const rhs_wasm = if (is_f16) try func.fpext(rhs, Type.f16, Type.f32) else rhs; + const lhs_wasm = if (is_f16) try cg.fpext(lhs, Type.f16, Type.f32) else lhs; + const rhs_wasm = if (is_f16) try cg.fpext(rhs, Type.f16, Type.f32) else rhs; - try func.emitWValue(lhs_wasm); - try func.emitWValue(rhs_wasm); + try cg.emitWValue(lhs_wasm); + try cg.emitWValue(rhs_wasm); switch (float_bits) { 16, 32 => { - try func.addTag(.f32_div); - try func.addTag(.f32_floor); + try cg.addTag(.f32_div); + try cg.addTag(.f32_floor); }, 64 => { - try func.addTag(.f64_div); - try func.addTag(.f64_floor); + try cg.addTag(.f64_div); + try cg.addTag(.f64_floor); }, else => unreachable, } if (is_f16) { - _ = try func.fptrunc(.stack, Type.f32, Type.f16); + _ = try cg.fptrunc(.stack, Type.f32, Type.f16); } } - return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs }); } -fn airRem(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; +fn airRem(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const ty = func.typeOfIndex(inst); - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); + const ty = cg.typeOfIndex(inst); + const lhs = try cg.resolveInst(bin_op.lhs); + const rhs = try cg.resolveInst(bin_op.rhs); - const result = try func.binOp(lhs, rhs, ty, .rem); + const result = try cg.binOp(lhs, rhs, ty, .rem); - return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } /// Remainder after floor division, defined by: /// @divFloor(a, b) * b + @mod(a, b) = a -fn airMod(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; +fn airMod(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const pt = func.pt; + const pt = cg.pt; const zcu = pt.zcu; - const ty = func.typeOfIndex(inst); - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); + const ty = cg.typeOfIndex(inst); + const lhs = try cg.resolveInst(bin_op.lhs); + const rhs = try cg.resolveInst(bin_op.rhs); const result = result: { if (ty.isUnsignedInt(zcu)) { - break :result try func.binOp(lhs, rhs, ty, .rem); + break :result try cg.binOp(lhs, rhs, ty, .rem); } if (ty.isSignedInt(zcu)) { // The wasm rem instruction gives the remainder after truncating division (rounding towards @@ -6814,153 +6669,152 @@ fn airMod(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // @mod(a, b) = @rem(@rem(a, b) + b, b) const int_bits = ty.intInfo(zcu).bits; const wasm_bits = toWasmBits(int_bits) orelse { - return func.fail("TODO: `@mod` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits}); + 
return cg.fail("TODO: `@mod` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits}); }; if (wasm_bits > 64) { - return func.fail("TODO: `@mod` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits}); + return cg.fail("TODO: `@mod` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits}); } - _ = try func.binOp(lhs, rhs, ty, .rem); - _ = try func.binOp(.stack, rhs, ty, .add); - break :result try func.binOp(.stack, rhs, ty, .rem); + _ = try cg.binOp(lhs, rhs, ty, .rem); + _ = try cg.binOp(.stack, rhs, ty, .add); + break :result try cg.binOp(.stack, rhs, ty, .rem); } if (ty.isAnyFloat()) { - const rem = try func.binOp(lhs, rhs, ty, .rem); - const add = try func.binOp(rem, rhs, ty, .add); - break :result try func.binOp(add, rhs, ty, .rem); + const rem = try cg.binOp(lhs, rhs, ty, .rem); + const add = try cg.binOp(rem, rhs, ty, .add); + break :result try cg.binOp(add, rhs, ty, .rem); } - return func.fail("TODO: @mod for {}", .{ty.fmt(pt)}); + return cg.fail("TODO: @mod for {}", .{ty.fmt(pt)}); }; - return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } -fn airSatMul(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; +fn airSatMul(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const pt = func.pt; + const pt = cg.pt; const zcu = pt.zcu; - const ty = func.typeOfIndex(inst); + const ty = cg.typeOfIndex(inst); const int_info = ty.intInfo(zcu); const is_signed = int_info.signedness == .signed; - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); + const lhs = try cg.resolveInst(bin_op.lhs); + const rhs = try cg.resolveInst(bin_op.rhs); const wasm_bits = toWasmBits(int_info.bits) orelse { - return func.fail("TODO: mul_sat for {}", .{ty.fmt(pt)}); + return cg.fail("TODO: mul_sat for {}", .{ty.fmt(pt)}); }; switch (wasm_bits) { 32 => { const upcast_ty: Type = if (is_signed) Type.i64 else Type.u64; - const lhs_up = try func.intcast(lhs, ty, upcast_ty); - const rhs_up = try func.intcast(rhs, ty, upcast_ty); - var mul_res = try (try func.binOp(lhs_up, rhs_up, upcast_ty, .mul)).toLocal(func, upcast_ty); - defer mul_res.free(func); + const lhs_up = try cg.intcast(lhs, ty, upcast_ty); + const rhs_up = try cg.intcast(rhs, ty, upcast_ty); + var mul_res = try (try cg.binOp(lhs_up, rhs_up, upcast_ty, .mul)).toLocal(cg, upcast_ty); + defer mul_res.free(cg); if (is_signed) { const imm_max: WValue = .{ .imm64 = ~@as(u64, 0) >> @intCast(64 - (int_info.bits - 1)) }; - try func.emitWValue(mul_res); - try func.emitWValue(imm_max); - _ = try func.cmp(mul_res, imm_max, upcast_ty, .lt); - try func.addTag(.select); + try cg.emitWValue(mul_res); + try cg.emitWValue(imm_max); + _ = try cg.cmp(mul_res, imm_max, upcast_ty, .lt); + try cg.addTag(.select); - var tmp = try func.allocLocal(upcast_ty); - defer tmp.free(func); - try func.addLabel(.local_set, tmp.local.value); + var tmp = try cg.allocLocal(upcast_ty); + defer tmp.free(cg); + try cg.addLocal(.local_set, tmp.local.value); const imm_min: WValue = .{ .imm64 = ~@as(u64, 0) << @intCast(int_info.bits - 1) }; - try func.emitWValue(tmp); - try func.emitWValue(imm_min); - _ = try func.cmp(tmp, imm_min, upcast_ty, .gt); - try func.addTag(.select); + try cg.emitWValue(tmp); + try cg.emitWValue(imm_min); + _ = try cg.cmp(tmp, 
imm_min, upcast_ty, .gt); + try cg.addTag(.select); } else { const imm_max: WValue = .{ .imm64 = ~@as(u64, 0) >> @intCast(64 - int_info.bits) }; - try func.emitWValue(mul_res); - try func.emitWValue(imm_max); - _ = try func.cmp(mul_res, imm_max, upcast_ty, .lt); - try func.addTag(.select); + try cg.emitWValue(mul_res); + try cg.emitWValue(imm_max); + _ = try cg.cmp(mul_res, imm_max, upcast_ty, .lt); + try cg.addTag(.select); } - try func.addTag(.i32_wrap_i64); + try cg.addTag(.i32_wrap_i64); }, 64 => { if (!(int_info.bits == 64 and int_info.signedness == .signed)) { - return func.fail("TODO: mul_sat for {}", .{ty.fmt(pt)}); + return cg.fail("TODO: mul_sat for {}", .{ty.fmt(pt)}); } - const overflow_ret = try func.allocStack(Type.i32); - _ = try func.callIntrinsic( - "__mulodi4", + const overflow_ret = try cg.allocStack(Type.i32); + _ = try cg.callIntrinsic( + .__mulodi4, &[_]InternPool.Index{ .i64_type, .i64_type, .usize_type }, Type.i64, &.{ lhs, rhs, overflow_ret }, ); - const xor = try func.binOp(lhs, rhs, Type.i64, .xor); - const sign_v = try func.binOp(xor, .{ .imm64 = 63 }, Type.i64, .shr); - _ = try func.binOp(sign_v, .{ .imm64 = ~@as(u63, 0) }, Type.i64, .xor); - _ = try func.load(overflow_ret, Type.i32, 0); - try func.addTag(.i32_eqz); - try func.addTag(.select); + const xor = try cg.binOp(lhs, rhs, Type.i64, .xor); + const sign_v = try cg.binOp(xor, .{ .imm64 = 63 }, Type.i64, .shr); + _ = try cg.binOp(sign_v, .{ .imm64 = ~@as(u63, 0) }, Type.i64, .xor); + _ = try cg.load(overflow_ret, Type.i32, 0); + try cg.addTag(.i32_eqz); + try cg.addTag(.select); }, 128 => { if (!(int_info.bits == 128 and int_info.signedness == .signed)) { - return func.fail("TODO: mul_sat for {}", .{ty.fmt(pt)}); + return cg.fail("TODO: mul_sat for {}", .{ty.fmt(pt)}); } - const overflow_ret = try func.allocStack(Type.i32); - const ret = try func.callIntrinsic( - "__muloti4", + const overflow_ret = try cg.allocStack(Type.i32); + const ret = try cg.callIntrinsic( + .__muloti4, &[_]InternPool.Index{ .i128_type, .i128_type, .usize_type }, Type.i128, &.{ lhs, rhs, overflow_ret }, ); - try func.lowerToStack(ret); - const xor = try func.binOp(lhs, rhs, Type.i128, .xor); - const sign_v = try func.binOp(xor, .{ .imm32 = 127 }, Type.i128, .shr); + try cg.lowerToStack(ret); + const xor = try cg.binOp(lhs, rhs, Type.i128, .xor); + const sign_v = try cg.binOp(xor, .{ .imm32 = 127 }, Type.i128, .shr); // xor ~@as(u127, 0) - try func.emitWValue(sign_v); - const lsb = try func.load(sign_v, Type.u64, 0); - _ = try func.binOp(lsb, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor); - try func.store(.stack, .stack, Type.u64, sign_v.offset()); - try func.emitWValue(sign_v); - const msb = try func.load(sign_v, Type.u64, 8); - _ = try func.binOp(msb, .{ .imm64 = ~@as(u63, 0) }, Type.u64, .xor); - try func.store(.stack, .stack, Type.u64, sign_v.offset() + 8); - - try func.lowerToStack(sign_v); - _ = try func.load(overflow_ret, Type.i32, 0); - try func.addTag(.i32_eqz); - try func.addTag(.select); + try cg.emitWValue(sign_v); + const lsb = try cg.load(sign_v, Type.u64, 0); + _ = try cg.binOp(lsb, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor); + try cg.store(.stack, .stack, Type.u64, sign_v.offset()); + try cg.emitWValue(sign_v); + const msb = try cg.load(sign_v, Type.u64, 8); + _ = try cg.binOp(msb, .{ .imm64 = ~@as(u63, 0) }, Type.u64, .xor); + try cg.store(.stack, .stack, Type.u64, sign_v.offset() + 8); + + try cg.lowerToStack(sign_v); + _ = try cg.load(overflow_ret, Type.i32, 0); + try cg.addTag(.i32_eqz); + try cg.addTag(.select); }, 
else => unreachable, } - return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs }); } -fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { +fn airSatBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { assert(op == .add or op == .sub); - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const pt = func.pt; - const zcu = pt.zcu; - const ty = func.typeOfIndex(inst); - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); + const zcu = cg.pt.zcu; + const ty = cg.typeOfIndex(inst); + const lhs = try cg.resolveInst(bin_op.lhs); + const rhs = try cg.resolveInst(bin_op.rhs); const int_info = ty.intInfo(zcu); const is_signed = int_info.signedness == .signed; if (int_info.bits > 64) { - return func.fail("TODO: saturating arithmetic for integers with bitsize '{d}'", .{int_info.bits}); + return cg.fail("TODO: saturating arithmetic for integers with bitsize '{d}'", .{int_info.bits}); } if (is_signed) { - const result = try signedSat(func, lhs, rhs, ty, op); - return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); + const result = try signedSat(cg, lhs, rhs, ty, op); + return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } const wasm_bits = toWasmBits(int_info.bits).?; - var bin_result = try (try func.binOp(lhs, rhs, ty, op)).toLocal(func, ty); - defer bin_result.free(func); + var bin_result = try (try cg.binOp(lhs, rhs, ty, op)).toLocal(cg, ty); + defer bin_result.free(cg); if (wasm_bits != int_info.bits and op == .add) { const val: u64 = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(int_info.bits))) - 1)); const imm_val: WValue = switch (wasm_bits) { @@ -6969,25 +6823,25 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { else => unreachable, }; - try func.emitWValue(bin_result); - try func.emitWValue(imm_val); - _ = try func.cmp(bin_result, imm_val, ty, .lt); + try cg.emitWValue(bin_result); + try cg.emitWValue(imm_val); + _ = try cg.cmp(bin_result, imm_val, ty, .lt); } else { switch (wasm_bits) { - 32 => try func.addImm32(if (op == .add) std.math.maxInt(u32) else 0), - 64 => try func.addImm64(if (op == .add) std.math.maxInt(u64) else 0), + 32 => try cg.addImm32(if (op == .add) std.math.maxInt(u32) else 0), + 64 => try cg.addImm64(if (op == .add) std.math.maxInt(u64) else 0), else => unreachable, } - try func.emitWValue(bin_result); - _ = try func.cmp(bin_result, lhs, ty, if (op == .add) .lt else .gt); + try cg.emitWValue(bin_result); + _ = try cg.cmp(bin_result, lhs, ty, if (op == .add) .lt else .gt); } - try func.addTag(.select); - return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs }); + try cg.addTag(.select); + return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs }); } -fn signedSat(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue { - const pt = func.pt; +fn signedSat(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue { + const pt = cg.pt; const zcu = pt.zcu; const int_info = ty.intInfo(zcu); const wasm_bits = toWasmBits(int_info.bits).?; @@ -7007,92 +6861,92 @@ fn signedSat(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr else => unreachable, }; - var bin_result = try (try func.binOp(lhs, rhs, ext_ty, op)).toLocal(func, ext_ty); + var bin_result = try (try cg.binOp(lhs, rhs, ext_ty, 
op)).toLocal(cg, ext_ty); if (!is_wasm_bits) { - defer bin_result.free(func); // not returned in this branch - try func.emitWValue(bin_result); - try func.emitWValue(max_wvalue); - _ = try func.cmp(bin_result, max_wvalue, ext_ty, .lt); - try func.addTag(.select); - try func.addLabel(.local_set, bin_result.local.value); // re-use local - - try func.emitWValue(bin_result); - try func.emitWValue(min_wvalue); - _ = try func.cmp(bin_result, min_wvalue, ext_ty, .gt); - try func.addTag(.select); - try func.addLabel(.local_set, bin_result.local.value); // re-use local - return (try func.wrapOperand(bin_result, ty)).toLocal(func, ty); + defer bin_result.free(cg); // not returned in this branch + try cg.emitWValue(bin_result); + try cg.emitWValue(max_wvalue); + _ = try cg.cmp(bin_result, max_wvalue, ext_ty, .lt); + try cg.addTag(.select); + try cg.addLocal(.local_set, bin_result.local.value); // re-use local + + try cg.emitWValue(bin_result); + try cg.emitWValue(min_wvalue); + _ = try cg.cmp(bin_result, min_wvalue, ext_ty, .gt); + try cg.addTag(.select); + try cg.addLocal(.local_set, bin_result.local.value); // re-use local + return (try cg.wrapOperand(bin_result, ty)).toLocal(cg, ty); } else { const zero: WValue = switch (wasm_bits) { 32 => .{ .imm32 = 0 }, 64 => .{ .imm64 = 0 }, else => unreachable, }; - try func.emitWValue(max_wvalue); - try func.emitWValue(min_wvalue); - _ = try func.cmp(bin_result, zero, ty, .lt); - try func.addTag(.select); - try func.emitWValue(bin_result); + try cg.emitWValue(max_wvalue); + try cg.emitWValue(min_wvalue); + _ = try cg.cmp(bin_result, zero, ty, .lt); + try cg.addTag(.select); + try cg.emitWValue(bin_result); // leave on stack - const cmp_zero_result = try func.cmp(rhs, zero, ty, if (op == .add) .lt else .gt); - const cmp_bin_result = try func.cmp(bin_result, lhs, ty, .lt); - _ = try func.binOp(cmp_zero_result, cmp_bin_result, Type.u32, .xor); // comparisons always return i32, so provide u32 as type to xor. - try func.addTag(.select); - try func.addLabel(.local_set, bin_result.local.value); // re-use local + const cmp_zero_result = try cg.cmp(rhs, zero, ty, if (op == .add) .lt else .gt); + const cmp_bin_result = try cg.cmp(bin_result, lhs, ty, .lt); + _ = try cg.binOp(cmp_zero_result, cmp_bin_result, Type.u32, .xor); // comparisons always return i32, so provide u32 as type to xor. 
+ try cg.addTag(.select); + try cg.addLocal(.local_set, bin_result.local.value); // re-use local return bin_result; } } -fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; +fn airShlSat(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const pt = func.pt; + const pt = cg.pt; const zcu = pt.zcu; - const ty = func.typeOfIndex(inst); + const ty = cg.typeOfIndex(inst); const int_info = ty.intInfo(zcu); const is_signed = int_info.signedness == .signed; if (int_info.bits > 64) { - return func.fail("TODO: Saturating shifting left for integers with bitsize '{d}'", .{int_info.bits}); + return cg.fail("TODO: Saturating shifting left for integers with bitsize '{d}'", .{int_info.bits}); } - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); + const lhs = try cg.resolveInst(bin_op.lhs); + const rhs = try cg.resolveInst(bin_op.rhs); const wasm_bits = toWasmBits(int_info.bits).?; - const result = try func.allocLocal(ty); + const result = try cg.allocLocal(ty); if (wasm_bits == int_info.bits) { - var shl = try (try func.binOp(lhs, rhs, ty, .shl)).toLocal(func, ty); - defer shl.free(func); - var shr = try (try func.binOp(shl, rhs, ty, .shr)).toLocal(func, ty); - defer shr.free(func); + var shl = try (try cg.binOp(lhs, rhs, ty, .shl)).toLocal(cg, ty); + defer shl.free(cg); + var shr = try (try cg.binOp(shl, rhs, ty, .shr)).toLocal(cg, ty); + defer shr.free(cg); switch (wasm_bits) { 32 => blk: { if (!is_signed) { - try func.addImm32(std.math.maxInt(u32)); + try cg.addImm32(std.math.maxInt(u32)); break :blk; } - try func.addImm32(@bitCast(@as(i32, std.math.minInt(i32)))); - try func.addImm32(@bitCast(@as(i32, std.math.maxInt(i32)))); - _ = try func.cmp(lhs, .{ .imm32 = 0 }, ty, .lt); - try func.addTag(.select); + try cg.addImm32(@bitCast(@as(i32, std.math.minInt(i32)))); + try cg.addImm32(@bitCast(@as(i32, std.math.maxInt(i32)))); + _ = try cg.cmp(lhs, .{ .imm32 = 0 }, ty, .lt); + try cg.addTag(.select); }, 64 => blk: { if (!is_signed) { - try func.addImm64(std.math.maxInt(u64)); + try cg.addImm64(std.math.maxInt(u64)); break :blk; } - try func.addImm64(@bitCast(@as(i64, std.math.minInt(i64)))); - try func.addImm64(@bitCast(@as(i64, std.math.maxInt(i64)))); - _ = try func.cmp(lhs, .{ .imm64 = 0 }, ty, .lt); - try func.addTag(.select); + try cg.addImm64(@bitCast(@as(i64, std.math.minInt(i64)))); + try cg.addImm64(@bitCast(@as(i64, std.math.maxInt(i64)))); + _ = try cg.cmp(lhs, .{ .imm64 = 0 }, ty, .lt); + try cg.addTag(.select); }, else => unreachable, } - try func.emitWValue(shl); - _ = try func.cmp(lhs, shr, ty, .neq); - try func.addTag(.select); - try func.addLabel(.local_set, result.local.value); + try cg.emitWValue(shl); + _ = try cg.cmp(lhs, shr, ty, .neq); + try cg.addTag(.select); + try cg.addLocal(.local_set, result.local.value); } else { const shift_size = wasm_bits - int_info.bits; const shift_value: WValue = switch (wasm_bits) { @@ -7102,50 +6956,50 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }; const ext_ty = try pt.intType(int_info.signedness, wasm_bits); - var shl_res = try (try func.binOp(lhs, shift_value, ext_ty, .shl)).toLocal(func, ext_ty); - defer shl_res.free(func); - var shl = try (try func.binOp(shl_res, rhs, ext_ty, .shl)).toLocal(func, ext_ty); - defer shl.free(func); - var shr = try (try func.binOp(shl, rhs, ext_ty, .shr)).toLocal(func, 
ext_ty); - defer shr.free(func); + var shl_res = try (try cg.binOp(lhs, shift_value, ext_ty, .shl)).toLocal(cg, ext_ty); + defer shl_res.free(cg); + var shl = try (try cg.binOp(shl_res, rhs, ext_ty, .shl)).toLocal(cg, ext_ty); + defer shl.free(cg); + var shr = try (try cg.binOp(shl, rhs, ext_ty, .shr)).toLocal(cg, ext_ty); + defer shr.free(cg); switch (wasm_bits) { 32 => blk: { if (!is_signed) { - try func.addImm32(std.math.maxInt(u32)); + try cg.addImm32(std.math.maxInt(u32)); break :blk; } - try func.addImm32(@bitCast(@as(i32, std.math.minInt(i32)))); - try func.addImm32(@bitCast(@as(i32, std.math.maxInt(i32)))); - _ = try func.cmp(shl_res, .{ .imm32 = 0 }, ext_ty, .lt); - try func.addTag(.select); + try cg.addImm32(@bitCast(@as(i32, std.math.minInt(i32)))); + try cg.addImm32(@bitCast(@as(i32, std.math.maxInt(i32)))); + _ = try cg.cmp(shl_res, .{ .imm32 = 0 }, ext_ty, .lt); + try cg.addTag(.select); }, 64 => blk: { if (!is_signed) { - try func.addImm64(std.math.maxInt(u64)); + try cg.addImm64(std.math.maxInt(u64)); break :blk; } - try func.addImm64(@bitCast(@as(i64, std.math.minInt(i64)))); - try func.addImm64(@bitCast(@as(i64, std.math.maxInt(i64)))); - _ = try func.cmp(shl_res, .{ .imm64 = 0 }, ext_ty, .lt); - try func.addTag(.select); + try cg.addImm64(@bitCast(@as(i64, std.math.minInt(i64)))); + try cg.addImm64(@bitCast(@as(i64, std.math.maxInt(i64)))); + _ = try cg.cmp(shl_res, .{ .imm64 = 0 }, ext_ty, .lt); + try cg.addTag(.select); }, else => unreachable, } - try func.emitWValue(shl); - _ = try func.cmp(shl_res, shr, ext_ty, .neq); - try func.addTag(.select); - try func.addLabel(.local_set, result.local.value); - var shift_result = try func.binOp(result, shift_value, ext_ty, .shr); + try cg.emitWValue(shl); + _ = try cg.cmp(shl_res, shr, ext_ty, .neq); + try cg.addTag(.select); + try cg.addLocal(.local_set, result.local.value); + var shift_result = try cg.binOp(result, shift_value, ext_ty, .shr); if (is_signed) { - shift_result = try func.wrapOperand(shift_result, ty); + shift_result = try cg.wrapOperand(shift_result, ty); } - try func.addLabel(.local_set, result.local.value); + try cg.addLocal(.local_set, result.local.value); } - return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } /// Calls a compiler-rt intrinsic by creating an undefined symbol, @@ -7155,31 +7009,23 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// passed as the first parameter. /// May leave the return value on the stack. 
fn callIntrinsic( - func: *CodeGen, - name: []const u8, + cg: *CodeGen, + intrinsic: Mir.Intrinsic, param_types: []const InternPool.Index, return_type: Type, args: []const WValue, ) InnerError!WValue { assert(param_types.len == args.len); - const symbol_index = func.bin_file.getGlobalSymbol(name, null) catch |err| { - return func.fail("Could not find or create global symbol '{s}'", .{@errorName(err)}); - }; + const zcu = cg.pt.zcu; // Always pass over C-ABI - const pt = func.pt; - const zcu = pt.zcu; - var func_type = try genFunctype(func.gpa, .{ .wasm_watc = .{} }, param_types, return_type, pt, func.target.*); - defer func_type.deinit(func.gpa); - const func_type_index = try func.bin_file.zig_object.?.putOrGetFuncType(func.gpa, func_type); - try func.bin_file.addOrUpdateImport(name, symbol_index, null, func_type_index); - const want_sret_param = firstParamSRet(.{ .wasm_watc = .{} }, return_type, pt, func.target.*); + const want_sret_param = firstParamSRet(.{ .wasm_watc = .{} }, return_type, zcu, cg.target); // if we want return as first param, we allocate a pointer to stack, // and emit it as our first argument const sret = if (want_sret_param) blk: { - const sret_local = try func.allocStack(return_type); - try func.lowerToStack(sret_local); + const sret_local = try cg.allocStack(return_type); + try cg.lowerToStack(sret_local); break :blk sret_local; } else .none; @@ -7187,16 +7033,15 @@ fn callIntrinsic( for (args, 0..) |arg, arg_i| { assert(!(want_sret_param and arg == .stack)); assert(Type.fromInterned(param_types[arg_i]).hasRuntimeBitsIgnoreComptime(zcu)); - try func.lowerArg(.{ .wasm_watc = .{} }, Type.fromInterned(param_types[arg_i]), arg); + try cg.lowerArg(.{ .wasm_watc = .{} }, Type.fromInterned(param_types[arg_i]), arg); } - // Actually call our intrinsic - try func.addLabel(.call, @intFromEnum(symbol_index)); + try cg.addInst(.{ .tag = .call_intrinsic, .data = .{ .intrinsic = intrinsic } }); if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) { return .none; } else if (return_type.isNoReturn(zcu)) { - try func.addTag(.@"unreachable"); + try cg.addTag(.@"unreachable"); return .none; } else if (want_sret_param) { return sret; @@ -7205,194 +7050,30 @@ fn callIntrinsic( } } -fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const operand = try func.resolveInst(un_op); - const enum_ty = func.typeOf(un_op); - - const func_sym_index = try func.getTagNameFunction(enum_ty); - - const result_ptr = try func.allocStack(func.typeOfIndex(inst)); - try func.lowerToStack(result_ptr); - try func.emitWValue(operand); - try func.addLabel(.call, func_sym_index); - - return func.finishAir(inst, result_ptr, &.{un_op}); -} - -fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { - const pt = func.pt; - const zcu = pt.zcu; - const ip = &zcu.intern_pool; - - var arena_allocator = std.heap.ArenaAllocator.init(func.gpa); - defer arena_allocator.deinit(); - const arena = arena_allocator.allocator(); - - const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{}", .{ip.loadEnumType(enum_ty.toIntern()).name.fmt(ip)}); - - // check if we already generated code for this. 
- if (func.bin_file.findGlobalSymbol(func_name)) |loc| { - return @intFromEnum(loc.index); - } - - const int_tag_ty = enum_ty.intTagType(zcu); - - if (int_tag_ty.bitSize(zcu) > 64) { - return func.fail("TODO: Implement @tagName for enums with tag size larger than 64 bits", .{}); - } - - var relocs = std.ArrayList(link.File.Wasm.Relocation).init(func.gpa); - defer relocs.deinit(); - - var body_list = std.ArrayList(u8).init(func.gpa); - defer body_list.deinit(); - var writer = body_list.writer(); - - // The locals of the function body (always 0) - try leb.writeUleb128(writer, @as(u32, 0)); - - // outer block - try writer.writeByte(std.wasm.opcode(.block)); - try writer.writeByte(std.wasm.block_empty); - - // TODO: Make switch implementation generic so we can use a jump table for this when the tags are not sparse. - // generate an if-else chain for each tag value as well as constant. - const tag_names = enum_ty.enumFields(zcu); - for (0..tag_names.len) |tag_index| { - const tag_name = tag_names.get(ip)[tag_index]; - const tag_name_len = tag_name.length(ip); - // for each tag name, create an unnamed const, - // and then get a pointer to its value. - const name_ty = try pt.arrayType(.{ - .len = tag_name_len, - .child = .u8_type, - .sentinel = .zero_u8, - }); - const name_val = try pt.intern(.{ .aggregate = .{ - .ty = name_ty.toIntern(), - .storage = .{ .bytes = tag_name.toString() }, - } }); - const tag_sym_index = switch (try func.bin_file.lowerUav(pt, name_val, .none, func.src_loc)) { - .mcv => |mcv| mcv.load_symbol, - .fail => |err_msg| { - func.err_msg = err_msg; - return error.CodegenFail; - }, - }; - - // block for this if case - try writer.writeByte(std.wasm.opcode(.block)); - try writer.writeByte(std.wasm.block_empty); - - // get actual tag value (stored in 2nd parameter); - try writer.writeByte(std.wasm.opcode(.local_get)); - try leb.writeUleb128(writer, @as(u32, 1)); - - const tag_val = try pt.enumValueFieldIndex(enum_ty, @intCast(tag_index)); - const tag_value = try func.lowerConstant(tag_val, enum_ty); - - switch (tag_value) { - .imm32 => |value| { - try writer.writeByte(std.wasm.opcode(.i32_const)); - try leb.writeIleb128(writer, @as(i32, @bitCast(value))); - try writer.writeByte(std.wasm.opcode(.i32_ne)); - }, - .imm64 => |value| { - try writer.writeByte(std.wasm.opcode(.i64_const)); - try leb.writeIleb128(writer, @as(i64, @bitCast(value))); - try writer.writeByte(std.wasm.opcode(.i64_ne)); - }, - else => unreachable, - } - // if they're not equal, break out of current branch - try writer.writeByte(std.wasm.opcode(.br_if)); - try leb.writeUleb128(writer, @as(u32, 0)); - - // store the address of the tagname in the pointer field of the slice - // get the address twice so we can also store the length. 
- try writer.writeByte(std.wasm.opcode(.local_get)); - try leb.writeUleb128(writer, @as(u32, 0)); - try writer.writeByte(std.wasm.opcode(.local_get)); - try leb.writeUleb128(writer, @as(u32, 0)); - - // get address of tagname and emit a relocation to it - if (func.arch() == .wasm32) { - const encoded_alignment = @ctz(@as(u32, 4)); - try writer.writeByte(std.wasm.opcode(.i32_const)); - try relocs.append(.{ - .relocation_type = .R_WASM_MEMORY_ADDR_LEB, - .offset = @as(u32, @intCast(body_list.items.len)), - .index = tag_sym_index, - }); - try writer.writeAll(&[_]u8{0} ** 5); // will be relocated - - // store pointer - try writer.writeByte(std.wasm.opcode(.i32_store)); - try leb.writeUleb128(writer, encoded_alignment); - try leb.writeUleb128(writer, @as(u32, 0)); - - // store length - try writer.writeByte(std.wasm.opcode(.i32_const)); - try leb.writeUleb128(writer, @as(u32, @intCast(tag_name_len))); - try writer.writeByte(std.wasm.opcode(.i32_store)); - try leb.writeUleb128(writer, encoded_alignment); - try leb.writeUleb128(writer, @as(u32, 4)); - } else { - const encoded_alignment = @ctz(@as(u32, 8)); - try writer.writeByte(std.wasm.opcode(.i64_const)); - try relocs.append(.{ - .relocation_type = .R_WASM_MEMORY_ADDR_LEB64, - .offset = @as(u32, @intCast(body_list.items.len)), - .index = tag_sym_index, - }); - try writer.writeAll(&[_]u8{0} ** 10); // will be relocated - - // store pointer - try writer.writeByte(std.wasm.opcode(.i64_store)); - try leb.writeUleb128(writer, encoded_alignment); - try leb.writeUleb128(writer, @as(u32, 0)); - - // store length - try writer.writeByte(std.wasm.opcode(.i64_const)); - try leb.writeUleb128(writer, @as(u64, @intCast(tag_name_len))); - try writer.writeByte(std.wasm.opcode(.i64_store)); - try leb.writeUleb128(writer, encoded_alignment); - try leb.writeUleb128(writer, @as(u32, 8)); - } - - // break outside blocks - try writer.writeByte(std.wasm.opcode(.br)); - try leb.writeUleb128(writer, @as(u32, 1)); - - // end the block for this case - try writer.writeByte(std.wasm.opcode(.end)); - } +fn airTagName(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const operand = try cg.resolveInst(un_op); + const enum_ty = cg.typeOf(un_op); - try writer.writeByte(std.wasm.opcode(.@"unreachable")); // tag value does not have a name - // finish outer block - try writer.writeByte(std.wasm.opcode(.end)); - // finish function body - try writer.writeByte(std.wasm.opcode(.end)); + const result_ptr = try cg.allocStack(cg.typeOfIndex(inst)); + try cg.lowerToStack(result_ptr); + try cg.emitWValue(operand); + try cg.addInst(.{ .tag = .call_tag_name, .data = .{ .ip_index = enum_ty.toIntern() } }); - const slice_ty = Type.slice_const_u8_sentinel_0; - const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, pt, func.target.*); - const sym_index = try func.bin_file.createFunction(func_name, func_type, &body_list, &relocs); - return @intFromEnum(sym_index); + return cg.finishAir(inst, result_ptr, &.{un_op}); } -fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; +fn airErrorSetHasValue(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; const ip = &zcu.intern_pool; - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand = try func.resolveInst(ty_op.operand); + const 
operand = try cg.resolveInst(ty_op.operand); const error_set_ty = ty_op.ty.toType(); - const result = try func.allocLocal(Type.bool); + const result = try cg.allocLocal(Type.bool); const names = error_set_ty.errorSetNames(zcu); - var values = try std.ArrayList(u32).initCapacity(func.gpa, names.len); + var values = try std.ArrayList(u32).initCapacity(cg.gpa, names.len); defer values.deinit(); var lowest: ?u32 = null; @@ -7418,23 +7099,23 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } // start block for 'true' branch - try func.startBlock(.block, wasm.block_empty); + try cg.startBlock(.block, .empty); // start block for 'false' branch - try func.startBlock(.block, wasm.block_empty); + try cg.startBlock(.block, .empty); // block for the jump table itself - try func.startBlock(.block, wasm.block_empty); + try cg.startBlock(.block, .empty); // lower operand to determine jump table target - try func.emitWValue(operand); - try func.addImm32(lowest.?); - try func.addTag(.i32_sub); + try cg.emitWValue(operand); + try cg.addImm32(lowest.?); + try cg.addTag(.i32_sub); // Account for default branch so always add '1' const depth = @as(u32, @intCast(highest.? - lowest.? + 1)); const jump_table: Mir.JumpTable = .{ .length = depth }; - const table_extra_index = try func.addExtra(jump_table); - try func.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } }); - try func.mir_extra.ensureUnusedCapacity(func.gpa, depth); + const table_extra_index = try cg.addExtra(jump_table); + try cg.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } }); + try cg.mir_extra.ensureUnusedCapacity(cg.gpa, depth); var value: u32 = lowest.?; while (value <= highest.?) : (value += 1) { @@ -7444,202 +7125,200 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } break :blk 0; }; - func.mir_extra.appendAssumeCapacity(idx); + cg.mir_extra.appendAssumeCapacity(idx); } - try func.endBlock(); + try cg.endBlock(); // 'false' branch (i.e. error set does not have value // ensure we set local to 0 in case the local was re-used. 
- try func.addImm32(0); - try func.addLabel(.local_set, result.local.value); - try func.addLabel(.br, 1); - try func.endBlock(); + try cg.addImm32(0); + try cg.addLocal(.local_set, result.local.value); + try cg.addLabel(.br, 1); + try cg.endBlock(); // 'true' branch - try func.addImm32(1); - try func.addLabel(.local_set, result.local.value); - try func.addLabel(.br, 0); - try func.endBlock(); + try cg.addImm32(1); + try cg.addLocal(.local_set, result.local.value); + try cg.addLabel(.br, 0); + try cg.endBlock(); - return func.finishAir(inst, result, &.{ty_op.operand}); + return cg.finishAir(inst, result, &.{ty_op.operand}); } -inline fn useAtomicFeature(func: *const CodeGen) bool { - return std.Target.wasm.featureSetHas(func.target.cpu.features, .atomics); +inline fn useAtomicFeature(cg: *const CodeGen) bool { + return std.Target.wasm.featureSetHas(cg.target.cpu.features, .atomics); } -fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data; +fn airCmpxchg(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = cg.air.extraData(Air.Cmpxchg, ty_pl.payload).data; - const ptr_ty = func.typeOf(extra.ptr); + const ptr_ty = cg.typeOf(extra.ptr); const ty = ptr_ty.childType(zcu); - const result_ty = func.typeOfIndex(inst); + const result_ty = cg.typeOfIndex(inst); - const ptr_operand = try func.resolveInst(extra.ptr); - const expected_val = try func.resolveInst(extra.expected_value); - const new_val = try func.resolveInst(extra.new_value); + const ptr_operand = try cg.resolveInst(extra.ptr); + const expected_val = try cg.resolveInst(extra.expected_value); + const new_val = try cg.resolveInst(extra.new_value); - const cmp_result = try func.allocLocal(Type.bool); + const cmp_result = try cg.allocLocal(Type.bool); - const ptr_val = if (func.useAtomicFeature()) val: { - const val_local = try func.allocLocal(ty); - try func.emitWValue(ptr_operand); - try func.lowerToStack(expected_val); - try func.lowerToStack(new_val); - try func.addAtomicMemArg(switch (ty.abiSize(zcu)) { + const ptr_val = if (cg.useAtomicFeature()) val: { + const val_local = try cg.allocLocal(ty); + try cg.emitWValue(ptr_operand); + try cg.lowerToStack(expected_val); + try cg.lowerToStack(new_val); + try cg.addAtomicMemArg(switch (ty.abiSize(zcu)) { 1 => .i32_atomic_rmw8_cmpxchg_u, 2 => .i32_atomic_rmw16_cmpxchg_u, 4 => .i32_atomic_rmw_cmpxchg, 8 => .i32_atomic_rmw_cmpxchg, - else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}), + else => |size| return cg.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}), }, .{ .offset = ptr_operand.offset(), .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?), }); - try func.addLabel(.local_tee, val_local.local.value); - _ = try func.cmp(.stack, expected_val, ty, .eq); - try func.addLabel(.local_set, cmp_result.local.value); + try cg.addLocal(.local_tee, val_local.local.value); + _ = try cg.cmp(.stack, expected_val, ty, .eq); + try cg.addLocal(.local_set, cmp_result.local.value); break :val val_local; } else val: { if (ty.abiSize(zcu) > 8) { - return func.fail("TODO: Implement `@cmpxchg` for types larger than abi size of 8 bytes", .{}); + return cg.fail("TODO: Implement `@cmpxchg` for types larger than abi 
size of 8 bytes", .{}); } - const ptr_val = try WValue.toLocal(try func.load(ptr_operand, ty, 0), func, ty); + const ptr_val = try WValue.toLocal(try cg.load(ptr_operand, ty, 0), cg, ty); - try func.lowerToStack(ptr_operand); - try func.lowerToStack(new_val); - try func.emitWValue(ptr_val); - _ = try func.cmp(ptr_val, expected_val, ty, .eq); - try func.addLabel(.local_tee, cmp_result.local.value); - try func.addTag(.select); - try func.store(.stack, .stack, ty, 0); + try cg.lowerToStack(ptr_operand); + try cg.lowerToStack(new_val); + try cg.emitWValue(ptr_val); + _ = try cg.cmp(ptr_val, expected_val, ty, .eq); + try cg.addLocal(.local_tee, cmp_result.local.value); + try cg.addTag(.select); + try cg.store(.stack, .stack, ty, 0); break :val ptr_val; }; - const result = if (isByRef(result_ty, pt, func.target.*)) val: { - try func.emitWValue(cmp_result); - try func.addImm32(~@as(u32, 0)); - try func.addTag(.i32_xor); - try func.addImm32(1); - try func.addTag(.i32_and); - const and_result = try WValue.toLocal(.stack, func, Type.bool); - const result_ptr = try func.allocStack(result_ty); - try func.store(result_ptr, and_result, Type.bool, @as(u32, @intCast(ty.abiSize(zcu)))); - try func.store(result_ptr, ptr_val, ty, 0); + const result = if (isByRef(result_ty, zcu, cg.target)) val: { + try cg.emitWValue(cmp_result); + try cg.addImm32(~@as(u32, 0)); + try cg.addTag(.i32_xor); + try cg.addImm32(1); + try cg.addTag(.i32_and); + const and_result = try WValue.toLocal(.stack, cg, Type.bool); + const result_ptr = try cg.allocStack(result_ty); + try cg.store(result_ptr, and_result, Type.bool, @as(u32, @intCast(ty.abiSize(zcu)))); + try cg.store(result_ptr, ptr_val, ty, 0); break :val result_ptr; } else val: { - try func.addImm32(0); - try func.emitWValue(ptr_val); - try func.emitWValue(cmp_result); - try func.addTag(.select); + try cg.addImm32(0); + try cg.emitWValue(ptr_val); + try cg.emitWValue(cmp_result); + try cg.addTag(.select); break :val .stack; }; - return func.finishAir(inst, result, &.{ extra.ptr, extra.expected_value, extra.new_value }); + return cg.finishAir(inst, result, &.{ extra.ptr, extra.expected_value, extra.new_value }); } -fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const atomic_load = func.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load; - const ptr = try func.resolveInst(atomic_load.ptr); - const ty = func.typeOfIndex(inst); +fn airAtomicLoad(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const atomic_load = cg.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load; + const ptr = try cg.resolveInst(atomic_load.ptr); + const ty = cg.typeOfIndex(inst); - if (func.useAtomicFeature()) { - const tag: wasm.AtomicsOpcode = switch (ty.abiSize(pt.zcu)) { + if (cg.useAtomicFeature()) { + const tag: std.wasm.AtomicsOpcode = switch (ty.abiSize(zcu)) { 1 => .i32_atomic_load8_u, 2 => .i32_atomic_load16_u, 4 => .i32_atomic_load, 8 => .i64_atomic_load, - else => |size| return func.fail("TODO: @atomicLoad for types with abi size {d}", .{size}), + else => |size| return cg.fail("TODO: @atomicLoad for types with abi size {d}", .{size}), }; - try func.emitWValue(ptr); - try func.addAtomicMemArg(tag, .{ + try cg.emitWValue(ptr); + try cg.addAtomicMemArg(tag, .{ .offset = ptr.offset(), - .alignment = @intCast(ty.abiAlignment(pt.zcu).toByteUnits().?), + .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?), }); } else { - _ = try func.load(ptr, ty, 0); + _ = try cg.load(ptr, ty, 0); } - 
return func.finishAir(inst, .stack, &.{atomic_load.ptr}); + return cg.finishAir(inst, .stack, &.{atomic_load.ptr}); } -fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const extra = func.air.extraData(Air.AtomicRmw, pl_op.payload).data; +fn airAtomicRmw(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; + const extra = cg.air.extraData(Air.AtomicRmw, pl_op.payload).data; - const ptr = try func.resolveInst(pl_op.operand); - const operand = try func.resolveInst(extra.operand); - const ty = func.typeOfIndex(inst); + const ptr = try cg.resolveInst(pl_op.operand); + const operand = try cg.resolveInst(extra.operand); + const ty = cg.typeOfIndex(inst); const op: std.builtin.AtomicRmwOp = extra.op(); - if (func.useAtomicFeature()) { + if (cg.useAtomicFeature()) { switch (op) { .Max, .Min, .Nand, => { - const tmp = try func.load(ptr, ty, 0); - const value = try tmp.toLocal(func, ty); + const tmp = try cg.load(ptr, ty, 0); + const value = try tmp.toLocal(cg, ty); // create a loop to cmpxchg the new value - try func.startBlock(.loop, wasm.block_empty); + try cg.startBlock(.loop, .empty); - try func.emitWValue(ptr); - try func.emitWValue(value); + try cg.emitWValue(ptr); + try cg.emitWValue(value); if (op == .Nand) { const wasm_bits = toWasmBits(@intCast(ty.bitSize(zcu))).?; - const and_res = try func.binOp(value, operand, ty, .@"and"); + const and_res = try cg.binOp(value, operand, ty, .@"and"); if (wasm_bits == 32) - try func.addImm32(~@as(u32, 0)) + try cg.addImm32(~@as(u32, 0)) else if (wasm_bits == 64) - try func.addImm64(~@as(u64, 0)) + try cg.addImm64(~@as(u64, 0)) else - return func.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{}); - _ = try func.binOp(and_res, .stack, ty, .xor); + return cg.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{}); + _ = try cg.binOp(and_res, .stack, ty, .xor); } else { - try func.emitWValue(value); - try func.emitWValue(operand); - _ = try func.cmp(value, operand, ty, if (op == .Max) .gt else .lt); - try func.addTag(.select); + try cg.emitWValue(value); + try cg.emitWValue(operand); + _ = try cg.cmp(value, operand, ty, if (op == .Max) .gt else .lt); + try cg.addTag(.select); } - try func.addAtomicMemArg( + try cg.addAtomicMemArg( switch (ty.abiSize(zcu)) { 1 => .i32_atomic_rmw8_cmpxchg_u, 2 => .i32_atomic_rmw16_cmpxchg_u, 4 => .i32_atomic_rmw_cmpxchg, 8 => .i64_atomic_rmw_cmpxchg, - else => return func.fail("TODO: implement `@atomicRmw` with operation `{s}` for types larger than 64 bits", .{@tagName(op)}), + else => return cg.fail("TODO: implement `@atomicRmw` with operation `{s}` for types larger than 64 bits", .{@tagName(op)}), }, .{ .offset = ptr.offset(), .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?), }, ); - const select_res = try func.allocLocal(ty); - try func.addLabel(.local_tee, select_res.local.value); - _ = try func.cmp(.stack, value, ty, .neq); // leave on stack so we can use it for br_if + const select_res = try cg.allocLocal(ty); + try cg.addLocal(.local_tee, select_res.local.value); + _ = try cg.cmp(.stack, value, ty, .neq); // leave on stack so we can use it for br_if - try func.emitWValue(select_res); - try func.addLabel(.local_set, value.local.value); + try cg.emitWValue(select_res); + try cg.addLocal(.local_set, 
value.local.value); - try func.addLabel(.br_if, 0); - try func.endBlock(); - return func.finishAir(inst, value, &.{ pl_op.operand, extra.operand }); + try cg.addLabel(.br_if, 0); + try cg.endBlock(); + return cg.finishAir(inst, value, &.{ pl_op.operand, extra.operand }); }, // the other operations have their own instructions for Wasm. else => { - try func.emitWValue(ptr); - try func.emitWValue(operand); - const tag: wasm.AtomicsOpcode = switch (ty.abiSize(zcu)) { + try cg.emitWValue(ptr); + try cg.emitWValue(operand); + const tag: std.wasm.AtomicsOpcode = switch (ty.abiSize(zcu)) { 1 => switch (op) { .Xchg => .i32_atomic_rmw8_xchg_u, .Add => .i32_atomic_rmw8_add_u, @@ -7676,22 +7355,22 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { .Xor => .i64_atomic_rmw_xor, else => unreachable, }, - else => |size| return func.fail("TODO: Implement `@atomicRmw` for types with abi size {d}", .{size}), + else => |size| return cg.fail("TODO: Implement `@atomicRmw` for types with abi size {d}", .{size}), }; - try func.addAtomicMemArg(tag, .{ + try cg.addAtomicMemArg(tag, .{ .offset = ptr.offset(), .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?), }); - return func.finishAir(inst, .stack, &.{ pl_op.operand, extra.operand }); + return cg.finishAir(inst, .stack, &.{ pl_op.operand, extra.operand }); }, } } else { - const loaded = try func.load(ptr, ty, 0); - const result = try loaded.toLocal(func, ty); + const loaded = try cg.load(ptr, ty, 0); + const result = try loaded.toLocal(cg, ty); switch (op) { .Xchg => { - try func.store(ptr, operand, ty, 0); + try cg.store(ptr, operand, ty, 0); }, .Add, .Sub, @@ -7699,8 +7378,8 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { .Or, .Xor, => { - try func.emitWValue(ptr); - _ = try func.binOp(result, operand, ty, switch (op) { + try cg.emitWValue(ptr); + _ = try cg.binOp(result, operand, ty, switch (op) { .Add => .add, .Sub => .sub, .And => .@"and", @@ -7709,87 +7388,123 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => unreachable, }); if (ty.isInt(zcu) and (op == .Add or op == .Sub)) { - _ = try func.wrapOperand(.stack, ty); + _ = try cg.wrapOperand(.stack, ty); } - try func.store(.stack, .stack, ty, ptr.offset()); + try cg.store(.stack, .stack, ty, ptr.offset()); }, .Max, .Min, => { - try func.emitWValue(ptr); - try func.emitWValue(result); - try func.emitWValue(operand); - _ = try func.cmp(result, operand, ty, if (op == .Max) .gt else .lt); - try func.addTag(.select); - try func.store(.stack, .stack, ty, ptr.offset()); + try cg.emitWValue(ptr); + try cg.emitWValue(result); + try cg.emitWValue(operand); + _ = try cg.cmp(result, operand, ty, if (op == .Max) .gt else .lt); + try cg.addTag(.select); + try cg.store(.stack, .stack, ty, ptr.offset()); }, .Nand => { const wasm_bits = toWasmBits(@intCast(ty.bitSize(zcu))).?; - try func.emitWValue(ptr); - const and_res = try func.binOp(result, operand, ty, .@"and"); + try cg.emitWValue(ptr); + const and_res = try cg.binOp(result, operand, ty, .@"and"); if (wasm_bits == 32) - try func.addImm32(~@as(u32, 0)) + try cg.addImm32(~@as(u32, 0)) else if (wasm_bits == 64) - try func.addImm64(~@as(u64, 0)) + try cg.addImm64(~@as(u64, 0)) else - return func.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{}); - _ = try func.binOp(and_res, .stack, ty, .xor); - try func.store(.stack, .stack, ty, ptr.offset()); + return cg.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{}); + _ = try 
cg.binOp(and_res, .stack, ty, .xor); + try cg.store(.stack, .stack, ty, ptr.offset()); }, } - return func.finishAir(inst, result, &.{ pl_op.operand, extra.operand }); + return cg.finishAir(inst, result, &.{ pl_op.operand, extra.operand }); } } -fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const pt = func.pt; - const zcu = pt.zcu; - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; +fn airAtomicStore(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const zcu = cg.pt.zcu; + const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const ptr = try func.resolveInst(bin_op.lhs); - const operand = try func.resolveInst(bin_op.rhs); - const ptr_ty = func.typeOf(bin_op.lhs); + const ptr = try cg.resolveInst(bin_op.lhs); + const operand = try cg.resolveInst(bin_op.rhs); + const ptr_ty = cg.typeOf(bin_op.lhs); const ty = ptr_ty.childType(zcu); - if (func.useAtomicFeature()) { - const tag: wasm.AtomicsOpcode = switch (ty.abiSize(zcu)) { + if (cg.useAtomicFeature()) { + const tag: std.wasm.AtomicsOpcode = switch (ty.abiSize(zcu)) { 1 => .i32_atomic_store8, 2 => .i32_atomic_store16, 4 => .i32_atomic_store, 8 => .i64_atomic_store, - else => |size| return func.fail("TODO: @atomicLoad for types with abi size {d}", .{size}), + else => |size| return cg.fail("TODO: @atomicLoad for types with abi size {d}", .{size}), }; - try func.emitWValue(ptr); - try func.lowerToStack(operand); - try func.addAtomicMemArg(tag, .{ + try cg.emitWValue(ptr); + try cg.lowerToStack(operand); + try cg.addAtomicMemArg(tag, .{ .offset = ptr.offset(), .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?), }); } else { - try func.store(ptr, operand, ty, 0); + try cg.store(ptr, operand, ty, 0); } - return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); + return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); } -fn airFrameAddress(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - if (func.initial_stack_value == .none) { - try func.initializeStack(); +fn airFrameAddress(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + if (cg.initial_stack_value == .none) { + try cg.initializeStack(); } - try func.emitWValue(func.bottom_stack_value); - return func.finishAir(inst, .stack, &.{}); + try cg.emitWValue(cg.bottom_stack_value); + return cg.finishAir(inst, .stack, &.{}); } -fn typeOf(func: *CodeGen, inst: Air.Inst.Ref) Type { - const pt = func.pt; - const zcu = pt.zcu; - return func.air.typeOf(inst, &zcu.intern_pool); +fn typeOf(cg: *CodeGen, inst: Air.Inst.Ref) Type { + const zcu = cg.pt.zcu; + return cg.air.typeOf(inst, &zcu.intern_pool); } -fn typeOfIndex(func: *CodeGen, inst: Air.Inst.Index) Type { - const pt = func.pt; - const zcu = pt.zcu; - return func.air.typeOfIndex(inst, &zcu.intern_pool); +fn typeOfIndex(cg: *CodeGen, inst: Air.Inst.Index) Type { + const zcu = cg.pt.zcu; + return cg.air.typeOfIndex(inst, &zcu.intern_pool); +} + +fn floatCmpIntrinsic(op: std.math.CompareOperator, bits: u16) Mir.Intrinsic { + return switch (op) { + .lt => switch (bits) { + 80 => .__ltxf2, + 128 => .__lttf2, + else => unreachable, + }, + .lte => switch (bits) { + 80 => .__lexf2, + 128 => .__letf2, + else => unreachable, + }, + .eq => switch (bits) { + 80 => .__eqxf2, + 128 => .__eqtf2, + else => unreachable, + }, + .neq => switch (bits) { + 80 => .__nexf2, + 128 => .__netf2, + else => unreachable, + }, + .gte => switch (bits) { + 80 => .__gexf2, + 128 => .__getf2, + else => unreachable, + }, + .gt => switch (bits) { + 80 => 
.__gtxf2, + 128 => .__gttf2, + else => unreachable, + }, + }; +} + +fn extraLen(cg: *const CodeGen) u32 { + return @intCast(cg.mir_extra.items.len - cg.start_mir_extra_off); } diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig index cd744cd53e15..28159f33361e 100644 --- a/src/arch/wasm/Emit.zig +++ b/src/arch/wasm/Emit.zig @@ -1,673 +1,973 @@ -//! Contains all logic to lower wasm MIR into its binary -//! or textual representation. - const Emit = @This(); + const std = @import("std"); +const assert = std.debug.assert; +const Allocator = std.mem.Allocator; +const leb = std.leb; + +const Wasm = link.File.Wasm; const Mir = @import("Mir.zig"); const link = @import("../../link.zig"); const Zcu = @import("../../Zcu.zig"); const InternPool = @import("../../InternPool.zig"); const codegen = @import("../../codegen.zig"); -const leb128 = std.leb; -/// Contains our list of instructions mir: Mir, -/// Reference to the Wasm module linker -bin_file: *link.File.Wasm, -/// Possible error message. When set, the value is allocated and -/// must be freed manually. -error_msg: ?*Zcu.ErrorMsg = null, -/// The binary representation that will be emit by this module. -code: *std.ArrayList(u8), -/// List of allocated locals. -locals: []const u8, -/// The declaration that code is being generated for. -owner_nav: InternPool.Nav.Index, - -// Debug information -/// Holds the debug information for this emission -dbg_output: link.File.DebugInfoOutput, -/// Previous debug info line -prev_di_line: u32, -/// Previous debug info column -prev_di_column: u32, -/// Previous offset relative to code section -prev_di_offset: u32, - -const InnerError = error{ +wasm: *Wasm, +/// The binary representation that will be emitted by this module. +code: *std.ArrayListUnmanaged(u8), + +pub const Error = error{ OutOfMemory, - EmitFail, }; -pub fn emitMir(emit: *Emit) InnerError!void { - const mir_tags = emit.mir.instructions.items(.tag); - // write the locals in the prologue of the function body - // before we emit the function body when lowering MIR - try emit.emitLocals(); - - for (mir_tags, 0..) 
|tag, index| { - const inst = @as(u32, @intCast(index)); - switch (tag) { - // block instructions - .block => try emit.emitBlock(tag, inst), - .loop => try emit.emitBlock(tag, inst), - - .dbg_line => try emit.emitDbgLine(inst), - .dbg_epilogue_begin => try emit.emitDbgEpilogueBegin(), - .dbg_prologue_end => try emit.emitDbgPrologueEnd(), - - // branch instructions - .br_if => try emit.emitLabel(tag, inst), - .br_table => try emit.emitBrTable(inst), - .br => try emit.emitLabel(tag, inst), - - // relocatables - .call => try emit.emitCall(inst), - .call_indirect => try emit.emitCallIndirect(inst), - .global_get => try emit.emitGlobal(tag, inst), - .global_set => try emit.emitGlobal(tag, inst), - .function_index => try emit.emitFunctionIndex(inst), - .memory_address => try emit.emitMemAddress(inst), - - // immediates - .f32_const => try emit.emitFloat32(inst), - .f64_const => try emit.emitFloat64(inst), - .i32_const => try emit.emitImm32(inst), - .i64_const => try emit.emitImm64(inst), - - // memory instructions - .i32_load => try emit.emitMemArg(tag, inst), - .i64_load => try emit.emitMemArg(tag, inst), - .f32_load => try emit.emitMemArg(tag, inst), - .f64_load => try emit.emitMemArg(tag, inst), - .i32_load8_s => try emit.emitMemArg(tag, inst), - .i32_load8_u => try emit.emitMemArg(tag, inst), - .i32_load16_s => try emit.emitMemArg(tag, inst), - .i32_load16_u => try emit.emitMemArg(tag, inst), - .i64_load8_s => try emit.emitMemArg(tag, inst), - .i64_load8_u => try emit.emitMemArg(tag, inst), - .i64_load16_s => try emit.emitMemArg(tag, inst), - .i64_load16_u => try emit.emitMemArg(tag, inst), - .i64_load32_s => try emit.emitMemArg(tag, inst), - .i64_load32_u => try emit.emitMemArg(tag, inst), - .i32_store => try emit.emitMemArg(tag, inst), - .i64_store => try emit.emitMemArg(tag, inst), - .f32_store => try emit.emitMemArg(tag, inst), - .f64_store => try emit.emitMemArg(tag, inst), - .i32_store8 => try emit.emitMemArg(tag, inst), - .i32_store16 => try emit.emitMemArg(tag, inst), - .i64_store8 => try emit.emitMemArg(tag, inst), - .i64_store16 => try emit.emitMemArg(tag, inst), - .i64_store32 => try emit.emitMemArg(tag, inst), - - // Instructions with an index that do not require relocations - .local_get => try emit.emitLabel(tag, inst), - .local_set => try emit.emitLabel(tag, inst), - .local_tee => try emit.emitLabel(tag, inst), - .memory_grow => try emit.emitLabel(tag, inst), - .memory_size => try emit.emitLabel(tag, inst), - - // no-ops - .end => try emit.emitTag(tag), - .@"return" => try emit.emitTag(tag), - .@"unreachable" => try emit.emitTag(tag), - - .select => try emit.emitTag(tag), - - // arithmetic - .i32_eqz => try emit.emitTag(tag), - .i32_eq => try emit.emitTag(tag), - .i32_ne => try emit.emitTag(tag), - .i32_lt_s => try emit.emitTag(tag), - .i32_lt_u => try emit.emitTag(tag), - .i32_gt_s => try emit.emitTag(tag), - .i32_gt_u => try emit.emitTag(tag), - .i32_le_s => try emit.emitTag(tag), - .i32_le_u => try emit.emitTag(tag), - .i32_ge_s => try emit.emitTag(tag), - .i32_ge_u => try emit.emitTag(tag), - .i64_eqz => try emit.emitTag(tag), - .i64_eq => try emit.emitTag(tag), - .i64_ne => try emit.emitTag(tag), - .i64_lt_s => try emit.emitTag(tag), - .i64_lt_u => try emit.emitTag(tag), - .i64_gt_s => try emit.emitTag(tag), - .i64_gt_u => try emit.emitTag(tag), - .i64_le_s => try emit.emitTag(tag), - .i64_le_u => try emit.emitTag(tag), - .i64_ge_s => try emit.emitTag(tag), - .i64_ge_u => try emit.emitTag(tag), - .f32_eq => try emit.emitTag(tag), - .f32_ne => try emit.emitTag(tag), - 
.f32_lt => try emit.emitTag(tag), - .f32_gt => try emit.emitTag(tag), - .f32_le => try emit.emitTag(tag), - .f32_ge => try emit.emitTag(tag), - .f64_eq => try emit.emitTag(tag), - .f64_ne => try emit.emitTag(tag), - .f64_lt => try emit.emitTag(tag), - .f64_gt => try emit.emitTag(tag), - .f64_le => try emit.emitTag(tag), - .f64_ge => try emit.emitTag(tag), - .i32_add => try emit.emitTag(tag), - .i32_sub => try emit.emitTag(tag), - .i32_mul => try emit.emitTag(tag), - .i32_div_s => try emit.emitTag(tag), - .i32_div_u => try emit.emitTag(tag), - .i32_and => try emit.emitTag(tag), - .i32_or => try emit.emitTag(tag), - .i32_xor => try emit.emitTag(tag), - .i32_shl => try emit.emitTag(tag), - .i32_shr_s => try emit.emitTag(tag), - .i32_shr_u => try emit.emitTag(tag), - .i64_add => try emit.emitTag(tag), - .i64_sub => try emit.emitTag(tag), - .i64_mul => try emit.emitTag(tag), - .i64_div_s => try emit.emitTag(tag), - .i64_div_u => try emit.emitTag(tag), - .i64_and => try emit.emitTag(tag), - .i64_or => try emit.emitTag(tag), - .i64_xor => try emit.emitTag(tag), - .i64_shl => try emit.emitTag(tag), - .i64_shr_s => try emit.emitTag(tag), - .i64_shr_u => try emit.emitTag(tag), - .f32_abs => try emit.emitTag(tag), - .f32_neg => try emit.emitTag(tag), - .f32_ceil => try emit.emitTag(tag), - .f32_floor => try emit.emitTag(tag), - .f32_trunc => try emit.emitTag(tag), - .f32_nearest => try emit.emitTag(tag), - .f32_sqrt => try emit.emitTag(tag), - .f32_add => try emit.emitTag(tag), - .f32_sub => try emit.emitTag(tag), - .f32_mul => try emit.emitTag(tag), - .f32_div => try emit.emitTag(tag), - .f32_min => try emit.emitTag(tag), - .f32_max => try emit.emitTag(tag), - .f32_copysign => try emit.emitTag(tag), - .f64_abs => try emit.emitTag(tag), - .f64_neg => try emit.emitTag(tag), - .f64_ceil => try emit.emitTag(tag), - .f64_floor => try emit.emitTag(tag), - .f64_trunc => try emit.emitTag(tag), - .f64_nearest => try emit.emitTag(tag), - .f64_sqrt => try emit.emitTag(tag), - .f64_add => try emit.emitTag(tag), - .f64_sub => try emit.emitTag(tag), - .f64_mul => try emit.emitTag(tag), - .f64_div => try emit.emitTag(tag), - .f64_min => try emit.emitTag(tag), - .f64_max => try emit.emitTag(tag), - .f64_copysign => try emit.emitTag(tag), - .i32_wrap_i64 => try emit.emitTag(tag), - .i64_extend_i32_s => try emit.emitTag(tag), - .i64_extend_i32_u => try emit.emitTag(tag), - .i32_extend8_s => try emit.emitTag(tag), - .i32_extend16_s => try emit.emitTag(tag), - .i64_extend8_s => try emit.emitTag(tag), - .i64_extend16_s => try emit.emitTag(tag), - .i64_extend32_s => try emit.emitTag(tag), - .f32_demote_f64 => try emit.emitTag(tag), - .f64_promote_f32 => try emit.emitTag(tag), - .i32_reinterpret_f32 => try emit.emitTag(tag), - .i64_reinterpret_f64 => try emit.emitTag(tag), - .f32_reinterpret_i32 => try emit.emitTag(tag), - .f64_reinterpret_i64 => try emit.emitTag(tag), - .i32_trunc_f32_s => try emit.emitTag(tag), - .i32_trunc_f32_u => try emit.emitTag(tag), - .i32_trunc_f64_s => try emit.emitTag(tag), - .i32_trunc_f64_u => try emit.emitTag(tag), - .i64_trunc_f32_s => try emit.emitTag(tag), - .i64_trunc_f32_u => try emit.emitTag(tag), - .i64_trunc_f64_s => try emit.emitTag(tag), - .i64_trunc_f64_u => try emit.emitTag(tag), - .f32_convert_i32_s => try emit.emitTag(tag), - .f32_convert_i32_u => try emit.emitTag(tag), - .f32_convert_i64_s => try emit.emitTag(tag), - .f32_convert_i64_u => try emit.emitTag(tag), - .f64_convert_i32_s => try emit.emitTag(tag), - .f64_convert_i32_u => try emit.emitTag(tag), - .f64_convert_i64_s => 
try emit.emitTag(tag), - .f64_convert_i64_u => try emit.emitTag(tag), - .i32_rem_s => try emit.emitTag(tag), - .i32_rem_u => try emit.emitTag(tag), - .i64_rem_s => try emit.emitTag(tag), - .i64_rem_u => try emit.emitTag(tag), - .i32_popcnt => try emit.emitTag(tag), - .i64_popcnt => try emit.emitTag(tag), - .i32_clz => try emit.emitTag(tag), - .i32_ctz => try emit.emitTag(tag), - .i64_clz => try emit.emitTag(tag), - .i64_ctz => try emit.emitTag(tag), - - .misc_prefix => try emit.emitExtended(inst), - .simd_prefix => try emit.emitSimd(inst), - .atomics_prefix => try emit.emitAtomic(inst), - } - } -} - -fn offset(self: Emit) u32 { - return @as(u32, @intCast(self.code.items.len)); -} - -fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { - @branchHint(.cold); - std.debug.assert(emit.error_msg == null); - const comp = emit.bin_file.base.comp; - const zcu = comp.zcu.?; +pub fn lowerToCode(emit: *Emit) Error!void { + const mir = &emit.mir; + const code = emit.code; + const wasm = emit.wasm; + const comp = wasm.base.comp; const gpa = comp.gpa; - emit.error_msg = try Zcu.ErrorMsg.create(gpa, zcu.navSrcLoc(emit.owner_nav), format, args); - return error.EmitFail; -} - -fn emitLocals(emit: *Emit) !void { - const writer = emit.code.writer(); - try leb128.writeUleb128(writer, @as(u32, @intCast(emit.locals.len))); - // emit the actual locals amount - for (emit.locals) |local| { - try leb128.writeUleb128(writer, @as(u32, 1)); - try writer.writeByte(local); - } -} - -fn emitTag(emit: *Emit, tag: Mir.Inst.Tag) !void { - try emit.code.append(@intFromEnum(tag)); -} - -fn emitBlock(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void { - const block_type = emit.mir.instructions.items(.data)[inst].block_type; - try emit.code.append(@intFromEnum(tag)); - try emit.code.append(block_type); -} - -fn emitBrTable(emit: *Emit, inst: Mir.Inst.Index) !void { - const extra_index = emit.mir.instructions.items(.data)[inst].payload; - const extra = emit.mir.extraData(Mir.JumpTable, extra_index); - const labels = emit.mir.extra[extra.end..][0..extra.data.length]; - const writer = emit.code.writer(); + const is_obj = comp.config.output_mode == .Obj; + const target = &comp.root_mod.resolved_target.result; + const is_wasm32 = target.cpu.arch == .wasm32; - try emit.code.append(std.wasm.opcode(.br_table)); - try leb128.writeUleb128(writer, extra.data.length - 1); // Default label is not part of length/depth - for (labels) |label| { - try leb128.writeUleb128(writer, label); - } -} + const tags = mir.instruction_tags; + const datas = mir.instruction_datas; + var inst: u32 = 0; -fn emitLabel(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void { - const label = emit.mir.instructions.items(.data)[inst].label; - try emit.code.append(@intFromEnum(tag)); - try leb128.writeUleb128(emit.code.writer(), label); -} + loop: switch (tags[inst]) { + .dbg_epilogue_begin => { + return; + }, + .block, .loop => { + const block_type = datas[inst].block_type; + try code.ensureUnusedCapacity(gpa, 2); + code.appendAssumeCapacity(@intFromEnum(tags[inst])); + code.appendAssumeCapacity(@intFromEnum(block_type)); + + inst += 1; + continue :loop tags[inst]; + }, + .uav_ref => { + if (is_obj) { + try uavRefOffObj(wasm, code, .{ .uav_obj = datas[inst].uav_obj, .offset = 0 }, is_wasm32); + } else { + try uavRefOffExe(wasm, code, .{ .uav_exe = datas[inst].uav_exe, .offset = 0 }, is_wasm32); + } + inst += 1; + continue :loop tags[inst]; + }, + .uav_ref_off => { + if (is_obj) { + try uavRefOffObj(wasm, code, 
mir.extraData(Mir.UavRefOffObj, datas[inst].payload).data, is_wasm32); + } else { + try uavRefOffExe(wasm, code, mir.extraData(Mir.UavRefOffExe, datas[inst].payload).data, is_wasm32); + } + inst += 1; + continue :loop tags[inst]; + }, + .nav_ref => { + try navRefOff(wasm, code, .{ .nav_index = datas[inst].nav_index, .offset = 0 }, is_wasm32); + inst += 1; + continue :loop tags[inst]; + }, + .nav_ref_off => { + try navRefOff(wasm, code, mir.extraData(Mir.NavRefOff, datas[inst].payload).data, is_wasm32); + inst += 1; + continue :loop tags[inst]; + }, + .func_ref => { + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const)); + if (is_obj) { + @panic("TODO"); + } else { + leb.writeUleb128(code.fixedWriter(), 1 + @intFromEnum(datas[inst].indirect_function_table_index)) catch unreachable; + } + inst += 1; + continue :loop tags[inst]; + }, + .dbg_line => { + inst += 1; + continue :loop tags[inst]; + }, + .errors_len => { + try code.ensureUnusedCapacity(gpa, 6); + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const)); + // MIR is lowered during flush, so there is indeed only one thread at this time. + const errors_len = 1 + comp.zcu.?.intern_pool.global_error_set.getNamesFromMainThread().len; + leb.writeIleb128(code.fixedWriter(), errors_len) catch unreachable; + + inst += 1; + continue :loop tags[inst]; + }, + .error_name_table_ref => { + try code.ensureUnusedCapacity(gpa, 11); + const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const; + code.appendAssumeCapacity(@intFromEnum(opcode)); + if (is_obj) { + try wasm.out_relocs.append(gpa, .{ + .offset = @intCast(code.items.len), + .pointee = .{ .symbol_index = try wasm.errorNameTableSymbolIndex() }, + .tag = if (is_wasm32) .memory_addr_leb else .memory_addr_leb64, + .addend = 0, + }); + code.appendNTimesAssumeCapacity(0, if (is_wasm32) 5 else 10); + + inst += 1; + continue :loop tags[inst]; + } else { + const addr: u32 = wasm.errorNameTableAddr(); + leb.writeIleb128(code.fixedWriter(), addr) catch unreachable; + + inst += 1; + continue :loop tags[inst]; + } + }, + .br_if, .br, .memory_grow, .memory_size => { + try code.ensureUnusedCapacity(gpa, 11); + code.appendAssumeCapacity(@intFromEnum(tags[inst])); + leb.writeUleb128(code.fixedWriter(), datas[inst].label) catch unreachable; -fn emitGlobal(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void { - const comp = emit.bin_file.base.comp; - const gpa = comp.gpa; - const label = emit.mir.instructions.items(.data)[inst].label; - try emit.code.append(@intFromEnum(tag)); - var buf: [5]u8 = undefined; - leb128.writeUnsignedFixed(5, &buf, label); - const global_offset = emit.offset(); - try emit.code.appendSlice(&buf); - - const atom_index = emit.bin_file.zig_object.?.navs.get(emit.owner_nav).?.atom; - const atom = emit.bin_file.getAtomPtr(atom_index); - try atom.relocs.append(gpa, .{ - .index = label, - .offset = global_offset, - .relocation_type = .R_WASM_GLOBAL_INDEX_LEB, - }); -} + inst += 1; + continue :loop tags[inst]; + }, -fn emitImm32(emit: *Emit, inst: Mir.Inst.Index) !void { - const value: i32 = emit.mir.instructions.items(.data)[inst].imm32; - try emit.code.append(std.wasm.opcode(.i32_const)); - try leb128.writeIleb128(emit.code.writer(), value); -} + .local_get, .local_set, .local_tee => { + try code.ensureUnusedCapacity(gpa, 11); + code.appendAssumeCapacity(@intFromEnum(tags[inst])); + leb.writeUleb128(code.fixedWriter(), datas[inst].local) catch unreachable; -fn emitImm64(emit: *Emit, inst: Mir.Inst.Index) !void { - const extra_index = 
emit.mir.instructions.items(.data)[inst].payload; - const value = emit.mir.extraData(Mir.Imm64, extra_index); - try emit.code.append(std.wasm.opcode(.i64_const)); - try leb128.writeIleb128(emit.code.writer(), @as(i64, @bitCast(value.data.toU64()))); -} + inst += 1; + continue :loop tags[inst]; + }, -fn emitFloat32(emit: *Emit, inst: Mir.Inst.Index) !void { - const value: f32 = emit.mir.instructions.items(.data)[inst].float32; - try emit.code.append(std.wasm.opcode(.f32_const)); - try emit.code.writer().writeInt(u32, @bitCast(value), .little); -} + .br_table => { + const extra_index = datas[inst].payload; + const extra = mir.extraData(Mir.JumpTable, extra_index); + const labels = mir.extra[extra.end..][0..extra.data.length]; + try code.ensureUnusedCapacity(gpa, 11 + 10 * labels.len); + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br_table)); + // -1 because default label is not part of length/depth. + leb.writeUleb128(code.fixedWriter(), extra.data.length - 1) catch unreachable; + for (labels) |label| leb.writeUleb128(code.fixedWriter(), label) catch unreachable; + + inst += 1; + continue :loop tags[inst]; + }, -fn emitFloat64(emit: *Emit, inst: Mir.Inst.Index) !void { - const extra_index = emit.mir.instructions.items(.data)[inst].payload; - const value = emit.mir.extraData(Mir.Float64, extra_index); - try emit.code.append(std.wasm.opcode(.f64_const)); - try emit.code.writer().writeInt(u64, value.data.toU64(), .little); -} + .call_nav => { + try code.ensureUnusedCapacity(gpa, 6); + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call)); + if (is_obj) { + try wasm.out_relocs.append(gpa, .{ + .offset = @intCast(code.items.len), + .pointee = .{ .symbol_index = try wasm.navSymbolIndex(datas[inst].nav_index) }, + .tag = .function_index_leb, + .addend = 0, + }); + code.appendNTimesAssumeCapacity(0, 5); + } else { + appendOutputFunctionIndex(code, .fromIpNav(wasm, datas[inst].nav_index)); + } + + inst += 1; + continue :loop tags[inst]; + }, -fn emitMemArg(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void { - const extra_index = emit.mir.instructions.items(.data)[inst].payload; - const mem_arg = emit.mir.extraData(Mir.MemArg, extra_index).data; - try emit.code.append(@intFromEnum(tag)); - try encodeMemArg(mem_arg, emit.code.writer()); -} + .call_indirect => { + try code.ensureUnusedCapacity(gpa, 11); + const func_ty_index = datas[inst].func_ty; + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call_indirect)); + if (is_obj) { + try wasm.out_relocs.append(gpa, .{ + .offset = @intCast(code.items.len), + .pointee = .{ .type_index = func_ty_index }, + .tag = .type_index_leb, + .addend = 0, + }); + code.appendNTimesAssumeCapacity(0, 5); + } else { + const index: Wasm.Flush.FuncTypeIndex = .fromTypeIndex(func_ty_index, &wasm.flush_buffer); + leb.writeUleb128(code.fixedWriter(), @intFromEnum(index)) catch unreachable; + } + leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // table index + + inst += 1; + continue :loop tags[inst]; + }, -fn encodeMemArg(mem_arg: Mir.MemArg, writer: anytype) !void { - // wasm encodes alignment as power of 2, rather than natural alignment - const encoded_alignment = @ctz(mem_arg.alignment); - try leb128.writeUleb128(writer, encoded_alignment); - try leb128.writeUleb128(writer, mem_arg.offset); -} + .call_tag_name => { + try code.ensureUnusedCapacity(gpa, 6); + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call)); + if (is_obj) { + try wasm.out_relocs.append(gpa, .{ + .offset = @intCast(code.items.len), + .pointee 
= .{ .symbol_index = try wasm.tagNameSymbolIndex(datas[inst].ip_index) }, + .tag = .function_index_leb, + .addend = 0, + }); + code.appendNTimesAssumeCapacity(0, 5); + } else { + appendOutputFunctionIndex(code, .fromTagNameType(wasm, datas[inst].ip_index)); + } + + inst += 1; + continue :loop tags[inst]; + }, -fn emitCall(emit: *Emit, inst: Mir.Inst.Index) !void { - const comp = emit.bin_file.base.comp; - const gpa = comp.gpa; - const label = emit.mir.instructions.items(.data)[inst].label; - try emit.code.append(std.wasm.opcode(.call)); - const call_offset = emit.offset(); - var buf: [5]u8 = undefined; - leb128.writeUnsignedFixed(5, &buf, label); - try emit.code.appendSlice(&buf); - - if (label != 0) { - const atom_index = emit.bin_file.zig_object.?.navs.get(emit.owner_nav).?.atom; - const atom = emit.bin_file.getAtomPtr(atom_index); - try atom.relocs.append(gpa, .{ - .offset = call_offset, - .index = label, - .relocation_type = .R_WASM_FUNCTION_INDEX_LEB, - }); - } -} + .call_intrinsic => { + // Although this currently uses `wasm.internString`, note that it + // *could* be changed to directly index into a preloaded strings + // table initialized based on the `Mir.Intrinsic` enum. + const symbol_name = try wasm.internString(@tagName(datas[inst].intrinsic)); + + try code.ensureUnusedCapacity(gpa, 6); + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call)); + if (is_obj) { + try wasm.out_relocs.append(gpa, .{ + .offset = @intCast(code.items.len), + .pointee = .{ .symbol_index = try wasm.symbolNameIndex(symbol_name) }, + .tag = .function_index_leb, + .addend = 0, + }); + code.appendNTimesAssumeCapacity(0, 5); + } else { + appendOutputFunctionIndex(code, .fromSymbolName(wasm, symbol_name)); + } + + inst += 1; + continue :loop tags[inst]; + }, -fn emitCallIndirect(emit: *Emit, inst: Mir.Inst.Index) !void { - const type_index = emit.mir.instructions.items(.data)[inst].label; - try emit.code.append(std.wasm.opcode(.call_indirect)); - // NOTE: If we remove unused function types in the future for incremental - // linking, we must also emit a relocation for this `type_index` - const call_offset = emit.offset(); - var buf: [5]u8 = undefined; - leb128.writeUnsignedFixed(5, &buf, type_index); - try emit.code.appendSlice(&buf); - if (type_index != 0) { - const atom_index = emit.bin_file.zig_object.?.navs.get(emit.owner_nav).?.atom; - const atom = emit.bin_file.getAtomPtr(atom_index); - try atom.relocs.append(emit.bin_file.base.comp.gpa, .{ - .offset = call_offset, - .index = type_index, - .relocation_type = .R_WASM_TYPE_INDEX_LEB, - }); - } - try leb128.writeUleb128(emit.code.writer(), @as(u32, 0)); // TODO: Emit relocation for table index -} + .global_set_sp => { + try code.ensureUnusedCapacity(gpa, 6); + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_set)); + if (is_obj) { + try wasm.out_relocs.append(gpa, .{ + .offset = @intCast(code.items.len), + .pointee = .{ .symbol_index = try wasm.stackPointerSymbolIndex() }, + .tag = .global_index_leb, + .addend = 0, + }); + code.appendNTimesAssumeCapacity(0, 5); + } else { + const sp_global: Wasm.GlobalIndex = .stack_pointer; + std.leb.writeULEB128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable; + } + + inst += 1; + continue :loop tags[inst]; + }, -fn emitFunctionIndex(emit: *Emit, inst: Mir.Inst.Index) !void { - const comp = emit.bin_file.base.comp; - const gpa = comp.gpa; - const symbol_index = emit.mir.instructions.items(.data)[inst].label; - try emit.code.append(std.wasm.opcode(.i32_const)); - const index_offset = 
emit.offset(); - var buf: [5]u8 = undefined; - leb128.writeUnsignedFixed(5, &buf, symbol_index); - try emit.code.appendSlice(&buf); - - if (symbol_index != 0) { - const atom_index = emit.bin_file.zig_object.?.navs.get(emit.owner_nav).?.atom; - const atom = emit.bin_file.getAtomPtr(atom_index); - try atom.relocs.append(gpa, .{ - .offset = index_offset, - .index = symbol_index, - .relocation_type = .R_WASM_TABLE_INDEX_SLEB, - }); - } -} + .f32_const => { + try code.ensureUnusedCapacity(gpa, 5); + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.f32_const)); + std.mem.writeInt(u32, code.addManyAsArrayAssumeCapacity(4), @bitCast(datas[inst].float32), .little); -fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void { - const extra_index = emit.mir.instructions.items(.data)[inst].payload; - const mem = emit.mir.extraData(Mir.Memory, extra_index).data; - const mem_offset = emit.offset() + 1; - const comp = emit.bin_file.base.comp; - const gpa = comp.gpa; - const target = comp.root_mod.resolved_target.result; - const is_wasm32 = target.cpu.arch == .wasm32; - if (is_wasm32) { - try emit.code.append(std.wasm.opcode(.i32_const)); - var buf: [5]u8 = undefined; - leb128.writeUnsignedFixed(5, &buf, mem.pointer); - try emit.code.appendSlice(&buf); - } else { - try emit.code.append(std.wasm.opcode(.i64_const)); - var buf: [10]u8 = undefined; - leb128.writeUnsignedFixed(10, &buf, mem.pointer); - try emit.code.appendSlice(&buf); - } + inst += 1; + continue :loop tags[inst]; + }, - if (mem.pointer != 0) { - const atom_index = emit.bin_file.zig_object.?.navs.get(emit.owner_nav).?.atom; - const atom = emit.bin_file.getAtomPtr(atom_index); - try atom.relocs.append(gpa, .{ - .offset = mem_offset, - .index = mem.pointer, - .relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_LEB else .R_WASM_MEMORY_ADDR_LEB64, - .addend = @as(i32, @intCast(mem.offset)), - }); - } -} + .f64_const => { + try code.ensureUnusedCapacity(gpa, 9); + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.f64_const)); + const float64 = mir.extraData(Mir.Float64, datas[inst].payload).data; + std.mem.writeInt(u64, code.addManyAsArrayAssumeCapacity(8), float64.toInt(), .little); -fn emitExtended(emit: *Emit, inst: Mir.Inst.Index) !void { - const extra_index = emit.mir.instructions.items(.data)[inst].payload; - const opcode = emit.mir.extra[extra_index]; - const writer = emit.code.writer(); - try emit.code.append(std.wasm.opcode(.misc_prefix)); - try leb128.writeUleb128(writer, opcode); - switch (@as(std.wasm.MiscOpcode, @enumFromInt(opcode))) { - // bulk-memory opcodes - .data_drop => { - const segment = emit.mir.extra[extra_index + 1]; - try leb128.writeUleb128(writer, segment); - }, - .memory_init => { - const segment = emit.mir.extra[extra_index + 1]; - try leb128.writeUleb128(writer, segment); - try leb128.writeUleb128(writer, @as(u32, 0)); // memory index + inst += 1; + continue :loop tags[inst]; }, - .memory_fill => { - try leb128.writeUleb128(writer, @as(u32, 0)); // memory index + .i32_const => { + try code.ensureUnusedCapacity(gpa, 6); + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const)); + leb.writeIleb128(code.fixedWriter(), datas[inst].imm32) catch unreachable; + + inst += 1; + continue :loop tags[inst]; }, - .memory_copy => { - try leb128.writeUleb128(writer, @as(u32, 0)); // dst memory index - try leb128.writeUleb128(writer, @as(u32, 0)); // src memory index + .i64_const => { + try code.ensureUnusedCapacity(gpa, 11); + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_const)); + const int64: 
i64 = @bitCast(mir.extraData(Mir.Imm64, datas[inst].payload).data.toInt()); + leb.writeIleb128(code.fixedWriter(), int64) catch unreachable; + + inst += 1; + continue :loop tags[inst]; }, - // nontrapping-float-to-int-conversion opcodes - .i32_trunc_sat_f32_s, - .i32_trunc_sat_f32_u, - .i32_trunc_sat_f64_s, - .i32_trunc_sat_f64_u, - .i64_trunc_sat_f32_s, - .i64_trunc_sat_f32_u, - .i64_trunc_sat_f64_s, - .i64_trunc_sat_f64_u, - => {}, // opcode already written - else => |tag| return emit.fail("TODO: Implement extension instruction: {s}\n", .{@tagName(tag)}), - } -} - -fn emitSimd(emit: *Emit, inst: Mir.Inst.Index) !void { - const extra_index = emit.mir.instructions.items(.data)[inst].payload; - const opcode = emit.mir.extra[extra_index]; - const writer = emit.code.writer(); - try emit.code.append(std.wasm.opcode(.simd_prefix)); - try leb128.writeUleb128(writer, opcode); - switch (@as(std.wasm.SimdOpcode, @enumFromInt(opcode))) { - .v128_store, - .v128_load, - .v128_load8_splat, - .v128_load16_splat, - .v128_load32_splat, - .v128_load64_splat, + .i32_load, + .i64_load, + .f32_load, + .f64_load, + .i32_load8_s, + .i32_load8_u, + .i32_load16_s, + .i32_load16_u, + .i64_load8_s, + .i64_load8_u, + .i64_load16_s, + .i64_load16_u, + .i64_load32_s, + .i64_load32_u, + .i32_store, + .i64_store, + .f32_store, + .f64_store, + .i32_store8, + .i32_store16, + .i64_store8, + .i64_store16, + .i64_store32, => { - const mem_arg = emit.mir.extraData(Mir.MemArg, extra_index + 1).data; - try encodeMemArg(mem_arg, writer); + try code.ensureUnusedCapacity(gpa, 1 + 20); + code.appendAssumeCapacity(@intFromEnum(tags[inst])); + encodeMemArg(code, mir.extraData(Mir.MemArg, datas[inst].payload).data); + inst += 1; + continue :loop tags[inst]; }, - .v128_const, - .i8x16_shuffle, - => { - const simd_value = emit.mir.extra[extra_index + 1 ..][0..4]; - try writer.writeAll(std.mem.asBytes(simd_value)); - }, - .i8x16_extract_lane_s, - .i8x16_extract_lane_u, - .i8x16_replace_lane, - .i16x8_extract_lane_s, - .i16x8_extract_lane_u, - .i16x8_replace_lane, - .i32x4_extract_lane, - .i32x4_replace_lane, - .i64x2_extract_lane, - .i64x2_replace_lane, - .f32x4_extract_lane, - .f32x4_replace_lane, - .f64x2_extract_lane, - .f64x2_replace_lane, + + .end, + .@"return", + .@"unreachable", + .select, + .i32_eqz, + .i32_eq, + .i32_ne, + .i32_lt_s, + .i32_lt_u, + .i32_gt_s, + .i32_gt_u, + .i32_le_s, + .i32_le_u, + .i32_ge_s, + .i32_ge_u, + .i64_eqz, + .i64_eq, + .i64_ne, + .i64_lt_s, + .i64_lt_u, + .i64_gt_s, + .i64_gt_u, + .i64_le_s, + .i64_le_u, + .i64_ge_s, + .i64_ge_u, + .f32_eq, + .f32_ne, + .f32_lt, + .f32_gt, + .f32_le, + .f32_ge, + .f64_eq, + .f64_ne, + .f64_lt, + .f64_gt, + .f64_le, + .f64_ge, + .i32_add, + .i32_sub, + .i32_mul, + .i32_div_s, + .i32_div_u, + .i32_and, + .i32_or, + .i32_xor, + .i32_shl, + .i32_shr_s, + .i32_shr_u, + .i64_add, + .i64_sub, + .i64_mul, + .i64_div_s, + .i64_div_u, + .i64_and, + .i64_or, + .i64_xor, + .i64_shl, + .i64_shr_s, + .i64_shr_u, + .f32_abs, + .f32_neg, + .f32_ceil, + .f32_floor, + .f32_trunc, + .f32_nearest, + .f32_sqrt, + .f32_add, + .f32_sub, + .f32_mul, + .f32_div, + .f32_min, + .f32_max, + .f32_copysign, + .f64_abs, + .f64_neg, + .f64_ceil, + .f64_floor, + .f64_trunc, + .f64_nearest, + .f64_sqrt, + .f64_add, + .f64_sub, + .f64_mul, + .f64_div, + .f64_min, + .f64_max, + .f64_copysign, + .i32_wrap_i64, + .i64_extend_i32_s, + .i64_extend_i32_u, + .i32_extend8_s, + .i32_extend16_s, + .i64_extend8_s, + .i64_extend16_s, + .i64_extend32_s, + .f32_demote_f64, + .f64_promote_f32, + 
.i32_reinterpret_f32, + .i64_reinterpret_f64, + .f32_reinterpret_i32, + .f64_reinterpret_i64, + .i32_trunc_f32_s, + .i32_trunc_f32_u, + .i32_trunc_f64_s, + .i32_trunc_f64_u, + .i64_trunc_f32_s, + .i64_trunc_f32_u, + .i64_trunc_f64_s, + .i64_trunc_f64_u, + .f32_convert_i32_s, + .f32_convert_i32_u, + .f32_convert_i64_s, + .f32_convert_i64_u, + .f64_convert_i32_s, + .f64_convert_i32_u, + .f64_convert_i64_s, + .f64_convert_i64_u, + .i32_rem_s, + .i32_rem_u, + .i64_rem_s, + .i64_rem_u, + .i32_popcnt, + .i64_popcnt, + .i32_clz, + .i32_ctz, + .i64_clz, + .i64_ctz, => { - try writer.writeByte(@as(u8, @intCast(emit.mir.extra[extra_index + 1]))); + try code.append(gpa, @intFromEnum(tags[inst])); + inst += 1; + continue :loop tags[inst]; }, - .i8x16_splat, - .i16x8_splat, - .i32x4_splat, - .i64x2_splat, - .f32x4_splat, - .f64x2_splat, - => {}, // opcode already written - else => |tag| return emit.fail("TODO: Implement simd instruction: {s}", .{@tagName(tag)}), - } -} -fn emitAtomic(emit: *Emit, inst: Mir.Inst.Index) !void { - const extra_index = emit.mir.instructions.items(.data)[inst].payload; - const opcode = emit.mir.extra[extra_index]; - const writer = emit.code.writer(); - try emit.code.append(std.wasm.opcode(.atomics_prefix)); - try leb128.writeUleb128(writer, opcode); - switch (@as(std.wasm.AtomicsOpcode, @enumFromInt(opcode))) { - .i32_atomic_load, - .i64_atomic_load, - .i32_atomic_load8_u, - .i32_atomic_load16_u, - .i64_atomic_load8_u, - .i64_atomic_load16_u, - .i64_atomic_load32_u, - .i32_atomic_store, - .i64_atomic_store, - .i32_atomic_store8, - .i32_atomic_store16, - .i64_atomic_store8, - .i64_atomic_store16, - .i64_atomic_store32, - .i32_atomic_rmw_add, - .i64_atomic_rmw_add, - .i32_atomic_rmw8_add_u, - .i32_atomic_rmw16_add_u, - .i64_atomic_rmw8_add_u, - .i64_atomic_rmw16_add_u, - .i64_atomic_rmw32_add_u, - .i32_atomic_rmw_sub, - .i64_atomic_rmw_sub, - .i32_atomic_rmw8_sub_u, - .i32_atomic_rmw16_sub_u, - .i64_atomic_rmw8_sub_u, - .i64_atomic_rmw16_sub_u, - .i64_atomic_rmw32_sub_u, - .i32_atomic_rmw_and, - .i64_atomic_rmw_and, - .i32_atomic_rmw8_and_u, - .i32_atomic_rmw16_and_u, - .i64_atomic_rmw8_and_u, - .i64_atomic_rmw16_and_u, - .i64_atomic_rmw32_and_u, - .i32_atomic_rmw_or, - .i64_atomic_rmw_or, - .i32_atomic_rmw8_or_u, - .i32_atomic_rmw16_or_u, - .i64_atomic_rmw8_or_u, - .i64_atomic_rmw16_or_u, - .i64_atomic_rmw32_or_u, - .i32_atomic_rmw_xor, - .i64_atomic_rmw_xor, - .i32_atomic_rmw8_xor_u, - .i32_atomic_rmw16_xor_u, - .i64_atomic_rmw8_xor_u, - .i64_atomic_rmw16_xor_u, - .i64_atomic_rmw32_xor_u, - .i32_atomic_rmw_xchg, - .i64_atomic_rmw_xchg, - .i32_atomic_rmw8_xchg_u, - .i32_atomic_rmw16_xchg_u, - .i64_atomic_rmw8_xchg_u, - .i64_atomic_rmw16_xchg_u, - .i64_atomic_rmw32_xchg_u, - - .i32_atomic_rmw_cmpxchg, - .i64_atomic_rmw_cmpxchg, - .i32_atomic_rmw8_cmpxchg_u, - .i32_atomic_rmw16_cmpxchg_u, - .i64_atomic_rmw8_cmpxchg_u, - .i64_atomic_rmw16_cmpxchg_u, - .i64_atomic_rmw32_cmpxchg_u, - => { - const mem_arg = emit.mir.extraData(Mir.MemArg, extra_index + 1).data; - try encodeMemArg(mem_arg, writer); + .misc_prefix => { + try code.ensureUnusedCapacity(gpa, 6 + 6); + const extra_index = datas[inst].payload; + const opcode = mir.extra[extra_index]; + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.misc_prefix)); + leb.writeUleb128(code.fixedWriter(), opcode) catch unreachable; + switch (@as(std.wasm.MiscOpcode, @enumFromInt(opcode))) { + // bulk-memory opcodes + .data_drop => { + const segment = mir.extra[extra_index + 1]; + leb.writeUleb128(code.fixedWriter(), segment) catch 
unreachable; + + inst += 1; + continue :loop tags[inst]; + }, + .memory_init => { + const segment = mir.extra[extra_index + 1]; + leb.writeUleb128(code.fixedWriter(), segment) catch unreachable; + leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // memory index + + inst += 1; + continue :loop tags[inst]; + }, + .memory_fill => { + leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // memory index + + inst += 1; + continue :loop tags[inst]; + }, + .memory_copy => { + leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // dst memory index + leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // src memory index + + inst += 1; + continue :loop tags[inst]; + }, + + // nontrapping-float-to-int-conversion opcodes + .i32_trunc_sat_f32_s, + .i32_trunc_sat_f32_u, + .i32_trunc_sat_f64_s, + .i32_trunc_sat_f64_u, + .i64_trunc_sat_f32_s, + .i64_trunc_sat_f32_u, + .i64_trunc_sat_f64_s, + .i64_trunc_sat_f64_u, + => { + inst += 1; + continue :loop tags[inst]; + }, + + .table_init => @panic("TODO"), + .elem_drop => @panic("TODO"), + .table_copy => @panic("TODO"), + .table_grow => @panic("TODO"), + .table_size => @panic("TODO"), + .table_fill => @panic("TODO"), + + _ => unreachable, + } + comptime unreachable; }, - .atomic_fence => { - // TODO: When multi-memory proposal is accepted and implemented in the compiler, - // change this to (user-)specified index, rather than hardcode it to memory index 0. - const memory_index: u32 = 0; - try leb128.writeUleb128(writer, memory_index); + .simd_prefix => { + try code.ensureUnusedCapacity(gpa, 6 + 20); + const extra_index = datas[inst].payload; + const opcode = mir.extra[extra_index]; + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.simd_prefix)); + leb.writeUleb128(code.fixedWriter(), opcode) catch unreachable; + switch (@as(std.wasm.SimdOpcode, @enumFromInt(opcode))) { + .v128_store, + .v128_load, + .v128_load8_splat, + .v128_load16_splat, + .v128_load32_splat, + .v128_load64_splat, + => { + encodeMemArg(code, mir.extraData(Mir.MemArg, extra_index + 1).data); + inst += 1; + continue :loop tags[inst]; + }, + .v128_const, .i8x16_shuffle => { + code.appendSliceAssumeCapacity(std.mem.asBytes(mir.extra[extra_index + 1 ..][0..4])); + inst += 1; + continue :loop tags[inst]; + }, + .i8x16_extract_lane_s, + .i8x16_extract_lane_u, + .i8x16_replace_lane, + .i16x8_extract_lane_s, + .i16x8_extract_lane_u, + .i16x8_replace_lane, + .i32x4_extract_lane, + .i32x4_replace_lane, + .i64x2_extract_lane, + .i64x2_replace_lane, + .f32x4_extract_lane, + .f32x4_replace_lane, + .f64x2_extract_lane, + .f64x2_replace_lane, + => { + code.appendAssumeCapacity(@intCast(mir.extra[extra_index + 1])); + inst += 1; + continue :loop tags[inst]; + }, + .i8x16_splat, + .i16x8_splat, + .i32x4_splat, + .i64x2_splat, + .f32x4_splat, + .f64x2_splat, + => { + inst += 1; + continue :loop tags[inst]; + }, + + .v128_load8x8_s => @panic("TODO"), + .v128_load8x8_u => @panic("TODO"), + .v128_load16x4_s => @panic("TODO"), + .v128_load16x4_u => @panic("TODO"), + .v128_load32x2_s => @panic("TODO"), + .v128_load32x2_u => @panic("TODO"), + .i8x16_swizzle => @panic("TODO"), + .i8x16_eq => @panic("TODO"), + .i16x8_eq => @panic("TODO"), + .i32x4_eq => @panic("TODO"), + .i8x16_ne => @panic("TODO"), + .i16x8_ne => @panic("TODO"), + .i32x4_ne => @panic("TODO"), + .i8x16_lt_s => @panic("TODO"), + .i16x8_lt_s => @panic("TODO"), + .i32x4_lt_s => @panic("TODO"), + .i8x16_lt_u => @panic("TODO"), + .i16x8_lt_u => @panic("TODO"), + .i32x4_lt_u => 
@panic("TODO"), + .i8x16_gt_s => @panic("TODO"), + .i16x8_gt_s => @panic("TODO"), + .i32x4_gt_s => @panic("TODO"), + .i8x16_gt_u => @panic("TODO"), + .i16x8_gt_u => @panic("TODO"), + .i32x4_gt_u => @panic("TODO"), + .i8x16_le_s => @panic("TODO"), + .i16x8_le_s => @panic("TODO"), + .i32x4_le_s => @panic("TODO"), + .i8x16_le_u => @panic("TODO"), + .i16x8_le_u => @panic("TODO"), + .i32x4_le_u => @panic("TODO"), + .i8x16_ge_s => @panic("TODO"), + .i16x8_ge_s => @panic("TODO"), + .i32x4_ge_s => @panic("TODO"), + .i8x16_ge_u => @panic("TODO"), + .i16x8_ge_u => @panic("TODO"), + .i32x4_ge_u => @panic("TODO"), + .f32x4_eq => @panic("TODO"), + .f64x2_eq => @panic("TODO"), + .f32x4_ne => @panic("TODO"), + .f64x2_ne => @panic("TODO"), + .f32x4_lt => @panic("TODO"), + .f64x2_lt => @panic("TODO"), + .f32x4_gt => @panic("TODO"), + .f64x2_gt => @panic("TODO"), + .f32x4_le => @panic("TODO"), + .f64x2_le => @panic("TODO"), + .f32x4_ge => @panic("TODO"), + .f64x2_ge => @panic("TODO"), + .v128_not => @panic("TODO"), + .v128_and => @panic("TODO"), + .v128_andnot => @panic("TODO"), + .v128_or => @panic("TODO"), + .v128_xor => @panic("TODO"), + .v128_bitselect => @panic("TODO"), + .v128_any_true => @panic("TODO"), + .v128_load8_lane => @panic("TODO"), + .v128_load16_lane => @panic("TODO"), + .v128_load32_lane => @panic("TODO"), + .v128_load64_lane => @panic("TODO"), + .v128_store8_lane => @panic("TODO"), + .v128_store16_lane => @panic("TODO"), + .v128_store32_lane => @panic("TODO"), + .v128_store64_lane => @panic("TODO"), + .v128_load32_zero => @panic("TODO"), + .v128_load64_zero => @panic("TODO"), + .f32x4_demote_f64x2_zero => @panic("TODO"), + .f64x2_promote_low_f32x4 => @panic("TODO"), + .i8x16_abs => @panic("TODO"), + .i16x8_abs => @panic("TODO"), + .i32x4_abs => @panic("TODO"), + .i64x2_abs => @panic("TODO"), + .i8x16_neg => @panic("TODO"), + .i16x8_neg => @panic("TODO"), + .i32x4_neg => @panic("TODO"), + .i64x2_neg => @panic("TODO"), + .i8x16_popcnt => @panic("TODO"), + .i16x8_q15mulr_sat_s => @panic("TODO"), + .i8x16_all_true => @panic("TODO"), + .i16x8_all_true => @panic("TODO"), + .i32x4_all_true => @panic("TODO"), + .i64x2_all_true => @panic("TODO"), + .i8x16_bitmask => @panic("TODO"), + .i16x8_bitmask => @panic("TODO"), + .i32x4_bitmask => @panic("TODO"), + .i64x2_bitmask => @panic("TODO"), + .i8x16_narrow_i16x8_s => @panic("TODO"), + .i16x8_narrow_i32x4_s => @panic("TODO"), + .i8x16_narrow_i16x8_u => @panic("TODO"), + .i16x8_narrow_i32x4_u => @panic("TODO"), + .f32x4_ceil => @panic("TODO"), + .i16x8_extend_low_i8x16_s => @panic("TODO"), + .i32x4_extend_low_i16x8_s => @panic("TODO"), + .i64x2_extend_low_i32x4_s => @panic("TODO"), + .f32x4_floor => @panic("TODO"), + .i16x8_extend_high_i8x16_s => @panic("TODO"), + .i32x4_extend_high_i16x8_s => @panic("TODO"), + .i64x2_extend_high_i32x4_s => @panic("TODO"), + .f32x4_trunc => @panic("TODO"), + .i16x8_extend_low_i8x16_u => @panic("TODO"), + .i32x4_extend_low_i16x8_u => @panic("TODO"), + .i64x2_extend_low_i32x4_u => @panic("TODO"), + .f32x4_nearest => @panic("TODO"), + .i16x8_extend_high_i8x16_u => @panic("TODO"), + .i32x4_extend_high_i16x8_u => @panic("TODO"), + .i64x2_extend_high_i32x4_u => @panic("TODO"), + .i8x16_shl => @panic("TODO"), + .i16x8_shl => @panic("TODO"), + .i32x4_shl => @panic("TODO"), + .i64x2_shl => @panic("TODO"), + .i8x16_shr_s => @panic("TODO"), + .i16x8_shr_s => @panic("TODO"), + .i32x4_shr_s => @panic("TODO"), + .i64x2_shr_s => @panic("TODO"), + .i8x16_shr_u => @panic("TODO"), + .i16x8_shr_u => @panic("TODO"), + .i32x4_shr_u => 
@panic("TODO"), + .i64x2_shr_u => @panic("TODO"), + .i8x16_add => @panic("TODO"), + .i16x8_add => @panic("TODO"), + .i32x4_add => @panic("TODO"), + .i64x2_add => @panic("TODO"), + .i8x16_add_sat_s => @panic("TODO"), + .i16x8_add_sat_s => @panic("TODO"), + .i8x16_add_sat_u => @panic("TODO"), + .i16x8_add_sat_u => @panic("TODO"), + .i8x16_sub => @panic("TODO"), + .i16x8_sub => @panic("TODO"), + .i32x4_sub => @panic("TODO"), + .i64x2_sub => @panic("TODO"), + .i8x16_sub_sat_s => @panic("TODO"), + .i16x8_sub_sat_s => @panic("TODO"), + .i8x16_sub_sat_u => @panic("TODO"), + .i16x8_sub_sat_u => @panic("TODO"), + .f64x2_ceil => @panic("TODO"), + .f64x2_nearest => @panic("TODO"), + .f64x2_floor => @panic("TODO"), + .i16x8_mul => @panic("TODO"), + .i32x4_mul => @panic("TODO"), + .i64x2_mul => @panic("TODO"), + .i8x16_min_s => @panic("TODO"), + .i16x8_min_s => @panic("TODO"), + .i32x4_min_s => @panic("TODO"), + .i64x2_eq => @panic("TODO"), + .i8x16_min_u => @panic("TODO"), + .i16x8_min_u => @panic("TODO"), + .i32x4_min_u => @panic("TODO"), + .i64x2_ne => @panic("TODO"), + .i8x16_max_s => @panic("TODO"), + .i16x8_max_s => @panic("TODO"), + .i32x4_max_s => @panic("TODO"), + .i64x2_lt_s => @panic("TODO"), + .i8x16_max_u => @panic("TODO"), + .i16x8_max_u => @panic("TODO"), + .i32x4_max_u => @panic("TODO"), + .i64x2_gt_s => @panic("TODO"), + .f64x2_trunc => @panic("TODO"), + .i32x4_dot_i16x8_s => @panic("TODO"), + .i64x2_le_s => @panic("TODO"), + .i8x16_avgr_u => @panic("TODO"), + .i16x8_avgr_u => @panic("TODO"), + .i64x2_ge_s => @panic("TODO"), + .i16x8_extadd_pairwise_i8x16_s => @panic("TODO"), + .i16x8_extmul_low_i8x16_s => @panic("TODO"), + .i32x4_extmul_low_i16x8_s => @panic("TODO"), + .i64x2_extmul_low_i32x4_s => @panic("TODO"), + .i16x8_extadd_pairwise_i8x16_u => @panic("TODO"), + .i16x8_extmul_high_i8x16_s => @panic("TODO"), + .i32x4_extmul_high_i16x8_s => @panic("TODO"), + .i64x2_extmul_high_i32x4_s => @panic("TODO"), + .i32x4_extadd_pairwise_i16x8_s => @panic("TODO"), + .i16x8_extmul_low_i8x16_u => @panic("TODO"), + .i32x4_extmul_low_i16x8_u => @panic("TODO"), + .i64x2_extmul_low_i32x4_u => @panic("TODO"), + .i32x4_extadd_pairwise_i16x8_u => @panic("TODO"), + .i16x8_extmul_high_i8x16_u => @panic("TODO"), + .i32x4_extmul_high_i16x8_u => @panic("TODO"), + .i64x2_extmul_high_i32x4_u => @panic("TODO"), + .f32x4_abs => @panic("TODO"), + .f64x2_abs => @panic("TODO"), + .f32x4_neg => @panic("TODO"), + .f64x2_neg => @panic("TODO"), + .f32x4_sqrt => @panic("TODO"), + .f64x2_sqrt => @panic("TODO"), + .f32x4_add => @panic("TODO"), + .f64x2_add => @panic("TODO"), + .f32x4_sub => @panic("TODO"), + .f64x2_sub => @panic("TODO"), + .f32x4_mul => @panic("TODO"), + .f64x2_mul => @panic("TODO"), + .f32x4_div => @panic("TODO"), + .f64x2_div => @panic("TODO"), + .f32x4_min => @panic("TODO"), + .f64x2_min => @panic("TODO"), + .f32x4_max => @panic("TODO"), + .f64x2_max => @panic("TODO"), + .f32x4_pmin => @panic("TODO"), + .f64x2_pmin => @panic("TODO"), + .f32x4_pmax => @panic("TODO"), + .f64x2_pmax => @panic("TODO"), + .i32x4_trunc_sat_f32x4_s => @panic("TODO"), + .i32x4_trunc_sat_f32x4_u => @panic("TODO"), + .f32x4_convert_i32x4_s => @panic("TODO"), + .f32x4_convert_i32x4_u => @panic("TODO"), + .i32x4_trunc_sat_f64x2_s_zero => @panic("TODO"), + .i32x4_trunc_sat_f64x2_u_zero => @panic("TODO"), + .f64x2_convert_low_i32x4_s => @panic("TODO"), + .f64x2_convert_low_i32x4_u => @panic("TODO"), + .i8x16_relaxed_swizzle => @panic("TODO"), + .i32x4_relaxed_trunc_f32x4_s => @panic("TODO"), + .i32x4_relaxed_trunc_f32x4_u => 
@panic("TODO"), + .i32x4_relaxed_trunc_f64x2_s_zero => @panic("TODO"), + .i32x4_relaxed_trunc_f64x2_u_zero => @panic("TODO"), + .f32x4_relaxed_madd => @panic("TODO"), + .f32x4_relaxed_nmadd => @panic("TODO"), + .f64x2_relaxed_madd => @panic("TODO"), + .f64x2_relaxed_nmadd => @panic("TODO"), + .i8x16_relaxed_laneselect => @panic("TODO"), + .i16x8_relaxed_laneselect => @panic("TODO"), + .i32x4_relaxed_laneselect => @panic("TODO"), + .i64x2_relaxed_laneselect => @panic("TODO"), + .f32x4_relaxed_min => @panic("TODO"), + .f32x4_relaxed_max => @panic("TODO"), + .f64x2_relaxed_min => @panic("TODO"), + .f64x2_relaxed_max => @panic("TODO"), + .i16x8_relaxed_q15mulr_s => @panic("TODO"), + .i16x8_relaxed_dot_i8x16_i7x16_s => @panic("TODO"), + .i32x4_relaxed_dot_i8x16_i7x16_add_s => @panic("TODO"), + .f32x4_relaxed_dot_bf16x8_add_f32x4 => @panic("TODO"), + } + comptime unreachable; + }, + .atomics_prefix => { + try code.ensureUnusedCapacity(gpa, 6 + 20); + + const extra_index = datas[inst].payload; + const opcode = mir.extra[extra_index]; + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix)); + leb.writeUleb128(code.fixedWriter(), opcode) catch unreachable; + switch (@as(std.wasm.AtomicsOpcode, @enumFromInt(opcode))) { + .i32_atomic_load, + .i64_atomic_load, + .i32_atomic_load8_u, + .i32_atomic_load16_u, + .i64_atomic_load8_u, + .i64_atomic_load16_u, + .i64_atomic_load32_u, + .i32_atomic_store, + .i64_atomic_store, + .i32_atomic_store8, + .i32_atomic_store16, + .i64_atomic_store8, + .i64_atomic_store16, + .i64_atomic_store32, + .i32_atomic_rmw_add, + .i64_atomic_rmw_add, + .i32_atomic_rmw8_add_u, + .i32_atomic_rmw16_add_u, + .i64_atomic_rmw8_add_u, + .i64_atomic_rmw16_add_u, + .i64_atomic_rmw32_add_u, + .i32_atomic_rmw_sub, + .i64_atomic_rmw_sub, + .i32_atomic_rmw8_sub_u, + .i32_atomic_rmw16_sub_u, + .i64_atomic_rmw8_sub_u, + .i64_atomic_rmw16_sub_u, + .i64_atomic_rmw32_sub_u, + .i32_atomic_rmw_and, + .i64_atomic_rmw_and, + .i32_atomic_rmw8_and_u, + .i32_atomic_rmw16_and_u, + .i64_atomic_rmw8_and_u, + .i64_atomic_rmw16_and_u, + .i64_atomic_rmw32_and_u, + .i32_atomic_rmw_or, + .i64_atomic_rmw_or, + .i32_atomic_rmw8_or_u, + .i32_atomic_rmw16_or_u, + .i64_atomic_rmw8_or_u, + .i64_atomic_rmw16_or_u, + .i64_atomic_rmw32_or_u, + .i32_atomic_rmw_xor, + .i64_atomic_rmw_xor, + .i32_atomic_rmw8_xor_u, + .i32_atomic_rmw16_xor_u, + .i64_atomic_rmw8_xor_u, + .i64_atomic_rmw16_xor_u, + .i64_atomic_rmw32_xor_u, + .i32_atomic_rmw_xchg, + .i64_atomic_rmw_xchg, + .i32_atomic_rmw8_xchg_u, + .i32_atomic_rmw16_xchg_u, + .i64_atomic_rmw8_xchg_u, + .i64_atomic_rmw16_xchg_u, + .i64_atomic_rmw32_xchg_u, + + .i32_atomic_rmw_cmpxchg, + .i64_atomic_rmw_cmpxchg, + .i32_atomic_rmw8_cmpxchg_u, + .i32_atomic_rmw16_cmpxchg_u, + .i64_atomic_rmw8_cmpxchg_u, + .i64_atomic_rmw16_cmpxchg_u, + .i64_atomic_rmw32_cmpxchg_u, + => { + const mem_arg = mir.extraData(Mir.MemArg, extra_index + 1).data; + encodeMemArg(code, mem_arg); + inst += 1; + continue :loop tags[inst]; + }, + .atomic_fence => { + // Hard-codes memory index 0 since multi-memory proposal is + // not yet accepted nor implemented. 
+ const memory_index: u32 = 0; + leb.writeUleb128(code.fixedWriter(), memory_index) catch unreachable; + inst += 1; + continue :loop tags[inst]; + }, + .memory_atomic_notify => @panic("TODO"), + .memory_atomic_wait32 => @panic("TODO"), + .memory_atomic_wait64 => @panic("TODO"), + } + comptime unreachable; }, - else => |tag| return emit.fail("TODO: Implement atomic instruction: {s}", .{@tagName(tag)}), } + comptime unreachable; } -fn emitMemFill(emit: *Emit) !void { - try emit.code.append(0xFC); - try emit.code.append(0x0B); - // When multi-memory proposal reaches phase 4, we - // can emit a different memory index here. - // For now we will always emit index 0. - try leb128.writeUleb128(emit.code.writer(), @as(u32, 0)); -} - -fn emitDbgLine(emit: *Emit, inst: Mir.Inst.Index) !void { - const extra_index = emit.mir.instructions.items(.data)[inst].payload; - const dbg_line = emit.mir.extraData(Mir.DbgLineColumn, extra_index).data; - try emit.dbgAdvancePCAndLine(dbg_line.line, dbg_line.column); +/// Asserts 20 unused capacity. +fn encodeMemArg(code: *std.ArrayListUnmanaged(u8), mem_arg: Mir.MemArg) void { + assert(code.unusedCapacitySlice().len >= 20); + // Wasm encodes alignment as power of 2, rather than natural alignment. + const encoded_alignment = @ctz(mem_arg.alignment); + leb.writeUleb128(code.fixedWriter(), encoded_alignment) catch unreachable; + leb.writeUleb128(code.fixedWriter(), mem_arg.offset) catch unreachable; } -fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) !void { - if (emit.dbg_output != .dwarf) return; +fn uavRefOffObj(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.UavRefOffObj, is_wasm32: bool) !void { + const comp = wasm.base.comp; + const gpa = comp.gpa; + const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const; - const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line)); - const delta_pc = emit.offset() - emit.prev_di_offset; - // TODO: This must emit a relocation to calculate the offset relative - // to the code section start. 
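// A hedged sketch, not part of this patch, showing what the atomics lowering and
// `encodeMemArg` above produce for an `i32.atomic.load` with natural alignment 4
// and offset 8: the 0xFE atomics prefix, the sub-opcode as ULEB128, then the
// memarg with its alignment encoded as log2(4) = 2 rather than 4 itself.
test "example: encoding an i32.atomic.load memarg" {
    const gpa = std.testing.allocator;
    var buf: std.ArrayListUnmanaged(u8) = .{};
    defer buf.deinit(gpa);
    try buf.ensureUnusedCapacity(gpa, 6 + 20);

    buf.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix));
    std.leb.writeUleb128(buf.fixedWriter(), @intFromEnum(std.wasm.AtomicsOpcode.i32_atomic_load)) catch unreachable;
    std.leb.writeUleb128(buf.fixedWriter(), @ctz(@as(u32, 4))) catch unreachable; // encoded alignment
    std.leb.writeUleb128(buf.fixedWriter(), @as(u32, 8)) catch unreachable; // offset
    // 0xFE 0x10 is i32.atomic.load in the threads proposal.
    try std.testing.expectEqualSlices(u8, &.{ 0xFE, 0x10, 0x02, 0x08 }, buf.items);
}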
- try emit.dbg_output.dwarf.advancePCAndLine(delta_line, delta_pc); + try code.ensureUnusedCapacity(gpa, 11); + code.appendAssumeCapacity(@intFromEnum(opcode)); - emit.prev_di_line = line; - emit.prev_di_column = column; - emit.prev_di_offset = emit.offset(); + try wasm.out_relocs.append(gpa, .{ + .offset = @intCast(code.items.len), + .pointee = .{ .symbol_index = try wasm.uavSymbolIndex(data.uav_obj.key(wasm).*) }, + .tag = if (is_wasm32) .memory_addr_leb else .memory_addr_leb64, + .addend = data.offset, + }); + code.appendNTimesAssumeCapacity(0, if (is_wasm32) 5 else 10); } -fn emitDbgPrologueEnd(emit: *Emit) !void { - if (emit.dbg_output != .dwarf) return; +fn uavRefOffExe(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.UavRefOffExe, is_wasm32: bool) !void { + const comp = wasm.base.comp; + const gpa = comp.gpa; + const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const; + + try code.ensureUnusedCapacity(gpa, 11); + code.appendAssumeCapacity(@intFromEnum(opcode)); - try emit.dbg_output.dwarf.setPrologueEnd(); - try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column); + const addr = wasm.uavAddr(data.uav_exe); + leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(@as(i64, addr) + data.offset))) catch unreachable; } -fn emitDbgEpilogueBegin(emit: *Emit) !void { - if (emit.dbg_output != .dwarf) return; +fn navRefOff(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.NavRefOff, is_wasm32: bool) !void { + const comp = wasm.base.comp; + const zcu = comp.zcu.?; + const ip = &zcu.intern_pool; + const gpa = comp.gpa; + const is_obj = comp.config.output_mode == .Obj; + const nav_ty = ip.getNav(data.nav_index).typeOf(ip); + assert(!ip.isFunctionType(nav_ty)); + + try code.ensureUnusedCapacity(gpa, 11); + + const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const; + code.appendAssumeCapacity(@intFromEnum(opcode)); + if (is_obj) { + try wasm.out_relocs.append(gpa, .{ + .offset = @intCast(code.items.len), + .pointee = .{ .symbol_index = try wasm.navSymbolIndex(data.nav_index) }, + .tag = if (is_wasm32) .memory_addr_leb else .memory_addr_leb64, + .addend = data.offset, + }); + code.appendNTimesAssumeCapacity(0, if (is_wasm32) 5 else 10); + } else { + const addr = wasm.navAddr(data.nav_index); + leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(@as(i64, addr) + data.offset))) catch unreachable; + } +} - try emit.dbg_output.dwarf.setEpilogueBegin(); - try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column); +fn appendOutputFunctionIndex(code: *std.ArrayListUnmanaged(u8), i: Wasm.OutputFunctionIndex) void { + leb.writeUleb128(code.fixedWriter(), @intFromEnum(i)) catch unreachable; } diff --git a/src/arch/wasm/Mir.zig b/src/arch/wasm/Mir.zig index 2d4f624b22c5..5c8c558926be 100644 --- a/src/arch/wasm/Mir.zig +++ b/src/arch/wasm/Mir.zig @@ -7,11 +7,15 @@ //! and known jump labels for blocks. const Mir = @This(); +const InternPool = @import("../../InternPool.zig"); +const Wasm = @import("../../link/Wasm.zig"); +const builtin = @import("builtin"); const std = @import("std"); +const assert = std.debug.assert; -/// A struct of array that represents each individual wasm -instructions: std.MultiArrayList(Inst).Slice, +instruction_tags: []const Inst.Tag, +instruction_datas: []const Inst.Data, /// A slice of indexes where the meaning of the data is determined by the /// `Inst.Tag` value. extra: []const u32, @@ -26,16 +30,14 @@ pub const Inst = struct { /// The position of a given MIR isntruction with the instruction list. 
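// A hedged aside, not part of this patch: the 5- and 10-byte zero runs reserved
// above for relocated addresses are the maximum ULEB128 widths of a 32-bit and a
// 64-bit value (ceil(32/7) = 5, ceil(64/7) = 10), so the linker can patch the
// final address in place without shifting any code.
test "example: maximum uleb128 widths behind memory_addr_leb relocations" {
    var buf: [10]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    try std.leb.writeUleb128(fbs.writer(), @as(u32, std.math.maxInt(u32)));
    try std.testing.expectEqual(@as(usize, 5), fbs.pos);
    fbs.reset();
    try std.leb.writeUleb128(fbs.writer(), @as(u64, std.math.maxInt(u64)));
    try std.testing.expectEqual(@as(usize, 10), fbs.pos);
}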
pub const Index = u32; - /// Contains all possible wasm opcodes the Zig compiler may emit - /// Rather than re-using std.wasm.Opcode, we only declare the opcodes - /// we need, and also use this possibility to document how to access - /// their payload. - /// - /// Note: Uses its actual opcode value representation to easily convert - /// to and from its binary representation. + /// Some tags match wasm opcode values to facilitate trivial lowering. pub const Tag = enum(u8) { - /// Uses `nop` + /// Uses `tag`. @"unreachable" = 0x00, + /// Emits epilogue begin debug information. Marks the end of the function. + /// + /// Uses `tag` (no additional data). + dbg_epilogue_begin, /// Creates a new block that can be jump from. /// /// Type of the block is given in data `block_type` @@ -44,56 +46,92 @@ pub const Inst = struct { /// /// Type of the loop is given in data `block_type` loop = 0x03, + /// Lowers to an i32_const (wasm32) or i64_const (wasm64) which is the + /// memory address of an unnamed constant. When emitting an object + /// file, this adds a relocation. + /// + /// This may not refer to a function. + /// + /// Uses `ip_index`. + uav_ref, + /// Lowers to an i32_const (wasm32) or i64_const (wasm64) which is the + /// memory address of an unnamed constant, offset by an integer value. + /// When emitting an object file, this adds a relocation. + /// + /// This may not refer to a function. + /// + /// Uses `payload` pointing to a `UavRefOff`. + uav_ref_off, + /// Lowers to an i32_const (wasm32) or i64_const (wasm64) which is the + /// memory address of a named constant. + /// + /// May not refer to a function. + /// + /// Uses `nav_index`. + nav_ref, + /// Lowers to an i32_const (wasm32) or i64_const (wasm64) which is the + /// memory address of named constant, offset by an integer value. + /// When emitting an object file, this adds a relocation. + /// + /// May not refer to a function. + /// + /// Uses `payload` pointing to a `NavRefOff`. + nav_ref_off, + /// Lowers to an i32_const which is the index of the function in the + /// table section. + /// + /// Uses `indirect_function_table_index`. + func_ref, /// Inserts debug information about the current line and column /// of the source code /// /// Uses `payload` of which the payload type is `DbgLineColumn` - dbg_line = 0x06, - /// Emits epilogue begin debug information - /// - /// Uses `nop` - dbg_epilogue_begin = 0x07, - /// Emits prologue end debug information - /// - /// Uses `nop` - dbg_prologue_end = 0x08, + dbg_line, + /// Lowers to an i32_const containing the number of unique Zig error + /// names. + /// Uses `tag`. + errors_len, /// Represents the end of a function body or an initialization expression /// - /// Payload is `nop` + /// Uses `tag` (no additional data). end = 0x0B, /// Breaks from the current block to a label /// - /// Data is `label` where index represents the label to jump to + /// Uses `label` where index represents the label to jump to br = 0x0C, /// Breaks from the current block if the stack value is non-zero /// - /// Data is `label` where index represents the label to jump to + /// Uses `label` where index represents the label to jump to br_if = 0x0D, /// Jump table that takes the stack value as an index where each value /// represents the label to jump to. /// /// Data is extra of which the Payload's type is `JumpTable` - br_table = 0x0E, + br_table, /// Returns from the function /// - /// Uses `nop` + /// Uses `tag`. 
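// A hedged aside, not part of this patch: tags that keep their wasm opcode
// value can be lowered with a bare `@intFromEnum`, which is what "trivial
// lowering" above refers to; renumbered tags (e.g. `br_table`) instead go
// through an explicit switch at emit time.
comptime {
    assert(@intFromEnum(std.wasm.Opcode.end) == 0x0B); // matches `end` above
    assert(@intFromEnum(std.wasm.Opcode.br) == 0x0C); // matches `br` above
}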
@"return" = 0x0F, - /// Calls a function by its index - /// - /// Uses `label` - call = 0x10, + /// Lowers to an i32_const (wasm32) or i64_const (wasm64) containing + /// the base address of the table of error code names, with each + /// element being a null-terminated slice. + /// + /// Uses `tag`. + error_name_table_ref, + /// Calls a function using `nav_index`. + call_nav, /// Calls a function pointer by its function signature /// and index into the function table. /// - /// Uses `label` - call_indirect = 0x11, - /// Contains a symbol to a function pointer - /// uses `label` - /// - /// Note: This uses `0x16` as value which is reserved by the WebAssembly - /// specification but unused, meaning we must update this if the specification were to - /// use this value. - function_index = 0x16, + /// Uses `func_ty` + call_indirect, + /// Calls a function by its index. + /// + /// The function is the auto-generated tag name function for the type + /// provided in `ip_index`. + call_tag_name, + /// Lowers to a `call` instruction, using `intrinsic`. + call_intrinsic, /// Pops three values from the stack and pushes /// the first or second value dependent on the third value. /// Uses `tag` @@ -112,15 +150,11 @@ pub const Inst = struct { /// /// Uses `label` local_tee = 0x22, - /// Loads a (mutable) global at given index onto the stack - /// - /// Uses `label` - global_get = 0x23, - /// Pops a value from the stack and sets the global at given index. - /// Note: Both types must be equal and global must be marked mutable. + /// Pops a value from the stack and sets the stack pointer global. + /// The value must be the same type as the stack pointer global. /// - /// Uses `label`. - global_set = 0x24, + /// Uses `tag` (no additional data). + global_set_sp, /// Loads a 32-bit integer from memory (data section) onto the stack /// Pops the value from the stack which represents the offset into memory. /// @@ -256,19 +290,19 @@ pub const Inst = struct { /// Loads a 32-bit signed immediate value onto the stack /// /// Uses `imm32` - i32_const = 0x41, + i32_const, /// Loads a i64-bit signed immediate value onto the stack /// /// uses `payload` of type `Imm64` - i64_const = 0x42, + i64_const, /// Loads a 32-bit float value onto the stack. /// /// Uses `float32` - f32_const = 0x43, + f32_const, /// Loads a 64-bit float value onto the stack. /// /// Uses `payload` of type `Float64` - f64_const = 0x44, + f64_const, /// Uses `tag` i32_eqz = 0x45, /// Uses `tag` @@ -522,25 +556,19 @@ pub const Inst = struct { /// /// The `data` field depends on the extension instruction and /// may contain additional data. - misc_prefix = 0xFC, + misc_prefix, /// The instruction consists of a simd opcode. /// The actual simd-opcode is found at payload's index. /// /// The `data` field depends on the simd instruction and /// may contain additional data. - simd_prefix = 0xFD, + simd_prefix, /// The instruction consists of an atomics opcode. /// The actual atomics-opcode is found at payload's index. /// /// The `data` field depends on the atomics instruction and /// may contain additional data. atomics_prefix = 0xFE, - /// Contains a symbol to a memory address - /// Uses `label` - /// - /// Note: This uses `0xFF` as value as it is unused and not reserved - /// by the wasm specification, making it safe to use. - memory_address = 0xFF, /// From a given wasm opcode, returns a MIR tag. 
pub fn fromOpcode(opcode: std.wasm.Opcode) Tag { @@ -560,26 +588,41 @@ pub const Inst = struct { /// Uses no additional data tag: void, /// Contains the result type of a block - /// - /// Used by `block` and `loop` - block_type: u8, - /// Contains an u32 index into a wasm section entry, such as a local. - /// Note: This is not an index to another instruction. - /// - /// Used by e.g. `local_get`, `local_set`, etc. + block_type: std.wasm.BlockType, + /// Label: Each structured control instruction introduces an implicit label. + /// Labels are targets for branch instructions that reference them with + /// label indices. Unlike with other index spaces, indexing of labels + /// is relative by nesting depth, that is, label 0 refers to the + /// innermost structured control instruction enclosing the referring + /// branch instruction, while increasing indices refer to those farther + /// out. Consequently, labels can only be referenced from within the + /// associated structured control instruction. label: u32, + /// Local: The index space for locals is only accessible inside a function and + /// includes the parameters of that function, which precede the local + /// variables. + local: u32, /// A 32-bit immediate value. - /// - /// Used by `i32_const` imm32: i32, /// A 32-bit float value - /// - /// Used by `f32_float` float32: f32, /// Index into `extra`. Meaning of what can be found there is context-dependent. - /// - /// Used by e.g. `br_table` payload: u32, + + ip_index: InternPool.Index, + nav_index: InternPool.Nav.Index, + func_ty: Wasm.FunctionType.Index, + intrinsic: Intrinsic, + uav_obj: Wasm.UavsObjIndex, + uav_exe: Wasm.UavsExeIndex, + indirect_function_table_index: Wasm.ZcuIndirectFunctionSetIndex, + + comptime { + switch (builtin.mode) { + .Debug, .ReleaseSafe => {}, + .ReleaseFast, .ReleaseSmall => assert(@sizeOf(Data) == 4), + } + } }; }; @@ -596,6 +639,11 @@ pub fn extraData(self: *const Mir, comptime T: type, index: usize) struct { data inline for (fields) |field| { @field(result, field.name) = switch (field.type) { u32 => self.extra[i], + i32 => @bitCast(self.extra[i]), + Wasm.UavsObjIndex, + Wasm.UavsExeIndex, + InternPool.Nav.Index, + => @enumFromInt(self.extra[i]), else => |field_type| @compileError("Unsupported field type " ++ @typeName(field_type)), }; i += 1; @@ -609,28 +657,19 @@ pub const JumpTable = struct { length: u32, }; -/// Stores an unsigned 64bit integer -/// into a 32bit most significant bits field -/// and a 32bit least significant bits field. 
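// A hedged illustration, not part of this patch, of the relative label indexing
// described above, spelled as raw wasm bytes (0x40 is the empty block type):
// `br 1` jumps past the end of the *outer* block, while `br 0` would only
// leave the inner one.
const nested_branch_example = [_]u8{
    @intFromEnum(std.wasm.Opcode.block), 0x40, // outer block: label 1 as seen from the br
    @intFromEnum(std.wasm.Opcode.block), 0x40, // inner block: label 0 as seen from the br
    @intFromEnum(std.wasm.Opcode.br),    1, // targets the outer block
    @intFromEnum(std.wasm.Opcode.end), // closes the inner block
    @intFromEnum(std.wasm.Opcode.end), // closes the outer block
};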
-/// -/// This uses an unsigned integer rather than a signed integer -/// as we can easily store those into `extra` pub const Imm64 = struct { msb: u32, lsb: u32, - pub fn fromU64(imm: u64) Imm64 { + pub fn init(full: u64) Imm64 { return .{ - .msb = @as(u32, @truncate(imm >> 32)), - .lsb = @as(u32, @truncate(imm)), + .msb = @truncate(full >> 32), + .lsb = @truncate(full), }; } - pub fn toU64(self: Imm64) u64 { - var result: u64 = 0; - result |= @as(u64, self.msb) << 32; - result |= @as(u64, self.lsb); - return result; + pub fn toInt(i: Imm64) u64 { + return (@as(u64, i.msb) << 32) | @as(u64, i.lsb); } }; @@ -638,23 +677,16 @@ pub const Float64 = struct { msb: u32, lsb: u32, - pub fn fromFloat64(float: f64) Float64 { - const tmp = @as(u64, @bitCast(float)); + pub fn init(f: f64) Float64 { + const int: u64 = @bitCast(f); return .{ - .msb = @as(u32, @truncate(tmp >> 32)), - .lsb = @as(u32, @truncate(tmp)), + .msb = @truncate(int >> 32), + .lsb = @truncate(int), }; } - pub fn toF64(self: Float64) f64 { - @as(f64, @bitCast(self.toU64())); - } - - pub fn toU64(self: Float64) u64 { - var result: u64 = 0; - result |= @as(u64, self.msb) << 32; - result |= @as(u64, self.lsb); - return result; + pub fn toInt(f: Float64) u64 { + return (@as(u64, f.msb) << 32) | @as(u64, f.lsb); } }; @@ -663,11 +695,19 @@ pub const MemArg = struct { alignment: u32, }; -/// Represents a memory address, which holds both the pointer -/// or the parent pointer and the offset to it. -pub const Memory = struct { - pointer: u32, - offset: u32, +pub const UavRefOffObj = struct { + uav_obj: Wasm.UavsObjIndex, + offset: i32, +}; + +pub const UavRefOffExe = struct { + uav_exe: Wasm.UavsExeIndex, + offset: i32, +}; + +pub const NavRefOff = struct { + nav_index: InternPool.Nav.Index, + offset: i32, }; /// Maps a source line with wasm bytecode @@ -675,3 +715,199 @@ pub const DbgLineColumn = struct { line: u32, column: u32, }; + +/// Tag names exactly match the corresponding symbol name. 
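// A hedged usage sketch, not part of this patch: 64-bit immediates do not fit in
// the 4-byte `Data` union, so `Imm64`/`Float64` split them across two `extra`
// words, and they are reassembled with `toInt` at emit time.
test "example: Imm64 round-trips through two u32 halves" {
    const imm = Imm64.init(0x1234_5678_9abc_def0);
    try std.testing.expectEqual(@as(u32, 0x1234_5678), imm.msb);
    try std.testing.expectEqual(@as(u32, 0x9abc_def0), imm.lsb);
    try std.testing.expectEqual(@as(u64, 0x1234_5678_9abc_def0), imm.toInt());
}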
+pub const Intrinsic = enum(u32) { + __addhf3, + __addtf3, + __addxf3, + __ashlti3, + __ashrti3, + __bitreversedi2, + __bitreversesi2, + __bswapdi2, + __bswapsi2, + __ceilh, + __ceilx, + __cosh, + __cosx, + __divhf3, + __divtf3, + __divti3, + __divxf3, + __eqtf2, + __eqxf2, + __exp2h, + __exp2x, + __exph, + __expx, + __extenddftf2, + __extenddfxf2, + __extendhfsf2, + __extendhftf2, + __extendhfxf2, + __extendsftf2, + __extendsfxf2, + __extendxftf2, + __fabsh, + __fabsx, + __fixdfdi, + __fixdfsi, + __fixdfti, + __fixhfdi, + __fixhfsi, + __fixhfti, + __fixsfdi, + __fixsfsi, + __fixsfti, + __fixtfdi, + __fixtfsi, + __fixtfti, + __fixunsdfdi, + __fixunsdfsi, + __fixunsdfti, + __fixunshfdi, + __fixunshfsi, + __fixunshfti, + __fixunssfdi, + __fixunssfsi, + __fixunssfti, + __fixunstfdi, + __fixunstfsi, + __fixunstfti, + __fixunsxfdi, + __fixunsxfsi, + __fixunsxfti, + __fixxfdi, + __fixxfsi, + __fixxfti, + __floatdidf, + __floatdihf, + __floatdisf, + __floatditf, + __floatdixf, + __floatsidf, + __floatsihf, + __floatsisf, + __floatsitf, + __floatsixf, + __floattidf, + __floattihf, + __floattisf, + __floattitf, + __floattixf, + __floatundidf, + __floatundihf, + __floatundisf, + __floatunditf, + __floatundixf, + __floatunsidf, + __floatunsihf, + __floatunsisf, + __floatunsitf, + __floatunsixf, + __floatuntidf, + __floatuntihf, + __floatuntisf, + __floatuntitf, + __floatuntixf, + __floorh, + __floorx, + __fmah, + __fmax, + __fmaxh, + __fmaxx, + __fminh, + __fminx, + __fmodh, + __fmodx, + __getf2, + __gexf2, + __gttf2, + __gtxf2, + __letf2, + __lexf2, + __log10h, + __log10x, + __log2h, + __log2x, + __logh, + __logx, + __lshrti3, + __lttf2, + __ltxf2, + __modti3, + __mulhf3, + __mulodi4, + __muloti4, + __multf3, + __multi3, + __mulxf3, + __netf2, + __nexf2, + __roundh, + __roundx, + __sinh, + __sinx, + __sqrth, + __sqrtx, + __subhf3, + __subtf3, + __subxf3, + __tanh, + __tanx, + __trunch, + __truncsfhf2, + __trunctfdf2, + __trunctfhf2, + __trunctfsf2, + __trunctfxf2, + __truncx, + __truncxfdf2, + __truncxfhf2, + __truncxfsf2, + __udivti3, + __umodti3, + ceilq, + cos, + cosf, + cosq, + exp, + exp2, + exp2f, + exp2q, + expf, + expq, + fabsq, + floorq, + fma, + fmaf, + fmaq, + fmax, + fmaxf, + fmaxq, + fmin, + fminf, + fminq, + fmod, + fmodf, + fmodq, + log, + log10, + log10f, + log10q, + log2, + log2f, + log2q, + logf, + logq, + roundq, + sin, + sinf, + sinq, + sqrtq, + tan, + tanf, + tanq, + truncq, +}; diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig index 5a1d2fdb0bd0..d7ca4cf7154d 100644 --- a/src/arch/wasm/abi.zig +++ b/src/arch/wasm/abi.zig @@ -22,7 +22,7 @@ const direct: [2]Class = .{ .direct, .none }; /// Classifies a given Zig type to determine how they must be passed /// or returned as value within a wasm function. /// When all elements result in `.none`, no value must be passed in or returned. 
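// A hedged sketch, not part of this patch: because every `Intrinsic` tag is
// spelled exactly like its compiler-rt symbol, resolving the callee for a
// `call_intrinsic` instruction only needs `@tagName`, with no separate name table.
test "example: intrinsic tag names are the linked symbol names" {
    try std.testing.expectEqualStrings("__addtf3", @tagName(Intrinsic.__addtf3));
    try std.testing.expectEqualStrings("sqrtq", @tagName(Intrinsic.sqrtq));
}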
-pub fn classifyType(ty: Type, zcu: *Zcu) [2]Class { +pub fn classifyType(ty: Type, zcu: *const Zcu) [2]Class { const ip = &zcu.intern_pool; const target = zcu.getTarget(); if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return none; diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 298b2e11e0ec..940cd001958d 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -19,7 +19,6 @@ const Allocator = mem.Allocator; const CodeGenError = codegen.CodeGenError; const Compilation = @import("../../Compilation.zig"); const ErrorMsg = Zcu.ErrorMsg; -const Result = codegen.Result; const Emit = @import("Emit.zig"); const Liveness = @import("../../Liveness.zig"); const Lower = @import("Lower.zig"); @@ -59,7 +58,6 @@ target: *const std.Target, owner: Owner, inline_func: InternPool.Index, mod: *Package.Module, -err_msg: ?*ErrorMsg, arg_index: u32, args: []MCValue, va_info: union { @@ -819,9 +817,9 @@ pub fn generate( func_index: InternPool.Index, air: Air, liveness: Liveness, - code: *std.ArrayList(u8), + code: *std.ArrayListUnmanaged(u8), debug_output: link.File.DebugInfoOutput, -) CodeGenError!Result { +) CodeGenError!void { const zcu = pt.zcu; const comp = zcu.comp; const gpa = zcu.gpa; @@ -841,7 +839,6 @@ pub fn generate( .debug_output = debug_output, .owner = .{ .nav_index = func.owner_nav }, .inline_func = func_index, - .err_msg = null, .arg_index = undefined, .args = undefined, // populated after `resolveCallingConventionValues` .va_info = undefined, // populated after `resolveCallingConventionValues` @@ -881,15 +878,7 @@ pub fn generate( const fn_info = zcu.typeToFunc(fn_type).?; const cc = abi.resolveCallingConvention(fn_info.cc, function.target.*); var call_info = function.resolveCallingConventionValues(fn_info, &.{}, .args_frame) catch |err| switch (err) { - error.CodegenFail => return Result{ .fail = function.err_msg.? }, - error.OutOfRegisters => return Result{ - .fail = try ErrorMsg.create( - gpa, - src_loc, - "CodeGen ran out of registers. This is a bug in the Zig compiler.", - .{}, - ), - }, + error.CodegenFail => return error.CodegenFail, else => |e| return e, }; defer call_info.deinit(&function); @@ -926,10 +915,8 @@ pub fn generate( }; function.gen() catch |err| switch (err) { - error.CodegenFail => return Result{ .fail = function.err_msg.? }, - error.OutOfRegisters => return Result{ - .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), - }, + error.CodegenFail => return error.CodegenFail, + error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}), else => |e| return e, }; @@ -953,10 +940,7 @@ pub fn generate( .pic = mod.pic, }, .atom_index = function.owner.getSymbolIndex(&function) catch |err| switch (err) { - error.CodegenFail => return Result{ .fail = function.err_msg.? }, - error.OutOfRegisters => return Result{ - .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), - }, + error.CodegenFail => return error.CodegenFail, else => |e| return e, }, .debug_output = debug_output, @@ -974,29 +958,11 @@ pub fn generate( }; defer emit.deinit(); emit.emitMir() catch |err| switch (err) { - error.LowerFail, error.EmitFail => return Result{ .fail = emit.lower.err_msg.? 
}, - error.InvalidInstruction, error.CannotEncode => |e| { - const msg = switch (e) { - error.InvalidInstruction => "CodeGen failed to find a viable instruction.", - error.CannotEncode => "CodeGen failed to encode the instruction.", - }; - return Result{ - .fail = try ErrorMsg.create( - gpa, - src_loc, - "{s} This is a bug in the Zig compiler.", - .{msg}, - ), - }; - }, - else => |e| return e, - }; + error.LowerFail, error.EmitFail => return function.failMsg(emit.lower.err_msg.?), - if (function.err_msg) |em| { - return Result{ .fail = em }; - } else { - return Result.ok; - } + error.InvalidInstruction, error.CannotEncode => |e| return function.fail("emit MIR failed: {s} (Zig compiler bug)", .{@errorName(e)}), + else => |e| return function.fail("emit MIR failed: {s}", .{@errorName(e)}), + }; } pub fn generateLazy( @@ -1004,9 +970,9 @@ pub fn generateLazy( pt: Zcu.PerThread, src_loc: Zcu.LazySrcLoc, lazy_sym: link.File.LazySymbol, - code: *std.ArrayList(u8), + code: *std.ArrayListUnmanaged(u8), debug_output: link.File.DebugInfoOutput, -) CodeGenError!Result { +) CodeGenError!void { const comp = bin_file.comp; const gpa = comp.gpa; // This function is for generating global code, so we use the root module. @@ -1022,7 +988,6 @@ pub fn generateLazy( .debug_output = debug_output, .owner = .{ .lazy_sym = lazy_sym }, .inline_func = undefined, - .err_msg = null, .arg_index = undefined, .args = undefined, .va_info = undefined, @@ -1038,10 +1003,8 @@ pub fn generateLazy( } function.genLazy(lazy_sym) catch |err| switch (err) { - error.CodegenFail => return Result{ .fail = function.err_msg.? }, - error.OutOfRegisters => return Result{ - .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), - }, + error.CodegenFail => return error.CodegenFail, + error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}), else => |e| return e, }; @@ -1065,10 +1028,7 @@ pub fn generateLazy( .pic = mod.pic, }, .atom_index = function.owner.getSymbolIndex(&function) catch |err| switch (err) { - error.CodegenFail => return Result{ .fail = function.err_msg.? }, - error.OutOfRegisters => return Result{ - .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), - }, + error.CodegenFail => return error.CodegenFail, else => |e| return e, }, .debug_output = debug_output, @@ -1078,29 +1038,11 @@ pub fn generateLazy( }; defer emit.deinit(); emit.emitMir() catch |err| switch (err) { - error.LowerFail, error.EmitFail => return Result{ .fail = emit.lower.err_msg.? 
}, - error.InvalidInstruction, error.CannotEncode => |e| { - const msg = switch (e) { - error.InvalidInstruction => "CodeGen failed to find a viable instruction.", - error.CannotEncode => "CodeGen failed to encode the instruction.", - }; - return Result{ - .fail = try ErrorMsg.create( - gpa, - src_loc, - "{s} This is a bug in the Zig compiler.", - .{msg}, - ), - }; - }, - else => |e| return e, + error.LowerFail, error.EmitFail => return function.failMsg(emit.lower.err_msg.?), + error.InvalidInstruction => return function.fail("failed to find a viable x86 instruction (Zig compiler bug)", .{}), + error.CannotEncode => return function.fail("failed to encode x86 instruction (Zig compiler bug)", .{}), + else => |e| return function.fail("failed to emit MIR: {s}", .{@errorName(e)}), }; - - if (function.err_msg) |em| { - return Result{ .fail = em }; - } else { - return Result.ok; - } } const FormatNavData = struct { @@ -19276,10 +19218,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue { .load_got => |sym_index| .{ .lea_got = sym_index }, .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, }, - .fail => |msg| { - self.err_msg = msg; - return error.CodegenFail; - }, + .fail => |msg| return self.failMsg(msg), }; } @@ -19592,11 +19531,23 @@ fn resolveCallingConventionValues( return result; } -fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError { +fn fail(self: *Self, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } { @branchHint(.cold); - assert(self.err_msg == null); - const gpa = self.gpa; - self.err_msg = try ErrorMsg.create(gpa, self.src_loc, format, args); + const zcu = self.pt.zcu; + switch (self.owner) { + .nav_index => |i| return zcu.codegenFail(i, format, args), + .lazy_sym => |s| return zcu.codegenFailType(s.ty, format, args), + } + return error.CodegenFail; +} + +fn failMsg(self: *Self, msg: *ErrorMsg) error{ OutOfMemory, CodegenFail } { + @branchHint(.cold); + const zcu = self.pt.zcu; + switch (self.owner) { + .nav_index => |i| return zcu.codegenFailMsg(i, msg), + .lazy_sym => |s| return zcu.codegenFailTypeMsg(s.ty, msg), + } return error.CodegenFail; } diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig index f744eb3fc4d7..6e0d75f88351 100644 --- a/src/arch/x86_64/Emit.zig +++ b/src/arch/x86_64/Emit.zig @@ -4,7 +4,7 @@ air: Air, lower: Lower, atom_index: u32, debug_output: link.File.DebugInfoOutput, -code: *std.ArrayList(u8), +code: *std.ArrayListUnmanaged(u8), prev_di_loc: Loc, /// Relative to the beginning of `code`. 
@@ -18,6 +18,7 @@ pub const Error = Lower.Error || error{ } || link.File.UpdateDebugInfoError; pub fn emitMir(emit: *Emit) Error!void { + const gpa = emit.lower.bin_file.comp.gpa; for (0..emit.lower.mir.instructions.len) |mir_i| { const mir_index: Mir.Inst.Index = @intCast(mir_i); try emit.code_offset_mapping.putNoClobber( @@ -82,7 +83,7 @@ pub fn emitMir(emit: *Emit) Error!void { } continue; } - try lowered_inst.encode(emit.code.writer(), .{}); + try lowered_inst.encode(emit.code.writer(gpa), .{}); const end_offset: u32 = @intCast(emit.code.items.len); while (lowered_relocs.len > 0 and lowered_relocs[0].lowered_inst_index == lowered_index) : ({ @@ -100,7 +101,7 @@ pub fn emitMir(emit: *Emit) Error!void { const zo = elf_file.zigObjectPtr().?; const atom_ptr = zo.symbol(emit.atom_index).atom(elf_file).?; const r_type = @intFromEnum(std.elf.R_X86_64.PLT32); - try atom_ptr.addReloc(elf_file.base.comp.gpa, .{ + try atom_ptr.addReloc(gpa, .{ .r_offset = end_offset - 4, .r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type, .r_addend = lowered_relocs[0].off - 4, @@ -147,7 +148,7 @@ pub fn emitMir(emit: *Emit) Error!void { const zo = elf_file.zigObjectPtr().?; const atom = zo.symbol(emit.atom_index).atom(elf_file).?; const r_type = @intFromEnum(std.elf.R_X86_64.TLSLD); - try atom.addReloc(elf_file.base.comp.gpa, .{ + try atom.addReloc(gpa, .{ .r_offset = end_offset - 4, .r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type, .r_addend = lowered_relocs[0].off - 4, @@ -158,7 +159,7 @@ pub fn emitMir(emit: *Emit) Error!void { const zo = elf_file.zigObjectPtr().?; const atom = zo.symbol(emit.atom_index).atom(elf_file).?; const r_type = @intFromEnum(std.elf.R_X86_64.DTPOFF32); - try atom.addReloc(elf_file.base.comp.gpa, .{ + try atom.addReloc(gpa, .{ .r_offset = end_offset - 4, .r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type, .r_addend = lowered_relocs[0].off, @@ -173,7 +174,7 @@ pub fn emitMir(emit: *Emit) Error!void { @intFromEnum(std.elf.R_X86_64.GOTPCREL) else @intFromEnum(std.elf.R_X86_64.PC32); - try atom.addReloc(elf_file.base.comp.gpa, .{ + try atom.addReloc(gpa, .{ .r_offset = end_offset - 4, .r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type, .r_addend = lowered_relocs[0].off - 4, @@ -183,7 +184,7 @@ pub fn emitMir(emit: *Emit) Error!void { @intFromEnum(std.elf.R_X86_64.TPOFF32) else @intFromEnum(std.elf.R_X86_64.@"32"); - try atom.addReloc(elf_file.base.comp.gpa, .{ + try atom.addReloc(gpa, .{ .r_offset = end_offset - 4, .r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type, .r_addend = lowered_relocs[0].off, diff --git a/src/codegen.zig b/src/codegen.zig index a70fc8642d3e..acd7a4965d06 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -2,7 +2,6 @@ const std = @import("std"); const build_options = @import("build_options"); const builtin = @import("builtin"); const assert = std.debug.assert; -const leb128 = std.leb; const link = @import("link.zig"); const log = std.log.scoped(.codegen); const mem = std.mem; @@ -24,19 +23,13 @@ const Zir = std.zig.Zir; const Alignment = InternPool.Alignment; const dev = @import("dev.zig"); -pub const Result = union(enum) { - /// The `code` parameter passed to `generateSymbol` has the value ok. - ok, - - /// There was a codegen error. - fail: *ErrorMsg, -}; - pub const CodeGenError = error{ OutOfMemory, + /// Compiler was asked to operate on a number larger than supported. Overflow, + /// Indicates the error is already stored in Zcu `failed_codegen`. 
CodegenFail, -} || link.File.UpdateDebugInfoError; +}; fn devFeatureForBackend(comptime backend: std.builtin.CompilerBackend) dev.Feature { comptime assert(mem.startsWith(u8, @tagName(backend), "stage2_")); @@ -49,7 +42,6 @@ fn importBackend(comptime backend: std.builtin.CompilerBackend) type { .stage2_arm => @import("arch/arm/CodeGen.zig"), .stage2_riscv64 => @import("arch/riscv64/CodeGen.zig"), .stage2_sparc64 => @import("arch/sparc64/CodeGen.zig"), - .stage2_wasm => @import("arch/wasm/CodeGen.zig"), .stage2_x86_64 => @import("arch/x86_64/CodeGen.zig"), else => unreachable, }; @@ -62,9 +54,9 @@ pub fn generateFunction( func_index: InternPool.Index, air: Air, liveness: Liveness, - code: *std.ArrayList(u8), + code: *std.ArrayListUnmanaged(u8), debug_output: link.File.DebugInfoOutput, -) CodeGenError!Result { +) CodeGenError!void { const zcu = pt.zcu; const func = zcu.funcInfo(func_index); const target = zcu.navFileScope(func.owner_nav).mod.resolved_target.result; @@ -74,7 +66,6 @@ pub fn generateFunction( .stage2_arm, .stage2_riscv64, .stage2_sparc64, - .stage2_wasm, .stage2_x86_64, => |backend| { dev.check(devFeatureForBackend(backend)); @@ -88,17 +79,15 @@ pub fn generateLazyFunction( pt: Zcu.PerThread, src_loc: Zcu.LazySrcLoc, lazy_sym: link.File.LazySymbol, - code: *std.ArrayList(u8), + code: *std.ArrayListUnmanaged(u8), debug_output: link.File.DebugInfoOutput, -) CodeGenError!Result { +) CodeGenError!void { const zcu = pt.zcu; const file = Type.fromInterned(lazy_sym.ty).typeDeclInstAllowGeneratedTag(zcu).?.resolveFile(&zcu.intern_pool); const target = zcu.fileByIndex(file).mod.resolved_target.result; switch (target_util.zigBackend(target, false)) { else => unreachable, - inline .stage2_x86_64, - .stage2_riscv64, - => |backend| { + inline .stage2_x86_64, .stage2_riscv64 => |backend| { dev.check(devFeatureForBackend(backend)); return importBackend(backend).generateLazy(lf, pt, src_loc, lazy_sym, code, debug_output); }, @@ -120,20 +109,21 @@ pub fn generateLazySymbol( lazy_sym: link.File.LazySymbol, // TODO don't use an "out" parameter like this; put it in the result instead alignment: *Alignment, - code: *std.ArrayList(u8), + code: *std.ArrayListUnmanaged(u8), debug_output: link.File.DebugInfoOutput, reloc_parent: link.File.RelocInfo.Parent, -) CodeGenError!Result { +) CodeGenError!void { _ = reloc_parent; const tracy = trace(@src()); defer tracy.end(); const comp = bin_file.comp; - const ip = &pt.zcu.intern_pool; + const gpa = comp.gpa; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; const target = comp.root_mod.resolved_target.result; const endian = target.cpu.arch.endian(); - const gpa = comp.gpa; log.debug("generateLazySymbol: kind = {s}, ty = {}", .{ @tagName(lazy_sym.kind), @@ -150,52 +140,56 @@ pub fn generateLazySymbol( const err_names = ip.global_error_set.getNamesFromMainThread(); var offset_index: u32 = @intCast(code.items.len); var string_index: u32 = @intCast(4 * (1 + err_names.len + @intFromBool(err_names.len > 0))); - try code.resize(offset_index + string_index); + try code.resize(gpa, offset_index + string_index); mem.writeInt(u32, code.items[offset_index..][0..4], @intCast(err_names.len), endian); - if (err_names.len == 0) return .ok; + if (err_names.len == 0) return; offset_index += 4; for (err_names) |err_name_nts| { const err_name = err_name_nts.toSlice(ip); mem.writeInt(u32, code.items[offset_index..][0..4], string_index, endian); offset_index += 4; - try code.ensureUnusedCapacity(err_name.len + 1); + try code.ensureUnusedCapacity(gpa, err_name.len + 1); 
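// A hedged worked example, not part of this patch, of the anyerror name table
// produced above for two hypothetical error names "Foo" and "Bar" on a
// little-endian target: a u32 count, one u32 offset per name, a trailing u32
// end offset, then the NUL-terminated strings themselves.
const example_error_name_table = [_]u8{
    2,  0, 0, 0, // err_names.len
    16, 0, 0, 0, // offset of "Foo" from the start of the table
    20, 0, 0, 0, // offset of "Bar"
    24, 0, 0, 0, // end of the string data
    'F', 'o', 'o', 0,
    'B', 'a', 'r', 0,
};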
code.appendSliceAssumeCapacity(err_name); code.appendAssumeCapacity(0); string_index += @intCast(err_name.len + 1); } mem.writeInt(u32, code.items[offset_index..][0..4], string_index, endian); - return .ok; - } else if (Type.fromInterned(lazy_sym.ty).zigTypeTag(pt.zcu) == .@"enum") { + } else if (Type.fromInterned(lazy_sym.ty).zigTypeTag(zcu) == .@"enum") { alignment.* = .@"1"; const enum_ty = Type.fromInterned(lazy_sym.ty); - const tag_names = enum_ty.enumFields(pt.zcu); + const tag_names = enum_ty.enumFields(zcu); for (0..tag_names.len) |tag_index| { const tag_name = tag_names.get(ip)[tag_index].toSlice(ip); - try code.ensureUnusedCapacity(tag_name.len + 1); + try code.ensureUnusedCapacity(gpa, tag_name.len + 1); code.appendSliceAssumeCapacity(tag_name); code.appendAssumeCapacity(0); } - return .ok; - } else return .{ .fail = try .create( - gpa, - src_loc, - "TODO implement generateLazySymbol for {s} {}", - .{ @tagName(lazy_sym.kind), Type.fromInterned(lazy_sym.ty).fmt(pt) }, - ) }; + } else { + return zcu.codegenFailType(lazy_sym.ty, "TODO implement generateLazySymbol for {s} {}", .{ + @tagName(lazy_sym.kind), Type.fromInterned(lazy_sym.ty).fmt(pt), + }); + } } +pub const GenerateSymbolError = error{ + OutOfMemory, + /// Compiler was asked to operate on a number larger than supported. + Overflow, +}; + pub fn generateSymbol( bin_file: *link.File, pt: Zcu.PerThread, src_loc: Zcu.LazySrcLoc, val: Value, - code: *std.ArrayList(u8), + code: *std.ArrayListUnmanaged(u8), reloc_parent: link.File.RelocInfo.Parent, -) CodeGenError!Result { +) GenerateSymbolError!void { const tracy = trace(@src()); defer tracy.end(); const zcu = pt.zcu; + const gpa = zcu.gpa; const ip = &zcu.intern_pool; const ty = val.typeOf(zcu); @@ -206,8 +200,8 @@ pub fn generateSymbol( if (val.isUndefDeep(zcu)) { const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow; - try code.appendNTimes(0xaa, abi_size); - return .ok; + try code.appendNTimes(gpa, 0xaa, abi_size); + return; } switch (ip.indexToKey(val.toIntern())) { @@ -231,14 +225,13 @@ pub fn generateSymbol( .undef => unreachable, // handled above .simple_value => |simple_value| switch (simple_value) { - .undefined, - .void, - .null, - .empty_tuple, - .@"unreachable", - .generic_poison, - => unreachable, // non-runtime values - .false, .true => try code.append(switch (simple_value) { + .undefined => unreachable, // non-runtime value + .void => unreachable, // non-runtime value + .null => unreachable, // non-runtime value + .@"unreachable" => unreachable, // non-runtime value + .generic_poison => unreachable, // non-runtime value + .empty_tuple => return, + .false, .true => try code.append(gpa, switch (simple_value) { .false => 0, .true => 1, else => unreachable, @@ -254,11 +247,11 @@ pub fn generateSymbol( const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow; var space: Value.BigIntSpace = undefined; const int_val = val.toBigInt(&space, zcu); - int_val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian); + int_val.writeTwosComplement(try code.addManyAsSlice(gpa, abi_size), endian); }, .err => |err| { const int = try pt.getErrorValue(err.name); - try code.writer().writeInt(u16, @intCast(int), endian); + try code.writer(gpa).writeInt(u16, @intCast(int), endian); }, .error_union => |error_union| { const payload_ty = ty.errorUnionPayload(zcu); @@ -268,8 +261,8 @@ pub fn generateSymbol( }; if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { - try code.writer().writeInt(u16, err_val, endian); - return .ok; + try 
code.writer(gpa).writeInt(u16, err_val, endian); + return; } const payload_align = payload_ty.abiAlignment(zcu); @@ -278,72 +271,57 @@ pub fn generateSymbol( // error value first when its type is larger than the error union's payload if (error_align.order(payload_align) == .gt) { - try code.writer().writeInt(u16, err_val, endian); + try code.writer(gpa).writeInt(u16, err_val, endian); } // emit payload part of the error union { const begin = code.items.len; - switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (error_union.val) { + try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (error_union.val) { .err_name => try pt.intern(.{ .undef = payload_ty.toIntern() }), .payload => |payload| payload, - }), code, reloc_parent)) { - .ok => {}, - .fail => |em| return .{ .fail = em }, - } + }), code, reloc_parent); const unpadded_end = code.items.len - begin; const padded_end = abi_align.forward(unpadded_end); const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; if (padding > 0) { - try code.appendNTimes(0, padding); + try code.appendNTimes(gpa, 0, padding); } } // Payload size is larger than error set, so emit our error set last if (error_align.compare(.lte, payload_align)) { const begin = code.items.len; - try code.writer().writeInt(u16, err_val, endian); + try code.writer(gpa).writeInt(u16, err_val, endian); const unpadded_end = code.items.len - begin; const padded_end = abi_align.forward(unpadded_end); const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; if (padding > 0) { - try code.appendNTimes(0, padding); + try code.appendNTimes(gpa, 0, padding); } } }, .enum_tag => |enum_tag| { const int_tag_ty = ty.intTagType(zcu); - switch (try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, reloc_parent)) { - .ok => {}, - .fail => |em| return .{ .fail = em }, - } + try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, reloc_parent); }, .float => |float| switch (float.storage) { - .f16 => |f16_val| writeFloat(f16, f16_val, target, endian, try code.addManyAsArray(2)), - .f32 => |f32_val| writeFloat(f32, f32_val, target, endian, try code.addManyAsArray(4)), - .f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(8)), + .f16 => |f16_val| writeFloat(f16, f16_val, target, endian, try code.addManyAsArray(gpa, 2)), + .f32 => |f32_val| writeFloat(f32, f32_val, target, endian, try code.addManyAsArray(gpa, 4)), + .f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(gpa, 8)), .f80 => |f80_val| { - writeFloat(f80, f80_val, target, endian, try code.addManyAsArray(10)); + writeFloat(f80, f80_val, target, endian, try code.addManyAsArray(gpa, 10)); const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow; - try code.appendNTimes(0, abi_size - 10); + try code.appendNTimes(gpa, 0, abi_size - 10); }, - .f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(16)), - }, - .ptr => switch (try lowerPtr(bin_file, pt, src_loc, val.toIntern(), code, reloc_parent, 0)) { - .ok => {}, - .fail => |em| return .{ .fail = em }, + .f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(gpa, 16)), }, + .ptr => try lowerPtr(bin_file, pt, src_loc, val.toIntern(), code, reloc_parent, 0), .slice => |slice| { - switch (try generateSymbol(bin_file, pt, src_loc, 
Value.fromInterned(slice.ptr), code, reloc_parent)) { - .ok => {}, - .fail => |em| return .{ .fail = em }, - } - switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.len), code, reloc_parent)) { - .ok => {}, - .fail => |em| return .{ .fail = em }, - } + try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.ptr), code, reloc_parent); + try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.len), code, reloc_parent); }, .opt => { const payload_type = ty.optionalChild(zcu); @@ -352,12 +330,9 @@ pub fn generateSymbol( if (ty.optionalReprIsPayload(zcu)) { if (payload_val) |value| { - switch (try generateSymbol(bin_file, pt, src_loc, value, code, reloc_parent)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } + try generateSymbol(bin_file, pt, src_loc, value, code, reloc_parent); } else { - try code.appendNTimes(0, abi_size); + try code.appendNTimes(gpa, 0, abi_size); } } else { const padding = abi_size - (math.cast(usize, payload_type.abiSize(zcu)) orelse return error.Overflow) - 1; @@ -365,39 +340,33 @@ pub fn generateSymbol( const value = payload_val orelse Value.fromInterned(try pt.intern(.{ .undef = payload_type.toIntern(), })); - switch (try generateSymbol(bin_file, pt, src_loc, value, code, reloc_parent)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } + try generateSymbol(bin_file, pt, src_loc, value, code, reloc_parent); } - try code.writer().writeByte(@intFromBool(payload_val != null)); - try code.appendNTimes(0, padding); + try code.writer(gpa).writeByte(@intFromBool(payload_val != null)); + try code.appendNTimes(gpa, 0, padding); } }, .aggregate => |aggregate| switch (ip.indexToKey(ty.toIntern())) { .array_type => |array_type| switch (aggregate.storage) { - .bytes => |bytes| try code.appendSlice(bytes.toSlice(array_type.lenIncludingSentinel(), ip)), + .bytes => |bytes| try code.appendSlice(gpa, bytes.toSlice(array_type.lenIncludingSentinel(), ip)), .elems, .repeated_elem => { var index: u64 = 0; while (index < array_type.lenIncludingSentinel()) : (index += 1) { - switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (aggregate.storage) { + try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (aggregate.storage) { .bytes => unreachable, .elems => |elems| elems[@intCast(index)], .repeated_elem => |elem| if (index < array_type.len) elem else array_type.sentinel, - }), code, reloc_parent)) { - .ok => {}, - .fail => |em| return .{ .fail = em }, - } + }), code, reloc_parent); } }, }, .vector_type => |vector_type| { const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow; if (vector_type.child == .bool_type) { - const bytes = try code.addManyAsSlice(abi_size); + const bytes = try code.addManyAsSlice(gpa, abi_size); @memset(bytes, 0xaa); var index: usize = 0; const len = math.cast(usize, vector_type.len) orelse return error.Overflow; @@ -436,20 +405,17 @@ pub fn generateSymbol( } } else { switch (aggregate.storage) { - .bytes => |bytes| try code.appendSlice(bytes.toSlice(vector_type.len, ip)), + .bytes => |bytes| try code.appendSlice(gpa, bytes.toSlice(vector_type.len, ip)), .elems, .repeated_elem => { var index: u64 = 0; while (index < vector_type.len) : (index += 1) { - switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (aggregate.storage) { + try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (aggregate.storage) { .bytes => unreachable, .elems => |elems| elems[ math.cast(usize, index) orelse return 
error.Overflow ], .repeated_elem => |elem| elem, - }), code, reloc_parent)) { - .ok => {}, - .fail => |em| return .{ .fail = em }, - } + }), code, reloc_parent); } }, } @@ -457,7 +423,7 @@ pub fn generateSymbol( const padding = abi_size - (math.cast(usize, Type.fromInterned(vector_type.child).abiSize(zcu) * vector_type.len) orelse return error.Overflow); - if (padding > 0) try code.appendNTimes(0, padding); + if (padding > 0) try code.appendNTimes(gpa, 0, padding); } }, .tuple_type => |tuple| { @@ -479,10 +445,7 @@ pub fn generateSymbol( .repeated_elem => |elem| elem, }; - switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, reloc_parent)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } + try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, reloc_parent); const unpadded_field_end = code.items.len - struct_begin; // Pad struct members if required @@ -491,7 +454,7 @@ pub fn generateSymbol( return error.Overflow; if (padding > 0) { - try code.appendNTimes(0, padding); + try code.appendNTimes(gpa, 0, padding); } } }, @@ -501,7 +464,7 @@ pub fn generateSymbol( .@"packed" => { const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow; const current_pos = code.items.len; - try code.appendNTimes(0, abi_size); + try code.appendNTimes(gpa, 0, abi_size); var bits: u16 = 0; for (struct_type.field_types.get(ip), 0..) |field_ty, index| { @@ -519,12 +482,10 @@ pub fn generateSymbol( if (Type.fromInterned(field_ty).zigTypeTag(zcu) == .pointer) { const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(zcu)) orelse return error.Overflow; - var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size); - defer tmp_list.deinit(); - switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), &tmp_list, reloc_parent)) { - .ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items), - .fail => |em| return Result{ .fail = em }, - } + var tmp_list = try std.ArrayListUnmanaged(u8).initCapacity(gpa, field_size); + defer tmp_list.deinit(gpa); + try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), &tmp_list, reloc_parent); + @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items); } else { Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), pt, code.items[current_pos..], bits) catch unreachable; } @@ -554,12 +515,9 @@ pub fn generateSymbol( usize, offsets[field_index] - (code.items.len - struct_begin), ) orelse return error.Overflow; - if (padding > 0) try code.appendNTimes(0, padding); + if (padding > 0) try code.appendNTimes(gpa, 0, padding); - switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, reloc_parent)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } + try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, reloc_parent); } const size = struct_type.sizeUnordered(ip); @@ -570,7 +528,7 @@ pub fn generateSymbol( std.mem.alignForward(u64, size, @max(alignment, 1)) - (code.items.len - struct_begin), ) orelse return error.Overflow; - if (padding > 0) try code.appendNTimes(0, padding); + if (padding > 0) try code.appendNTimes(gpa, 0, padding); }, } }, @@ -585,10 +543,7 @@ pub fn generateSymbol( // Check if we should store the tag first. 
if (layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align)) { - switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, reloc_parent)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } + try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, reloc_parent); } const union_obj = zcu.typeToUnion(ty).?; @@ -596,39 +551,29 @@ pub fn generateSymbol( const field_index = ty.unionTagFieldIndex(Value.fromInterned(un.tag), zcu).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); if (!field_ty.hasRuntimeBits(zcu)) { - try code.appendNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow); + try code.appendNTimes(gpa, 0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow); } else { - switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, reloc_parent)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } + try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, reloc_parent); const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(zcu)) orelse return error.Overflow; if (padding > 0) { - try code.appendNTimes(0, padding); + try code.appendNTimes(gpa, 0, padding); } } } else { - switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, reloc_parent)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } + try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, reloc_parent); } if (layout.tag_size > 0 and layout.tag_align.compare(.lt, layout.payload_align)) { - switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, reloc_parent)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } + try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, reloc_parent); if (layout.padding > 0) { - try code.appendNTimes(0, layout.padding); + try code.appendNTimes(gpa, 0, layout.padding); } } }, .memoized_call => unreachable, } - return .ok; } fn lowerPtr( @@ -636,15 +581,15 @@ fn lowerPtr( pt: Zcu.PerThread, src_loc: Zcu.LazySrcLoc, ptr_val: InternPool.Index, - code: *std.ArrayList(u8), + code: *std.ArrayListUnmanaged(u8), reloc_parent: link.File.RelocInfo.Parent, prev_offset: u64, -) CodeGenError!Result { +) GenerateSymbolError!void { const zcu = pt.zcu; const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr; const offset: u64 = prev_offset + ptr.byte_offset; return switch (ptr.base_addr) { - .nav => |nav| try lowerNavRef(bin_file, pt, src_loc, nav, code, reloc_parent, offset), + .nav => |nav| try lowerNavRef(bin_file, pt, nav, code, reloc_parent, offset), .uav => |uav| try lowerUavRef(bin_file, pt, src_loc, uav, code, reloc_parent, offset), .int => try generateSymbol(bin_file, pt, src_loc, try pt.intValue(Type.usize, offset), code, reloc_parent), .eu_payload => |eu_ptr| try lowerPtr( @@ -689,29 +634,62 @@ fn lowerUavRef( pt: Zcu.PerThread, src_loc: Zcu.LazySrcLoc, uav: InternPool.Key.Ptr.BaseAddr.Uav, - code: *std.ArrayList(u8), + code: *std.ArrayListUnmanaged(u8), reloc_parent: link.File.RelocInfo.Parent, offset: u64, -) CodeGenError!Result { +) GenerateSymbolError!void { const zcu = pt.zcu; + const gpa = zcu.gpa; const ip = &zcu.intern_pool; - const target = lf.comp.root_mod.resolved_target.result; - + const comp = lf.comp; + const target = &comp.root_mod.resolved_target.result; const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8); + const is_obj = comp.config.output_mode 
== .Obj; const uav_val = uav.val; const uav_ty = Type.fromInterned(ip.typeOf(uav_val)); - log.debug("lowerUavRef: ty = {}", .{uav_ty.fmt(pt)}); const is_fn_body = uav_ty.zigTypeTag(zcu) == .@"fn"; + + log.debug("lowerUavRef: ty = {}", .{uav_ty.fmt(pt)}); + try code.ensureUnusedCapacity(gpa, ptr_width_bytes); + if (!is_fn_body and !uav_ty.hasRuntimeBits(zcu)) { - try code.appendNTimes(0xaa, ptr_width_bytes); - return Result.ok; + code.appendNTimesAssumeCapacity(0xaa, ptr_width_bytes); + return; + } + + switch (lf.tag) { + .c => unreachable, + .spirv => unreachable, + .nvptx => unreachable, + .wasm => { + dev.check(link.File.Tag.wasm.devFeature()); + const wasm = lf.cast(.wasm).?; + assert(reloc_parent == .none); + if (is_obj) { + try wasm.out_relocs.append(gpa, .{ + .offset = @intCast(code.items.len), + .pointee = .{ .symbol_index = try wasm.uavSymbolIndex(uav.val) }, + .tag = if (ptr_width_bytes == 4) .memory_addr_i32 else .memory_addr_i64, + .addend = @intCast(offset), + }); + } else { + try wasm.uav_fixups.ensureUnusedCapacity(gpa, 1); + wasm.uav_fixups.appendAssumeCapacity(.{ + .uavs_exe_index = try wasm.refUavExe(uav.val, uav.orig_ty), + .offset = @intCast(code.items.len), + .addend = @intCast(offset), + }); + } + code.appendNTimesAssumeCapacity(0, ptr_width_bytes); + return; + }, + else => {}, } const uav_align = ip.indexToKey(uav.orig_ty).ptr_type.flags.alignment; - const res = try lf.lowerUav(pt, uav_val, uav_align, src_loc); - switch (res) { + switch (try lf.lowerUav(pt, uav_val, uav_align, src_loc)) { .mcv => {}, - .fail => |em| return .{ .fail = em }, + .fail => |em| std.debug.panic("TODO rework lowerUav. internal error: {s}", .{em.msg}), } const vaddr = try lf.getUavVAddr(uav_val, .{ @@ -721,51 +699,91 @@ fn lowerUavRef( }); const endian = target.cpu.arch.endian(); switch (ptr_width_bytes) { - 2 => mem.writeInt(u16, try code.addManyAsArray(2), @intCast(vaddr), endian), - 4 => mem.writeInt(u32, try code.addManyAsArray(4), @intCast(vaddr), endian), - 8 => mem.writeInt(u64, try code.addManyAsArray(8), vaddr, endian), + 2 => mem.writeInt(u16, code.addManyAsArrayAssumeCapacity(2), @intCast(vaddr), endian), + 4 => mem.writeInt(u32, code.addManyAsArrayAssumeCapacity(4), @intCast(vaddr), endian), + 8 => mem.writeInt(u64, code.addManyAsArrayAssumeCapacity(8), vaddr, endian), else => unreachable, } - - return Result.ok; } fn lowerNavRef( lf: *link.File, pt: Zcu.PerThread, - src_loc: Zcu.LazySrcLoc, nav_index: InternPool.Nav.Index, - code: *std.ArrayList(u8), + code: *std.ArrayListUnmanaged(u8), reloc_parent: link.File.RelocInfo.Parent, offset: u64, -) CodeGenError!Result { - _ = src_loc; +) GenerateSymbolError!void { const zcu = pt.zcu; + const gpa = zcu.gpa; const ip = &zcu.intern_pool; const target = zcu.navFileScope(nav_index).mod.resolved_target.result; - - const ptr_width = target.ptrBitWidth(); + const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8); + const is_obj = lf.comp.config.output_mode == .Obj; const nav_ty = Type.fromInterned(ip.getNav(nav_index).typeOf(ip)); const is_fn_body = nav_ty.zigTypeTag(zcu) == .@"fn"; + + try code.ensureUnusedCapacity(gpa, ptr_width_bytes); + if (!is_fn_body and !nav_ty.hasRuntimeBits(zcu)) { - try code.appendNTimes(0xaa, @divExact(ptr_width, 8)); - return Result.ok; + code.appendNTimesAssumeCapacity(0xaa, ptr_width_bytes); + return; } - const vaddr = try lf.getNavVAddr(pt, nav_index, .{ + switch (lf.tag) { + .c => unreachable, + .spirv => unreachable, + .nvptx => unreachable, + .wasm => { + dev.check(link.File.Tag.wasm.devFeature()); + const 
wasm = lf.cast(.wasm).?; + assert(reloc_parent == .none); + if (is_fn_body) { + const gop = try wasm.zcu_indirect_function_set.getOrPut(gpa, nav_index); + if (!gop.found_existing) gop.value_ptr.* = {}; + if (is_obj) { + @panic("TODO add out_reloc for this"); + } else { + try wasm.func_table_fixups.append(gpa, .{ + .table_index = @enumFromInt(gop.index), + .offset = @intCast(code.items.len), + }); + } + } else { + if (is_obj) { + try wasm.out_relocs.append(gpa, .{ + .offset = @intCast(code.items.len), + .pointee = .{ .symbol_index = try wasm.navSymbolIndex(nav_index) }, + .tag = if (ptr_width_bytes == 4) .memory_addr_i32 else .memory_addr_i64, + .addend = @intCast(offset), + }); + } else { + try wasm.nav_fixups.ensureUnusedCapacity(gpa, 1); + wasm.nav_fixups.appendAssumeCapacity(.{ + .navs_exe_index = try wasm.refNavExe(nav_index), + .offset = @intCast(code.items.len), + .addend = @intCast(offset), + }); + } + } + code.appendNTimesAssumeCapacity(0, ptr_width_bytes); + return; + }, + else => {}, + } + + const vaddr = lf.getNavVAddr(pt, nav_index, .{ .parent = reloc_parent, .offset = code.items.len, .addend = @intCast(offset), - }); + }) catch @panic("TODO rework getNavVAddr"); const endian = target.cpu.arch.endian(); - switch (ptr_width) { - 16 => mem.writeInt(u16, try code.addManyAsArray(2), @intCast(vaddr), endian), - 32 => mem.writeInt(u32, try code.addManyAsArray(4), @intCast(vaddr), endian), - 64 => mem.writeInt(u64, try code.addManyAsArray(8), vaddr, endian), + switch (ptr_width_bytes) { + 2 => mem.writeInt(u16, code.addManyAsArrayAssumeCapacity(2), @intCast(vaddr), endian), + 4 => mem.writeInt(u32, code.addManyAsArrayAssumeCapacity(4), @intCast(vaddr), endian), + 8 => mem.writeInt(u64, code.addManyAsArrayAssumeCapacity(8), vaddr, endian), else => unreachable, } - - return Result.ok; } /// Helper struct to denote that the value is in memory but requires a linker relocation fixup: diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 5ce1ecc21c5b..6fee1beb89b6 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -3052,12 +3052,12 @@ pub fn genDeclValue( try w.writeAll(";\n"); } -pub fn genExports(dg: *DeclGen, exported: Zcu.Exported, export_indices: []const u32) !void { +pub fn genExports(dg: *DeclGen, exported: Zcu.Exported, export_indices: []const Zcu.Export.Index) !void { const zcu = dg.pt.zcu; const ip = &zcu.intern_pool; const fwd = dg.fwdDeclWriter(); - const main_name = zcu.all_exports.items[export_indices[0]].opts.name; + const main_name = export_indices[0].ptr(zcu).opts.name; try fwd.writeAll("#define "); switch (exported) { .nav => |nav| try dg.renderNavName(fwd, nav), @@ -3069,7 +3069,7 @@ pub fn genExports(dg: *DeclGen, exported: Zcu.Exported, export_indices: []const const exported_val = exported.getValue(zcu); if (ip.isFunctionType(exported_val.typeOf(zcu).toIntern())) return for (export_indices) |export_index| { - const @"export" = &zcu.all_exports.items[export_index]; + const @"export" = export_index.ptr(zcu); try fwd.writeAll("zig_extern "); if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage_fn "); try dg.renderFunctionSignature( @@ -3091,7 +3091,7 @@ pub fn genExports(dg: *DeclGen, exported: Zcu.Exported, export_indices: []const else => true, }; for (export_indices) |export_index| { - const @"export" = &zcu.all_exports.items[export_index]; + const @"export" = export_index.ptr(zcu); try fwd.writeAll("zig_extern "); if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage "); const extern_name = @"export".opts.name.toSlice(ip); 
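The c.zig hunks above are representative of a change threaded through this whole diff: export handles move from raw `u32` indices into `zcu.all_exports.items` to a typed `Zcu.Export.Index` that call sites resolve with `export_index.ptr(zcu)`. The definition of `Zcu.Export.Index` is not shown in this diff, so the standalone sketch below only illustrates the assumed typed-index pattern; the `Export`, `Zcu`, and `all_exports` names mirror the call sites, everything else is invented for the example.

const std = @import("std");

// Illustrative only: a non-exhaustive enum wrapping a position in
// `all_exports`, with a `ptr` helper that resolves it back to the element.
const Export = struct {
    name: []const u8,

    const Index = enum(u32) {
        _,

        fn ptr(index: Index, zcu: *const Zcu) *const Export {
            // Replaces the old `zcu.all_exports.items[raw_u32]` pattern.
            return &zcu.all_exports.items[@intFromEnum(index)];
        }
    };
};

const Zcu = struct {
    all_exports: std.ArrayListUnmanaged(Export),
};

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var zcu: Zcu = .{ .all_exports = .empty };
    defer zcu.all_exports.deinit(gpa);

    try zcu.all_exports.append(gpa, .{ .name = "main" });

    const index: Export.Index = @enumFromInt(0);
    std.debug.print("{s}\n", .{index.ptr(&zcu).name});
}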
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 492506e9a682..c23cc4354c69 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1059,9 +1059,10 @@ pub const Object = struct { lto: Compilation.Config.LtoMode, }; - pub fn emit(o: *Object, options: EmitOptions) !void { + pub fn emit(o: *Object, options: EmitOptions) error{ LinkFailure, OutOfMemory }!void { const zcu = o.pt.zcu; const comp = zcu.comp; + const diags = &comp.link_diags; { try o.genErrorNameTable(); @@ -1223,27 +1224,30 @@ pub const Object = struct { o.builder.clearAndFree(); if (options.pre_bc_path) |path| { - var file = try std.fs.cwd().createFile(path, .{}); + var file = std.fs.cwd().createFile(path, .{}) catch |err| + return diags.fail("failed to create '{s}': {s}", .{ path, @errorName(err) }); defer file.close(); const ptr: [*]const u8 = @ptrCast(bitcode.ptr); - try file.writeAll(ptr[0..(bitcode.len * 4)]); + file.writeAll(ptr[0..(bitcode.len * 4)]) catch |err| + return diags.fail("failed to write to '{s}': {s}", .{ path, @errorName(err) }); } if (options.asm_path == null and options.bin_path == null and options.post_ir_path == null and options.post_bc_path == null) return; if (options.post_bc_path) |path| { - var file = try std.fs.cwd().createFileZ(path, .{}); + var file = std.fs.cwd().createFileZ(path, .{}) catch |err| + return diags.fail("failed to create '{s}': {s}", .{ path, @errorName(err) }); defer file.close(); const ptr: [*]const u8 = @ptrCast(bitcode.ptr); - try file.writeAll(ptr[0..(bitcode.len * 4)]); + file.writeAll(ptr[0..(bitcode.len * 4)]) catch |err| + return diags.fail("failed to write to '{s}': {s}", .{ path, @errorName(err) }); } if (!build_options.have_llvm or !comp.config.use_lib_llvm) { - log.err("emitting without libllvm not implemented", .{}); - return error.FailedToEmit; + return diags.fail("emitting without libllvm not implemented", .{}); } initializeLLVMTarget(comp.root_mod.resolved_target.result.cpu.arch); @@ -1263,8 +1267,7 @@ pub const Object = struct { var module: *llvm.Module = undefined; if (context.parseBitcodeInContext2(bitcode_memory_buffer, &module).toBool() or context.getBrokenDebugInfo()) { - log.err("Failed to parse bitcode", .{}); - return error.FailedToEmit; + return diags.fail("Failed to parse bitcode", .{}); } break :emit .{ context, module }; }; @@ -1274,12 +1277,7 @@ pub const Object = struct { var error_message: [*:0]const u8 = undefined; if (llvm.Target.getFromTriple(target_triple_sentinel, &target, &error_message).toBool()) { defer llvm.disposeMessage(error_message); - - log.err("LLVM failed to parse '{s}': {s}", .{ - target_triple_sentinel, - error_message, - }); - @panic("Invalid LLVM triple"); + return diags.fail("LLVM failed to parse '{s}': {s}", .{ target_triple_sentinel, error_message }); } const optimize_mode = comp.root_mod.optimize_mode; @@ -1374,10 +1372,9 @@ pub const Object = struct { if (options.asm_path != null and options.bin_path != null) { if (target_machine.emitToFile(module, &error_message, &lowered_options)) { defer llvm.disposeMessage(error_message); - log.err("LLVM failed to emit bin={s} ir={s}: {s}", .{ + return diags.fail("LLVM failed to emit bin={s} ir={s}: {s}", .{ emit_bin_msg, post_llvm_ir_msg, error_message, }); - return error.FailedToEmit; } lowered_options.bin_filename = null; lowered_options.llvm_ir_filename = null; @@ -1386,11 +1383,9 @@ pub const Object = struct { lowered_options.asm_filename = options.asm_path; if (target_machine.emitToFile(module, &error_message, &lowered_options)) { defer 
llvm.disposeMessage(error_message); - log.err("LLVM failed to emit asm={s} bin={s} ir={s} bc={s}: {s}", .{ - emit_asm_msg, emit_bin_msg, post_llvm_ir_msg, post_llvm_bc_msg, - error_message, + return diags.fail("LLVM failed to emit asm={s} bin={s} ir={s} bc={s}: {s}", .{ + emit_asm_msg, emit_bin_msg, post_llvm_ir_msg, post_llvm_bc_msg, error_message, }); - return error.FailedToEmit; } } @@ -1815,7 +1810,7 @@ pub const Object = struct { self: *Object, pt: Zcu.PerThread, exported: Zcu.Exported, - export_indices: []const u32, + export_indices: []const Zcu.Export.Index, ) link.File.UpdateExportsError!void { assert(std.meta.eql(pt, self.pt)); const zcu = pt.zcu; @@ -1843,11 +1838,11 @@ pub const Object = struct { o: *Object, zcu: *Zcu, exported_value: InternPool.Index, - export_indices: []const u32, + export_indices: []const Zcu.Export.Index, ) link.File.UpdateExportsError!void { const gpa = zcu.gpa; const ip = &zcu.intern_pool; - const main_exp_name = try o.builder.strtabString(zcu.all_exports.items[export_indices[0]].opts.name.toSlice(ip)); + const main_exp_name = try o.builder.strtabString(export_indices[0].ptr(zcu).opts.name.toSlice(ip)); const global_index = i: { const gop = try o.uav_map.getOrPut(gpa, exported_value); if (gop.found_existing) { @@ -1878,11 +1873,11 @@ pub const Object = struct { o: *Object, zcu: *Zcu, global_index: Builder.Global.Index, - export_indices: []const u32, + export_indices: []const Zcu.Export.Index, ) link.File.UpdateExportsError!void { const comp = zcu.comp; const ip = &zcu.intern_pool; - const first_export = zcu.all_exports.items[export_indices[0]]; + const first_export = export_indices[0].ptr(zcu); // We will rename this global to have a name matching `first_export`. // Successive exports become aliases. @@ -1939,7 +1934,7 @@ pub const Object = struct { // Until then we iterate over existing aliases and make them point // to the correct decl, or otherwise add a new alias. Old aliases are leaked. 
for (export_indices[1..]) |export_idx| { - const exp = zcu.all_exports.items[export_idx]; + const exp = export_idx.ptr(zcu); const exp_name = try o.builder.strtabString(exp.opts.name.toSlice(ip)); if (o.builder.getGlobal(exp_name)) |global| { switch (global.ptrConst(&o.builder).kind) { @@ -1967,11 +1962,6 @@ pub const Object = struct { } } - pub fn freeDecl(self: *Object, decl_index: InternPool.DeclIndex) void { - const global = self.decl_map.get(decl_index) orelse return; - global.delete(&self.builder); - } - fn getDebugFile(o: *Object, file_index: Zcu.File.Index) Allocator.Error!Builder.Metadata { const gpa = o.gpa; const gop = try o.debug_file_map.getOrPut(gpa, file_index); diff --git a/src/dev.zig b/src/dev.zig index d623a708e77a..2573e63f25a3 100644 --- a/src/dev.zig +++ b/src/dev.zig @@ -30,6 +30,10 @@ pub const Env = enum { /// - `zig build-* -fno-llvm -fno-lld -target riscv64-linux` @"riscv64-linux", + /// - sema + /// - `zig build-* -fno-llvm -fno-lld -target wasm32-* --listen=-` + wasm, + pub inline fn supports(comptime dev_env: Env, comptime feature: Feature) bool { return switch (dev_env) { .full => true, @@ -144,6 +148,14 @@ pub const Env = enum { => true, else => Env.sema.supports(feature), }, + .wasm => switch (feature) { + .stdio_listen, + .incremental, + .wasm_backend, + .wasm_linker, + => true, + else => Env.sema.supports(feature), + }, }; } diff --git a/src/glibc.zig b/src/glibc.zig index 5bad947e5dba..744e4d176672 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -1217,6 +1217,18 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi }); } +pub fn sharedObjectsCount(target: *const std.Target) u8 { + const target_version = target.os.versionRange().gnuLibCVersion() orelse return 0; + var count: u8 = 0; + for (libs) |lib| { + if (lib.removed_in) |rem_in| { + if (target_version.order(rem_in) != .lt) continue; + } + count += 1; + } + return count; +} + fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void { const target_version = comp.getTarget().os.versionRange().gnuLibCVersion().?; diff --git a/src/link.zig b/src/link.zig index fd1ef7ce334a..e6ee788095f7 100644 --- a/src/link.zig +++ b/src/link.zig @@ -38,6 +38,11 @@ pub const Diags = struct { flags: Flags, lld: std.ArrayListUnmanaged(Lld), + pub const SourceLocation = union(enum) { + none, + wasm: File.Wasm.SourceLocation, + }; + pub const Flags = packed struct { no_entry_point_found: bool = false, missing_libc: bool = false, @@ -70,9 +75,25 @@ pub const Diags = struct { }; pub const Msg = struct { + source_location: SourceLocation = .none, msg: []const u8, notes: []Msg = &.{}, + fn string( + msg: *const Msg, + bundle: *std.zig.ErrorBundle.Wip, + base: ?*File, + ) Allocator.Error!std.zig.ErrorBundle.String { + return switch (msg.source_location) { + .none => try bundle.addString(msg.msg), + .wasm => |sl| { + dev.check(.wasm_linker); + const wasm = base.?.cast(.wasm).?; + return sl.string(msg.msg, bundle, wasm); + }, + }; + } + pub fn deinit(self: *Msg, gpa: Allocator) void { for (self.notes) |*note| note.deinit(gpa); gpa.free(self.notes); @@ -97,15 +118,12 @@ pub const Diags = struct { err_msg.msg = try std.fmt.allocPrint(gpa, format, args); } - pub fn addNote( - err: *ErrorWithNotes, - comptime format: []const u8, - args: anytype, - ) error{OutOfMemory}!void { + pub fn addNote(err: *ErrorWithNotes, comptime format: []const u8, args: anytype) void { const gpa = err.diags.gpa; + const msg = std.fmt.allocPrint(gpa, format, args) catch return err.diags.setAllocFailure(); 
const err_msg = &err.diags.msgs.items[err.index]; assert(err.note_slot < err_msg.notes.len); - err_msg.notes[err.note_slot] = .{ .msg = try std.fmt.allocPrint(gpa, format, args) }; + err_msg.notes[err.note_slot] = .{ .msg = msg }; err.note_slot += 1; } }; @@ -196,22 +214,35 @@ pub const Diags = struct { return error.LinkFailure; } + pub fn failSourceLocation(diags: *Diags, sl: SourceLocation, comptime format: []const u8, args: anytype) error{LinkFailure} { + @branchHint(.cold); + addErrorSourceLocation(diags, sl, format, args); + return error.LinkFailure; + } + pub fn addError(diags: *Diags, comptime format: []const u8, args: anytype) void { + return addErrorSourceLocation(diags, .none, format, args); + } + + pub fn addErrorSourceLocation(diags: *Diags, sl: SourceLocation, comptime format: []const u8, args: anytype) void { @branchHint(.cold); const gpa = diags.gpa; const eu_main_msg = std.fmt.allocPrint(gpa, format, args); diags.mutex.lock(); defer diags.mutex.unlock(); - addErrorLockedFallible(diags, eu_main_msg) catch |err| switch (err) { + addErrorLockedFallible(diags, sl, eu_main_msg) catch |err| switch (err) { error.OutOfMemory => diags.setAllocFailureLocked(), }; } - fn addErrorLockedFallible(diags: *Diags, eu_main_msg: Allocator.Error![]u8) Allocator.Error!void { + fn addErrorLockedFallible(diags: *Diags, sl: SourceLocation, eu_main_msg: Allocator.Error![]u8) Allocator.Error!void { const gpa = diags.gpa; const main_msg = try eu_main_msg; errdefer gpa.free(main_msg); - try diags.msgs.append(gpa, .{ .msg = main_msg }); + try diags.msgs.append(gpa, .{ + .msg = main_msg, + .source_location = sl, + }); } pub fn addErrorWithNotes(diags: *Diags, note_count: usize) error{OutOfMemory}!ErrorWithNotes { @@ -329,16 +360,16 @@ pub const Diags = struct { diags.flags.alloc_failure_occurred = true; } - pub fn addMessagesToBundle(diags: *const Diags, bundle: *std.zig.ErrorBundle.Wip) Allocator.Error!void { + pub fn addMessagesToBundle(diags: *const Diags, bundle: *std.zig.ErrorBundle.Wip, base: ?*File) Allocator.Error!void { for (diags.msgs.items) |link_err| { try bundle.addRootErrorMessage(.{ - .msg = try bundle.addString(link_err.msg), + .msg = try link_err.string(bundle, base), .notes_len = @intCast(link_err.notes.len), }); const notes_start = try bundle.reserveNotes(@intCast(link_err.notes.len)); for (link_err.notes, 0..) |note, i| { bundle.extra.items[notes_start + i] = @intFromEnum(try bundle.addErrorMessage(.{ - .msg = try bundle.addString(note.msg), + .msg = try note.string(bundle, base), })); } } @@ -364,6 +395,7 @@ pub const File = struct { build_id: std.zig.BuildId, allow_shlib_undefined: bool, stack_size: u64, + post_prelink: bool = false, /// Prevents other processes from clobbering files in the output directory /// of this linking operation. @@ -400,6 +432,7 @@ pub const File = struct { export_table: bool, initial_memory: ?u64, max_memory: ?u64, + object_host_name: ?[]const u8, export_symbol_names: []const []const u8, global_base: ?u64, build_id: std.zig.BuildId, @@ -632,43 +665,15 @@ pub const File = struct { pub const UpdateDebugInfoError = Dwarf.UpdateError; pub const FlushDebugInfoError = Dwarf.FlushError; + /// Note that `LinkFailure` is not a member of this error set because the error message + /// must be attached to `Zcu.failed_codegen` rather than `Compilation.link_diags`. 
pub const UpdateNavError = error{ - OutOfMemory, Overflow, - Underflow, - FileTooBig, - InputOutput, - FilesOpenedWithWrongFlags, - IsDir, - NoSpaceLeft, - Unseekable, - PermissionDenied, - SwapFile, - CorruptedData, - SystemResources, - OperationAborted, - BrokenPipe, - ConnectionResetByPeer, - ConnectionTimedOut, - SocketNotConnected, - NotOpenForReading, - WouldBlock, - Canceled, - AccessDenied, - Unexpected, - DiskQuota, - NotOpenForWriting, - AnalysisFail, + OutOfMemory, + /// Indicates the error is already reported and stored in + /// `failed_codegen` on the Zcu. CodegenFail, - EmitFail, - NameTooLong, - CurrentWorkingDirectoryUnlinked, - LockViolation, - NetNameDeleted, - DeviceBusy, - InvalidArgument, - HotSwapUnavailableOnHostOperatingSystem, - } || UpdateDebugInfoError; + }; /// Called from within CodeGen to retrieve the symbol index of a global symbol. /// If no symbol exists yet with this name, a new undefined global symbol will @@ -701,7 +706,13 @@ pub const File = struct { } } - pub fn updateContainerType(base: *File, pt: Zcu.PerThread, ty: InternPool.Index) UpdateNavError!void { + pub const UpdateContainerTypeError = error{ + OutOfMemory, + /// `Zcu.failed_types` is already populated with the error message. + TypeFailureReported, + }; + + pub fn updateContainerType(base: *File, pt: Zcu.PerThread, ty: InternPool.Index) UpdateContainerTypeError!void { switch (base.tag) { else => {}, inline .elf => |tag| { @@ -727,9 +738,15 @@ pub const File = struct { } } + pub const UpdateLineNumberError = error{ + OutOfMemory, + Overflow, + LinkFailure, + }; + /// On an incremental update, fixup the line number of all `Nav`s at the given `TrackedInst`, because /// its line number has changed. The ZIR instruction `ti_id` has tag `.declaration`. - pub fn updateLineNumber(base: *File, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) UpdateNavError!void { + pub fn updateLineNumber(base: *File, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) UpdateLineNumberError!void { { const ti = ti_id.resolveFull(&pt.zcu.intern_pool).?; const file = pt.zcu.fileByIndex(ti.file); @@ -771,83 +788,11 @@ pub const File = struct { } } - /// TODO audit this error set. most of these should be collapsed into one error, - /// and Diags.Flags should be updated to convey the meaning to the user. pub const FlushError = error{ - CacheCheckFailed, - CurrentWorkingDirectoryUnlinked, - DivisionByZero, - DllImportLibraryNotFound, - ExpectedFuncType, - FailedToEmit, - FileSystem, - FilesOpenedWithWrongFlags, - /// Deprecated. Use `LinkFailure` instead. - /// Formerly used to indicate an error will be present in `Compilation.link_errors`. - FlushFailure, - /// Indicates an error will be present in `Compilation.link_errors`. + /// Indicates an error will be present in `Compilation.link_diags`. 
LinkFailure, - FunctionSignatureMismatch, - GlobalTypeMismatch, - HotSwapUnavailableOnHostOperatingSystem, - InvalidCharacter, - InvalidEntryKind, - InvalidFeatureSet, - InvalidFormat, - InvalidIndex, - InvalidInitFunc, - InvalidMagicByte, - InvalidWasmVersion, - LLDCrashed, - LLDReportedFailure, - LLD_LinkingIsTODO_ForSpirV, - LibCInstallationMissingCrtDir, - LibCInstallationNotAvailable, - LinkingWithoutZigSourceUnimplemented, - MalformedArchive, - MalformedDwarf, - MalformedSection, - MemoryTooBig, - MemoryTooSmall, - MissAlignment, - MissingEndForBody, - MissingEndForExpression, - MissingSymbol, - MissingTableSymbols, - ModuleNameMismatch, - NoObjectsToLink, - NotObjectFile, - NotSupported, OutOfMemory, - Overflow, - PermissionDenied, - StreamTooLong, - SwapFile, - SymbolCollision, - SymbolMismatchingType, - TODOImplementPlan9Objs, - TODOImplementWritingLibFiles, - UnableToSpawnSelf, - UnableToSpawnWasm, - UnableToWriteArchive, - UndefinedLocal, - UndefinedSymbol, - Underflow, - UnexpectedRemainder, - UnexpectedTable, - UnexpectedValue, - UnknownFeature, - UnrecognizedVolume, - Unseekable, - UnsupportedCpuArchitecture, - UnsupportedVersion, - UnexpectedEndOfFile, - } || - fs.File.WriteFileError || - fs.File.OpenError || - std.process.Child.SpawnError || - fs.Dir.CopyFileError || - FlushDebugInfoError; + }; /// Commit pending changes and write headers. Takes into account final output mode /// and `use_lld`, not only `effectiveOutputMode`. @@ -864,10 +809,17 @@ pub const File = struct { assert(comp.c_object_table.count() == 1); const the_key = comp.c_object_table.keys()[0]; const cached_pp_file_path = the_key.status.success.object_path; - try cached_pp_file_path.root_dir.handle.copyFile(cached_pp_file_path.sub_path, emit.root_dir.handle, emit.sub_path, .{}); + cached_pp_file_path.root_dir.handle.copyFile(cached_pp_file_path.sub_path, emit.root_dir.handle, emit.sub_path, .{}) catch |err| { + const diags = &base.comp.link_diags; + return diags.fail("failed to copy '{'}' to '{'}': {s}", .{ + @as(Path, cached_pp_file_path), @as(Path, emit), @errorName(err), + }); + }; return; } + assert(base.post_prelink); + const use_lld = build_options.have_llvm and comp.config.use_lld; const output_mode = comp.config.output_mode; const link_mode = comp.config.link_mode; @@ -893,16 +845,6 @@ pub const File = struct { } } - /// Called when a Decl is deleted from the Zcu. 
- pub fn freeDecl(base: *File, decl_index: InternPool.DeclIndex) void { - switch (base.tag) { - inline else => |tag| { - dev.check(tag.devFeature()); - @as(*tag.Type(), @fieldParentPtr("base", base)).freeDecl(decl_index); - }, - } - } - pub const UpdateExportsError = error{ OutOfMemory, AnalysisFail, @@ -916,7 +858,7 @@ pub const File = struct { base: *File, pt: Zcu.PerThread, exported: Zcu.Exported, - export_indices: []const u32, + export_indices: []const Zcu.Export.Index, ) UpdateExportsError!void { switch (base.tag) { inline else => |tag| { @@ -932,6 +874,7 @@ pub const File = struct { addend: u32, pub const Parent = union(enum) { + none, atom_index: u32, debug_output: DebugInfoOutput, }; @@ -948,6 +891,7 @@ pub const File = struct { .c => unreachable, .spirv => unreachable, .nvptx => unreachable, + .wasm => unreachable, inline else => |tag| { dev.check(tag.devFeature()); return @as(*tag.Type(), @fieldParentPtr("base", base)).getNavVAddr(pt, nav_index, reloc_info); @@ -966,6 +910,7 @@ pub const File = struct { .c => unreachable, .spirv => unreachable, .nvptx => unreachable, + .wasm => unreachable, inline else => |tag| { dev.check(tag.devFeature()); return @as(*tag.Type(), @fieldParentPtr("base", base)).lowerUav(pt, decl_val, decl_align, src_loc); @@ -978,6 +923,7 @@ pub const File = struct { .c => unreachable, .spirv => unreachable, .nvptx => unreachable, + .wasm => unreachable, inline else => |tag| { dev.check(tag.devFeature()); return @as(*tag.Type(), @fieldParentPtr("base", base)).getUavVAddr(decl_val, reloc_info); @@ -1099,12 +1045,44 @@ pub const File = struct { } } + /// Called when all linker inputs have been sent via `loadInput`. After + /// this, `loadInput` will not be called anymore. + pub fn prelink(base: *File, prog_node: std.Progress.Node) FlushError!void { + assert(!base.post_prelink); + const use_lld = build_options.have_llvm and base.comp.config.use_lld; + if (use_lld) return; + + // In this case, an object file is created by the LLVM backend, so + // there is no prelink phase. The Zig code is linked as a standard + // object along with the others. + if (base.zcu_object_sub_path != null) return; + + switch (base.tag) { + inline .wasm => |tag| { + dev.check(tag.devFeature()); + return @as(*tag.Type(), @fieldParentPtr("base", base)).prelink(prog_node); + }, + else => {}, + } + } + pub fn linkAsArchive(base: *File, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) FlushError!void { dev.check(.lld_linker); const tracy = trace(@src()); defer tracy.end(); + const comp = base.comp; + const diags = &comp.link_diags; + + return linkAsArchiveInner(base, arena, tid, prog_node) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.LinkFailure => return error.LinkFailure, + else => |e| return diags.fail("failed to link as archive: {s}", .{@errorName(e)}), + }; + } + + fn linkAsArchiveInner(base: *File, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void { const comp = base.comp; const directory = base.emit.root_dir; // Just an alias to make it shorter to type. 
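The new `prelink` hook above introduces an explicit phase boundary: every `loadInput` call happens before it, it runs at most once per link, and (as the `flushTaskQueue` and `doTask` changes below show) `codegen_*` tasks are postponed until it has completed, tracked by the new `post_prelink` flag. The snippet below is only an illustrative model of that ordering with stand-in types, not the compiler's actual driver code.

const std = @import("std");
const assert = std.debug.assert;

// Stand-in model of the phase ordering: inputs first, `prelink` exactly once
// after the last input, then per-Nav updates, then flush.
const Linker = struct {
    post_prelink: bool = false,
    inputs: usize = 0,
    updates: usize = 0,

    fn loadInput(l: *Linker) void {
        assert(!l.post_prelink); // no more inputs once prelink has run
        l.inputs += 1;
    }

    fn prelink(l: *Linker) void {
        assert(!l.post_prelink); // mirrors the assert in `File.prelink` above
        l.post_prelink = true;
    }

    fn updateNav(l: *Linker) void {
        assert(l.post_prelink); // codegen tasks are postponed until after prelink
        l.updates += 1;
    }

    fn flush(l: *Linker) void {
        assert(l.post_prelink);
        std.debug.print("flushed {d} inputs, {d} updates\n", .{ l.inputs, l.updates });
    }
};

pub fn main() void {
    var l: Linker = .{};
    l.loadInput();
    l.loadInput();
    l.prelink();
    l.updateNav();
    l.flush();
}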
@@ -1364,6 +1342,16 @@ pub const File = struct { }, llvm_object, prog_node); } + pub fn cgFail( + base: *File, + nav_index: InternPool.Nav.Index, + comptime format: []const u8, + args: anytype, + ) error{ CodegenFail, OutOfMemory } { + @branchHint(.cold); + return base.comp.zcu.?.codegenFail(nav_index, format, args); + } + pub const C = @import("link/C.zig"); pub const Coff = @import("link/Coff.zig"); pub const Plan9 = @import("link/Plan9.zig"); @@ -1379,12 +1367,32 @@ pub const File = struct { /// from the rest of compilation. All tasks performed here are /// single-threaded with respect to one another. pub fn flushTaskQueue(tid: usize, comp: *Compilation) void { + const diags = &comp.link_diags; // As soon as check() is called, another `flushTaskQueue` call could occur, // so the safety lock must go after the check. while (comp.link_task_queue.check()) |tasks| { comp.link_task_queue_safety.lock(); defer comp.link_task_queue_safety.unlock(); + + if (comp.remaining_prelink_tasks > 0) { + comp.link_task_queue_postponed.ensureUnusedCapacity(comp.gpa, tasks.len) catch |err| switch (err) { + error.OutOfMemory => return diags.setAllocFailure(), + }; + } + for (tasks) |task| doTask(comp, tid, task); + + if (comp.remaining_prelink_tasks == 0) { + if (comp.bin_file) |base| if (!base.post_prelink) { + base.prelink(comp.work_queue_progress_node) catch |err| switch (err) { + error.OutOfMemory => diags.setAllocFailure(), + error.LinkFailure => continue, + }; + base.post_prelink = true; + for (comp.link_task_queue_postponed.items) |task| doTask(comp, tid, task); + comp.link_task_queue_postponed.clearRetainingCapacity(); + }; + } } } @@ -1428,6 +1436,7 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void { const diags = &comp.link_diags; switch (task) { .load_explicitly_provided => if (comp.bin_file) |base| { + comp.remaining_prelink_tasks -= 1; const prog_node = comp.work_queue_progress_node.start("Parse Linker Inputs", comp.link_inputs.len); defer prog_node.end(); for (comp.link_inputs) |input| { @@ -1445,6 +1454,7 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void { } }, .load_host_libc => if (comp.bin_file) |base| { + comp.remaining_prelink_tasks -= 1; const prog_node = comp.work_queue_progress_node.start("Linker Parse Host libc", 0); defer prog_node.end(); @@ -1504,6 +1514,7 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void { } }, .load_object => |path| if (comp.bin_file) |base| { + comp.remaining_prelink_tasks -= 1; const prog_node = comp.work_queue_progress_node.start("Linker Parse Object", 0); defer prog_node.end(); base.openLoadObject(path) catch |err| switch (err) { @@ -1512,6 +1523,7 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void { }; }, .load_archive => |path| if (comp.bin_file) |base| { + comp.remaining_prelink_tasks -= 1; const prog_node = comp.work_queue_progress_node.start("Linker Parse Archive", 0); defer prog_node.end(); base.openLoadArchive(path, null) catch |err| switch (err) { @@ -1520,6 +1532,7 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void { }; }, .load_dso => |path| if (comp.bin_file) |base| { + comp.remaining_prelink_tasks -= 1; const prog_node = comp.work_queue_progress_node.start("Linker Parse Shared Library", 0); defer prog_node.end(); base.openLoadDso(path, .{ @@ -1531,6 +1544,7 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void { }; }, .load_input => |input| if (comp.bin_file) |base| { + comp.remaining_prelink_tasks -= 1; const prog_node = 
comp.work_queue_progress_node.start("Linker Parse Input", 0); defer prog_node.end(); base.loadInput(input) catch |err| switch (err) { @@ -1545,26 +1559,38 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void { }; }, .codegen_nav => |nav_index| { - const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid)); - defer pt.deactivate(); - pt.linkerUpdateNav(nav_index) catch |err| switch (err) { - error.OutOfMemory => diags.setAllocFailure(), - }; + if (comp.remaining_prelink_tasks == 0) { + const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid)); + defer pt.deactivate(); + pt.linkerUpdateNav(nav_index) catch |err| switch (err) { + error.OutOfMemory => diags.setAllocFailure(), + }; + } else { + comp.link_task_queue_postponed.appendAssumeCapacity(task); + } }, .codegen_func => |func| { - const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid)); - defer pt.deactivate(); - // This call takes ownership of `func.air`. - pt.linkerUpdateFunc(func.func, func.air) catch |err| switch (err) { - error.OutOfMemory => diags.setAllocFailure(), - }; + if (comp.remaining_prelink_tasks == 0) { + const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid)); + defer pt.deactivate(); + // This call takes ownership of `func.air`. + pt.linkerUpdateFunc(func.func, func.air) catch |err| switch (err) { + error.OutOfMemory => diags.setAllocFailure(), + }; + } else { + comp.link_task_queue_postponed.appendAssumeCapacity(task); + } }, .codegen_type => |ty| { - const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid)); - defer pt.deactivate(); - pt.linkerUpdateContainerType(ty) catch |err| switch (err) { - error.OutOfMemory => diags.setAllocFailure(), - }; + if (comp.remaining_prelink_tasks == 0) { + const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid)); + defer pt.deactivate(); + pt.linkerUpdateContainerType(ty) catch |err| switch (err) { + error.OutOfMemory => diags.setAllocFailure(), + }; + } else { + comp.link_task_queue_postponed.appendAssumeCapacity(task); + } }, .update_line_number => |ti| { const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid)); @@ -1593,7 +1619,7 @@ pub fn spawnLld( const exit_code = try lldMain(arena, argv, false); if (exit_code == 0) return; if (comp.clang_passthrough_mode) std.process.exit(exit_code); - return error.LLDReportedFailure; + return error.LinkFailure; } var stderr: []u8 = &.{}; @@ -1670,17 +1696,16 @@ pub fn spawnLld( return error.UnableToSpawnSelf; }; + const diags = &comp.link_diags; switch (term) { .Exited => |code| if (code != 0) { if (comp.clang_passthrough_mode) std.process.exit(code); - const diags = &comp.link_diags; diags.lockAndParseLldStderr(argv[1], stderr); - return error.LLDReportedFailure; + return error.LinkFailure; }, else => { if (comp.clang_passthrough_mode) std.process.abort(); - log.err("{s} terminated with stderr:\n{s}", .{ argv[0], stderr }); - return error.LLDCrashed; + return diags.fail("{s} terminated with stderr:\n{s}", .{ argv[0], stderr }); }, } @@ -2239,7 +2264,7 @@ fn resolvePathInputLib( try wip_errors.init(gpa); defer wip_errors.deinit(); - try diags.addMessagesToBundle(&wip_errors); + try diags.addMessagesToBundle(&wip_errors, null); var error_bundle = try wip_errors.toOwnedBundle(""); defer error_bundle.deinit(gpa); diff --git a/src/link/C.zig b/src/link/C.zig index 4df5b824bdec..6c7b7c8975dc 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -175,21 +175,13 @@ pub fn deinit(self: *C) void { self.lazy_code_buf.deinit(gpa); } -pub fn freeDecl(self: *C, decl_index: 
InternPool.DeclIndex) void { - const gpa = self.base.comp.gpa; - if (self.decl_table.fetchSwapRemove(decl_index)) |kv| { - var decl_block = kv.value; - decl_block.deinit(gpa); - } -} - pub fn updateFunc( self: *C, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness, -) !void { +) link.File.UpdateNavError!void { const zcu = pt.zcu; const gpa = zcu.gpa; const func = zcu.funcInfo(func_index); @@ -313,7 +305,7 @@ fn updateUav(self: *C, pt: Zcu.PerThread, i: usize) !void { }; } -pub fn updateNav(self: *C, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void { +pub fn updateNav(self: *C, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) link.File.UpdateNavError!void { const tracy = trace(@src()); defer tracy.end(); @@ -390,7 +382,7 @@ pub fn updateLineNumber(self: *C, pt: Zcu.PerThread, ti_id: InternPool.TrackedIn _ = ti_id; } -pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void { +pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { return self.flushModule(arena, tid, prog_node); } @@ -409,7 +401,7 @@ fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) { return defines; } -pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void { +pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { _ = arena; // Has the same lifetime as the call to Compilation.update. const tracy = trace(@src()); @@ -419,6 +411,7 @@ pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: defer sub_prog_node.end(); const comp = self.base.comp; + const diags = &comp.link_diags; const gpa = comp.gpa; const zcu = self.base.comp.zcu.?; const ip = &zcu.intern_pool; @@ -476,7 +469,7 @@ pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: defer export_names.deinit(gpa); try export_names.ensureTotalCapacity(gpa, @intCast(zcu.single_exports.count())); for (zcu.single_exports.values()) |export_index| { - export_names.putAssumeCapacity(zcu.all_exports.items[export_index].opts.name, {}); + export_names.putAssumeCapacity(export_index.ptr(zcu).opts.name, {}); } for (zcu.multi_exports.values()) |info| { try export_names.ensureUnusedCapacity(gpa, info.len); @@ -554,8 +547,10 @@ pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: }, self.getString(av_block.code)); const file = self.base.file.?; - try file.setEndPos(f.file_size); - try file.pwritevAll(f.all_buffers.items, 0); + file.setEndPos(f.file_size) catch |err| return diags.fail("failed to allocate file: {s}", .{@errorName(err)}); + file.pwritevAll(f.all_buffers.items, 0) catch |err| return diags.fail("failed to write to '{'}': {s}", .{ + self.base.emit, @errorName(err), + }); } const Flush = struct { @@ -845,7 +840,7 @@ pub fn updateExports( self: *C, pt: Zcu.PerThread, exported: Zcu.Exported, - export_indices: []const u32, + export_indices: []const Zcu.Export.Index, ) !void { const zcu = pt.zcu; const gpa = zcu.gpa; diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 64df9b82f890..9bd3bd76e92c 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -408,7 +408,7 @@ pub fn createEmpty( max_file_offset = header.pointer_to_raw_data + header.size_of_raw_data; } } - try coff.base.file.?.pwriteAll(&[_]u8{0}, max_file_offset); + try coff.pwriteAll(&[_]u8{0}, max_file_offset); } return coff; @@ -858,7 +858,7 @@ fn 
writeAtom(coff: *Coff, atom_index: Atom.Index, code: []u8) !void { } coff.resolveRelocs(atom_index, relocs.items, code, coff.image_base); - try coff.base.file.?.pwriteAll(code, file_offset); + try coff.pwriteAll(code, file_offset); // Now we can mark the relocs as resolved. while (relocs.popOrNull()) |reloc| { @@ -891,7 +891,7 @@ fn writeOffsetTableEntry(coff: *Coff, index: usize) !void { const sect_id = coff.got_section_index.?; if (coff.got_table_count_dirty) { - const needed_size = @as(u32, @intCast(coff.got_table.entries.items.len * coff.ptr_width.size())); + const needed_size: u32 = @intCast(coff.got_table.entries.items.len * coff.ptr_width.size()); try coff.growSection(sect_id, needed_size); coff.got_table_count_dirty = false; } @@ -908,7 +908,7 @@ fn writeOffsetTableEntry(coff: *Coff, index: usize) !void { switch (coff.ptr_width) { .p32 => { var buf: [4]u8 = undefined; - mem.writeInt(u32, &buf, @as(u32, @intCast(entry_value + coff.image_base)), .little); + mem.writeInt(u32, &buf, @intCast(entry_value + coff.image_base), .little); try coff.base.file.?.pwriteAll(&buf, file_offset); }, .p64 => { @@ -1093,7 +1093,13 @@ fn freeAtom(coff: *Coff, atom_index: Atom.Index) void { coff.getAtomPtr(atom_index).sym_index = 0; } -pub fn updateFunc(coff: *Coff, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { +pub fn updateFunc( + coff: *Coff, + pt: Zcu.PerThread, + func_index: InternPool.Index, + air: Air, + liveness: Liveness, +) link.File.UpdateNavError!void { if (build_options.skip_non_native and builtin.object_format != .coff) { @panic("Attempted to compile for object format that was disabled by build configuration"); } @@ -1106,34 +1112,41 @@ pub fn updateFunc(coff: *Coff, pt: Zcu.PerThread, func_index: InternPool.Index, const zcu = pt.zcu; const gpa = zcu.gpa; const func = zcu.funcInfo(func_index); + const nav_index = func.owner_nav; - const atom_index = try coff.getOrCreateAtomForNav(func.owner_nav); + const atom_index = try coff.getOrCreateAtomForNav(nav_index); coff.freeRelocations(atom_index); coff.navs.getPtr(func.owner_nav).?.section = coff.text_section_index.?; - var code_buffer = std.ArrayList(u8).init(gpa); - defer code_buffer.deinit(); + var code_buffer: std.ArrayListUnmanaged(u8) = .empty; + defer code_buffer.deinit(gpa); - const res = try codegen.generateFunction( + codegen.generateFunction( &coff.base, pt, - zcu.navSrcLoc(func.owner_nav), + zcu.navSrcLoc(nav_index), func_index, air, liveness, &code_buffer, .none, - ); - const code = switch (res) { - .ok => code_buffer.items, - .fail => |em| { - try zcu.failed_codegen.put(zcu.gpa, func.owner_nav, em); - return; + ) catch |err| switch (err) { + error.CodegenFail => return error.CodegenFail, + error.OutOfMemory => return error.OutOfMemory, + error.Overflow => |e| { + try zcu.failed_codegen.putNoClobber(gpa, nav_index, try Zcu.ErrorMsg.create( + gpa, + zcu.navSrcLoc(nav_index), + "unable to codegen: {s}", + .{@errorName(e)}, + )); + try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .func = func_index })); + return error.CodegenFail; }, }; - try coff.updateNavCode(pt, func.owner_nav, code, .FUNCTION); + try coff.updateNavCode(pt, nav_index, code_buffer.items, .FUNCTION); // Exports will be updated by `Zcu.processExports` after the update. 
} @@ -1154,24 +1167,21 @@ fn lowerConst( ) !LowerConstResult { const gpa = coff.base.comp.gpa; - var code_buffer = std.ArrayList(u8).init(gpa); - defer code_buffer.deinit(); + var code_buffer: std.ArrayListUnmanaged(u8) = .empty; + defer code_buffer.deinit(gpa); const atom_index = try coff.createAtom(); const sym = coff.getAtom(atom_index).getSymbolPtr(coff); try coff.setSymbolName(sym, name); sym.section_number = @as(coff_util.SectionNumber, @enumFromInt(sect_id + 1)); - const res = try codegen.generateSymbol(&coff.base, pt, src_loc, val, &code_buffer, .{ + try codegen.generateSymbol(&coff.base, pt, src_loc, val, &code_buffer, .{ .atom_index = coff.getAtom(atom_index).getSymbolIndex().?, }); - const code = switch (res) { - .ok => code_buffer.items, - .fail => |em| return .{ .fail = em }, - }; + const code = code_buffer.items; const atom = coff.getAtomPtr(atom_index); - atom.size = @as(u32, @intCast(code.len)); + atom.size = @intCast(code.len); atom.getSymbolPtr(coff).value = try coff.allocateAtom( atom_index, atom.size, @@ -1227,10 +1237,10 @@ pub fn updateNav( coff.navs.getPtr(nav_index).?.section = coff.getNavOutputSection(nav_index); - var code_buffer = std.ArrayList(u8).init(gpa); - defer code_buffer.deinit(); + var code_buffer: std.ArrayListUnmanaged(u8) = .empty; + defer code_buffer.deinit(gpa); - const res = try codegen.generateSymbol( + try codegen.generateSymbol( &coff.base, pt, zcu.navSrcLoc(nav_index), @@ -1238,15 +1248,8 @@ pub fn updateNav( &code_buffer, .{ .atom_index = atom.getSymbolIndex().? }, ); - const code = switch (res) { - .ok => code_buffer.items, - .fail => |em| { - try zcu.failed_codegen.put(gpa, nav_index, em); - return; - }, - }; - try coff.updateNavCode(pt, nav_index, code, .NULL); + try coff.updateNavCode(pt, nav_index, code_buffer.items, .NULL); } // Exports will be updated by `Zcu.processExports` after the update. 
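Two mechanical changes repeat across the COFF hunks above (and the other backends in this diff): `code_buffer` becomes a caller-owned `std.ArrayListUnmanaged(u8)` initialized from `.empty` and freed with an explicit allocator, and codegen reports failure through its error set instead of returning a `Result` union with an `.ok`/`.fail` payload. A minimal sketch of that calling convention, with an invented `generateCode` standing in for `codegen.generateSymbol`/`generateFunction`:

const std = @import("std");

// Hypothetical generator: appends bytes into a caller-owned unmanaged list
// and propagates errors directly rather than packing them into a result value.
fn generateCode(gpa: std.mem.Allocator, code: *std.ArrayListUnmanaged(u8)) error{OutOfMemory}!void {
    try code.appendSlice(gpa, &.{ 0x90, 0xc3 }); // placeholder machine code
}

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
    defer code_buffer.deinit(gpa);

    try generateCode(gpa, &code_buffer);
    std.debug.print("generated {d} bytes\n", .{code_buffer.items.len});
}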
@@ -1260,11 +1263,12 @@ fn updateLazySymbolAtom( section_index: u16, ) !void { const zcu = pt.zcu; - const gpa = zcu.gpa; + const comp = coff.base.comp; + const gpa = comp.gpa; var required_alignment: InternPool.Alignment = .none; - var code_buffer = std.ArrayList(u8).init(gpa); - defer code_buffer.deinit(); + var code_buffer: std.ArrayListUnmanaged(u8) = .empty; + defer code_buffer.deinit(gpa); const name = try allocPrint(gpa, "__lazy_{s}_{}", .{ @tagName(sym.kind), @@ -1276,7 +1280,7 @@ fn updateLazySymbolAtom( const local_sym_index = atom.getSymbolIndex().?; const src = Type.fromInterned(sym.ty).srcLocOrNull(zcu) orelse Zcu.LazySrcLoc.unneeded; - const res = try codegen.generateLazySymbol( + try codegen.generateLazySymbol( &coff.base, pt, src, @@ -1286,13 +1290,7 @@ fn updateLazySymbolAtom( .none, .{ .atom_index = local_sym_index }, ); - const code = switch (res) { - .ok => code_buffer.items, - .fail => |em| { - log.err("{s}", .{em.msg}); - return error.CodegenFail; - }, - }; + const code = code_buffer.items; const code_len: u32 = @intCast(code.len); const symbol = atom.getSymbolPtr(coff); @@ -1387,7 +1385,7 @@ fn updateNavCode( nav_index: InternPool.Nav.Index, code: []u8, complex_type: coff_util.ComplexType, -) !void { +) link.File.UpdateNavError!void { const zcu = pt.zcu; const ip = &zcu.intern_pool; const nav = ip.getNav(nav_index); @@ -1405,18 +1403,21 @@ fn updateNavCode( const atom = coff.getAtom(atom_index); const sym_index = atom.getSymbolIndex().?; const sect_index = nav_metadata.section; - const code_len = @as(u32, @intCast(code.len)); + const code_len: u32 = @intCast(code.len); if (atom.size != 0) { const sym = atom.getSymbolPtr(coff); try coff.setSymbolName(sym, nav.fqn.toSlice(ip)); - sym.section_number = @as(coff_util.SectionNumber, @enumFromInt(sect_index + 1)); + sym.section_number = @enumFromInt(sect_index + 1); sym.type = .{ .complex_type = complex_type, .base_type = .NULL }; const capacity = atom.capacity(coff); const need_realloc = code.len > capacity or !required_alignment.check(sym.value); if (need_realloc) { - const vaddr = try coff.growAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0)); + const vaddr = coff.growAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0)) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + else => |e| return coff.base.cgFail(nav_index, "failed to grow atom: {s}", .{@errorName(e)}), + }; log.debug("growing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), sym.value, vaddr }); log.debug(" (required alignment 0x{x}", .{required_alignment}); @@ -1424,7 +1425,10 @@ fn updateNavCode( sym.value = vaddr; log.debug(" (updating GOT entry)", .{}); const got_entry_index = coff.got_table.lookup.get(.{ .sym_index = sym_index }).?; - try coff.writeOffsetTableEntry(got_entry_index); + coff.writeOffsetTableEntry(got_entry_index) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + else => |e| return coff.base.cgFail(nav_index, "failed to write offset table entry: {s}", .{@errorName(e)}), + }; coff.markRelocsDirtyByTarget(.{ .sym_index = sym_index }); } } else if (code_len < atom.size) { @@ -1434,26 +1438,34 @@ fn updateNavCode( } else { const sym = atom.getSymbolPtr(coff); try coff.setSymbolName(sym, nav.fqn.toSlice(ip)); - sym.section_number = @as(coff_util.SectionNumber, @enumFromInt(sect_index + 1)); + sym.section_number = @enumFromInt(sect_index + 1); sym.type = .{ .complex_type = complex_type, .base_type = .NULL }; - const vaddr = try 
coff.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0)); + const vaddr = coff.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0)) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + else => |e| return coff.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(e)}), + }; errdefer coff.freeAtom(atom_index); log.debug("allocated atom for {} at 0x{x}", .{ nav.fqn.fmt(ip), vaddr }); coff.getAtomPtr(atom_index).size = code_len; sym.value = vaddr; - try coff.addGotEntry(.{ .sym_index = sym_index }); + coff.addGotEntry(.{ .sym_index = sym_index }) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + else => |e| return coff.base.cgFail(nav_index, "failed to add GOT entry: {s}", .{@errorName(e)}), + }; } - try coff.writeAtom(atom_index, code); + coff.writeAtom(atom_index, code) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + else => |e| return coff.base.cgFail(nav_index, "failed to write atom: {s}", .{@errorName(e)}), + }; } pub fn freeNav(coff: *Coff, nav_index: InternPool.NavIndex) void { if (coff.llvm_object) |llvm_object| return llvm_object.freeNav(nav_index); const gpa = coff.base.comp.gpa; - log.debug("freeDecl 0x{x}", .{nav_index}); if (coff.decls.fetchOrderedRemove(nav_index)) |const_kv| { var kv = const_kv; @@ -1466,7 +1478,7 @@ pub fn updateExports( coff: *Coff, pt: Zcu.PerThread, exported: Zcu.Exported, - export_indices: []const u32, + export_indices: []const Zcu.Export.Index, ) link.File.UpdateExportsError!void { if (build_options.skip_non_native and builtin.object_format != .coff) { @panic("Attempted to compile for object format that was disabled by build configuration"); @@ -1481,7 +1493,7 @@ pub fn updateExports( // Even in the case of LLVM, we need to notice certain exported symbols in order to // detect the default subsystem. 
for (export_indices) |export_idx| { - const exp = zcu.all_exports.items[export_idx]; + const exp = export_idx.ptr(zcu); const exported_nav_index = switch (exp.exported) { .nav => |nav| nav, .uav => continue, @@ -1524,7 +1536,7 @@ pub fn updateExports( break :blk coff.navs.getPtr(nav).?; }, .uav => |uav| coff.uavs.getPtr(uav) orelse blk: { - const first_exp = zcu.all_exports.items[export_indices[0]]; + const first_exp = export_indices[0].ptr(zcu); const res = try coff.lowerUav(pt, uav, .none, first_exp.src); switch (res) { .mcv => {}, @@ -1543,7 +1555,7 @@ pub fn updateExports( const atom = coff.getAtom(atom_index); for (export_indices) |export_idx| { - const exp = zcu.all_exports.items[export_idx]; + const exp = export_idx.ptr(zcu); log.debug("adding new export '{}'", .{exp.opts.name.fmt(&zcu.intern_pool)}); if (exp.opts.section.toSlice(&zcu.intern_pool)) |section_name| { @@ -1671,12 +1683,17 @@ fn resolveGlobalSymbol(coff: *Coff, current: SymbolWithLoc) !void { pub fn flush(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { const comp = coff.base.comp; const use_lld = build_options.have_llvm and comp.config.use_lld; + const diags = &comp.link_diags; if (use_lld) { - return coff.linkWithLLD(arena, tid, prog_node); + return coff.linkWithLLD(arena, tid, prog_node) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.LinkFailure => return error.LinkFailure, + else => |e| return diags.fail("failed to link with LLD: {s}", .{@errorName(e)}), + }; } switch (comp.config.output_mode) { .Exe, .Obj => return coff.flushModule(arena, tid, prog_node), - .Lib => return error.TODOImplementWritingLibFiles, + .Lib => return diags.fail("writing lib files not yet implemented for COFF", .{}), } } @@ -2207,12 +2224,16 @@ fn findLib(arena: Allocator, name: []const u8, lib_directories: []const Director return null; } -pub fn flushModule(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { +pub fn flushModule( + coff: *Coff, + arena: Allocator, + tid: Zcu.PerThread.Id, + prog_node: std.Progress.Node, +) link.File.FlushError!void { const tracy = trace(@src()); defer tracy.end(); const comp = coff.base.comp; - const gpa = comp.gpa; const diags = &comp.link_diags; if (coff.llvm_object) |llvm_object| { @@ -2223,8 +2244,22 @@ pub fn flushModule(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no const sub_prog_node = prog_node.start("COFF Flush", 0); defer sub_prog_node.end(); + return flushModuleInner(coff, arena, tid) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.LinkFailure => return error.LinkFailure, + else => |e| return diags.fail("COFF flush failed: {s}", .{@errorName(e)}), + }; +} + +fn flushModuleInner(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id) !void { + _ = arena; + + const comp = coff.base.comp; + const gpa = comp.gpa; + const diags = &comp.link_diags; + const pt: Zcu.PerThread = .activate( - comp.zcu orelse return error.LinkingWithoutZigSourceUnimplemented, + comp.zcu orelse return diags.fail("linking without zig source is not yet implemented", .{}), tid, ); defer pt.deactivate(); @@ -2232,24 +2267,18 @@ pub fn flushModule(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no if (coff.lazy_syms.getPtr(.anyerror_type)) |metadata| { // Most lazy symbols can be updated on first use, but // anyerror needs to wait for everything to be flushed. 
- if (metadata.text_state != .unused) coff.updateLazySymbolAtom( + if (metadata.text_state != .unused) try coff.updateLazySymbolAtom( pt, .{ .kind = .code, .ty = .anyerror_type }, metadata.text_atom, coff.text_section_index.?, - ) catch |err| return switch (err) { - error.CodegenFail => error.FlushFailure, - else => |e| e, - }; - if (metadata.rdata_state != .unused) coff.updateLazySymbolAtom( + ); + if (metadata.rdata_state != .unused) try coff.updateLazySymbolAtom( pt, .{ .kind = .const_data, .ty = .anyerror_type }, metadata.rdata_atom, coff.rdata_section_index.?, - ) catch |err| return switch (err) { - error.CodegenFail => error.FlushFailure, - else => |e| e, - }; + ); } for (coff.lazy_syms.values()) |*metadata| { if (metadata.text_state != .unused) metadata.text_state = .flushed; @@ -2594,7 +2623,7 @@ fn writeBaseRelocations(coff: *Coff) !void { const needed_size = @as(u32, @intCast(buffer.items.len)); try coff.growSection(coff.reloc_section_index.?, needed_size); - try coff.base.file.?.pwriteAll(buffer.items, header.pointer_to_raw_data); + try coff.pwriteAll(buffer.items, header.pointer_to_raw_data); coff.data_directories[@intFromEnum(coff_util.DirectoryEntry.BASERELOC)] = .{ .virtual_address = header.virtual_address, @@ -2727,7 +2756,7 @@ fn writeImportTables(coff: *Coff) !void { assert(dll_names_offset == needed_size); - try coff.base.file.?.pwriteAll(buffer.items, header.pointer_to_raw_data); + try coff.pwriteAll(buffer.items, header.pointer_to_raw_data); coff.data_directories[@intFromEnum(coff_util.DirectoryEntry.IMPORT)] = .{ .virtual_address = header.virtual_address + iat_size, @@ -2744,17 +2773,19 @@ fn writeImportTables(coff: *Coff) !void { fn writeStrtab(coff: *Coff) !void { if (coff.strtab_offset == null) return; + const comp = coff.base.comp; + const gpa = comp.gpa; + const diags = &comp.link_diags; const allocated_size = coff.allocatedSize(coff.strtab_offset.?); - const needed_size = @as(u32, @intCast(coff.strtab.buffer.items.len)); + const needed_size: u32 = @intCast(coff.strtab.buffer.items.len); if (needed_size > allocated_size) { coff.strtab_offset = null; - coff.strtab_offset = @as(u32, @intCast(coff.findFreeSpace(needed_size, @alignOf(u32)))); + coff.strtab_offset = @intCast(coff.findFreeSpace(needed_size, @alignOf(u32))); } log.debug("writing strtab from 0x{x} to 0x{x}", .{ coff.strtab_offset.?, coff.strtab_offset.? + needed_size }); - const gpa = coff.base.comp.gpa; var buffer = std.ArrayList(u8).init(gpa); defer buffer.deinit(); try buffer.ensureTotalCapacityPrecise(needed_size); @@ -2763,17 +2794,19 @@ fn writeStrtab(coff: *Coff) !void { // we write the length of the strtab to a temporary buffer that goes to file. mem.writeInt(u32, buffer.items[0..4], @as(u32, @intCast(coff.strtab.buffer.items.len)), .little); - try coff.base.file.?.pwriteAll(buffer.items, coff.strtab_offset.?); + coff.pwriteAll(buffer.items, coff.strtab_offset.?) 
catch |err| { + return diags.fail("failed to write: {s}", .{@errorName(err)}); + }; } fn writeSectionHeaders(coff: *Coff) !void { const offset = coff.getSectionHeadersOffset(); - try coff.base.file.?.pwriteAll(mem.sliceAsBytes(coff.sections.items(.header)), offset); + try coff.pwriteAll(mem.sliceAsBytes(coff.sections.items(.header)), offset); } fn writeDataDirectoriesHeaders(coff: *Coff) !void { const offset = coff.getDataDirectoryHeadersOffset(); - try coff.base.file.?.pwriteAll(mem.sliceAsBytes(&coff.data_directories), offset); + try coff.pwriteAll(mem.sliceAsBytes(&coff.data_directories), offset); } fn writeHeader(coff: *Coff) !void { @@ -2913,7 +2946,7 @@ fn writeHeader(coff: *Coff) !void { }, } - try coff.base.file.?.pwriteAll(buffer.items, 0); + try coff.pwriteAll(buffer.items, 0); } pub fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) { @@ -3710,6 +3743,14 @@ const ImportTable = struct { const ImportIndex = u32; }; +fn pwriteAll(coff: *Coff, bytes: []const u8, offset: u64) error{LinkFailure}!void { + const comp = coff.base.comp; + const diags = &comp.link_diags; + coff.base.file.?.pwriteAll(bytes, offset) catch |err| { + return diags.fail("failed to write: {s}", .{@errorName(err)}); + }; +} + const Coff = @This(); const std = @import("std"); diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 2aa04a5efd0a..f76c4838539e 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -21,7 +21,6 @@ debug_rnglists: DebugRngLists, debug_str: StringSection, pub const UpdateError = error{ - CodegenFail, ReinterpretDeclRef, Unimplemented, OutOfMemory, @@ -451,7 +450,6 @@ pub const Section = struct { const zo = elf_file.zigObjectPtr().?; const atom = zo.symbol(sec.index).atom(elf_file).?; if (atom.prevAtom(elf_file)) |_| { - // FIXME:JK trimming/shrinking has to be reworked on ZigObject/Elf level atom.value += len; } else { const shdr = &elf_file.sections.items(.shdr)[atom.output_section_index]; @@ -600,12 +598,13 @@ const Unit = struct { fn move(unit: *Unit, sec: *Section, dwarf: *Dwarf, new_off: u32) UpdateError!void { if (unit.off == new_off) return; - if (try dwarf.getFile().?.copyRangeAll( + const n = try dwarf.getFile().?.copyRangeAll( sec.off(dwarf) + unit.off, dwarf.getFile().?, sec.off(dwarf) + new_off, unit.len, - ) != unit.len) return error.InputOutput; + ); + if (n != unit.len) return error.InputOutput; unit.off = new_off; } @@ -1891,19 +1890,16 @@ pub const WipNav = struct { const bytes = if (ty.hasRuntimeBits(wip_nav.pt.zcu)) ty.abiSize(wip_nav.pt.zcu) else 0; try uleb128(diw, bytes); if (bytes == 0) return; - var dim = wip_nav.debug_info.toManaged(wip_nav.dwarf.gpa); - defer wip_nav.debug_info = dim.moveToUnmanaged(); - switch (try codegen.generateSymbol( + const old_len = wip_nav.debug_info.items.len; + try codegen.generateSymbol( wip_nav.dwarf.bin_file, wip_nav.pt, src_loc, val, - &dim, + &wip_nav.debug_info, .{ .debug_output = .{ .dwarf = wip_nav } }, - )) { - .ok => assert(dim.items.len == wip_nav.debug_info.items.len + bytes), - .fail => unreachable, - } + ); + assert(old_len + bytes == wip_nav.debug_info.items.len); } const AbbrevCodeForForm = struct { @@ -2278,7 +2274,7 @@ pub fn deinit(dwarf: *Dwarf) void { dwarf.* = undefined; } -fn getUnit(dwarf: *Dwarf, mod: *Module) UpdateError!Unit.Index { +fn getUnit(dwarf: *Dwarf, mod: *Module) !Unit.Index { const mod_gop = try dwarf.mods.getOrPut(dwarf.gpa, mod); const unit: Unit.Index = @enumFromInt(mod_gop.index); if (!mod_gop.found_existing) { @@ -2338,7 +2334,24 @@ fn getModInfo(dwarf: *Dwarf, unit: Unit.Index) 
*ModInfo { return &dwarf.mods.values()[@intFromEnum(unit)]; } -pub fn initWipNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index, sym_index: u32) UpdateError!?WipNav { +pub fn initWipNav( + dwarf: *Dwarf, + pt: Zcu.PerThread, + nav_index: InternPool.Nav.Index, + sym_index: u32, +) error{ OutOfMemory, CodegenFail }!?WipNav { + return initWipNavInner(dwarf, pt, nav_index, sym_index) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + else => |e| return pt.zcu.codegenFail(nav_index, "failed to init dwarf: {s}", .{@errorName(e)}), + }; +} + +fn initWipNavInner( + dwarf: *Dwarf, + pt: Zcu.PerThread, + nav_index: InternPool.Nav.Index, + sym_index: u32, +) !?WipNav { const zcu = pt.zcu; const ip = &zcu.intern_pool; @@ -2667,7 +2680,14 @@ pub fn finishWipNav( try wip_nav.updateLazy(zcu.navSrcLoc(nav_index)); } -pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) UpdateError!void { +pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) error{ OutOfMemory, CodegenFail }!void { + return updateComptimeNavInner(dwarf, pt, nav_index) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + else => |e| return pt.zcu.codegenFail(nav_index, "failed to update dwarf: {s}", .{@errorName(e)}), + }; +} + +fn updateComptimeNavInner(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void { const zcu = pt.zcu; const ip = &zcu.intern_pool; const nav_src_loc = zcu.navSrcLoc(nav_index); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 716a1ee59c22..ea2fa56a5d9a 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -795,9 +795,15 @@ pub fn loadInput(self: *Elf, input: link.Input) !void { } pub fn flush(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { - const use_lld = build_options.have_llvm and self.base.comp.config.use_lld; + const comp = self.base.comp; + const use_lld = build_options.have_llvm and comp.config.use_lld; + const diags = &comp.link_diags; if (use_lld) { - return self.linkWithLLD(arena, tid, prog_node); + return self.linkWithLLD(arena, tid, prog_node) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.LinkFailure => return error.LinkFailure, + else => |e| return diags.fail("failed to link with LLD: {s}", .{@errorName(e)}), + }; } try self.flushModule(arena, tid, prog_node); } @@ -807,7 +813,6 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod defer tracy.end(); const comp = self.base.comp; - const gpa = comp.gpa; const diags = &comp.link_diags; if (self.llvm_object) |llvm_object| { @@ -821,6 +826,18 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod const sub_prog_node = prog_node.start("ELF Flush", 0); defer sub_prog_node.end(); + return flushModuleInner(self, arena, tid) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.LinkFailure => return error.LinkFailure, + else => |e| return diags.fail("ELF flush failed: {s}", .{@errorName(e)}), + }; +} + +fn flushModuleInner(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id) !void { + const comp = self.base.comp; + const gpa = comp.gpa; + const diags = &comp.link_diags; + const module_obj_path: ?Path = if (self.base.zcu_object_sub_path) |path| .{ .root_dir = self.base.emit.root_dir, .sub_path = if (fs.path.dirname(self.base.emit.sub_path)) |dirname| @@ -842,12 +859,12 @@ pub fn flushModule(self: *Elf, 
arena: Allocator, tid: Zcu.PerThread.Id, prog_nod .Exe => {}, } - if (diags.hasErrors()) return error.FlushFailure; + if (diags.hasErrors()) return error.LinkFailure; // If we haven't already, create a linker-generated input file comprising of // linker-defined synthetic symbols only such as `_DYNAMIC`, etc. if (self.linker_defined_index == null) { - const index = @as(File.Index, @intCast(try self.files.addOne(gpa))); + const index: File.Index = @intCast(try self.files.addOne(gpa)); self.files.set(index, .{ .linker_defined = .{ .index = index } }); self.linker_defined_index = index; const object = self.linkerDefinedPtr().?; @@ -878,7 +895,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod } self.checkDuplicates() catch |err| switch (err) { - error.HasDuplicates => return error.FlushFailure, + error.HasDuplicates => return error.LinkFailure, else => |e| return e, }; @@ -956,14 +973,14 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod error.RelocFailure, error.RelaxFailure => has_reloc_errors = true, error.UnsupportedCpuArch => { try self.reportUnsupportedCpuArch(); - return error.FlushFailure; + return error.LinkFailure; }, else => |e| return e, }; - try self.base.file.?.pwriteAll(code, file_offset); + try self.pwriteAll(code, file_offset); } - if (has_reloc_errors) return error.FlushFailure; + if (has_reloc_errors) return error.LinkFailure; } try self.writePhdrTable(); @@ -972,10 +989,10 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod try self.writeMergeSections(); self.writeSyntheticSections() catch |err| switch (err) { - error.RelocFailure => return error.FlushFailure, + error.RelocFailure => return error.LinkFailure, error.UnsupportedCpuArch => { try self.reportUnsupportedCpuArch(); - return error.FlushFailure; + return error.LinkFailure; }, else => |e| return e, }; @@ -989,7 +1006,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod try self.writeElfHeader(); } - if (diags.hasErrors()) return error.FlushFailure; + if (diags.hasErrors()) return error.LinkFailure; } fn dumpArgvInit(self: *Elf, arena: Allocator) !void { @@ -1389,7 +1406,7 @@ fn scanRelocs(self: *Elf) !void { error.RelaxFailure => unreachable, error.UnsupportedCpuArch => { try self.reportUnsupportedCpuArch(); - return error.FlushFailure; + return error.LinkFailure; }, error.RelocFailure => has_reloc_errors = true, else => |e| return e, @@ -1400,7 +1417,7 @@ fn scanRelocs(self: *Elf) !void { error.RelaxFailure => unreachable, error.UnsupportedCpuArch => { try self.reportUnsupportedCpuArch(); - return error.FlushFailure; + return error.LinkFailure; }, error.RelocFailure => has_reloc_errors = true, else => |e| return e, @@ -1409,7 +1426,7 @@ fn scanRelocs(self: *Elf) !void { try self.reportUndefinedSymbols(&undefs); - if (has_reloc_errors) return error.FlushFailure; + if (has_reloc_errors) return error.LinkFailure; if (self.zigObjectPtr()) |zo| { try zo.asFile().createSymbolIndirection(self); @@ -2117,7 +2134,7 @@ pub fn writeShdrTable(self: *Elf) !void { mem.byteSwapAllFields(elf.Elf32_Shdr, shdr); } } - try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?); + try self.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?); }, .p64 => { const buf = try gpa.alloc(elf.Elf64_Shdr, self.sections.items(.shdr).len); @@ -2130,7 +2147,7 @@ pub fn writeShdrTable(self: *Elf) !void { mem.byteSwapAllFields(elf.Elf64_Shdr, shdr); } } - try 
self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?); + try self.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?); }, } } @@ -2157,7 +2174,7 @@ fn writePhdrTable(self: *Elf) !void { mem.byteSwapAllFields(elf.Elf32_Phdr, phdr); } } - try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), phdr_table.p_offset); + try self.pwriteAll(mem.sliceAsBytes(buf), phdr_table.p_offset); }, .p64 => { const buf = try gpa.alloc(elf.Elf64_Phdr, self.phdrs.items.len); @@ -2169,7 +2186,7 @@ fn writePhdrTable(self: *Elf) !void { mem.byteSwapAllFields(elf.Elf64_Phdr, phdr); } } - try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), phdr_table.p_offset); + try self.pwriteAll(mem.sliceAsBytes(buf), phdr_table.p_offset); }, } } @@ -2319,7 +2336,7 @@ pub fn writeElfHeader(self: *Elf) !void { assert(index == e_ehsize); - try self.base.file.?.pwriteAll(hdr_buf[0..index], 0); + try self.pwriteAll(hdr_buf[0..index], 0); } pub fn freeNav(self: *Elf, nav: InternPool.Nav.Index) void { @@ -2327,7 +2344,13 @@ pub fn freeNav(self: *Elf, nav: InternPool.Nav.Index) void { return self.zigObjectPtr().?.freeNav(self, nav); } -pub fn updateFunc(self: *Elf, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { +pub fn updateFunc( + self: *Elf, + pt: Zcu.PerThread, + func_index: InternPool.Index, + air: Air, + liveness: Liveness, +) link.File.UpdateNavError!void { if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); } @@ -2351,19 +2374,32 @@ pub fn updateContainerType( self: *Elf, pt: Zcu.PerThread, ty: InternPool.Index, -) link.File.UpdateNavError!void { +) link.File.UpdateContainerTypeError!void { if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (self.llvm_object) |_| return; - return self.zigObjectPtr().?.updateContainerType(pt, ty); + const zcu = pt.zcu; + const gpa = zcu.gpa; + return self.zigObjectPtr().?.updateContainerType(pt, ty) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + else => |e| { + try zcu.failed_types.putNoClobber(gpa, ty, try Zcu.ErrorMsg.create( + gpa, + zcu.typeSrcLoc(ty), + "failed to update container type: {s}", + .{@errorName(e)}, + )); + return error.TypeFailureReported; + }, + }; } pub fn updateExports( self: *Elf, pt: Zcu.PerThread, exported: Zcu.Exported, - export_indices: []const u32, + export_indices: []const Zcu.Export.Index, ) link.File.UpdateExportsError!void { if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); @@ -2441,7 +2477,7 @@ pub fn resolveMergeSections(self: *Elf) !void { }; } - if (has_errors) return error.FlushFailure; + if (has_errors) return error.LinkFailure; for (self.objects.items) |index| { const object = self.file(index).?.object; @@ -2491,8 +2527,8 @@ pub fn writeMergeSections(self: *Elf) !void { for (self.merge_sections.items) |*msec| { const shdr = self.sections.items(.shdr)[msec.output_section_index]; - const fileoff = math.cast(usize, msec.value + shdr.sh_offset) orelse return error.Overflow; - const size = math.cast(usize, msec.size) orelse return error.Overflow; + const fileoff = try self.cast(usize, msec.value + shdr.sh_offset); + const size = try self.cast(usize, msec.size); try buffer.ensureTotalCapacity(size); 
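
The Dwarf.zig and Elf.zig hunks above repeatedly split a routine into a public wrapper with a narrowed error set and an `*Inner` twin that keeps the wide inferred set, converting anything the caller is not meant to see into a recorded diagnostic (via `pt.zcu.codegenFail` or `diags.fail`). A minimal sketch of that shape, using stand-in names (`Diags`, `flushThing`) rather than the real `link_diags` API:

const std = @import("std");

/// Illustrative stand-in for the linker's diagnostics sink; the real
/// `link_diags.fail` records the message for later reporting.
const Diags = struct {
    error_count: usize = 0,

    fn fail(diags: *Diags, comptime fmt: []const u8, args: anytype) error{LinkFailure} {
        diags.error_count += 1;
        std.log.err(fmt, args);
        return error.LinkFailure;
    }
};

/// Public wrapper: callers only ever see OutOfMemory or LinkFailure.
pub fn flushThing(diags: *Diags, gpa: std.mem.Allocator, size: u64) error{ OutOfMemory, LinkFailure }!void {
    return flushThingInner(gpa, size) catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        else => |e| return diags.fail("flush failed: {s}", .{@errorName(e)}),
    };
}

/// The inner function keeps a wide inferred error set so each step can use plain `try`.
fn flushThingInner(gpa: std.mem.Allocator, size: u64) !void {
    const len = std.math.cast(usize, size) orelse return error.Overflow;
    const buf = try gpa.alloc(u8, len);
    defer gpa.free(buf);
    @memset(buf, 0);
}

The same split appears as initWipNav/initWipNavInner and updateComptimeNav/updateComptimeNavInner in Dwarf.zig, and as flushModule/flushModuleInner in Elf.zig.
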
buffer.appendNTimesAssumeCapacity(0, size); @@ -2500,11 +2536,11 @@ pub fn writeMergeSections(self: *Elf) !void { const msub = msec.mergeSubsection(msub_index); assert(msub.alive); const string = msub.getString(self); - const off = math.cast(usize, msub.value) orelse return error.Overflow; + const off = try self.cast(usize, msub.value); @memcpy(buffer.items[off..][0..string.len], string); } - try self.base.file.?.pwriteAll(buffer.items, fileoff); + try self.pwriteAll(buffer.items, fileoff); buffer.clearRetainingCapacity(); } } @@ -3121,9 +3157,6 @@ pub fn sortShdrs( fileLookup(files, ref.file, zig_object_ptr).?.atom(ref.index).?.output_section_index = atom_list.output_section_index; } if (shdr.sh_type == elf.SHT_RELA) { - // FIXME:JK we should spin up .symtab potentially earlier, or set all non-dynamic RELA sections - // to point at symtab - // shdr.sh_link = backlinks[shdr.sh_link]; shdr.sh_link = section_indexes.symtab.?; shdr.sh_info = backlinks[shdr.sh_info]; } @@ -3211,7 +3244,7 @@ fn updateSectionSizes(self: *Elf) !void { atom_list.dirty = false; } - // FIXME:JK this will hopefully not be needed once we create a link from Atom/Thunk to AtomList. + // This might not be needed if there was a link from Atom/Thunk to AtomList. for (self.thunks.items) |*th| { th.value += slice.items(.atom_list_2)[th.output_section_index].value; } @@ -3297,7 +3330,6 @@ fn updateSectionSizes(self: *Elf) !void { self.updateShStrtabSize(); } -// FIXME:JK this is very much obsolete, remove! pub fn updateShStrtabSize(self: *Elf) void { if (self.section_indexes.shstrtab) |index| { self.sections.items(.shdr)[index].sh_size = self.shstrtab.items.len; @@ -3362,7 +3394,7 @@ fn allocatePhdrTable(self: *Elf) error{OutOfMemory}!void { // TODO verify `getMaxNumberOfPhdrs()` is accurate and convert this into no-op var err = try diags.addErrorWithNotes(1); try err.addMsg("fatal linker error: not enough space reserved for EHDR and PHDR table", .{}); - try err.addNote("required 0x{x}, available 0x{x}", .{ needed_size, available_space }); + err.addNote("required 0x{x}, available 0x{x}", .{ needed_size, available_space }); } phdr_table_load.p_filesz = needed_size + ehsize; @@ -3658,7 +3690,7 @@ fn writeAtoms(self: *Elf) !void { atom_list.write(&buffer, &undefs, self) catch |err| switch (err) { error.UnsupportedCpuArch => { try self.reportUnsupportedCpuArch(); - return error.FlushFailure; + return error.LinkFailure; }, error.RelocFailure, error.RelaxFailure => has_reloc_errors = true, else => |e| return e, @@ -3666,7 +3698,7 @@ fn writeAtoms(self: *Elf) !void { } try self.reportUndefinedSymbols(&undefs); - if (has_reloc_errors) return error.FlushFailure; + if (has_reloc_errors) return error.LinkFailure; if (self.requiresThunks()) { for (self.thunks.items) |th| { @@ -3676,7 +3708,7 @@ fn writeAtoms(self: *Elf) !void { const offset = @as(u64, @intCast(th.value)) + shdr.sh_offset; try th.write(self, buffer.writer()); assert(buffer.items.len == thunk_size); - try self.base.file.?.pwriteAll(buffer.items, offset); + try self.pwriteAll(buffer.items, offset); buffer.clearRetainingCapacity(); } } @@ -3784,12 +3816,12 @@ fn writeSyntheticSections(self: *Elf) !void { const contents = buffer[0 .. 
interp.len + 1]; const shdr = slice.items(.shdr)[shndx]; assert(shdr.sh_size == contents.len); - try self.base.file.?.pwriteAll(contents, shdr.sh_offset); + try self.pwriteAll(contents, shdr.sh_offset); } if (self.section_indexes.hash) |shndx| { const shdr = slice.items(.shdr)[shndx]; - try self.base.file.?.pwriteAll(self.hash.buffer.items, shdr.sh_offset); + try self.pwriteAll(self.hash.buffer.items, shdr.sh_offset); } if (self.section_indexes.gnu_hash) |shndx| { @@ -3797,12 +3829,12 @@ fn writeSyntheticSections(self: *Elf) !void { var buffer = try std.ArrayList(u8).initCapacity(gpa, self.gnu_hash.size()); defer buffer.deinit(); try self.gnu_hash.write(self, buffer.writer()); - try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset); + try self.pwriteAll(buffer.items, shdr.sh_offset); } if (self.section_indexes.versym) |shndx| { const shdr = slice.items(.shdr)[shndx]; - try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.versym.items), shdr.sh_offset); + try self.pwriteAll(mem.sliceAsBytes(self.versym.items), shdr.sh_offset); } if (self.section_indexes.verneed) |shndx| { @@ -3810,7 +3842,7 @@ fn writeSyntheticSections(self: *Elf) !void { var buffer = try std.ArrayList(u8).initCapacity(gpa, self.verneed.size()); defer buffer.deinit(); try self.verneed.write(buffer.writer()); - try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset); + try self.pwriteAll(buffer.items, shdr.sh_offset); } if (self.section_indexes.dynamic) |shndx| { @@ -3818,7 +3850,7 @@ fn writeSyntheticSections(self: *Elf) !void { var buffer = try std.ArrayList(u8).initCapacity(gpa, self.dynamic.size(self)); defer buffer.deinit(); try self.dynamic.write(self, buffer.writer()); - try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset); + try self.pwriteAll(buffer.items, shdr.sh_offset); } if (self.section_indexes.dynsymtab) |shndx| { @@ -3826,12 +3858,12 @@ fn writeSyntheticSections(self: *Elf) !void { var buffer = try std.ArrayList(u8).initCapacity(gpa, self.dynsym.size()); defer buffer.deinit(); try self.dynsym.write(self, buffer.writer()); - try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset); + try self.pwriteAll(buffer.items, shdr.sh_offset); } if (self.section_indexes.dynstrtab) |shndx| { const shdr = slice.items(.shdr)[shndx]; - try self.base.file.?.pwriteAll(self.dynstrtab.items, shdr.sh_offset); + try self.pwriteAll(self.dynstrtab.items, shdr.sh_offset); } if (self.section_indexes.eh_frame) |shndx| { @@ -3841,21 +3873,21 @@ fn writeSyntheticSections(self: *Elf) !void { break :existing_size sym.atom(self).?.size; }; const shdr = slice.items(.shdr)[shndx]; - const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow; + const sh_size = try self.cast(usize, shdr.sh_size); var buffer = try std.ArrayList(u8).initCapacity(gpa, @intCast(sh_size - existing_size)); defer buffer.deinit(); try eh_frame.writeEhFrame(self, buffer.writer()); assert(buffer.items.len == sh_size - existing_size); - try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset + existing_size); + try self.pwriteAll(buffer.items, shdr.sh_offset + existing_size); } if (self.section_indexes.eh_frame_hdr) |shndx| { const shdr = slice.items(.shdr)[shndx]; - const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow; + const sh_size = try self.cast(usize, shdr.sh_size); var buffer = try std.ArrayList(u8).initCapacity(gpa, sh_size); defer buffer.deinit(); try eh_frame.writeEhFrameHdr(self, buffer.writer()); - try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset); + try 
self.pwriteAll(buffer.items, shdr.sh_offset); } if (self.section_indexes.got) |index| { @@ -3863,7 +3895,7 @@ fn writeSyntheticSections(self: *Elf) !void { var buffer = try std.ArrayList(u8).initCapacity(gpa, self.got.size(self)); defer buffer.deinit(); try self.got.write(self, buffer.writer()); - try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset); + try self.pwriteAll(buffer.items, shdr.sh_offset); } if (self.section_indexes.rela_dyn) |shndx| { @@ -3871,7 +3903,7 @@ fn writeSyntheticSections(self: *Elf) !void { try self.got.addRela(self); try self.copy_rel.addRela(self); self.sortRelaDyn(); - try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.rela_dyn.items), shdr.sh_offset); + try self.pwriteAll(mem.sliceAsBytes(self.rela_dyn.items), shdr.sh_offset); } if (self.section_indexes.plt) |shndx| { @@ -3879,7 +3911,7 @@ fn writeSyntheticSections(self: *Elf) !void { var buffer = try std.ArrayList(u8).initCapacity(gpa, self.plt.size(self)); defer buffer.deinit(); try self.plt.write(self, buffer.writer()); - try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset); + try self.pwriteAll(buffer.items, shdr.sh_offset); } if (self.section_indexes.got_plt) |shndx| { @@ -3887,7 +3919,7 @@ fn writeSyntheticSections(self: *Elf) !void { var buffer = try std.ArrayList(u8).initCapacity(gpa, self.got_plt.size(self)); defer buffer.deinit(); try self.got_plt.write(self, buffer.writer()); - try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset); + try self.pwriteAll(buffer.items, shdr.sh_offset); } if (self.section_indexes.plt_got) |shndx| { @@ -3895,25 +3927,24 @@ fn writeSyntheticSections(self: *Elf) !void { var buffer = try std.ArrayList(u8).initCapacity(gpa, self.plt_got.size(self)); defer buffer.deinit(); try self.plt_got.write(self, buffer.writer()); - try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset); + try self.pwriteAll(buffer.items, shdr.sh_offset); } if (self.section_indexes.rela_plt) |shndx| { const shdr = slice.items(.shdr)[shndx]; try self.plt.addRela(self); - try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.rela_plt.items), shdr.sh_offset); + try self.pwriteAll(mem.sliceAsBytes(self.rela_plt.items), shdr.sh_offset); } try self.writeSymtab(); try self.writeShStrtab(); } -// FIXME:JK again, why is this needed? 
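
The writes above all go through `self.pwriteAll(...)` and `self.cast(...)` instead of `self.base.file.?.pwriteAll(...)` and `math.cast(...) orelse return error.Overflow`; the real helpers are added near the end of Elf.zig later in this diff. The effect is that file-system and overflow failures surface as a single `error.LinkFailure` with a recorded diagnostic rather than widening every writer's error set. A rough self-contained sketch of the helper shape, with a stand-in `Linker` type:

const std = @import("std");

const Linker = struct {
    file: std.fs.File,
    error_count: usize = 0,

    /// Stand-in for diags.fail: record the problem, then return the one
    /// error the flush wrapper knows how to handle.
    fn fail(l: *Linker, comptime fmt: []const u8, args: anytype) error{LinkFailure} {
        l.error_count += 1;
        std.log.err(fmt, args);
        return error.LinkFailure;
    }

    fn pwriteAll(l: *Linker, bytes: []const u8, offset: u64) error{LinkFailure}!void {
        l.file.pwriteAll(bytes, offset) catch |err|
            return l.fail("failed to write: {s}", .{@errorName(err)});
    }

    fn cast(l: *Linker, comptime T: type, x: anytype) error{LinkFailure}!T {
        return std.math.cast(T, x) orelse
            l.fail("encountered {d}, overflowing {d}-bit value", .{ x, @bitSizeOf(T) });
    }
};
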
pub fn writeShStrtab(self: *Elf) !void { if (self.section_indexes.shstrtab) |index| { const shdr = self.sections.items(.shdr)[index]; log.debug("writing .shstrtab from 0x{x} to 0x{x}", .{ shdr.sh_offset, shdr.sh_offset + shdr.sh_size }); - try self.base.file.?.pwriteAll(self.shstrtab.items, shdr.sh_offset); + try self.pwriteAll(self.shstrtab.items, shdr.sh_offset); } } @@ -3928,7 +3959,7 @@ pub fn writeSymtab(self: *Elf) !void { .p32 => @sizeOf(elf.Elf32_Sym), .p64 => @sizeOf(elf.Elf64_Sym), }; - const nsyms = math.cast(usize, @divExact(symtab_shdr.sh_size, sym_size)) orelse return error.Overflow; + const nsyms = try self.cast(usize, @divExact(symtab_shdr.sh_size, sym_size)); log.debug("writing {d} symbols in .symtab from 0x{x} to 0x{x}", .{ nsyms, @@ -3941,7 +3972,7 @@ pub fn writeSymtab(self: *Elf) !void { }); try self.symtab.resize(gpa, nsyms); - const needed_strtab_size = math.cast(usize, strtab_shdr.sh_size - 1) orelse return error.Overflow; + const needed_strtab_size = try self.cast(usize, strtab_shdr.sh_size - 1); // TODO we could resize instead and in ZigObject/Object always access as slice self.strtab.clearRetainingCapacity(); self.strtab.appendAssumeCapacity(0); @@ -4010,17 +4041,17 @@ pub fn writeSymtab(self: *Elf) !void { }; if (foreign_endian) mem.byteSwapAllFields(elf.Elf32_Sym, out); } - try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), symtab_shdr.sh_offset); + try self.pwriteAll(mem.sliceAsBytes(buf), symtab_shdr.sh_offset); }, .p64 => { if (foreign_endian) { for (self.symtab.items) |*sym| mem.byteSwapAllFields(elf.Elf64_Sym, sym); } - try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.symtab.items), symtab_shdr.sh_offset); + try self.pwriteAll(mem.sliceAsBytes(self.symtab.items), symtab_shdr.sh_offset); }, } - try self.base.file.?.pwriteAll(self.strtab.items, strtab_shdr.sh_offset); + try self.pwriteAll(self.strtab.items, strtab_shdr.sh_offset); } /// Always 4 or 8 depending on whether this is 32-bit ELF or 64-bit ELF. 
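
Throughout this diff, `try err.addNote(...)` becomes `err.addNote(...)`, while `addErrorWithNotes(n)` and `try err.addMsg(...)` keep their `try`. That is consistent with the note slots being reserved when the error is created, so appending a note can no longer allocate or fail. The sketch below illustrates the idea with simplified stand-in types (the real `link_diags` notes are formatted messages, not plain strings):

const std = @import("std");

const Note = struct { msg: []const u8 };

const ErrorWithNotes = struct {
    /// Reserved up front by addErrorWithNotes(n).
    notes: []Note,
    next: usize = 0,

    /// Infallible: the slot already exists, nothing is allocated here.
    fn addNote(err: *ErrorWithNotes, msg: []const u8) void {
        err.notes[err.next] = .{ .msg = msg };
        err.next += 1;
    }
};

fn addErrorWithNotes(gpa: std.mem.Allocator, note_count: usize) error{OutOfMemory}!ErrorWithNotes {
    // The only fallible step is reserving the note slots.
    return .{ .notes = try gpa.alloc(Note, note_count) };
}
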
@@ -4514,12 +4545,12 @@ fn reportUndefinedSymbols(self: *Elf, undefs: anytype) !void { for (refs.items[0..nrefs]) |ref| { const atom_ptr = self.atom(ref).?; const file_ptr = atom_ptr.file(self).?; - try err.addNote("referenced by {s}:{s}", .{ file_ptr.fmtPath(), atom_ptr.name(self) }); + err.addNote("referenced by {s}:{s}", .{ file_ptr.fmtPath(), atom_ptr.name(self) }); } if (refs.items.len > max_notes) { const remaining = refs.items.len - max_notes; - try err.addNote("referenced {d} more times", .{remaining}); + err.addNote("referenced {d} more times", .{remaining}); } } } @@ -4536,17 +4567,17 @@ fn reportDuplicates(self: *Elf, dupes: anytype) error{ HasDuplicates, OutOfMemor var err = try diags.addErrorWithNotes(nnotes + 1); try err.addMsg("duplicate symbol definition: {s}", .{sym.name(self)}); - try err.addNote("defined by {}", .{sym.file(self).?.fmtPath()}); + err.addNote("defined by {}", .{sym.file(self).?.fmtPath()}); var inote: usize = 0; while (inote < @min(notes.items.len, max_notes)) : (inote += 1) { const file_ptr = self.file(notes.items[inote]).?; - try err.addNote("defined by {}", .{file_ptr.fmtPath()}); + err.addNote("defined by {}", .{file_ptr.fmtPath()}); } if (notes.items.len > max_notes) { const remaining = notes.items.len - max_notes; - try err.addNote("defined {d} more times", .{remaining}); + err.addNote("defined {d} more times", .{remaining}); } } @@ -4570,7 +4601,7 @@ pub fn addFileError( const diags = &self.base.comp.link_diags; var err = try diags.addErrorWithNotes(1); try err.addMsg(format, args); - try err.addNote("while parsing {}", .{self.file(file_index).?.fmtPath()}); + err.addNote("while parsing {}", .{self.file(file_index).?.fmtPath()}); } pub fn failFile( @@ -5184,6 +5215,30 @@ pub fn stringTableLookup(strtab: []const u8, off: u32) [:0]const u8 { return slice[0..mem.indexOfScalar(u8, slice, 0).? 
:0]; } +pub fn pwriteAll(elf_file: *Elf, bytes: []const u8, offset: u64) error{LinkFailure}!void { + const comp = elf_file.base.comp; + const diags = &comp.link_diags; + elf_file.base.file.?.pwriteAll(bytes, offset) catch |err| { + return diags.fail("failed to write: {s}", .{@errorName(err)}); + }; +} + +pub fn setEndPos(elf_file: *Elf, length: u64) error{LinkFailure}!void { + const comp = elf_file.base.comp; + const diags = &comp.link_diags; + elf_file.base.file.?.setEndPos(length) catch |err| { + return diags.fail("failed to set file end pos: {s}", .{@errorName(err)}); + }; +} + +pub fn cast(elf_file: *Elf, comptime T: type, x: anytype) error{LinkFailure}!T { + return std.math.cast(T, x) orelse { + const comp = elf_file.base.comp; + const diags = &comp.link_diags; + return diags.fail("encountered {d}, overflowing {d}-bit value", .{ x, @bitSizeOf(T) }); + }; +} + const std = @import("std"); const build_options = @import("build_options"); const builtin = @import("builtin"); diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig index d34dd6bb63e4..f0eb0dce3f22 100644 --- a/src/link/Elf/Atom.zig +++ b/src/link/Elf/Atom.zig @@ -523,7 +523,7 @@ fn reportUnhandledRelocError(self: Atom, rel: elf.Elf64_Rela, elf_file: *Elf) Re relocation.fmtRelocType(rel.r_type(), elf_file.getTarget().cpu.arch), rel.r_offset, }); - try err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) }); + err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) }); return error.RelocFailure; } @@ -539,7 +539,7 @@ fn reportTextRelocError( rel.r_offset, symbol.name(elf_file), }); - try err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) }); + err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) }); return error.RelocFailure; } @@ -555,8 +555,8 @@ fn reportPicError( rel.r_offset, symbol.name(elf_file), }); - try err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) }); - try err.addNote("recompile with -fPIC", .{}); + err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) }); + err.addNote("recompile with -fPIC", .{}); return error.RelocFailure; } @@ -572,8 +572,8 @@ fn reportNoPicError( rel.r_offset, symbol.name(elf_file), }); - try err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) }); - try err.addNote("recompile with -fno-PIC", .{}); + err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) }); + err.addNote("recompile with -fno-PIC", .{}); return error.RelocFailure; } @@ -1187,7 +1187,7 @@ const x86_64 = struct { x86_64.relaxGotPcTlsDesc(code[r_offset - 3 ..]) catch { var err = try diags.addErrorWithNotes(1); try err.addMsg("could not relax {s}", .{@tagName(r_type)}); - try err.addNote("in {}:{s} at offset 0x{x}", .{ + err.addNote("in {}:{s} at offset 0x{x}", .{ atom.file(elf_file).?.fmtPath(), atom.name(elf_file), rel.r_offset, @@ -1332,7 +1332,7 @@ const x86_64 = struct { relocation.fmtRelocType(rels[0].r_type(), .x86_64), relocation.fmtRelocType(rels[1].r_type(), .x86_64), }); - try err.addNote("in {}:{s} at offset 0x{x}", .{ + err.addNote("in {}:{s} at offset 0x{x}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file), rels[0].r_offset, @@ -1388,7 +1388,7 @@ const x86_64 = struct { relocation.fmtRelocType(rels[0].r_type(), .x86_64), relocation.fmtRelocType(rels[1].r_type(), .x86_64), }); - try err.addNote("in {}:{s} at offset 0x{x}", .{ + err.addNote("in {}:{s} at offset 0x{x}", .{ 
self.file(elf_file).?.fmtPath(), self.name(elf_file), rels[0].r_offset, @@ -1485,7 +1485,7 @@ const x86_64 = struct { relocation.fmtRelocType(rels[0].r_type(), .x86_64), relocation.fmtRelocType(rels[1].r_type(), .x86_64), }); - try err.addNote("in {}:{s} at offset 0x{x}", .{ + err.addNote("in {}:{s} at offset 0x{x}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file), rels[0].r_offset, @@ -1672,7 +1672,7 @@ const aarch64 = struct { // TODO: relax var err = try diags.addErrorWithNotes(1); try err.addMsg("TODO: relax ADR_GOT_PAGE", .{}); - try err.addNote("in {}:{s} at offset 0x{x}", .{ + err.addNote("in {}:{s} at offset 0x{x}", .{ atom.file(elf_file).?.fmtPath(), atom.name(elf_file), r_offset, @@ -1959,7 +1959,7 @@ const riscv = struct { // TODO: implement searching forward var err = try diags.addErrorWithNotes(1); try err.addMsg("TODO: find HI20 paired reloc scanning forward", .{}); - try err.addNote("in {}:{s} at offset 0x{x}", .{ + err.addNote("in {}:{s} at offset 0x{x}", .{ atom.file(elf_file).?.fmtPath(), atom.name(elf_file), rel.r_offset, diff --git a/src/link/Elf/AtomList.zig b/src/link/Elf/AtomList.zig index bab4726f241b..f8d57d04a108 100644 --- a/src/link/Elf/AtomList.zig +++ b/src/link/Elf/AtomList.zig @@ -58,7 +58,7 @@ pub fn allocate(list: *AtomList, elf_file: *Elf) !void { if (expand_section) last_atom_ref.* = list.lastAtom(elf_file).ref(); shdr.sh_addralign = @max(shdr.sh_addralign, list.alignment.toByteUnits().?); - // FIXME:JK this currently ignores Thunks as valid chunks. + // This currently ignores Thunks as valid chunks. { var idx: usize = 0; while (idx < list.atoms.keys().len) : (idx += 1) { @@ -78,7 +78,8 @@ pub fn allocate(list: *AtomList, elf_file: *Elf) !void { placement_atom.next_atom_ref = list.firstAtom(elf_file).ref(); } - // FIXME:JK if we had a link from Atom to parent AtomList we would not need to update Atom's value or osec index + // If we had a link from Atom to parent AtomList we would not need to + // update Atom's value or osec index. for (list.atoms.keys()) |ref| { const atom_ptr = elf_file.atom(ref).?; atom_ptr.output_section_index = list.output_section_index; diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index 65a62ff1a6b5..d6076a5558d7 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -797,7 +797,7 @@ pub fn initInputMergeSections(self: *Object, elf_file: *Elf) !void { if (!isNull(data[end .. 
end + sh_entsize])) { var err = try diags.addErrorWithNotes(1); try err.addMsg("string not null terminated", .{}); - try err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) }); + err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) }); return error.LinkFailure; } end += sh_entsize; @@ -812,7 +812,7 @@ pub fn initInputMergeSections(self: *Object, elf_file: *Elf) !void { if (shdr.sh_size % sh_entsize != 0) { var err = try diags.addErrorWithNotes(1); try err.addMsg("size not a multiple of sh_entsize", .{}); - try err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) }); + err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) }); return error.LinkFailure; } @@ -889,8 +889,8 @@ pub fn resolveMergeSubsections(self: *Object, elf_file: *Elf) error{ const res = imsec.findSubsection(@intCast(esym.st_value)) orelse { var err = try diags.addErrorWithNotes(2); try err.addMsg("invalid symbol value: {x}", .{esym.st_value}); - try err.addNote("for symbol {s}", .{sym.name(elf_file)}); - try err.addNote("in {}", .{self.fmtPath()}); + err.addNote("for symbol {s}", .{sym.name(elf_file)}); + err.addNote("in {}", .{self.fmtPath()}); return error.LinkFailure; }; @@ -915,7 +915,7 @@ pub fn resolveMergeSubsections(self: *Object, elf_file: *Elf) error{ const res = imsec.findSubsection(@intCast(@as(i64, @intCast(esym.st_value)) + rel.r_addend)) orelse { var err = try diags.addErrorWithNotes(1); try err.addMsg("invalid relocation at offset 0x{x}", .{rel.r_offset}); - try err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) }); + err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) }); return error.LinkFailure; }; diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 73ea6288643c..aac29d8a4f57 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -278,8 +278,8 @@ pub fn flush(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !void { .{ .kind = .code, .ty = .anyerror_type }, metadata.text_symbol_index, ) catch |err| return switch (err) { - error.CodegenFail => error.FlushFailure, - else => |e| e, + error.CodegenFail => error.LinkFailure, + else => |e| return e, }; if (metadata.rodata_state != .unused) self.updateLazySymbol( elf_file, @@ -287,8 +287,8 @@ pub fn flush(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !void { .{ .kind = .const_data, .ty = .anyerror_type }, metadata.rodata_symbol_index, ) catch |err| return switch (err) { - error.CodegenFail => error.FlushFailure, - else => |e| e, + error.CodegenFail => error.LinkFailure, + else => |e| return e, }; } for (self.lazy_syms.values()) |*metadata| { @@ -933,6 +933,7 @@ pub fn getNavVAddr( const this_sym = self.symbol(this_sym_index); const vaddr = this_sym.address(.{}, elf_file); switch (reloc_info.parent) { + .none => unreachable, .atom_index => |atom_index| { const parent_atom = self.symbol(atom_index).atom(elf_file).?; const r_type = relocation.encode(.abs, elf_file.getTarget().cpu.arch); @@ -965,6 +966,7 @@ pub fn getUavVAddr( const sym = self.symbol(sym_index); const vaddr = sym.address(.{}, elf_file); switch (reloc_info.parent) { + .none => unreachable, .atom_index => |atom_index| { const parent_atom = self.symbol(atom_index).atom(elf_file).?; const r_type = relocation.encode(.abs, elf_file.getTarget().cpu.arch); @@ -1261,7 +1263,7 @@ fn updateNavCode( shdr_index: u32, code: []const u8, stt_bits: u8, -) !void { +) link.File.UpdateNavError!void { const zcu = pt.zcu; const gpa = zcu.gpa; const ip = 
&zcu.intern_pool; @@ -1298,7 +1300,9 @@ fn updateNavCode( const capacity = atom_ptr.capacity(elf_file); const need_realloc = code.len > capacity or !required_alignment.check(@intCast(atom_ptr.value)); if (need_realloc) { - try self.allocateAtom(atom_ptr, true, elf_file); + self.allocateAtom(atom_ptr, true, elf_file) catch |err| + return elf_file.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(err)}); + log.debug("growing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), old_vaddr, atom_ptr.value }); if (old_vaddr != atom_ptr.value) { sym.value = 0; @@ -1308,7 +1312,9 @@ fn updateNavCode( // TODO shrink section size } } else { - try self.allocateAtom(atom_ptr, true, elf_file); + self.allocateAtom(atom_ptr, true, elf_file) catch |err| + return elf_file.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(err)}); + errdefer self.freeNavMetadata(elf_file, sym_index); sym.value = 0; esym.st_value = 0; @@ -1333,14 +1339,15 @@ fn updateNavCode( else => |errno| log.warn("process_vm_writev failure: {s}", .{@tagName(errno)}), } }, - else => return error.HotSwapUnavailableOnHostOperatingSystem, + else => return elf_file.base.cgFail(nav_index, "ELF hot swap unavailable on host operating system '{s}'", .{@tagName(builtin.os.tag)}), } } const shdr = elf_file.sections.items(.shdr)[shdr_index]; if (shdr.sh_type != elf.SHT_NOBITS) { const file_offset = atom_ptr.offset(elf_file); - try elf_file.base.file.?.pwriteAll(code, file_offset); + elf_file.base.file.?.pwriteAll(code, file_offset) catch |err| + return elf_file.base.cgFail(nav_index, "failed to write to output file: {s}", .{@errorName(err)}); log.debug("writing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), file_offset, file_offset + code.len }); } } @@ -1353,7 +1360,7 @@ fn updateTlv( sym_index: Symbol.Index, shndx: u32, code: []const u8, -) !void { +) link.File.UpdateNavError!void { const zcu = pt.zcu; const ip = &zcu.intern_pool; const gpa = zcu.gpa; @@ -1383,7 +1390,8 @@ fn updateTlv( const gop = try self.tls_variables.getOrPut(gpa, atom_ptr.atom_index); assert(!gop.found_existing); // TODO incremental updates - try self.allocateAtom(atom_ptr, true, elf_file); + self.allocateAtom(atom_ptr, true, elf_file) catch |err| + return elf_file.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(err)}); sym.value = 0; esym.st_value = 0; @@ -1392,7 +1400,8 @@ fn updateTlv( const shdr = elf_file.sections.items(.shdr)[shndx]; if (shdr.sh_type != elf.SHT_NOBITS) { const file_offset = atom_ptr.offset(elf_file); - try elf_file.base.file.?.pwriteAll(code, file_offset); + elf_file.base.file.?.pwriteAll(code, file_offset) catch |err| + return elf_file.base.cgFail(nav_index, "failed to write to output file: {s}", .{@errorName(err)}); log.debug("writing TLV {s} from 0x{x} to 0x{x}", .{ atom_ptr.name(elf_file), file_offset, @@ -1408,7 +1417,7 @@ pub fn updateFunc( func_index: InternPool.Index, air: Air, liveness: Liveness, -) !void { +) link.File.UpdateNavError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1422,13 +1431,13 @@ pub fn updateFunc( const sym_index = try self.getOrCreateMetadataForNav(zcu, func.owner_nav); self.atom(self.symbol(sym_index).ref.index).?.freeRelocs(self); - var code_buffer = std.ArrayList(u8).init(gpa); - defer code_buffer.deinit(); + var code_buffer: std.ArrayListUnmanaged(u8) = .empty; + defer code_buffer.deinit(gpa); var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, func.owner_nav, sym_index) else null; defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit(); - const 
res = try codegen.generateFunction( + try codegen.generateFunction( &elf_file.base, pt, zcu.navSrcLoc(func.owner_nav), @@ -1438,14 +1447,7 @@ pub fn updateFunc( &code_buffer, if (debug_wip_nav) |*dn| .{ .dwarf = dn } else .none, ); - - const code = switch (res) { - .ok => code_buffer.items, - .fail => |em| { - try zcu.failed_codegen.put(gpa, func.owner_nav, em); - return; - }, - }; + const code = code_buffer.items; const shndx = try self.getNavShdrIndex(elf_file, zcu, func.owner_nav, sym_index, code); log.debug("setting shdr({x},{s}) for {}", .{ @@ -1463,7 +1465,8 @@ pub fn updateFunc( break :blk .{ atom_ptr.value, atom_ptr.alignment }; }; - if (debug_wip_nav) |*wip_nav| try self.dwarf.?.finishWipNavFunc(pt, func.owner_nav, code.len, wip_nav); + if (debug_wip_nav) |*wip_nav| self.dwarf.?.finishWipNavFunc(pt, func.owner_nav, code.len, wip_nav) catch |err| + return elf_file.base.cgFail(func.owner_nav, "failed to finish dwarf function: {s}", .{@errorName(err)}); // Exports will be updated by `Zcu.processExports` after the update. @@ -1511,7 +1514,8 @@ pub fn updateFunc( target_sym.flags.has_trampoline = true; } const target_sym = self.symbol(sym_index); - try writeTrampoline(self.symbol(target_sym.extra(elf_file).trampoline).*, target_sym.*, elf_file); + writeTrampoline(self.symbol(target_sym.extra(elf_file).trampoline).*, target_sym.*, elf_file) catch |err| + return elf_file.base.cgFail(func.owner_nav, "failed to write trampoline: {s}", .{@errorName(err)}); } } @@ -1547,7 +1551,11 @@ pub fn updateNav( if (self.dwarf) |*dwarf| dwarf: { var debug_wip_nav = try dwarf.initWipNav(pt, nav_index, sym_index) orelse break :dwarf; defer debug_wip_nav.deinit(); - try dwarf.finishWipNav(pt, nav_index, &debug_wip_nav); + dwarf.finishWipNav(pt, nav_index, &debug_wip_nav) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.Overflow => return error.Overflow, + else => |e| return elf_file.base.cgFail(nav_index, "failed to finish dwarf nav: {s}", .{@errorName(e)}), + }; } return; }, @@ -1558,13 +1566,13 @@ pub fn updateNav( const sym_index = try self.getOrCreateMetadataForNav(zcu, nav_index); self.symbol(sym_index).atom(elf_file).?.freeRelocs(self); - var code_buffer = std.ArrayList(u8).init(zcu.gpa); - defer code_buffer.deinit(); + var code_buffer: std.ArrayListUnmanaged(u8) = .empty; + defer code_buffer.deinit(zcu.gpa); var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, nav_index, sym_index) else null; defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit(); - const res = try codegen.generateSymbol( + try codegen.generateSymbol( &elf_file.base, pt, zcu.navSrcLoc(nav_index), @@ -1572,14 +1580,7 @@ pub fn updateNav( &code_buffer, .{ .atom_index = sym_index }, ); - - const code = switch (res) { - .ok => code_buffer.items, - .fail => |em| { - try zcu.failed_codegen.put(zcu.gpa, nav_index, em); - return; - }, - }; + const code = code_buffer.items; const shndx = try self.getNavShdrIndex(elf_file, zcu, nav_index, sym_index, code); log.debug("setting shdr({x},{s}) for {}", .{ @@ -1592,7 +1593,11 @@ pub fn updateNav( else try self.updateNavCode(elf_file, pt, nav_index, sym_index, shndx, code, elf.STT_OBJECT); - if (debug_wip_nav) |*wip_nav| try self.dwarf.?.finishWipNav(pt, nav_index, wip_nav); + if (debug_wip_nav) |*wip_nav| self.dwarf.?.finishWipNav(pt, nav_index, wip_nav) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.Overflow => return error.Overflow, + else => |e| return elf_file.base.cgFail(nav_index, "failed to finish dwarf 
nav: {s}", .{@errorName(e)}), + }; } else if (self.dwarf) |*dwarf| try dwarf.updateComptimeNav(pt, nav_index); // Exports will be updated by `Zcu.processExports` after the update. @@ -1602,7 +1607,7 @@ pub fn updateContainerType( self: *ZigObject, pt: Zcu.PerThread, ty: InternPool.Index, -) link.File.UpdateNavError!void { +) !void { const tracy = trace(@src()); defer tracy.end(); @@ -1620,8 +1625,8 @@ fn updateLazySymbol( const gpa = zcu.gpa; var required_alignment: InternPool.Alignment = .none; - var code_buffer = std.ArrayList(u8).init(gpa); - defer code_buffer.deinit(); + var code_buffer: std.ArrayListUnmanaged(u8) = .empty; + defer code_buffer.deinit(gpa); const name_str_index = blk: { const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{ @@ -1633,7 +1638,7 @@ fn updateLazySymbol( }; const src = Type.fromInterned(sym.ty).srcLocOrNull(zcu) orelse Zcu.LazySrcLoc.unneeded; - const res = try codegen.generateLazySymbol( + try codegen.generateLazySymbol( &elf_file.base, pt, src, @@ -1643,13 +1648,7 @@ fn updateLazySymbol( .none, .{ .atom_index = symbol_index }, ); - const code = switch (res) { - .ok => code_buffer.items, - .fail => |em| { - log.err("{s}", .{em.msg}); - return error.CodegenFail; - }, - }; + const code = code_buffer.items; const output_section_index = switch (sym.kind) { .code => if (self.text_index) |sym_index| @@ -1696,7 +1695,7 @@ fn updateLazySymbol( local_sym.value = 0; local_esym.st_value = 0; - try elf_file.base.file.?.pwriteAll(code, atom_ptr.offset(elf_file)); + try elf_file.pwriteAll(code, atom_ptr.offset(elf_file)); } const LowerConstResult = union(enum) { @@ -1716,13 +1715,13 @@ fn lowerConst( ) !LowerConstResult { const gpa = pt.zcu.gpa; - var code_buffer = std.ArrayList(u8).init(gpa); - defer code_buffer.deinit(); + var code_buffer: std.ArrayListUnmanaged(u8) = .empty; + defer code_buffer.deinit(gpa); const name_off = try self.addString(gpa, name); const sym_index = try self.newSymbolWithAtom(gpa, name_off); - const res = try codegen.generateSymbol( + try codegen.generateSymbol( &elf_file.base, pt, src_loc, @@ -1730,10 +1729,7 @@ fn lowerConst( &code_buffer, .{ .atom_index = sym_index }, ); - const code = switch (res) { - .ok => code_buffer.items, - .fail => |em| return .{ .fail = em }, - }; + const code = code_buffer.items; const local_sym = self.symbol(sym_index); const local_esym = &self.symtab.items(.elf_sym)[local_sym.esym_index]; @@ -1748,7 +1744,7 @@ fn lowerConst( try self.allocateAtom(atom_ptr, true, elf_file); errdefer self.freeNavMetadata(elf_file, sym_index); - try elf_file.base.file.?.pwriteAll(code, atom_ptr.offset(elf_file)); + try elf_file.pwriteAll(code, atom_ptr.offset(elf_file)); return .{ .ok = sym_index }; } @@ -1758,7 +1754,7 @@ pub fn updateExports( elf_file: *Elf, pt: Zcu.PerThread, exported: Zcu.Exported, - export_indices: []const u32, + export_indices: []const Zcu.Export.Index, ) link.File.UpdateExportsError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1771,7 +1767,7 @@ pub fn updateExports( break :blk self.navs.getPtr(nav).?; }, .uav => |uav| self.uavs.getPtr(uav) orelse blk: { - const first_exp = zcu.all_exports.items[export_indices[0]]; + const first_exp = export_indices[0].ptr(zcu); const res = try self.lowerUav(elf_file, pt, uav, .none, first_exp.src); switch (res) { .mcv => {}, @@ -1792,7 +1788,7 @@ pub fn updateExports( const esym_shndx = self.symtab.items(.shndx)[esym_index]; for (export_indices) |export_idx| { - const exp = zcu.all_exports.items[export_idx]; + const exp = export_idx.ptr(zcu); if 
(exp.opts.section.unwrap()) |section_name| { if (!section_name.eqlSlice(".text", &zcu.intern_pool)) { try zcu.failed_exports.ensureUnusedCapacity(zcu.gpa, 1); @@ -1849,7 +1845,13 @@ pub fn updateExports( pub fn updateLineNumber(self: *ZigObject, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) !void { if (self.dwarf) |*dwarf| { - try dwarf.updateLineNumber(pt.zcu, ti_id); + const comp = dwarf.bin_file.comp; + const diags = &comp.link_diags; + dwarf.updateLineNumber(pt.zcu, ti_id) catch |err| switch (err) { + error.Overflow => return error.Overflow, + error.OutOfMemory => return error.OutOfMemory, + else => |e| return diags.fail("failed to update dwarf line numbers: {s}", .{@errorName(e)}), + }; } } @@ -1935,8 +1937,8 @@ pub fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, requires_padding: bool, e const shdr = &slice.items(.shdr)[atom_ptr.output_section_index]; const last_atom_ref = &slice.items(.last_atom)[atom_ptr.output_section_index]; - // FIXME:JK this only works if this atom is the only atom in the output section - // In every other case, we need to redo the prev/next links + // This only works if this atom is the only atom in the output section. In + // every other case, we need to redo the prev/next links. if (last_atom_ref.eql(atom_ptr.ref())) last_atom_ref.* = .{}; const alloc_res = try elf_file.allocateChunk(.{ diff --git a/src/link/Elf/eh_frame.zig b/src/link/Elf/eh_frame.zig index 4df05d49d3d5..bf46fb02621d 100644 --- a/src/link/Elf/eh_frame.zig +++ b/src/link/Elf/eh_frame.zig @@ -611,7 +611,7 @@ fn reportInvalidReloc(rec: anytype, elf_file: *Elf, rel: elf.Elf64_Rela) !void { relocation.fmtRelocType(rel.r_type(), elf_file.getTarget().cpu.arch), rel.r_offset, }); - try err.addNote("in {}:.eh_frame", .{elf_file.file(rec.file_index).?.fmtPath()}); + err.addNote("in {}:.eh_frame", .{elf_file.file(rec.file_index).?.fmtPath()}); return error.RelocFailure; } diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig index 3035c33790fa..e8f9414da6ae 100644 --- a/src/link/Elf/relocatable.zig +++ b/src/link/Elf/relocatable.zig @@ -1,8 +1,8 @@ -pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) link.File.FlushError!void { +pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) !void { const gpa = comp.gpa; const diags = &comp.link_diags; - if (diags.hasErrors()) return error.FlushFailure; + if (diags.hasErrors()) return error.LinkFailure; // First, we flush relocatable object file generated with our backends. if (elf_file.zigObjectPtr()) |zig_object| { @@ -127,13 +127,13 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) link.File.FlushError!v try elf_file.base.file.?.setEndPos(total_size); try elf_file.base.file.?.pwriteAll(buffer.items, 0); - if (diags.hasErrors()) return error.FlushFailure; + if (diags.hasErrors()) return error.LinkFailure; } -pub fn flushObject(elf_file: *Elf, comp: *Compilation) link.File.FlushError!void { +pub fn flushObject(elf_file: *Elf, comp: *Compilation) !void { const diags = &comp.link_diags; - if (diags.hasErrors()) return error.FlushFailure; + if (diags.hasErrors()) return error.LinkFailure; // Now, we are ready to resolve the symbols across all input files. 
// We will first resolve the files in the ZigObject, next in the parsed @@ -179,7 +179,7 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation) link.File.FlushError!void try elf_file.writeShdrTable(); try elf_file.writeElfHeader(); - if (diags.hasErrors()) return error.FlushFailure; + if (diags.hasErrors()) return error.LinkFailure; } fn claimUnresolved(elf_file: *Elf) void { diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 3a3710aed88b..e36fd4e80ab6 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -434,7 +434,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n // libc/libSystem dep self.resolveLibSystem(arena, comp, &system_libs) catch |err| switch (err) { error.MissingLibSystem => {}, // already reported - else => |e| return e, // TODO: convert into an error + else => |e| return diags.fail("failed to resolve libSystem: {s}", .{@errorName(e)}), }; for (comp.link_inputs) |link_input| switch (link_input) { @@ -481,7 +481,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n } }; - if (diags.hasErrors()) return error.FlushFailure; + if (diags.hasErrors()) return error.LinkFailure; { const index = @as(File.Index, @intCast(try self.files.addOne(gpa))); @@ -494,14 +494,17 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n try self.resolveSymbols(); try self.convertTentativeDefsAndResolveSpecialSymbols(); - try self.dedupLiterals(); + self.dedupLiterals() catch |err| switch (err) { + error.LinkFailure => return error.LinkFailure, + else => |e| return diags.fail("failed to deduplicate literals: {s}", .{@errorName(e)}), + }; if (self.base.gc_sections) { try dead_strip.gcAtoms(self); } self.checkDuplicates() catch |err| switch (err) { - error.HasDuplicates => return error.FlushFailure, + error.HasDuplicates => return error.LinkFailure, else => |e| return diags.fail("failed to check for duplicate symbol definitions: {s}", .{@errorName(e)}), }; @@ -516,7 +519,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n self.claimUnresolved(); self.scanRelocs() catch |err| switch (err) { - error.HasUndefinedSymbols => return error.FlushFailure, + error.HasUndefinedSymbols => return error.LinkFailure, else => |e| return diags.fail("failed to scan relocations: {s}", .{@errorName(e)}), }; @@ -529,7 +532,10 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n try self.generateUnwindInfo(); try self.initSegments(); - try self.allocateSections(); + self.allocateSections() catch |err| switch (err) { + error.LinkFailure => return error.LinkFailure, + else => |e| return diags.fail("failed to allocate sections: {s}", .{@errorName(e)}), + }; self.allocateSegments(); self.allocateSyntheticSymbols(); @@ -543,7 +549,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n if (self.getZigObject()) |zo| { zo.resolveRelocs(self) catch |err| switch (err) { - error.ResolveFailed => return error.FlushFailure, + error.ResolveFailed => return error.LinkFailure, else => |e| return e, }; } @@ -551,7 +557,11 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n try self.writeSectionsToFile(); try self.allocateLinkeditSegment(); - try self.writeLinkeditSectionsToFile(); + self.writeLinkeditSectionsToFile() catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.LinkFailure => return error.LinkFailure, + else => |e| return diags.fail("failed to write linkedit 
sections to file: {s}", .{@errorName(e)}), + }; var codesig: ?CodeSignature = if (self.requiresCodeSig()) blk: { // Preallocate space for the code signature. @@ -561,7 +571,8 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n // where the code signature goes into. var codesig = CodeSignature.init(self.getPageSize()); codesig.code_directory.ident = fs.path.basename(self.base.emit.sub_path); - if (self.entitlements) |path| try codesig.addEntitlements(gpa, path); + if (self.entitlements) |path| codesig.addEntitlements(gpa, path) catch |err| + return diags.fail("failed to add entitlements from {s}: {s}", .{ path, @errorName(err) }); try self.writeCodeSignaturePadding(&codesig); break :blk codesig; } else null; @@ -573,15 +584,34 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n self.getPageSize(), ); - const ncmds, const sizeofcmds, const uuid_cmd_offset = try self.writeLoadCommands(); + const ncmds, const sizeofcmds, const uuid_cmd_offset = self.writeLoadCommands() catch |err| switch (err) { + error.NoSpaceLeft => unreachable, + error.OutOfMemory => return error.OutOfMemory, + error.LinkFailure => return error.LinkFailure, + }; try self.writeHeader(ncmds, sizeofcmds); - try self.writeUuid(uuid_cmd_offset, self.requiresCodeSig()); - if (self.getDebugSymbols()) |dsym| try dsym.flushModule(self); + self.writeUuid(uuid_cmd_offset, self.requiresCodeSig()) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.LinkFailure => return error.LinkFailure, + else => |e| return diags.fail("failed to calculate and write uuid: {s}", .{@errorName(e)}), + }; + if (self.getDebugSymbols()) |dsym| dsym.flushModule(self) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + else => |e| return diags.fail("failed to get debug symbols: {s}", .{@errorName(e)}), + }; + // Code signing always comes last. 
if (codesig) |*csig| { - try self.writeCodeSignature(csig); // code signing always comes last + self.writeCodeSignature(csig) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.LinkFailure => return error.LinkFailure, + else => |e| return diags.fail("failed to write code signature: {s}", .{@errorName(e)}), + }; const emit = self.base.emit; - try invalidateKernelCache(emit.root_dir.handle, emit.sub_path); + invalidateKernelCache(emit.root_dir.handle, emit.sub_path) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + else => |e| return diags.fail("failed to invalidate kernel cache: {s}", .{@errorName(e)}), + }; } } @@ -1545,21 +1575,21 @@ fn reportUndefs(self: *MachO) !void { try err.addMsg("undefined symbol: {s}", .{undef_sym.getName(self)}); switch (notes) { - .force_undefined => try err.addNote("referenced with linker flag -u", .{}), - .entry => try err.addNote("referenced with linker flag -e", .{}), - .dyld_stub_binder, .objc_msgsend => try err.addNote("referenced implicitly", .{}), + .force_undefined => err.addNote("referenced with linker flag -u", .{}), + .entry => err.addNote("referenced with linker flag -e", .{}), + .dyld_stub_binder, .objc_msgsend => err.addNote("referenced implicitly", .{}), .refs => |refs| { var inote: usize = 0; while (inote < @min(refs.items.len, max_notes)) : (inote += 1) { const ref = refs.items[inote]; const file = self.getFile(ref.file).?; const atom = ref.getAtom(self).?; - try err.addNote("referenced by {}:{s}", .{ file.fmtPath(), atom.getName(self) }); + err.addNote("referenced by {}:{s}", .{ file.fmtPath(), atom.getName(self) }); } if (refs.items.len > max_notes) { const remaining = refs.items.len - max_notes; - try err.addNote("referenced {d} more times", .{remaining}); + err.addNote("referenced {d} more times", .{remaining}); } }, } @@ -2171,7 +2201,7 @@ fn allocateSections(self: *MachO) !void { fileoff = mem.alignForward(u32, fileoff, page_size); } - const alignment = try math.powi(u32, 2, header.@"align"); + const alignment = try self.alignPow(header.@"align"); vmaddr = mem.alignForward(u64, vmaddr, alignment); header.addr = vmaddr; @@ -2327,7 +2357,7 @@ fn allocateLinkeditSegment(self: *MachO) !void { seg.vmaddr = mem.alignForward(u64, vmaddr, page_size); seg.fileoff = mem.alignForward(u64, fileoff, page_size); - var off = math.cast(u32, seg.fileoff) orelse return error.Overflow; + var off = try self.cast(u32, seg.fileoff); // DYLD_INFO_ONLY { const cmd = &self.dyld_info_cmd; @@ -2392,7 +2422,7 @@ fn resizeSections(self: *MachO) !void { if (header.isZerofill()) continue; if (self.isZigSection(@intCast(n_sect))) continue; // TODO this is horrible const cpu_arch = self.getTarget().cpu.arch; - const size = math.cast(usize, header.size) orelse return error.Overflow; + const size = try self.cast(usize, header.size); try out.resize(self.base.comp.gpa, size); const padding_byte: u8 = if (header.isCode() and cpu_arch == .x86_64) 0xcc else 0; @memset(out.items, padding_byte); @@ -2489,7 +2519,7 @@ fn writeThunkWorker(self: *MachO, thunk: Thunk) void { const doWork = struct { fn doWork(th: Thunk, buffer: []u8, macho_file: *MachO) !void { - const off = math.cast(usize, th.value) orelse return error.Overflow; + const off = try macho_file.cast(usize, th.value); const size = th.size(); var stream = std.io.fixedBufferStream(buffer[off..][0..size]); try th.write(macho_file, stream.writer()); @@ -2601,7 +2631,7 @@ fn writeSectionsToFile(self: *MachO) !void { const slice = self.sections.slice(); for 
(slice.items(.header), slice.items(.out)) |header, out| { - try self.base.file.?.pwriteAll(out.items, header.offset); + try self.pwriteAll(out.items, header.offset); } } @@ -2644,7 +2674,7 @@ fn writeDyldInfo(self: *MachO) !void { try self.lazy_bind_section.write(writer); try stream.seekTo(cmd.export_off - base_off); try self.export_trie.write(writer); - try self.base.file.?.pwriteAll(buffer, cmd.rebase_off); + try self.pwriteAll(buffer, cmd.rebase_off); } pub fn writeDataInCode(self: *MachO) !void { @@ -2655,7 +2685,7 @@ pub fn writeDataInCode(self: *MachO) !void { var buffer = try std.ArrayList(u8).initCapacity(gpa, self.data_in_code.size()); defer buffer.deinit(); try self.data_in_code.write(self, buffer.writer()); - try self.base.file.?.pwriteAll(buffer.items, cmd.dataoff); + try self.pwriteAll(buffer.items, cmd.dataoff); } fn writeIndsymtab(self: *MachO) !void { @@ -2667,15 +2697,15 @@ fn writeIndsymtab(self: *MachO) !void { var buffer = try std.ArrayList(u8).initCapacity(gpa, needed_size); defer buffer.deinit(); try self.indsymtab.write(self, buffer.writer()); - try self.base.file.?.pwriteAll(buffer.items, cmd.indirectsymoff); + try self.pwriteAll(buffer.items, cmd.indirectsymoff); } pub fn writeSymtabToFile(self: *MachO) !void { const tracy = trace(@src()); defer tracy.end(); const cmd = self.symtab_cmd; - try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.symtab.items), cmd.symoff); - try self.base.file.?.pwriteAll(self.strtab.items, cmd.stroff); + try self.pwriteAll(mem.sliceAsBytes(self.symtab.items), cmd.symoff); + try self.pwriteAll(self.strtab.items, cmd.stroff); } fn writeUnwindInfo(self: *MachO) !void { @@ -2686,20 +2716,20 @@ fn writeUnwindInfo(self: *MachO) !void { if (self.eh_frame_sect_index) |index| { const header = self.sections.items(.header)[index]; - const size = math.cast(usize, header.size) orelse return error.Overflow; + const size = try self.cast(usize, header.size); const buffer = try gpa.alloc(u8, size); defer gpa.free(buffer); eh_frame.write(self, buffer); - try self.base.file.?.pwriteAll(buffer, header.offset); + try self.pwriteAll(buffer, header.offset); } if (self.unwind_info_sect_index) |index| { const header = self.sections.items(.header)[index]; - const size = math.cast(usize, header.size) orelse return error.Overflow; + const size = try self.cast(usize, header.size); const buffer = try gpa.alloc(u8, size); defer gpa.free(buffer); try self.unwind_info.write(self, buffer); - try self.base.file.?.pwriteAll(buffer, header.offset); + try self.pwriteAll(buffer, header.offset); } } @@ -2890,7 +2920,7 @@ fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } { assert(stream.pos == needed_size); - try self.base.file.?.pwriteAll(buffer, @sizeOf(macho.mach_header_64)); + try self.pwriteAll(buffer, @sizeOf(macho.mach_header_64)); return .{ ncmds, buffer.len, uuid_cmd_offset }; } @@ -2944,7 +2974,7 @@ fn writeHeader(self: *MachO, ncmds: usize, sizeofcmds: usize) !void { log.debug("writing Mach-O header {}", .{header}); - try self.base.file.?.pwriteAll(mem.asBytes(&header), 0); + try self.pwriteAll(mem.asBytes(&header), 0); } fn writeUuid(self: *MachO, uuid_cmd_offset: u64, has_codesig: bool) !void { @@ -2954,7 +2984,7 @@ fn writeUuid(self: *MachO, uuid_cmd_offset: u64, has_codesig: bool) !void { } else self.codesig_cmd.dataoff; try calcUuid(self.base.comp, self.base.file.?, file_size, &self.uuid_cmd.uuid); const offset = uuid_cmd_offset + @sizeOf(macho.load_command); - try self.base.file.?.pwriteAll(&self.uuid_cmd.uuid, offset); + try 
self.pwriteAll(&self.uuid_cmd.uuid, offset); } pub fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void { @@ -2968,7 +2998,7 @@ pub fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void { log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ offset, offset + needed_size }); // Pad out the space. We need to do this to calculate valid hashes for everything in the file // except for code signature data. - try self.base.file.?.pwriteAll(&[_]u8{0}, offset + needed_size - 1); + try self.pwriteAll(&[_]u8{0}, offset + needed_size - 1); self.codesig_cmd.dataoff = @as(u32, @intCast(offset)); self.codesig_cmd.datasize = @as(u32, @intCast(needed_size)); @@ -2995,10 +3025,16 @@ pub fn writeCodeSignature(self: *MachO, code_sig: *CodeSignature) !void { offset + buffer.items.len, }); - try self.base.file.?.pwriteAll(buffer.items, offset); + try self.pwriteAll(buffer.items, offset); } -pub fn updateFunc(self: *MachO, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { +pub fn updateFunc( + self: *MachO, + pt: Zcu.PerThread, + func_index: InternPool.Index, + air: Air, + liveness: Liveness, +) link.File.UpdateNavError!void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); } @@ -3006,7 +3042,7 @@ pub fn updateFunc(self: *MachO, pt: Zcu.PerThread, func_index: InternPool.Index, return self.getZigObject().?.updateFunc(self, pt, func_index, air, liveness); } -pub fn updateNav(self: *MachO, pt: Zcu.PerThread, nav: InternPool.Nav.Index) !void { +pub fn updateNav(self: *MachO, pt: Zcu.PerThread, nav: InternPool.Nav.Index) link.File.UpdateNavError!void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); } @@ -3023,7 +3059,7 @@ pub fn updateExports( self: *MachO, pt: Zcu.PerThread, exported: Zcu.Exported, - export_indices: []const u32, + export_indices: []const Zcu.Export.Index, ) link.File.UpdateExportsError!void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); @@ -3199,7 +3235,7 @@ fn copyRangeAllZeroOut(self: *MachO, old_offset: u64, new_offset: u64, size: u64 const gpa = self.base.comp.gpa; try self.copyRangeAll(old_offset, new_offset, size); const size_u = math.cast(usize, size) orelse return error.Overflow; - const zeroes = try gpa.alloc(u8, size_u); + const zeroes = try gpa.alloc(u8, size_u); // TODO no need to allocate here. 
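
In the ZigObject.zig hunks earlier in this diff, `codegen.generateFunction`, `generateSymbol`, and `generateLazySymbol` no longer return an ok/fail result union: they append into a caller-owned `std.ArrayListUnmanaged(u8)` and signal failure with `error.CodegenFail`, dropping the call-site switch that used to stash the `.fail` message into `zcu.failed_codegen`. A small sketch of that calling convention, with a hypothetical `generateCode` standing in for the real codegen entry points:

const std = @import("std");

/// Hypothetical generator following the convention used by these hunks:
/// append machine code into the caller's unmanaged buffer; on failure,
/// record a message elsewhere and return error.CodegenFail.
fn generateCode(gpa: std.mem.Allocator, buffer: *std.ArrayListUnmanaged(u8)) error{ OutOfMemory, CodegenFail }!void {
    try buffer.appendSlice(gpa, &.{ 0x55, 0x48, 0x89, 0xe5 }); // placeholder bytes
}

fn updateThing(gpa: std.mem.Allocator) !void {
    var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
    defer code_buffer.deinit(gpa);

    try generateCode(gpa, &code_buffer);
    // Previously the result was a tagged union and the .fail arm stashed an
    // ErrorMsg at the call site; now the failure path is just the error above.
    const code = code_buffer.items;
    _ = code;
}
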
defer gpa.free(zeroes); @memset(zeroes, 0); try self.base.file.?.pwriteAll(zeroes, old_offset); @@ -3306,10 +3342,9 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void { const allocSect = struct { fn allocSect(macho_file: *MachO, sect_id: u8, size: u64) !void { const sect = &macho_file.sections.items(.header)[sect_id]; - const alignment = try math.powi(u32, 2, sect.@"align"); + const alignment = try macho_file.alignPow(sect.@"align"); if (!sect.isZerofill()) { - sect.offset = math.cast(u32, try macho_file.findFreeSpace(size, alignment)) orelse - return error.Overflow; + sect.offset = try macho_file.cast(u32, try macho_file.findFreeSpace(size, alignment)); } sect.addr = macho_file.findFreeSpaceVirtual(size, alignment); sect.size = size; @@ -3441,8 +3476,8 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo seg_id, seg.segName(), }); - try err.addNote("TODO: emit relocations to memory locations in self-hosted backends", .{}); - try err.addNote("as a workaround, try increasing pre-allocated virtual memory of each segment", .{}); + err.addNote("TODO: emit relocations to memory locations in self-hosted backends", .{}); + err.addNote("as a workaround, try increasing pre-allocated virtual memory of each segment", .{}); } seg.vmsize = needed_size; @@ -3744,7 +3779,7 @@ pub fn reportParseError2( const diags = &self.base.comp.link_diags; var err = try diags.addErrorWithNotes(1); try err.addMsg(format, args); - try err.addNote("while parsing {}", .{self.getFile(file_index).?.fmtPath()}); + err.addNote("while parsing {}", .{self.getFile(file_index).?.fmtPath()}); } fn reportMissingDependencyError( @@ -3758,10 +3793,10 @@ fn reportMissingDependencyError( const diags = &self.base.comp.link_diags; var err = try diags.addErrorWithNotes(2 + checked_paths.len); try err.addMsg(format, args); - try err.addNote("while resolving {s}", .{path}); - try err.addNote("a dependency of {}", .{self.getFile(parent).?.fmtPath()}); + err.addNote("while resolving {s}", .{path}); + err.addNote("a dependency of {}", .{self.getFile(parent).?.fmtPath()}); for (checked_paths) |p| { - try err.addNote("tried {s}", .{p}); + err.addNote("tried {s}", .{p}); } } @@ -3775,8 +3810,8 @@ fn reportDependencyError( const diags = &self.base.comp.link_diags; var err = try diags.addErrorWithNotes(2); try err.addMsg(format, args); - try err.addNote("while parsing {s}", .{path}); - try err.addNote("a dependency of {}", .{self.getFile(parent).?.fmtPath()}); + err.addNote("while parsing {s}", .{path}); + err.addNote("a dependency of {}", .{self.getFile(parent).?.fmtPath()}); } fn reportDuplicates(self: *MachO) error{ HasDuplicates, OutOfMemory }!void { @@ -3806,17 +3841,17 @@ fn reportDuplicates(self: *MachO) error{ HasDuplicates, OutOfMemory }!void { var err = try diags.addErrorWithNotes(nnotes + 1); try err.addMsg("duplicate symbol definition: {s}", .{sym.getName(self)}); - try err.addNote("defined by {}", .{sym.getFile(self).?.fmtPath()}); + err.addNote("defined by {}", .{sym.getFile(self).?.fmtPath()}); var inote: usize = 0; while (inote < @min(notes.items.len, max_notes)) : (inote += 1) { const file = self.getFile(notes.items[inote]).?; - try err.addNote("defined by {}", .{file.fmtPath()}); + err.addNote("defined by {}", .{file.fmtPath()}); } if (notes.items.len > max_notes) { const remaining = notes.items.len - max_notes; - try err.addNote("defined {d} more times", .{remaining}); + err.addNote("defined {d} more times", .{remaining}); } } return error.HasDuplicates; @@ -5310,6 +5345,40 @@ fn 
isReachable(atom: *const Atom, rel: Relocation, macho_file: *MachO) bool { return true; } +pub fn pwriteAll(macho_file: *MachO, bytes: []const u8, offset: u64) error{LinkFailure}!void { + const comp = macho_file.base.comp; + const diags = &comp.link_diags; + macho_file.base.file.?.pwriteAll(bytes, offset) catch |err| { + return diags.fail("failed to write: {s}", .{@errorName(err)}); + }; +} + +pub fn setEndPos(macho_file: *MachO, length: u64) error{LinkFailure}!void { + const comp = macho_file.base.comp; + const diags = &comp.link_diags; + macho_file.base.file.?.setEndPos(length) catch |err| { + return diags.fail("failed to set file end pos: {s}", .{@errorName(err)}); + }; +} + +pub fn cast(macho_file: *MachO, comptime T: type, x: anytype) error{LinkFailure}!T { + return std.math.cast(T, x) orelse { + const comp = macho_file.base.comp; + const diags = &comp.link_diags; + return diags.fail("encountered {d}, overflowing {d}-bit value", .{ x, @bitSizeOf(T) }); + }; +} + +pub fn alignPow(macho_file: *MachO, x: u32) error{LinkFailure}!u32 { + const result, const ov = @shlWithOverflow(@as(u32, 1), try cast(macho_file, u5, x)); + if (ov != 0) { + const comp = macho_file.base.comp; + const diags = &comp.link_diags; + return diags.fail("alignment overflow", .{}); + } + return result; +} + /// Branch instruction has 26 bits immediate but is 4 byte aligned. const jump_bits = @bitSizeOf(i28); const max_distance = (1 << (jump_bits - 1)); diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig index f8bf9c37e782..ed554ffb35ee 100644 --- a/src/link/MachO/Atom.zig +++ b/src/link/MachO/Atom.zig @@ -909,8 +909,8 @@ const x86_64 = struct { rel.offset, rel.fmtPretty(.x86_64), }); - try err.addNote("expected .mov instruction but found .{s}", .{@tagName(x)}); - try err.addNote("while parsing {}", .{self.getFile(macho_file).fmtPath()}); + err.addNote("expected .mov instruction but found .{s}", .{@tagName(x)}); + err.addNote("while parsing {}", .{self.getFile(macho_file).fmtPath()}); return error.RelaxFailUnexpectedInstruction; }, } @@ -971,7 +971,7 @@ pub fn calcNumRelocs(self: Atom, macho_file: *MachO) u32 { } } -pub fn writeRelocs(self: Atom, macho_file: *MachO, code: []u8, buffer: []macho.relocation_info) !void { +pub fn writeRelocs(self: Atom, macho_file: *MachO, code: []u8, buffer: []macho.relocation_info) error{ LinkFailure, OutOfMemory }!void { const tracy = trace(@src()); defer tracy.end(); @@ -983,15 +983,15 @@ pub fn writeRelocs(self: Atom, macho_file: *MachO, code: []u8, buffer: []macho.r var i: usize = 0; for (relocs) |rel| { defer i += 1; - const rel_offset = math.cast(usize, rel.offset - self.off) orelse return error.Overflow; - const r_address: i32 = math.cast(i32, self.value + rel_offset) orelse return error.Overflow; + const rel_offset = try macho_file.cast(usize, rel.offset - self.off); + const r_address: i32 = try macho_file.cast(i32, self.value + rel_offset); assert(r_address >= 0); const r_symbolnum = r_symbolnum: { const r_symbolnum: u32 = switch (rel.tag) { .local => rel.getTargetAtom(self, macho_file).out_n_sect + 1, .@"extern" => rel.getTargetSymbol(self, macho_file).getOutputSymtabIndex(macho_file).?, }; - break :r_symbolnum math.cast(u24, r_symbolnum) orelse return error.Overflow; + break :r_symbolnum try macho_file.cast(u24, r_symbolnum); }; const r_extern = rel.tag == .@"extern"; var addend = rel.addend + rel.getRelocAddend(cpu_arch); @@ -1027,7 +1027,7 @@ pub fn writeRelocs(self: Atom, macho_file: *MachO, code: []u8, buffer: []macho.r } else if (addend > 0) { buffer[i] = .{ 
.r_address = r_address, - .r_symbolnum = @bitCast(math.cast(i24, addend) orelse return error.Overflow), + .r_symbolnum = @bitCast(try macho_file.cast(i24, addend)), .r_pcrel = 0, .r_length = 2, .r_extern = 0, diff --git a/src/link/MachO/InternalObject.zig b/src/link/MachO/InternalObject.zig index f41b1aa7ef16..2eb98378335f 100644 --- a/src/link/MachO/InternalObject.zig +++ b/src/link/MachO/InternalObject.zig @@ -414,10 +414,11 @@ pub fn resolveLiterals(self: *InternalObject, lp: *MachO.LiteralPool, macho_file const rel = relocs[0]; assert(rel.tag == .@"extern"); const target = rel.getTargetSymbol(atom.*, macho_file).getAtom(macho_file).?; - const target_size = std.math.cast(usize, target.size) orelse return error.Overflow; + const target_size = try macho_file.cast(usize, target.size); try buffer.ensureUnusedCapacity(target_size); buffer.resize(target_size) catch unreachable; - @memcpy(buffer.items, try self.getSectionData(target.n_sect)); + const section_data = try self.getSectionData(target.n_sect, macho_file); + @memcpy(buffer.items, section_data); const res = try lp.insert(gpa, header.type(), buffer.items); buffer.clearRetainingCapacity(); if (!res.found_existing) { @@ -607,10 +608,11 @@ pub fn writeAtoms(self: *InternalObject, macho_file: *MachO) !void { if (!atom.isAlive()) continue; const sect = atom.getInputSection(macho_file); if (sect.isZerofill()) continue; - const off = std.math.cast(usize, atom.value) orelse return error.Overflow; - const size = std.math.cast(usize, atom.size) orelse return error.Overflow; + const off = try macho_file.cast(usize, atom.value); + const size = try macho_file.cast(usize, atom.size); const buffer = macho_file.sections.items(.out)[atom.out_n_sect].items[off..][0..size]; - @memcpy(buffer, try self.getSectionData(atom.n_sect)); + const section_data = try self.getSectionData(atom.n_sect, macho_file); + @memcpy(buffer, section_data); try atom.resolveRelocs(macho_file, buffer); } } @@ -644,13 +646,13 @@ fn addSection(self: *InternalObject, allocator: Allocator, segname: []const u8, return n_sect; } -fn getSectionData(self: *const InternalObject, index: u32) error{Overflow}![]const u8 { +fn getSectionData(self: *const InternalObject, index: u32, macho_file: *MachO) error{LinkFailure}![]const u8 { const slice = self.sections.slice(); assert(index < slice.items(.header).len); const sect = slice.items(.header)[index]; const extra = slice.items(.extra)[index]; if (extra.is_objc_methname) { - const size = std.math.cast(usize, sect.size) orelse return error.Overflow; + const size = try macho_file.cast(usize, sect.size); return self.objc_methnames.items[sect.offset..][0..size]; } else if (extra.is_objc_selref) return &self.objc_selrefs diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig index 349ee99ca430..000f37403505 100644 --- a/src/link/MachO/Object.zig +++ b/src/link/MachO/Object.zig @@ -582,7 +582,7 @@ fn initPointerLiterals(self: *Object, allocator: Allocator, macho_file: *MachO) ); return error.MalformedObject; } - const num_ptrs = math.cast(usize, @divExact(sect.size, rec_size)) orelse return error.Overflow; + const num_ptrs = try macho_file.cast(usize, @divExact(sect.size, rec_size)); for (0..num_ptrs) |i| { const pos: u32 = @as(u32, @intCast(i)) * rec_size; @@ -650,8 +650,8 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO for (subs.items) |sub| { const atom = self.getAtom(sub.atom).?; - const atom_off = math.cast(usize, atom.off) orelse return error.Overflow; - const atom_size = math.cast(usize, 
atom.size) orelse return error.Overflow; + const atom_off = try macho_file.cast(usize, atom.off); + const atom_size = try macho_file.cast(usize, atom.size); const atom_data = data[atom_off..][0..atom_size]; const res = try lp.insert(gpa, header.type(), atom_data); if (!res.found_existing) { @@ -674,8 +674,8 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO .local => rel.getTargetAtom(atom.*, macho_file), .@"extern" => rel.getTargetSymbol(atom.*, macho_file).getAtom(macho_file).?, }; - const addend = math.cast(u32, rel.addend) orelse return error.Overflow; - const target_size = math.cast(usize, target.size) orelse return error.Overflow; + const addend = try macho_file.cast(u32, rel.addend); + const target_size = try macho_file.cast(usize, target.size); try buffer.ensureUnusedCapacity(target_size); buffer.resize(target_size) catch unreachable; const gop = try sections_data.getOrPut(target.n_sect); @@ -683,7 +683,7 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO gop.value_ptr.* = try self.readSectionData(gpa, file, @intCast(target.n_sect)); } const data = gop.value_ptr.*; - const target_off = math.cast(usize, target.off) orelse return error.Overflow; + const target_off = try macho_file.cast(usize, target.off); @memcpy(buffer.items, data[target_off..][0..target_size]); const res = try lp.insert(gpa, header.type(), buffer.items[addend..]); buffer.clearRetainingCapacity(); @@ -1033,7 +1033,7 @@ fn initEhFrameRecords(self: *Object, allocator: Allocator, sect_id: u8, file: Fi const sect = slice.items(.header)[sect_id]; const relocs = slice.items(.relocs)[sect_id]; - const size = math.cast(usize, sect.size) orelse return error.Overflow; + const size = try macho_file.cast(usize, sect.size); try self.eh_frame_data.resize(allocator, size); const amt = try file.preadAll(self.eh_frame_data.items, sect.offset + self.offset); if (amt != self.eh_frame_data.items.len) return error.InputOutput; @@ -1696,7 +1696,7 @@ pub fn updateArSize(self: *Object, macho_file: *MachO) !void { pub fn writeAr(self: Object, ar_format: Archive.Format, macho_file: *MachO, writer: anytype) !void { // Header - const size = std.math.cast(usize, self.output_ar_state.size) orelse return error.Overflow; + const size = try macho_file.cast(usize, self.output_ar_state.size); const basename = std.fs.path.basename(self.path.sub_path); try Archive.writeHeader(basename, size, ar_format, writer); // Data @@ -1826,7 +1826,7 @@ pub fn writeAtoms(self: *Object, macho_file: *MachO) !void { for (headers, 0..) 
|header, n_sect| { if (header.isZerofill()) continue; - const size = math.cast(usize, header.size) orelse return error.Overflow; + const size = try macho_file.cast(usize, header.size); const data = try gpa.alloc(u8, size); const amt = try file.preadAll(data, header.offset + self.offset); if (amt != data.len) return error.InputOutput; @@ -1837,9 +1837,9 @@ pub fn writeAtoms(self: *Object, macho_file: *MachO) !void { if (!atom.isAlive()) continue; const sect = atom.getInputSection(macho_file); if (sect.isZerofill()) continue; - const value = math.cast(usize, atom.value) orelse return error.Overflow; - const off = math.cast(usize, atom.off) orelse return error.Overflow; - const size = math.cast(usize, atom.size) orelse return error.Overflow; + const value = try macho_file.cast(usize, atom.value); + const off = try macho_file.cast(usize, atom.off); + const size = try macho_file.cast(usize, atom.size); const buffer = macho_file.sections.items(.out)[atom.out_n_sect].items; const data = sections_data[atom.n_sect]; @memcpy(buffer[value..][0..size], data[off..][0..size]); @@ -1865,7 +1865,7 @@ pub fn writeAtomsRelocatable(self: *Object, macho_file: *MachO) !void { for (headers, 0..) |header, n_sect| { if (header.isZerofill()) continue; - const size = math.cast(usize, header.size) orelse return error.Overflow; + const size = try macho_file.cast(usize, header.size); const data = try gpa.alloc(u8, size); const amt = try file.preadAll(data, header.offset + self.offset); if (amt != data.len) return error.InputOutput; @@ -1876,9 +1876,9 @@ pub fn writeAtomsRelocatable(self: *Object, macho_file: *MachO) !void { if (!atom.isAlive()) continue; const sect = atom.getInputSection(macho_file); if (sect.isZerofill()) continue; - const value = math.cast(usize, atom.value) orelse return error.Overflow; - const off = math.cast(usize, atom.off) orelse return error.Overflow; - const size = math.cast(usize, atom.size) orelse return error.Overflow; + const value = try macho_file.cast(usize, atom.value); + const off = try macho_file.cast(usize, atom.off); + const size = try macho_file.cast(usize, atom.size); const buffer = macho_file.sections.items(.out)[atom.out_n_sect].items; const data = sections_data[atom.n_sect]; @memcpy(buffer[value..][0..size], data[off..][0..size]); @@ -1909,29 +1909,27 @@ pub fn calcCompactUnwindSizeRelocatable(self: *Object, macho_file: *MachO) void } } +fn addReloc(offset: u32, arch: std.Target.Cpu.Arch) !macho.relocation_info { + return .{ + .r_address = std.math.cast(i32, offset) orelse return error.Overflow, + .r_symbolnum = 0, + .r_pcrel = 0, + .r_length = 3, + .r_extern = 0, + .r_type = switch (arch) { + .aarch64 => @intFromEnum(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED), + .x86_64 => @intFromEnum(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED), + else => unreachable, + }, + }; +} + pub fn writeCompactUnwindRelocatable(self: *Object, macho_file: *MachO) !void { const tracy = trace(@src()); defer tracy.end(); const cpu_arch = macho_file.getTarget().cpu.arch; - const addReloc = struct { - fn addReloc(offset: u32, arch: std.Target.Cpu.Arch) !macho.relocation_info { - return .{ - .r_address = math.cast(i32, offset) orelse return error.Overflow, - .r_symbolnum = 0, - .r_pcrel = 0, - .r_length = 3, - .r_extern = 0, - .r_type = switch (arch) { - .aarch64 => @intFromEnum(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED), - .x86_64 => @intFromEnum(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED), - else => unreachable, - }, - }; - } - }.addReloc; - const nsect = macho_file.unwind_info_sect_index.?; 
const buffer = macho_file.sections.items(.out)[nsect].items; const relocs = macho_file.sections.items(.relocs)[nsect].items; @@ -1967,7 +1965,7 @@ pub fn writeCompactUnwindRelocatable(self: *Object, macho_file: *MachO) !void { // Personality function if (rec.getPersonality(macho_file)) |sym| { - const r_symbolnum = math.cast(u24, sym.getOutputSymtabIndex(macho_file).?) orelse return error.Overflow; + const r_symbolnum = try macho_file.cast(u24, sym.getOutputSymtabIndex(macho_file).?); var reloc = try addReloc(offset + 16, cpu_arch); reloc.r_symbolnum = r_symbolnum; reloc.r_extern = 1; diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig index fb5a1255caf1..5f2e1291c086 100644 --- a/src/link/MachO/ZigObject.zig +++ b/src/link/MachO/ZigObject.zig @@ -290,12 +290,15 @@ pub fn dedupLiterals(self: *ZigObject, lp: MachO.LiteralPool, macho_file: *MachO /// We need this so that we can write to an archive. /// TODO implement writing ZigObject data directly to a buffer instead. pub fn readFileContents(self: *ZigObject, macho_file: *MachO) !void { + const diags = &macho_file.base.comp.link_diags; // Size of the output object file is always the offset + size of the strtab const size = macho_file.symtab_cmd.stroff + macho_file.symtab_cmd.strsize; const gpa = macho_file.base.comp.gpa; try self.data.resize(gpa, size); - const amt = try macho_file.base.file.?.preadAll(self.data.items, 0); - if (amt != size) return error.InputOutput; + const amt = macho_file.base.file.?.preadAll(self.data.items, 0) catch |err| + return diags.fail("failed to read output file: {s}", .{@errorName(err)}); + if (amt != size) + return diags.fail("unexpected EOF reading from output file", .{}); } pub fn updateArSymtab(self: ZigObject, ar_symtab: *Archive.ArSymtab, macho_file: *MachO) error{OutOfMemory}!void { @@ -376,7 +379,7 @@ pub fn resolveRelocs(self: *ZigObject, macho_file: *MachO) !void { if (atom.getRelocs(macho_file).len == 0) continue; // TODO: we will resolve and write ZigObject's TLS data twice: // once here, and once in writeAtoms - const atom_size = std.math.cast(usize, atom.size) orelse return error.Overflow; + const atom_size = try macho_file.cast(usize, atom.size); const code = try gpa.alloc(u8, atom_size); defer gpa.free(code); self.getAtomData(macho_file, atom.*, code) catch |err| { @@ -400,7 +403,7 @@ pub fn resolveRelocs(self: *ZigObject, macho_file: *MachO) !void { has_error = true; continue; }; - try macho_file.base.file.?.pwriteAll(code, file_offset); + try macho_file.pwriteAll(code, file_offset); } if (has_error) return error.ResolveFailed; @@ -419,7 +422,7 @@ pub fn calcNumRelocs(self: *ZigObject, macho_file: *MachO) void { } } -pub fn writeRelocs(self: *ZigObject, macho_file: *MachO) !void { +pub fn writeRelocs(self: *ZigObject, macho_file: *MachO) error{ LinkFailure, OutOfMemory }!void { const gpa = macho_file.base.comp.gpa; const diags = &macho_file.base.comp.link_diags; @@ -432,14 +435,14 @@ pub fn writeRelocs(self: *ZigObject, macho_file: *MachO) !void { if (!macho_file.isZigSection(atom.out_n_sect) and !macho_file.isDebugSection(atom.out_n_sect)) continue; if (atom.getRelocs(macho_file).len == 0) continue; const extra = atom.getExtra(macho_file); - const atom_size = std.math.cast(usize, atom.size) orelse return error.Overflow; + const atom_size = try macho_file.cast(usize, atom.size); const code = try gpa.alloc(u8, atom_size); defer gpa.free(code); self.getAtomData(macho_file, atom.*, code) catch |err| return diags.fail("failed to fetch code for '{s}': {s}", .{ 
atom.getName(macho_file), @errorName(err) }); const file_offset = header.offset + atom.value; try atom.writeRelocs(macho_file, code, relocs[extra.rel_out_index..][0..extra.rel_out_count]); - try macho_file.base.file.?.pwriteAll(code, file_offset); + try macho_file.pwriteAll(code, file_offset); } } @@ -457,8 +460,8 @@ pub fn writeAtomsRelocatable(self: *ZigObject, macho_file: *MachO) !void { if (sect.isZerofill()) continue; if (macho_file.isZigSection(atom.out_n_sect)) continue; if (atom.getRelocs(macho_file).len == 0) continue; - const off = std.math.cast(usize, atom.value) orelse return error.Overflow; - const size = std.math.cast(usize, atom.size) orelse return error.Overflow; + const off = try macho_file.cast(usize, atom.value); + const size = try macho_file.cast(usize, atom.size); const buffer = macho_file.sections.items(.out)[atom.out_n_sect].items; try self.getAtomData(macho_file, atom.*, buffer[off..][0..size]); const relocs = macho_file.sections.items(.relocs)[atom.out_n_sect].items; @@ -480,8 +483,8 @@ pub fn writeAtoms(self: *ZigObject, macho_file: *MachO) !void { const sect = atom.getInputSection(macho_file); if (sect.isZerofill()) continue; if (macho_file.isZigSection(atom.out_n_sect)) continue; - const off = std.math.cast(usize, atom.value) orelse return error.Overflow; - const size = std.math.cast(usize, atom.size) orelse return error.Overflow; + const off = try macho_file.cast(usize, atom.value); + const size = try macho_file.cast(usize, atom.size); const buffer = macho_file.sections.items(.out)[atom.out_n_sect].items; try self.getAtomData(macho_file, atom.*, buffer[off..][0..size]); try atom.resolveRelocs(macho_file, buffer[off..][0..size]); @@ -546,7 +549,9 @@ pub fn getInputSection(self: ZigObject, atom: Atom, macho_file: *MachO) macho.se return sect; } -pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id) !void { +pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id) link.File.FlushError!void { + const diags = &macho_file.base.comp.link_diags; + // Handle any lazy symbols that were emitted by incremental compilation. 
if (self.lazy_syms.getPtr(.anyerror_type)) |metadata| { const pt: Zcu.PerThread = .activate(macho_file.base.comp.zcu.?, tid); @@ -559,18 +564,20 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id) pt, .{ .kind = .code, .ty = .anyerror_type }, metadata.text_symbol_index, - ) catch |err| return switch (err) { - error.CodegenFail => error.FlushFailure, - else => |e| e, + ) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.LinkFailure => return error.LinkFailure, + else => |e| return diags.fail("failed to update lazy symbol: {s}", .{@errorName(e)}), }; if (metadata.const_state != .unused) self.updateLazySymbol( macho_file, pt, .{ .kind = .const_data, .ty = .anyerror_type }, metadata.const_symbol_index, - ) catch |err| return switch (err) { - error.CodegenFail => error.FlushFailure, - else => |e| e, + ) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.LinkFailure => return error.LinkFailure, + else => |e| return diags.fail("failed to update lazy symbol: {s}", .{@errorName(e)}), }; } for (self.lazy_syms.values()) |*metadata| { @@ -581,7 +588,10 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id) if (self.dwarf) |*dwarf| { const pt: Zcu.PerThread = .activate(macho_file.base.comp.zcu.?, tid); defer pt.deactivate(); - try dwarf.flushModule(pt); + dwarf.flushModule(pt) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + else => |e| return diags.fail("failed to flush dwarf module: {s}", .{@errorName(e)}), + }; self.debug_abbrev_dirty = false; self.debug_aranges_dirty = false; @@ -616,6 +626,7 @@ pub fn getNavVAddr( const sym = self.symbols.items[sym_index]; const vaddr = sym.getAddress(.{}, macho_file); switch (reloc_info.parent) { + .none => unreachable, .atom_index => |atom_index| { const parent_atom = self.symbols.items[atom_index].getAtom(macho_file).?; try parent_atom.addReloc(macho_file, .{ @@ -655,6 +666,7 @@ pub fn getUavVAddr( const sym = self.symbols.items[sym_index]; const vaddr = sym.getAddress(.{}, macho_file); switch (reloc_info.parent) { + .none => unreachable, .atom_index => |atom_index| { const parent_atom = self.symbols.items[atom_index].getAtom(macho_file).?; try parent_atom.addReloc(macho_file, .{ @@ -766,7 +778,7 @@ pub fn updateFunc( func_index: InternPool.Index, air: Air, liveness: Liveness, -) !void { +) link.File.UpdateNavError!void { const tracy = trace(@src()); defer tracy.end(); @@ -777,13 +789,13 @@ pub fn updateFunc( const sym_index = try self.getOrCreateMetadataForNav(macho_file, func.owner_nav); self.symbols.items[sym_index].getAtom(macho_file).?.freeRelocs(macho_file); - var code_buffer = std.ArrayList(u8).init(gpa); - defer code_buffer.deinit(); + var code_buffer: std.ArrayListUnmanaged(u8) = .empty; + defer code_buffer.deinit(gpa); var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, func.owner_nav, sym_index) else null; defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit(); - const res = try codegen.generateFunction( + try codegen.generateFunction( &macho_file.base, pt, zcu.navSrcLoc(func.owner_nav), @@ -793,14 +805,7 @@ pub fn updateFunc( &code_buffer, if (debug_wip_nav) |*wip_nav| .{ .dwarf = wip_nav } else .none, ); - - const code = switch (res) { - .ok => code_buffer.items, - .fail => |em| { - try zcu.failed_codegen.put(gpa, func.owner_nav, em); - return; - }, - }; + const code = code_buffer.items; const sect_index = try self.getNavOutputSection(macho_file, zcu, func.owner_nav, code); const 
old_rva, const old_alignment = blk: { @@ -813,7 +818,8 @@ break :blk .{ atom.value, atom.alignment }; }; - if (debug_wip_nav) |*wip_nav| try self.dwarf.?.finishWipNavFunc(pt, func.owner_nav, code.len, wip_nav); + if (debug_wip_nav) |*wip_nav| self.dwarf.?.finishWipNavFunc(pt, func.owner_nav, code.len, wip_nav) catch |err| + return macho_file.base.cgFail(func.owner_nav, "failed to finish dwarf function: {s}", .{@errorName(err)}); // Exports will be updated by `Zcu.processExports` after the update. if (old_rva != new_rva and old_rva > 0) { @@ -850,7 +856,8 @@ } const target_sym = self.symbols.items[sym_index]; const source_sym = self.symbols.items[target_sym.getExtra(macho_file).trampoline]; - try writeTrampoline(source_sym, target_sym, macho_file); + writeTrampoline(source_sym, target_sym, macho_file) catch |err| + return macho_file.base.cgFail(func.owner_nav, "failed to write trampoline: {s}", .{@errorName(err)}); } } @@ -883,7 +890,11 @@ pub fn updateNav( if (self.dwarf) |*dwarf| dwarf: { var debug_wip_nav = try dwarf.initWipNav(pt, nav_index, sym_index) orelse break :dwarf; defer debug_wip_nav.deinit(); - try dwarf.finishWipNav(pt, nav_index, &debug_wip_nav); + dwarf.finishWipNav(pt, nav_index, &debug_wip_nav) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.Overflow => return error.Overflow, + else => |e| return macho_file.base.cgFail(nav_index, "failed to finish dwarf nav: {s}", .{@errorName(e)}), + }; } return; }, @@ -894,13 +905,13 @@ pub fn updateNav( const sym_index = try self.getOrCreateMetadataForNav(macho_file, nav_index); self.symbols.items[sym_index].getAtom(macho_file).?.freeRelocs(macho_file); - var code_buffer = std.ArrayList(u8).init(zcu.gpa); - defer code_buffer.deinit(); + var code_buffer: std.ArrayListUnmanaged(u8) = .empty; + defer code_buffer.deinit(zcu.gpa); var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, nav_index, sym_index) else null; defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit(); - const res = try codegen.generateSymbol( + try codegen.generateSymbol( &macho_file.base, pt, zcu.navSrcLoc(nav_index), @@ -908,21 +919,19 @@ pub fn updateNav( &code_buffer, .{ .atom_index = sym_index }, ); + const code = code_buffer.items; - const code = switch (res) { - .ok => code_buffer.items, - .fail => |em| { - try zcu.failed_codegen.put(zcu.gpa, nav_index, em); - return; - }, - }; const sect_index = try self.getNavOutputSection(macho_file, zcu, nav_index, code); if (isThreadlocal(macho_file, nav_index)) try self.updateTlv(macho_file, pt, nav_index, sym_index, sect_index, code) else try self.updateNavCode(macho_file, pt, nav_index, sym_index, sect_index, code); - if (debug_wip_nav) |*wip_nav| try self.dwarf.?.finishWipNav(pt, nav_index, wip_nav); + if (debug_wip_nav) |*wip_nav| self.dwarf.?.finishWipNav(pt, nav_index, wip_nav) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.Overflow => return error.Overflow, + else => |e| return macho_file.base.cgFail(nav_index, "failed to finish dwarf nav: {s}", .{@errorName(e)}), + }; } else if (self.dwarf) |*dwarf| try dwarf.updateComptimeNav(pt, nav_index); // Exports will be updated by `Zcu.processExports` after the update.
@@ -936,7 +945,7 @@ fn updateNavCode( sym_index: Symbol.Index, sect_index: u8, code: []const u8, -) !void { +) link.File.UpdateNavError!void { const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; @@ -978,7 +987,8 @@ fn updateNavCode( const need_realloc = code.len > capacity or !required_alignment.check(atom.value); if (need_realloc) { - try atom.grow(macho_file); + atom.grow(macho_file) catch |err| + return macho_file.base.cgFail(nav_index, "failed to grow atom: {s}", .{@errorName(err)}); log.debug("growing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), old_vaddr, atom.value }); if (old_vaddr != atom.value) { sym.value = 0; @@ -991,7 +1001,8 @@ fn updateNavCode( sect.size = needed_size; } } else { - try atom.allocate(macho_file); + atom.allocate(macho_file) catch |err| + return macho_file.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(err)}); errdefer self.freeNavMetadata(macho_file, sym_index); sym.value = 0; @@ -1000,7 +1011,8 @@ fn updateNavCode( if (!sect.isZerofill()) { const file_offset = sect.offset + atom.value; - try macho_file.base.file.?.pwriteAll(code, file_offset); + macho_file.base.file.?.pwriteAll(code, file_offset) catch |err| + return macho_file.base.cgFail(nav_index, "failed to write output file: {s}", .{@errorName(err)}); } } @@ -1198,13 +1210,13 @@ fn lowerConst( ) !LowerConstResult { const gpa = macho_file.base.comp.gpa; - var code_buffer = std.ArrayList(u8).init(gpa); - defer code_buffer.deinit(); + var code_buffer: std.ArrayListUnmanaged(u8) = .empty; + defer code_buffer.deinit(gpa); const name_str = try self.addString(gpa, name); const sym_index = try self.newSymbolWithAtom(gpa, name_str, macho_file); - const res = try codegen.generateSymbol( + try codegen.generateSymbol( &macho_file.base, pt, src_loc, @@ -1212,10 +1224,7 @@ fn lowerConst( &code_buffer, .{ .atom_index = sym_index }, ); - const code = switch (res) { - .ok => code_buffer.items, - .fail => |em| return .{ .fail = em }, - }; + const code = code_buffer.items; const sym = &self.symbols.items[sym_index]; sym.out_n_sect = output_section_index; @@ -1236,7 +1245,7 @@ fn lowerConst( const sect = macho_file.sections.items(.header)[output_section_index]; const file_offset = sect.offset + atom.value; - try macho_file.base.file.?.pwriteAll(code, file_offset); + try macho_file.pwriteAll(code, file_offset); return .{ .ok = sym_index }; } @@ -1246,7 +1255,7 @@ pub fn updateExports( macho_file: *MachO, pt: Zcu.PerThread, exported: Zcu.Exported, - export_indices: []const u32, + export_indices: []const Zcu.Export.Index, ) link.File.UpdateExportsError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1259,7 +1268,7 @@ pub fn updateExports( break :blk self.navs.getPtr(nav).?; }, .uav => |uav| self.uavs.getPtr(uav) orelse blk: { - const first_exp = zcu.all_exports.items[export_indices[0]]; + const first_exp = export_indices[0].ptr(zcu); const res = try self.lowerUav(macho_file, pt, uav, .none, first_exp.src); switch (res) { .mcv => {}, @@ -1279,7 +1288,7 @@ pub fn updateExports( const nlist = self.symtab.items(.nlist)[nlist_idx]; for (export_indices) |export_idx| { - const exp = zcu.all_exports.items[export_idx]; + const exp = export_idx.ptr(zcu); if (exp.opts.section.unwrap()) |section_name| { if (!section_name.eqlSlice("__text", &zcu.intern_pool)) { try zcu.failed_exports.ensureUnusedCapacity(zcu.gpa, 1); @@ -1352,8 +1361,8 @@ fn updateLazySymbol( const gpa = zcu.gpa; var required_alignment: Atom.Alignment = .none; - var code_buffer = std.ArrayList(u8).init(gpa); - defer 
code_buffer.deinit(); + var code_buffer: std.ArrayListUnmanaged(u8) = .empty; + defer code_buffer.deinit(gpa); const name_str = blk: { const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{ @@ -1365,7 +1374,7 @@ fn updateLazySymbol( }; const src = Type.fromInterned(lazy_sym.ty).srcLocOrNull(zcu) orelse Zcu.LazySrcLoc.unneeded; - const res = try codegen.generateLazySymbol( + try codegen.generateLazySymbol( &macho_file.base, pt, src, @@ -1375,13 +1384,7 @@ fn updateLazySymbol( .none, .{ .atom_index = symbol_index }, ); - const code = switch (res) { - .ok => code_buffer.items, - .fail => |em| { - log.err("{s}", .{em.msg}); - return error.CodegenFail; - }, - }; + const code = code_buffer.items; const output_section_index = switch (lazy_sym.kind) { .code => macho_file.zig_text_sect_index.?, @@ -1412,12 +1415,18 @@ fn updateLazySymbol( const sect = macho_file.sections.items(.header)[output_section_index]; const file_offset = sect.offset + atom.value; - try macho_file.base.file.?.pwriteAll(code, file_offset); + try macho_file.pwriteAll(code, file_offset); } pub fn updateLineNumber(self: *ZigObject, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) !void { if (self.dwarf) |*dwarf| { - try dwarf.updateLineNumber(pt.zcu, ti_id); + const comp = dwarf.bin_file.comp; + const diags = &comp.link_diags; + dwarf.updateLineNumber(pt.zcu, ti_id) catch |err| switch (err) { + error.Overflow => return error.Overflow, + error.OutOfMemory => return error.OutOfMemory, + else => |e| return diags.fail("failed to update dwarf line numbers: {s}", .{@errorName(e)}), + }; } } diff --git a/src/link/MachO/relocatable.zig b/src/link/MachO/relocatable.zig index 497969ab90eb..d090a2c9adfd 100644 --- a/src/link/MachO/relocatable.zig +++ b/src/link/MachO/relocatable.zig @@ -18,13 +18,15 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat // Instead of invoking a full-blown `-r` mode on the input which sadly will strip all // debug info segments/sections (this is apparently by design by Apple), we copy // the *only* input file over. - // TODO: in the future, when we implement `dsymutil` alternative directly in the Zig - // compiler, investigate if we can get rid of this `if` prong here. 
const path = positionals.items[0].path().?; - const in_file = try path.root_dir.handle.openFile(path.sub_path, .{}); - const stat = try in_file.stat(); - const amt = try in_file.copyRangeAll(0, macho_file.base.file.?, 0, stat.size); - if (amt != stat.size) return error.InputOutput; // TODO: report an actual user error + const in_file = path.root_dir.handle.openFile(path.sub_path, .{}) catch |err| + return diags.fail("failed to open {}: {s}", .{ path, @errorName(err) }); + const stat = in_file.stat() catch |err| + return diags.fail("failed to stat {}: {s}", .{ path, @errorName(err) }); + const amt = in_file.copyRangeAll(0, macho_file.base.file.?, 0, stat.size) catch |err| + return diags.fail("failed to copy range of file {}: {s}", .{ path, @errorName(err) }); + if (amt != stat.size) + return diags.fail("unexpected short write in copy range of file {}", .{path}); return; } @@ -33,14 +35,18 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat diags.addParseError(link_input.path().?, "failed to read input file: {s}", .{@errorName(err)}); } - if (diags.hasErrors()) return error.FlushFailure; + if (diags.hasErrors()) return error.LinkFailure; try macho_file.parseInputFiles(); - if (diags.hasErrors()) return error.FlushFailure; + if (diags.hasErrors()) return error.LinkFailure; try macho_file.resolveSymbols(); - try macho_file.dedupLiterals(); + macho_file.dedupLiterals() catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.LinkFailure => return error.LinkFailure, + else => |e| return diags.fail("failed to deduplicate literals: {s}", .{@errorName(e)}), + }; markExports(macho_file); claimUnresolved(macho_file); try initOutputSections(macho_file); @@ -49,7 +55,10 @@ try calcSectionSizes(macho_file); try createSegment(macho_file); - try allocateSections(macho_file); + allocateSections(macho_file) catch |err| switch (err) { + error.LinkFailure => return error.LinkFailure, + else => |e| return diags.fail("failed to allocate sections: {s}", .{@errorName(e)}), + }; allocateSegment(macho_file); if (build_options.enable_logging) { @@ -93,11 +102,11 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ? diags.addParseError(link_input.path().?, "failed to read input file: {s}", .{@errorName(err)}); } - if (diags.hasErrors()) return error.FlushFailure; + if (diags.hasErrors()) return error.LinkFailure; try parseInputFilesAr(macho_file); - if (diags.hasErrors()) return error.FlushFailure; + if (diags.hasErrors()) return error.LinkFailure; // First, we flush relocatable object file generated with our backends. if (macho_file.getZigObject()) |zo| { @@ -108,7 +117,8 @@ try macho_file.addAtomsToSections(); try calcSectionSizes(macho_file); try createSegment(macho_file); - try allocateSections(macho_file); + allocateSections(macho_file) catch |err| + return diags.fail("failed to allocate sections: {s}", .{@errorName(err)}); allocateSegment(macho_file); if (build_options.enable_logging) { @@ -126,8 +136,6 @@ const ncmds, const sizeofcmds = try writeLoadCommands(macho_file); try writeHeader(macho_file, ncmds, sizeofcmds); - // TODO we can avoid reading in the file contents we just wrote if we give the linker - // ability to write directly to a buffer.
try zo.readFileContents(macho_file); } @@ -152,7 +160,8 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ? // Update sizes of contributing objects for (files.items) |index| { - try macho_file.getFile(index).?.updateArSize(macho_file); + macho_file.getFile(index).?.updateArSize(macho_file) catch |err| + return diags.fail("failed to update ar size: {s}", .{@errorName(err)}); } // Update file offsets of contributing objects @@ -171,7 +180,7 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ? state.file_off = pos; pos += @sizeOf(Archive.ar_hdr); pos += mem.alignForward(usize, zo.basename.len + 1, ptr_width); - pos += math.cast(usize, state.size) orelse return error.Overflow; + pos += try macho_file.cast(usize, state.size); }, .object => |o| { const state = &o.output_ar_state; @@ -179,7 +188,7 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ? state.file_off = pos; pos += @sizeOf(Archive.ar_hdr); pos += mem.alignForward(usize, o.path.basename().len + 1, ptr_width); - pos += math.cast(usize, state.size) orelse return error.Overflow; + pos += try macho_file.cast(usize, state.size); }, else => unreachable, } @@ -201,7 +210,10 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ? try writer.writeAll(Archive.ARMAG); // Write symtab - try ar_symtab.write(format, macho_file, writer); + ar_symtab.write(format, macho_file, writer) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + else => |e| return diags.fail("failed to write archive symbol table: {s}", .{@errorName(e)}), + }; // Write object files for (files.items) |index| { @@ -210,15 +222,16 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ? if (padding > 0) { try writer.writeByteNTimes(0, padding); } - try macho_file.getFile(index).?.writeAr(format, macho_file, writer); + macho_file.getFile(index).?.writeAr(format, macho_file, writer) catch |err| + return diags.fail("failed to write archive: {s}", .{@errorName(err)}); } assert(buffer.items.len == total_size); - try macho_file.base.file.?.setEndPos(total_size); - try macho_file.base.file.?.pwriteAll(buffer.items, 0); + try macho_file.setEndPos(total_size); + try macho_file.pwriteAll(buffer.items, 0); - if (diags.hasErrors()) return error.FlushFailure; + if (diags.hasErrors()) return error.LinkFailure; } fn parseInputFilesAr(macho_file: *MachO) !void { @@ -452,11 +465,10 @@ fn allocateSections(macho_file: *MachO) !void { for (slice.items(.header)) |*header| { const needed_size = header.size; header.size = 0; - const alignment = try math.powi(u32, 2, header.@"align"); + const alignment = try macho_file.alignPow(header.@"align"); if (!header.isZerofill()) { if (needed_size > macho_file.allocatedSize(header.offset)) { - header.offset = math.cast(u32, try macho_file.findFreeSpace(needed_size, alignment)) orelse - return error.Overflow; + header.offset = try macho_file.cast(u32, try macho_file.findFreeSpace(needed_size, alignment)); } } if (needed_size > macho_file.allocatedSizeVirtual(header.addr)) { @@ -572,7 +584,7 @@ fn sortRelocs(macho_file: *MachO) void { } } -fn writeSections(macho_file: *MachO) !void { +fn writeSections(macho_file: *MachO) link.File.FlushError!void { const tracy = trace(@src()); defer tracy.end(); @@ -583,7 +595,7 @@ fn writeSections(macho_file: *MachO) !void { for (slice.items(.header), slice.items(.out), slice.items(.relocs), 0..) 
|header, *out, *relocs, n_sect| { if (header.isZerofill()) continue; if (!macho_file.isZigSection(@intCast(n_sect))) { // TODO this is wrong; what about debug sections? - const size = math.cast(usize, header.size) orelse return error.Overflow; + const size = try macho_file.cast(usize, header.size); try out.resize(gpa, size); const padding_byte: u8 = if (header.isCode() and cpu_arch == .x86_64) 0xcc else 0; @memset(out.items, padding_byte); @@ -662,16 +674,16 @@ fn writeSectionsToFile(macho_file: *MachO) !void { const slice = macho_file.sections.slice(); for (slice.items(.header), slice.items(.out), slice.items(.relocs)) |header, out, relocs| { - try macho_file.base.file.?.pwriteAll(out.items, header.offset); - try macho_file.base.file.?.pwriteAll(mem.sliceAsBytes(relocs.items), header.reloff); + try macho_file.pwriteAll(out.items, header.offset); + try macho_file.pwriteAll(mem.sliceAsBytes(relocs.items), header.reloff); } try macho_file.writeDataInCode(); - try macho_file.base.file.?.pwriteAll(mem.sliceAsBytes(macho_file.symtab.items), macho_file.symtab_cmd.symoff); - try macho_file.base.file.?.pwriteAll(macho_file.strtab.items, macho_file.symtab_cmd.stroff); + try macho_file.pwriteAll(mem.sliceAsBytes(macho_file.symtab.items), macho_file.symtab_cmd.symoff); + try macho_file.pwriteAll(macho_file.strtab.items, macho_file.symtab_cmd.stroff); } -fn writeLoadCommands(macho_file: *MachO) !struct { usize, usize } { +fn writeLoadCommands(macho_file: *MachO) error{ LinkFailure, OutOfMemory }!struct { usize, usize } { const gpa = macho_file.base.comp.gpa; const needed_size = load_commands.calcLoadCommandsSizeObject(macho_file); const buffer = try gpa.alloc(u8, needed_size); @@ -686,31 +698,45 @@ fn writeLoadCommands(macho_file: *MachO) !struct { usize, usize } { { assert(macho_file.segments.items.len == 1); const seg = macho_file.segments.items[0]; - try writer.writeStruct(seg); + writer.writeStruct(seg) catch |err| switch (err) { + error.NoSpaceLeft => unreachable, + }; for (macho_file.sections.items(.header)) |header| { - try writer.writeStruct(header); + writer.writeStruct(header) catch |err| switch (err) { + error.NoSpaceLeft => unreachable, + }; } ncmds += 1; } - try writer.writeStruct(macho_file.data_in_code_cmd); + writer.writeStruct(macho_file.data_in_code_cmd) catch |err| switch (err) { + error.NoSpaceLeft => unreachable, + }; ncmds += 1; - try writer.writeStruct(macho_file.symtab_cmd); + writer.writeStruct(macho_file.symtab_cmd) catch |err| switch (err) { + error.NoSpaceLeft => unreachable, + }; ncmds += 1; - try writer.writeStruct(macho_file.dysymtab_cmd); + writer.writeStruct(macho_file.dysymtab_cmd) catch |err| switch (err) { + error.NoSpaceLeft => unreachable, + }; ncmds += 1; if (macho_file.platform.isBuildVersionCompatible()) { - try load_commands.writeBuildVersionLC(macho_file.platform, macho_file.sdk_version, writer); + load_commands.writeBuildVersionLC(macho_file.platform, macho_file.sdk_version, writer) catch |err| switch (err) { + error.NoSpaceLeft => unreachable, + }; ncmds += 1; } else { - try load_commands.writeVersionMinLC(macho_file.platform, macho_file.sdk_version, writer); + load_commands.writeVersionMinLC(macho_file.platform, macho_file.sdk_version, writer) catch |err| switch (err) { + error.NoSpaceLeft => unreachable, + }; ncmds += 1; } assert(stream.pos == needed_size); - try macho_file.base.file.?.pwriteAll(buffer, @sizeOf(macho.mach_header_64)); + try macho_file.pwriteAll(buffer, @sizeOf(macho.mach_header_64)); return .{ ncmds, buffer.len }; } @@ -742,7 +768,7 @@ fn 
writeHeader(macho_file: *MachO, ncmds: usize, sizeofcmds: usize) !void { header.ncmds = @intCast(ncmds); header.sizeofcmds = @intCast(sizeofcmds); - try macho_file.base.file.?.pwriteAll(mem.asBytes(&header), 0); + try macho_file.pwriteAll(mem.asBytes(&header), 0); } const std = @import("std"); diff --git a/src/link/NvPtx.zig b/src/link/NvPtx.zig index 199b13a6c6d8..ab82bb9de8bb 100644 --- a/src/link/NvPtx.zig +++ b/src/link/NvPtx.zig @@ -82,11 +82,17 @@ pub fn deinit(self: *NvPtx) void { self.llvm_object.deinit(); } -pub fn updateFunc(self: *NvPtx, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { +pub fn updateFunc( + self: *NvPtx, + pt: Zcu.PerThread, + func_index: InternPool.Index, + air: Air, + liveness: Liveness, +) link.File.UpdateNavError!void { try self.llvm_object.updateFunc(pt, func_index, air, liveness); } -pub fn updateNav(self: *NvPtx, pt: Zcu.PerThread, nav: InternPool.Nav.Index) !void { +pub fn updateNav(self: *NvPtx, pt: Zcu.PerThread, nav: InternPool.Nav.Index) link.File.UpdateNavError!void { return self.llvm_object.updateNav(pt, nav); } @@ -94,7 +100,7 @@ pub fn updateExports( self: *NvPtx, pt: Zcu.PerThread, exported: Zcu.Exported, - export_indices: []const u32, + export_indices: []const Zcu.Export.Index, ) !void { if (build_options.skip_non_native and builtin.object_format != .nvptx) @panic("Attempted to compile for object format that was disabled by build configuration"); @@ -102,10 +108,6 @@ pub fn updateExports( return self.llvm_object.updateExports(pt, exported, export_indices); } -pub fn freeDecl(self: *NvPtx, decl_index: InternPool.DeclIndex) void { - return self.llvm_object.freeDecl(decl_index); -} - pub fn flush(self: *NvPtx, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { return self.flushModule(arena, tid, prog_node); } diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 31aac2486ef7..15c89693ff7c 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -60,7 +60,7 @@ fn_nav_table: std.AutoArrayHashMapUnmanaged( data_nav_table: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, []u8) = .empty, /// When `updateExports` is called, we store the export indices here, to be used /// during flush. 
-nav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, []u32) = .empty, +nav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, []Zcu.Export.Index) = .empty, lazy_syms: LazySymbolTable = .{}, @@ -345,6 +345,7 @@ fn putFn(self: *Plan9, nav_index: InternPool.Nav.Index, out: FnNavOutput) !void try a.writer().writeInt(u16, 1, .big); // getting the full file path + // TODO don't call getcwd here, that is inappropriate var buf: [std.fs.max_path_bytes]u8 = undefined; const full_path = try std.fs.path.join(arena, &.{ file.mod.root.root_dir.path orelse try std.posix.getcwd(&buf), @@ -385,7 +386,13 @@ fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !voi } } -pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { +pub fn updateFunc( + self: *Plan9, + pt: Zcu.PerThread, + func_index: InternPool.Index, + air: Air, + liveness: Liveness, +) link.File.UpdateNavError!void { if (build_options.skip_non_native and builtin.object_format != .plan9) { @panic("Attempted to compile for object format that was disabled by build configuration"); } @@ -397,8 +404,8 @@ pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index, const atom_idx = try self.seeNav(pt, func.owner_nav); - var code_buffer = std.ArrayList(u8).init(gpa); - defer code_buffer.deinit(); + var code_buffer: std.ArrayListUnmanaged(u8) = .empty; + defer code_buffer.deinit(gpa); var dbg_info_output: DebugInfoOutput = .{ .dbg_line = std.ArrayList(u8).init(gpa), .start_line = null, @@ -409,7 +416,7 @@ pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index, }; defer dbg_info_output.dbg_line.deinit(); - const res = try codegen.generateFunction( + try codegen.generateFunction( &self.base, pt, zcu.navSrcLoc(func.owner_nav), @@ -419,10 +426,7 @@ pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index, &code_buffer, .{ .plan9 = &dbg_info_output }, ); - const code = switch (res) { - .ok => try code_buffer.toOwnedSlice(), - .fail => |em| return zcu.failed_codegen.put(gpa, func.owner_nav, em), - }; + const code = try code_buffer.toOwnedSlice(gpa); self.getAtomPtr(atom_idx).code = .{ .code_ptr = null, .other = .{ .nav_index = func.owner_nav }, @@ -433,11 +437,13 @@ pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index, .start_line = dbg_info_output.start_line.?, .end_line = dbg_info_output.end_line, }; - try self.putFn(func.owner_nav, out); + // The awkward error handling here is due to putFn calling `std.posix.getcwd` which it should not do. 
+ self.putFn(func.owner_nav, out) catch |err| + return zcu.codegenFail(func.owner_nav, "failed to put fn: {s}", .{@errorName(err)}); return self.updateFinish(pt, func.owner_nav); } -pub fn updateNav(self: *Plan9, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void { +pub fn updateNav(self: *Plan9, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) link.File.UpdateNavError!void { const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; @@ -456,10 +462,10 @@ pub fn updateNav(self: *Plan9, pt: Zcu.PerThread, nav_index: InternPool.Nav.Inde if (nav_init.typeOf(zcu).hasRuntimeBits(zcu)) { const atom_idx = try self.seeNav(pt, nav_index); - var code_buffer = std.ArrayList(u8).init(gpa); - defer code_buffer.deinit(); + var code_buffer: std.ArrayListUnmanaged(u8) = .empty; + defer code_buffer.deinit(gpa); // TODO we need the symbol index for symbol in the table of locals for the containing atom - const res = try codegen.generateSymbol( + try codegen.generateSymbol( &self.base, pt, zcu.navSrcLoc(nav_index), @@ -467,10 +473,7 @@ pub fn updateNav(self: *Plan9, pt: Zcu.PerThread, nav_index: InternPool.Nav.Inde &code_buffer, .{ .atom_index = @intCast(atom_idx) }, ); - const code = switch (res) { - .ok => code_buffer.items, - .fail => |em| return zcu.failed_codegen.put(gpa, nav_index, em), - }; + const code = code_buffer.items; try self.data_nav_table.ensureUnusedCapacity(gpa, 1); const duped_code = try gpa.dupe(u8, code); self.getAtomPtr(self.navs.get(nav_index).?.index).code = .{ .code_ptr = null, .other = .{ .nav_index = nav_index } }; @@ -529,16 +532,21 @@ fn allocateGotIndex(self: *Plan9) usize { } } -pub fn flush(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { +pub fn flush( + self: *Plan9, + arena: Allocator, + tid: Zcu.PerThread.Id, + prog_node: std.Progress.Node, +) link.File.FlushError!void { const comp = self.base.comp; + const diags = &comp.link_diags; const use_lld = build_options.have_llvm and comp.config.use_lld; assert(!use_lld); switch (link.File.effectiveOutputMode(use_lld, comp.config.output_mode)) { .Exe => {}, - // plan9 object files are totally different - .Obj => return error.TODOImplementPlan9Objs, - .Lib => return error.TODOImplementWritingLibFiles, + .Obj => return diags.fail("writing plan9 object files unimplemented", .{}), + .Lib => return diags.fail("writing plan9 lib files unimplemented", .{}), } return self.flushModule(arena, tid, prog_node); } @@ -583,7 +591,13 @@ fn atomCount(self: *Plan9) usize { return data_nav_count + fn_nav_count + lazy_atom_count + extern_atom_count + uav_atom_count; } -pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { +pub fn flushModule( + self: *Plan9, + arena: Allocator, + /// TODO: stop using this + tid: Zcu.PerThread.Id, + prog_node: std.Progress.Node, +) link.File.FlushError!void { if (build_options.skip_non_native and builtin.object_format != .plan9) { @panic("Attempted to compile for object format that was disabled by build configuration"); } @@ -594,6 +608,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n _ = arena; // Has the same lifetime as the call to Compilation.update. 
const comp = self.base.comp; + const diags = &comp.link_diags; const gpa = comp.gpa; const target = comp.root_mod.resolved_target.result; @@ -605,7 +620,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n defer assert(self.hdr.entry != 0x0); const pt: Zcu.PerThread = .activate( - self.base.comp.zcu orelse return error.LinkingWithoutZigSourceUnimplemented, + self.base.comp.zcu orelse return diags.fail("linking without zig source unimplemented", .{}), tid, ); defer pt.deactivate(); @@ -614,22 +629,16 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n if (self.lazy_syms.getPtr(.none)) |metadata| { // Most lazy symbols can be updated on first use, but // anyerror needs to wait for everything to be flushed. - if (metadata.text_state != .unused) self.updateLazySymbolAtom( + if (metadata.text_state != .unused) try self.updateLazySymbolAtom( pt, .{ .kind = .code, .ty = .anyerror_type }, metadata.text_atom, - ) catch |err| return switch (err) { - error.CodegenFail => error.FlushFailure, - else => |e| e, - }; - if (metadata.rodata_state != .unused) self.updateLazySymbolAtom( + ); + if (metadata.rodata_state != .unused) try self.updateLazySymbolAtom( pt, .{ .kind = .const_data, .ty = .anyerror_type }, metadata.rodata_atom, - ) catch |err| return switch (err) { - error.CodegenFail => error.FlushFailure, - else => |e| e, - }; + ); } for (self.lazy_syms.values()) |*metadata| { if (metadata.text_state != .unused) metadata.text_state = .flushed; @@ -902,30 +911,29 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n } } } - // write it all! - try file.pwritevAll(iovecs, 0); + file.pwritevAll(iovecs, 0) catch |err| return diags.fail("failed to write file: {s}", .{@errorName(err)}); } fn addNavExports( self: *Plan9, - mod: *Zcu, + zcu: *Zcu, nav_index: InternPool.Nav.Index, - export_indices: []const u32, + export_indices: []const Zcu.Export.Index, ) !void { const gpa = self.base.comp.gpa; const metadata = self.navs.getPtr(nav_index).?; const atom = self.getAtom(metadata.index); for (export_indices) |export_idx| { - const exp = mod.all_exports.items[export_idx]; - const exp_name = exp.opts.name.toSlice(&mod.intern_pool); + const exp = export_idx.ptr(zcu); + const exp_name = exp.opts.name.toSlice(&zcu.intern_pool); // plan9 does not support custom sections if (exp.opts.section.unwrap()) |section_name| { - if (!section_name.eqlSlice(".text", &mod.intern_pool) and - !section_name.eqlSlice(".data", &mod.intern_pool)) + if (!section_name.eqlSlice(".text", &zcu.intern_pool) and + !section_name.eqlSlice(".data", &zcu.intern_pool)) { - try mod.failed_exports.put(mod.gpa, export_idx, try Zcu.ErrorMsg.create( + try zcu.failed_exports.put(zcu.gpa, export_idx, try Zcu.ErrorMsg.create( gpa, - mod.navSrcLoc(nav_index), + zcu.navSrcLoc(nav_index), "plan9 does not support extra sections", .{}, )); @@ -947,50 +955,6 @@ fn addNavExports( } } -pub fn freeDecl(self: *Plan9, decl_index: InternPool.DeclIndex) void { - const gpa = self.base.comp.gpa; - // TODO audit the lifetimes of decls table entries. It's possible to get - // freeDecl without any updateDecl in between. 
- const zcu = self.base.comp.zcu.?; - const decl = zcu.declPtr(decl_index); - const is_fn = decl.val.isFuncBody(zcu); - if (is_fn) { - const symidx_and_submap = self.fn_decl_table.get(decl.getFileScope(zcu)).?; - var submap = symidx_and_submap.functions; - if (submap.fetchSwapRemove(decl_index)) |removed_entry| { - gpa.free(removed_entry.value.code); - gpa.free(removed_entry.value.lineinfo); - } - if (submap.count() == 0) { - self.syms.items[symidx_and_submap.sym_index] = aout.Sym.undefined_symbol; - self.syms_index_free_list.append(gpa, symidx_and_submap.sym_index) catch {}; - submap.deinit(gpa); - } - } else { - if (self.data_decl_table.fetchSwapRemove(decl_index)) |removed_entry| { - gpa.free(removed_entry.value); - } - } - if (self.decls.fetchRemove(decl_index)) |const_kv| { - var kv = const_kv; - const atom = self.getAtom(kv.value.index); - if (atom.got_index) |i| { - // TODO: if this catch {} is triggered, an assertion in flushModule will be triggered, because got_index_free_list will have the wrong length - self.got_index_free_list.append(gpa, i) catch {}; - } - if (atom.sym_index) |i| { - self.syms_index_free_list.append(gpa, i) catch {}; - self.syms.items[i] = aout.Sym.undefined_symbol; - } - kv.value.exports.deinit(gpa); - } - { - const atom_index = self.decls.get(decl_index).?.index; - const relocs = self.relocs.getPtr(atom_index) orelse return; - relocs.clearAndFree(gpa); - assert(self.relocs.remove(atom_index)); - } -} fn createAtom(self: *Plan9) !Atom.Index { const gpa = self.base.comp.gpa; const index = @as(Atom.Index, @intCast(self.atoms.items.len)); @@ -1043,7 +1007,7 @@ pub fn updateExports( self: *Plan9, pt: Zcu.PerThread, exported: Zcu.Exported, - export_indices: []const u32, + export_indices: []const Zcu.Export.Index, ) !void { const gpa = self.base.comp.gpa; switch (exported) { @@ -1054,7 +1018,7 @@ pub fn updateExports( gpa.free(kv.value); } try self.nav_exports.ensureUnusedCapacity(gpa, 1); - const duped_indices = try gpa.dupe(u32, export_indices); + const duped_indices = try gpa.dupe(Zcu.Export.Index, export_indices); self.nav_exports.putAssumeCapacityNoClobber(nav, duped_indices); }, } @@ -1085,12 +1049,19 @@ pub fn getOrCreateAtomForLazySymbol(self: *Plan9, pt: Zcu.PerThread, lazy_sym: F return atom; } -fn updateLazySymbolAtom(self: *Plan9, pt: Zcu.PerThread, sym: File.LazySymbol, atom_index: Atom.Index) !void { +fn updateLazySymbolAtom( + self: *Plan9, + pt: Zcu.PerThread, + sym: File.LazySymbol, + atom_index: Atom.Index, +) error{ LinkFailure, OutOfMemory }!void { const gpa = pt.zcu.gpa; + const comp = self.base.comp; + const diags = &comp.link_diags; var required_alignment: InternPool.Alignment = .none; - var code_buffer = std.ArrayList(u8).init(gpa); - defer code_buffer.deinit(); + var code_buffer: std.ArrayListUnmanaged(u8) = .empty; + defer code_buffer.deinit(gpa); // create the symbol for the name const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{ @@ -1107,7 +1078,7 @@ fn updateLazySymbolAtom(self: *Plan9, pt: Zcu.PerThread, sym: File.LazySymbol, a // generate the code const src = Type.fromInterned(sym.ty).srcLocOrNull(pt.zcu) orelse Zcu.LazySrcLoc.unneeded; - const res = try codegen.generateLazySymbol( + codegen.generateLazySymbol( &self.base, pt, src, @@ -1116,14 +1087,12 @@ fn updateLazySymbolAtom(self: *Plan9, pt: Zcu.PerThread, sym: File.LazySymbol, a &code_buffer, .none, .{ .atom_index = @intCast(atom_index) }, - ); - const code = switch (res) { - .ok => code_buffer.items, - .fail => |em| { - log.err("{s}", .{em.msg}); - return 
error.CodegenFail; - }, + ) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.CodegenFail => return error.LinkFailure, + error.Overflow => return diags.fail("codegen failure: encountered number too big for compiler", .{}), }; + const code = code_buffer.items; // duped_code is freed when the atom is freed const duped_code = try gpa.dupe(u8, code); errdefer gpa.free(duped_code); @@ -1283,7 +1252,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { try self.writeSym(writer, sym); if (self.nav_exports.get(nav_index)) |export_indices| { for (export_indices) |export_idx| { - const exp = zcu.all_exports.items[export_idx]; + const exp = export_idx.ptr(zcu); if (nav_metadata.getExport(self, exp.opts.name.toSlice(ip))) |exp_i| { try self.writeSym(writer, self.syms.items[exp_i]); } @@ -1322,7 +1291,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { try self.writeSym(writer, sym); if (self.nav_exports.get(nav_index)) |export_indices| { for (export_indices) |export_idx| { - const exp = zcu.all_exports.items[export_idx]; + const exp = export_idx.ptr(zcu); if (nav_metadata.getExport(self, exp.opts.name.toSlice(ip))) |exp_i| { const s = self.syms.items[exp_i]; if (mem.eql(u8, s.name, "_start")) @@ -1432,19 +1401,16 @@ pub fn lowerUav( const got_index = self.allocateGotIndex(); gop.value_ptr.* = index; // we need to free name latex - var code_buffer = std.ArrayList(u8).init(gpa); - const res = try codegen.generateSymbol(&self.base, pt, src_loc, val, &code_buffer, .{ .atom_index = index }); - const code = switch (res) { - .ok => code_buffer.items, - .fail => |em| return .{ .fail = em }, - }; + var code_buffer: std.ArrayListUnmanaged(u8) = .empty; + defer code_buffer.deinit(gpa); + try codegen.generateSymbol(&self.base, pt, src_loc, val, &code_buffer, .{ .atom_index = index }); const atom_ptr = self.getAtomPtr(index); atom_ptr.* = .{ .type = .d, .offset = undefined, .sym_index = null, .got_index = got_index, - .code = Atom.CodePtr.fromSlice(code), + .code = Atom.CodePtr.fromSlice(try code_buffer.toOwnedSlice(gpa)), }; _ = try atom_ptr.getOrCreateSymbolTableEntry(self); self.syms.items[atom_ptr.sym_index.?] 
= .{ diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index b1b8945963ca..a5b9615c5e6c 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -122,7 +122,13 @@ pub fn deinit(self: *SpirV) void { self.object.deinit(); } -pub fn updateFunc(self: *SpirV, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { +pub fn updateFunc( + self: *SpirV, + pt: Zcu.PerThread, + func_index: InternPool.Index, + air: Air, + liveness: Liveness, +) link.File.UpdateNavError!void { if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } @@ -134,7 +140,7 @@ pub fn updateFunc(self: *SpirV, pt: Zcu.PerThread, func_index: InternPool.Index, try self.object.updateFunc(pt, func_index, air, liveness); } -pub fn updateNav(self: *SpirV, pt: Zcu.PerThread, nav: InternPool.Nav.Index) !void { +pub fn updateNav(self: *SpirV, pt: Zcu.PerThread, nav: InternPool.Nav.Index) link.File.UpdateNavError!void { if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } @@ -149,7 +155,7 @@ pub fn updateExports( self: *SpirV, pt: Zcu.PerThread, exported: Zcu.Exported, - export_indices: []const u32, + export_indices: []const Zcu.Export.Index, ) !void { const zcu = pt.zcu; const ip = &zcu.intern_pool; @@ -184,7 +190,7 @@ pub fn updateExports( }; for (export_indices) |export_idx| { - const exp = zcu.all_exports.items[export_idx]; + const exp = export_idx.ptr(zcu); try self.object.spv.declareEntryPoint( spv_decl_index, exp.opts.name.toSlice(ip), @@ -196,16 +202,21 @@ pub fn updateExports( // TODO: Export regular functions, variables, etc using Linkage attributes. } -pub fn freeDecl(self: *SpirV, decl_index: InternPool.DeclIndex) void { - _ = self; - _ = decl_index; -} - pub fn flush(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { return self.flushModule(arena, tid, prog_node); } -pub fn flushModule(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { +pub fn flushModule( + self: *SpirV, + arena: Allocator, + tid: Zcu.PerThread.Id, + prog_node: std.Progress.Node, +) link.File.FlushError!void { + // The goal is to never use this because it's only needed if we need to + // write to InternPool, but flushModule is too late to be writing to the + // InternPool. 
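// Editorial sketch (not part of the patch): several linker entry points in
// this diff switch from inferred error sets to the explicit sets declared on
// `link.File` (e.g. `UpdateNavError`, `FlushError`). The names below
// (`UpdateError`, `backendUpdate`, `updateThing`) are stand-ins; the point is
// that unexpected backend errors are translated at the boundary rather than
// widening the interface's declared error set.
const std = @import("std");

const UpdateError = error{ OutOfMemory, CodegenFail };

fn backendUpdate(ok: bool) (UpdateError || error{Overflow})!void {
    if (!ok) return error.Overflow;
}

fn updateThing(ok: bool) UpdateError!void {
    backendUpdate(ok) catch |err| switch (err) {
        // Errors already in the declared set pass through unchanged.
        error.OutOfMemory => return error.OutOfMemory,
        error.CodegenFail => return error.CodegenFail,
        // Anything else is mapped onto an error the interface knows about.
        error.Overflow => return error.CodegenFail,
    };
}

test updateThing {
    try std.testing.expectError(error.CodegenFail, updateThing(false));
    try updateThing(true);
}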
+ _ = tid; + if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } @@ -216,12 +227,11 @@ pub fn flushModule(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_n const sub_prog_node = prog_node.start("Flush Module", 0); defer sub_prog_node.end(); - const spv = &self.object.spv; - const comp = self.base.comp; + const spv = &self.object.spv; + const diags = &comp.link_diags; const gpa = comp.gpa; const target = comp.getTarget(); - _ = tid; try writeCapabilities(spv, target); try writeMemoryModel(spv, target); @@ -264,13 +274,11 @@ pub fn flushModule(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_n const linked_module = self.linkModule(arena, module, sub_prog_node) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, - else => |other| { - log.err("error while linking: {s}", .{@errorName(other)}); - return error.FlushFailure; - }, + else => |other| return diags.fail("error while linking: {s}", .{@errorName(other)}), }; - try self.base.file.?.writeAll(std.mem.sliceAsBytes(linked_module)); + self.base.file.?.writeAll(std.mem.sliceAsBytes(linked_module)) catch |err| + return diags.fail("failed to write: {s}", .{@errorName(err)}); } fn linkModule(self: *SpirV, a: Allocator, module: []Word, progress: std.Progress.Node) ![]Word { diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 7bb1d8c476b1..cf1a560fb308 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1,45 +1,67 @@ +//! The overall strategy here is to load all the object file data into memory +//! as inputs are parsed. During `prelink`, as much linking as possible is +//! performed without any knowledge of functions and globals provided by the +//! Zcu. If there is no Zcu, effectively all linking is done in `prelink`. +//! +//! `updateFunc`, `updateNav`, `updateExports`, and `deleteExport` are handled +//! by merely tracking references to the relevant functions and globals. All +//! the linking logic between objects and Zcu happens in `flush`. Many +//! components of the final output are computed on-the-fly at this time rather +//! than being precomputed and stored separately. 
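// Editorial sketch (not part of the patch): a toy model of the phase split the
// doc comment above describes. Object inputs are resolved eagerly in
// `prelink`; the Zcu-facing hooks only record which symbols the compilation
// unit touched; `flush` combines both sets at the end. `MiniLinker`,
// `prelink`, `noteZcuFunction`, and `flush` are illustrative stand-ins, not
// the Wasm linker's real API.
const std = @import("std");

const MiniLinker = struct {
    gpa: std.mem.Allocator,
    /// Functions contributed by parsed objects, fully known after `prelink`.
    object_functions: std.StringArrayHashMapUnmanaged(void) = .empty,
    /// References recorded by update hooks; nothing is resolved here.
    zcu_functions: std.StringArrayHashMapUnmanaged(void) = .empty,

    fn prelink(l: *MiniLinker, object_syms: []const []const u8) !void {
        for (object_syms) |name| try l.object_functions.put(l.gpa, name, {});
    }

    // Analogue of `updateFunc`/`updateExports`: merely track the reference.
    fn noteZcuFunction(l: *MiniLinker, name: []const u8) !void {
        try l.zcu_functions.put(l.gpa, name, {});
    }

    // Analogue of `flush`: only now is the final function count computed.
    fn flush(l: *MiniLinker) usize {
        return l.object_functions.count() + l.zcu_functions.count();
    }
};

test MiniLinker {
    var l: MiniLinker = .{ .gpa = std.testing.allocator };
    defer {
        l.object_functions.deinit(l.gpa);
        l.zcu_functions.deinit(l.gpa);
    }
    try l.prelink(&.{ "memcpy", "__wasm_call_ctors" });
    try l.noteZcuFunction("main");
    try std.testing.expectEqual(@as(usize, 3), l.flush());
}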
+ const Wasm = @This(); -const build_options = @import("build_options"); +const Archive = @import("Wasm/Archive.zig"); +const Object = @import("Wasm/Object.zig"); +pub const Flush = @import("Wasm/Flush.zig"); const builtin = @import("builtin"); const native_endian = builtin.cpu.arch.endian(); +const build_options = @import("build_options"); + const std = @import("std"); const Allocator = std.mem.Allocator; const Cache = std.Build.Cache; const Path = Cache.Path; const assert = std.debug.assert; const fs = std.fs; -const gc_log = std.log.scoped(.gc); const leb = std.leb; const log = std.log.scoped(.link); const mem = std.mem; const Air = @import("../Air.zig"); -const Archive = @import("Wasm/Archive.zig"); +const Mir = @import("../arch/wasm/Mir.zig"); const CodeGen = @import("../arch/wasm/CodeGen.zig"); +const abi = @import("../arch/wasm/abi.zig"); const Compilation = @import("../Compilation.zig"); const Dwarf = @import("Dwarf.zig"); const InternPool = @import("../InternPool.zig"); const Liveness = @import("../Liveness.zig"); const LlvmObject = @import("../codegen/llvm.zig").Object; -const Object = @import("Wasm/Object.zig"); -const Symbol = @import("Wasm/Symbol.zig"); -const Type = @import("../Type.zig"); -const Value = @import("../Value.zig"); const Zcu = @import("../Zcu.zig"); -const ZigObject = @import("Wasm/ZigObject.zig"); const codegen = @import("../codegen.zig"); const dev = @import("../dev.zig"); const link = @import("../link.zig"); const lldMain = @import("../main.zig").lldMain; const trace = @import("../tracy.zig").trace; const wasi_libc = @import("../wasi_libc.zig"); +const Value = @import("../Value.zig"); base: link.File, /// Null-terminated strings, indexes have type String and string_table provides /// lookup. +/// +/// There are a couple of sites that add things here without adding +/// corresponding string_table entries. For such cases, when implementing +/// serialization/deserialization, they should be adjusted to prefix that data +/// with a null byte so that deserialization does not attempt to create +/// string_table entries for them. Alternately those sites could be moved to +/// use a different byte array for this purpose. string_bytes: std.ArrayListUnmanaged(u8), +/// Sometimes we have logic that wants to borrow string bytes to store +/// arbitrary things in there. In this case it is not allowed to intern new +/// strings during this time. This safety lock is used to detect misuses. +string_bytes_lock: std.debug.SafetyLock = .{}, /// Omitted when serializing linker state. string_table: String.Table, /// Symbol name of the entry function to export @@ -62,2525 +84,3230 @@ export_table: bool, name: []const u8, /// If this is not null, an object file is created by LLVM and linked with LLD afterwards. llvm_object: ?LlvmObject.Ptr = null, -zig_object: ?*ZigObject, /// List of relocatable files to be linked into the final binary. objects: std.ArrayListUnmanaged(Object) = .{}, + +func_types: std.AutoArrayHashMapUnmanaged(FunctionType, void) = .empty, +/// Provides a mapping of both imports and provided functions to symbol name. +/// Local functions may be unnamed. +/// Key is symbol name, however the `FunctionImport` may have an name override for the import name. +object_function_imports: std.AutoArrayHashMapUnmanaged(String, FunctionImport) = .empty, +/// All functions for all objects. +object_functions: std.ArrayListUnmanaged(ObjectFunction) = .empty, + +/// Provides a mapping of both imports and provided globals to symbol name. +/// Local globals may be unnamed. 
+object_global_imports: std.AutoArrayHashMapUnmanaged(String, GlobalImport) = .empty, +/// All globals for all objects. +object_globals: std.ArrayListUnmanaged(ObjectGlobal) = .empty, + +/// All table imports for all objects. +object_table_imports: std.AutoArrayHashMapUnmanaged(String, TableImport) = .empty, +/// All parsed table sections for all objects. +object_tables: std.ArrayListUnmanaged(Table) = .empty, + +/// All memory imports for all objects. +object_memory_imports: std.AutoArrayHashMapUnmanaged(String, MemoryImport) = .empty, +/// All parsed memory sections for all objects. +object_memories: std.ArrayListUnmanaged(ObjectMemory) = .empty, + +/// All relocations from all objects concatenated. `relocs_start` marks the end +/// point of object relocations and start point of Zcu relocations. +object_relocations: std.MultiArrayList(ObjectRelocation) = .empty, + +/// List of initialization functions. These must be called in order of priority +/// by the (synthetic) `__wasm_call_ctors` function. +object_init_funcs: std.ArrayListUnmanaged(InitFunc) = .empty, + +/// The data section of an object has many segments. Each segment corresponds +/// logically to an object file's .data section, or .rodata section. In +/// the case of `-fdata-sections` there will be one segment per data symbol. +object_data_segments: std.ArrayListUnmanaged(ObjectDataSegment) = .empty, +/// Each segment has many data symbols, which correspond logically to global +/// constants. +object_datas: std.ArrayListUnmanaged(ObjectData) = .empty, +object_data_imports: std.AutoArrayHashMapUnmanaged(String, ObjectDataImport) = .empty, +/// Non-synthetic section that can essentially be mem-cpy'd into place after performing relocations. +object_custom_segments: std.AutoArrayHashMapUnmanaged(ObjectSectionIndex, CustomSegment) = .empty, + +/// All comdat information for all objects. +object_comdats: std.ArrayListUnmanaged(Comdat) = .empty, +/// A table that maps the relocations to be performed where the key represents +/// the section (across all objects) that the slice of relocations applies to. +object_relocations_table: std.AutoArrayHashMapUnmanaged(ObjectSectionIndex, ObjectRelocation.Slice) = .empty, +/// Incremented across all objects in order to enable calculation of `ObjectSectionIndex` values. +object_total_sections: u32 = 0, +/// All comdat symbols from all objects concatenated. +object_comdat_symbols: std.MultiArrayList(Comdat.Symbol) = .empty, + +/// Relocations to be emitted into an object file. Remains empty when not +/// emitting an object file. +out_relocs: std.MultiArrayList(OutReloc) = .empty, +/// List of locations within `string_bytes` that must be patched with the virtual +/// memory address of a Uav during `flush`. +/// When emitting an object file, `out_relocs` is used instead. +uav_fixups: std.ArrayListUnmanaged(UavFixup) = .empty, +/// List of locations within `string_bytes` that must be patched with the virtual +/// memory address of a Nav during `flush`. +/// When emitting an object file, `out_relocs` is used instead. +/// No functions here only global variables. +nav_fixups: std.ArrayListUnmanaged(NavFixup) = .empty, +/// When a nav reference is a function pointer, this tracks the required function +/// table entry index that needs to overwrite the code in the final output. +func_table_fixups: std.ArrayListUnmanaged(FuncTableFixup) = .empty, +/// Symbols to be emitted into an object file. Remains empty when not emitting +/// an object file. 
+symbol_table: std.AutoArrayHashMapUnmanaged(String, void) = .empty, + /// When importing objects from the host environment, a name must be supplied. -/// LLVM uses "env" by default when none is given. This would be a good default for Zig -/// to support existing code. -/// TODO: Allow setting this through a flag? -host_name: String, -/// List of symbols generated by the linker. -synthetic_symbols: std.ArrayListUnmanaged(Symbol) = .empty, -/// Maps atoms to their segment index -atoms: std.AutoHashMapUnmanaged(Segment.Index, Atom.Index) = .empty, -/// List of all atoms. -managed_atoms: std.ArrayListUnmanaged(Atom) = .empty, - -/// The count of imported functions. This number will be appended -/// to the function indexes as their index starts at the lowest non-extern function. -imported_functions_count: u32 = 0, -/// The count of imported wasm globals. This number will be appended -/// to the global indexes when sections are merged. -imported_globals_count: u32 = 0, -/// The count of imported tables. This number will be appended -/// to the table indexes when sections are merged. -imported_tables_count: u32 = 0, -/// Map of symbol locations, represented by its `Import` -imports: std.AutoHashMapUnmanaged(SymbolLoc, Import) = .empty, -/// Represents non-synthetic section entries. -/// Used for code, data and custom sections. -segments: std.ArrayListUnmanaged(Segment) = .empty, -/// Maps a data segment key (such as .rodata) to the index into `segments`. -data_segments: std.StringArrayHashMapUnmanaged(Segment.Index) = .empty, -/// A table of `NamedSegment` which provide meta data -/// about a data symbol such as its name where the key is -/// the segment index, which can be found from `data_segments` -segment_info: std.AutoArrayHashMapUnmanaged(Segment.Index, NamedSegment) = .empty, - -// Output sections -/// Output type section -func_types: std.ArrayListUnmanaged(std.wasm.Type) = .empty, -/// Output function section where the key is the original -/// function index and the value is function. -/// This allows us to map multiple symbols to the same function. -functions: std.AutoArrayHashMapUnmanaged( - struct { - /// `none` in the case of synthetic sections. - file: OptionalObjectId, - index: u32, - }, - struct { - func: std.wasm.Func, - sym_index: Symbol.Index, - }, -) = .{}, -/// Output global section -wasm_globals: std.ArrayListUnmanaged(std.wasm.Global) = .empty, +/// LLVM uses "env" by default when none is given. +/// This value is passed to object files since wasm tooling conventions provides +/// no way to specify the module name in the symbol table. +object_host_name: OptionalString, + /// Memory section memories: std.wasm.Memory = .{ .limits = .{ .min = 0, - .max = undefined, - .flags = 0, + .max = 0, + .flags = .{ .has_max = false, .is_shared = false }, } }, -/// Output table section -tables: std.ArrayListUnmanaged(std.wasm.Table) = .empty, -/// Output export section -exports: std.ArrayListUnmanaged(Export) = .empty, -/// List of initialization functions. These must be called in order of priority -/// by the (synthetic) __wasm_call_ctors function. -init_funcs: std.ArrayListUnmanaged(InitFuncLoc) = .empty, -/// Index to a function defining the entry of the wasm file -entry: ?u32 = null, - -/// Indirect function table, used to call function pointers -/// When this is non-zero, we must emit a table entry, -/// as well as an 'elements' section. 
-/// -/// Note: Key is symbol location, value represents the index into the table -function_table: std.AutoHashMapUnmanaged(SymbolLoc, u32) = .empty, - -/// All archive files that are lazy loaded. -/// e.g. when an undefined symbol references a symbol from the archive. -/// None of this data is serialized to disk because it is trivially reloaded -/// from unchanged archive files on the next start of the compiler process, -/// or if those files have changed, the prelink phase needs to be restarted. -lazy_archives: std.ArrayListUnmanaged(LazyArchive) = .empty, - -/// A map of global names to their symbol location -globals: std.AutoArrayHashMapUnmanaged(String, SymbolLoc) = .empty, -/// The list of GOT symbols and their location -got_symbols: std.ArrayListUnmanaged(SymbolLoc) = .empty, -/// Maps discarded symbols and their positions to the location of the symbol -/// it was resolved to -discarded: std.AutoHashMapUnmanaged(SymbolLoc, SymbolLoc) = .empty, -/// List of all symbol locations which have been resolved by the linker and will be emit -/// into the final binary. -resolved_symbols: std.AutoArrayHashMapUnmanaged(SymbolLoc, void) = .empty, -/// Symbols that remain undefined after symbol resolution. -undefs: std.AutoArrayHashMapUnmanaged(String, SymbolLoc) = .empty, -/// Maps a symbol's location to an atom. This can be used to find meta -/// data of a symbol, such as its size, or its offset to perform a relocation. -/// Undefined (and synthetic) symbols do not have an Atom and therefore cannot be mapped. -symbol_atom: std.AutoHashMapUnmanaged(SymbolLoc, Atom.Index) = .empty, /// `--verbose-link` output. /// Initialized on creation, appended to as inputs are added, printed during `flush`. /// String data is allocated into Compilation arena. dump_argv_list: std.ArrayListUnmanaged([]const u8), -/// Represents the index into `segments` where the 'code' section lives. -code_section_index: Segment.OptionalIndex = .none, -custom_sections: CustomSections, preloaded_strings: PreloadedStrings, -/// Type reflection is used on the field names to autopopulate each field -/// during initialization. -const PreloadedStrings = struct { - __heap_base: String, - __heap_end: String, - __indirect_function_table: String, - __linear_memory: String, - __stack_pointer: String, - __tls_align: String, - __tls_base: String, - __tls_size: String, - __wasm_apply_global_tls_relocs: String, - __wasm_call_ctors: String, - __wasm_init_memory: String, - __wasm_init_memory_flag: String, - __wasm_init_tls: String, - __zig_err_name_table: String, - __zig_err_names: String, - __zig_errors_len: String, - _initialize: String, - _start: String, - memory: String, +/// This field is used when emitting an object; `navs_exe` used otherwise. +/// Does not include externs since that data lives elsewhere. +navs_obj: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, ZcuDataObj) = .empty, +/// This field is unused when emitting an object; `navs_obj` used otherwise. +/// Does not include externs since that data lives elsewhere. +navs_exe: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, ZcuDataExe) = .empty, +/// Tracks all InternPool values referenced by codegen. Needed for outputting +/// the data segment. This one does not track ref count because object files +/// require using max LEB encoding for these references anyway. +uavs_obj: std.AutoArrayHashMapUnmanaged(InternPool.Index, ZcuDataObj) = .empty, +/// Tracks ref count to optimize LEB encodings for UAV references. 
+uavs_exe: std.AutoArrayHashMapUnmanaged(InternPool.Index, ZcuDataExe) = .empty, +/// Sparse table of uavs that need to be emitted with greater alignment than +/// the default for the type. +overaligned_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment) = .empty, +/// When the key is an enum type, this represents a `@tagName` function. +zcu_funcs: std.AutoArrayHashMapUnmanaged(InternPool.Index, ZcuFunc) = .empty, +nav_exports: std.AutoArrayHashMapUnmanaged(NavExport, Zcu.Export.Index) = .empty, +uav_exports: std.AutoArrayHashMapUnmanaged(UavExport, Zcu.Export.Index) = .empty, +imports: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void) = .empty, + +dwarf: ?Dwarf = null, + +flush_buffer: Flush = .{}, + +/// Empty until `prelink`. There it is populated based on object files. +/// Next, it is copied into `Flush.missing_exports` just before `flush` +/// and that data is used during `flush`. +missing_exports: std.AutoArrayHashMapUnmanaged(String, void) = .empty, +entry_resolution: FunctionImport.Resolution = .unresolved, + +/// Empty when outputting an object. +function_exports: std.AutoArrayHashMapUnmanaged(String, FunctionIndex) = .empty, +hidden_function_exports: std.AutoArrayHashMapUnmanaged(String, FunctionIndex) = .empty, +global_exports: std.ArrayListUnmanaged(GlobalExport) = .empty, +/// Tracks the value at the end of prelink. +global_exports_len: u32 = 0, + +/// Ordered list of non-import functions that will appear in the final binary. +/// Empty until prelink. +functions: std.AutoArrayHashMapUnmanaged(FunctionImport.Resolution, void) = .empty, +/// Tracks the value at the end of prelink, at which point `functions` +/// contains only object file functions, and nothing from the Zcu yet. +functions_end_prelink: u32 = 0, + +function_imports_len_prelink: u32 = 0, +data_imports_len_prelink: u32 = 0, +/// At the end of prelink, this is populated with needed functions from +/// objects. +/// +/// During the Zcu phase, entries are not deleted from this table +/// because doing so would be irreversible when a `deleteExport` call is +/// handled. However, entries are added during the Zcu phase when extern +/// functions are passed to `updateNav`. +/// +/// `flush` gets a copy of this table, and then Zcu exports are applied to +/// remove elements from the table, and the remainder are either undefined +/// symbol errors, or import section entries depending on the output mode. +function_imports: std.AutoArrayHashMapUnmanaged(String, FunctionImportId) = .empty, + +/// At the end of prelink, this is populated with data symbols needed by +/// objects. +/// +/// During the Zcu phase, entries are not deleted from this table +/// because doing so would be irreversible when a `deleteExport` call is +/// handled. However, entries are added during the Zcu phase when extern +/// functions are passed to `updateNav`. +/// +/// `flush` gets a copy of this table, and then Zcu exports are applied to +/// remove elements from the table, and the remainder are either undefined +/// symbol errors, or symbol table entries depending on the output mode. +data_imports: std.AutoArrayHashMapUnmanaged(String, DataImportId) = .empty, +/// Set of data symbols that will appear in the final binary. Used to populate +/// `Flush.data_segments` before sorting. +data_segments: std.AutoArrayHashMapUnmanaged(DataSegmentId, void) = .empty, + +/// Ordered list of non-import globals that will appear in the final binary. +/// Empty until prelink. 
+globals: std.AutoArrayHashMapUnmanaged(GlobalImport.Resolution, void) = .empty, +/// Tracks the value at the end of prelink, at which point `globals` +/// contains only object file globals, and nothing from the Zcu yet. +globals_end_prelink: u32 = 0, +global_imports: std.AutoArrayHashMapUnmanaged(String, GlobalImportId) = .empty, + +/// Ordered list of non-import tables that will appear in the final binary. +/// Empty until prelink. +tables: std.AutoArrayHashMapUnmanaged(TableImport.Resolution, void) = .empty, +table_imports: std.AutoArrayHashMapUnmanaged(String, TableImport.Index) = .empty, + +/// All functions that have had their address taken and therefore might be +/// called via a `call_indirect` function. +zcu_indirect_function_set: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void) = .empty, +object_indirect_function_import_set: std.AutoArrayHashMapUnmanaged(String, void) = .empty, +object_indirect_function_set: std.AutoArrayHashMapUnmanaged(ObjectFunctionIndex, void) = .empty, + +error_name_table_ref_count: u32 = 0, +tag_name_table_ref_count: u32 = 0, + +/// Set to true if any `GLOBAL_INDEX` relocation is encountered with +/// `SymbolFlags.tls` set to true. This is for objects only; final +/// value must be this OR'd with the same logic for zig functions +/// (set to true if any threadlocal global is used). +any_tls_relocs: bool = false, +any_passive_inits: bool = false, + +/// All MIR instructions for all Zcu functions. +mir_instructions: std.MultiArrayList(Mir.Inst) = .{}, +/// Corresponds to `mir_instructions`. +mir_extra: std.ArrayListUnmanaged(u32) = .empty, +/// All local types for all Zcu functions. +all_zcu_locals: std.ArrayListUnmanaged(std.wasm.Valtype) = .empty, + +params_scratch: std.ArrayListUnmanaged(std.wasm.Valtype) = .empty, +returns_scratch: std.ArrayListUnmanaged(std.wasm.Valtype) = .empty, + +/// All Zcu error names in order, null-terminated, concatenated. No need to +/// serialize; trivially reconstructed. +error_name_bytes: std.ArrayListUnmanaged(u8) = .empty, +/// For each Zcu error, in order, offset into `error_name_bytes` where the name +/// is stored. No need to serialize; trivially reconstructed. +error_name_offs: std.ArrayListUnmanaged(u32) = .empty, + +tag_name_bytes: std.ArrayListUnmanaged(u8) = .empty, +tag_name_offs: std.ArrayListUnmanaged(u32) = .empty, + +pub const TagNameOff = extern struct { + off: u32, + len: u32, }; -/// Type reflection is used on the field names to autopopulate each inner `name` field. -const CustomSections = struct { - @".debug_info": CustomSection, - @".debug_pubtypes": CustomSection, - @".debug_abbrev": CustomSection, - @".debug_line": CustomSection, - @".debug_str": CustomSection, - @".debug_pubnames": CustomSection, - @".debug_loc": CustomSection, - @".debug_ranges": CustomSection, +/// Index into `Wasm.zcu_indirect_function_set`. +pub const ZcuIndirectFunctionSetIndex = enum(u32) { + _, }; -const CustomSection = struct { - name: String, - index: Segment.OptionalIndex, +pub const UavFixup = extern struct { + uavs_exe_index: UavsExeIndex, + /// Index into `string_bytes`. + offset: u32, + addend: u32, }; -/// Index into string_bytes -pub const String = enum(u32) { - _, +pub const NavFixup = extern struct { + navs_exe_index: NavsExeIndex, + /// Index into `string_bytes`. 
+ offset: u32, + addend: u32, +}; - const Table = std.HashMapUnmanaged(String, void, TableContext, std.hash_map.default_max_load_percentage); +pub const FuncTableFixup = extern struct { + table_index: ZcuIndirectFunctionSetIndex, + /// Index into `string_bytes`. + offset: u32, +}; - const TableContext = struct { - bytes: []const u8, +/// Index into `objects`. +pub const ObjectIndex = enum(u32) { + _, - pub fn eql(_: @This(), a: String, b: String) bool { - return a == b; - } + pub fn ptr(index: ObjectIndex, wasm: *const Wasm) *Object { + return &wasm.objects.items[@intFromEnum(index)]; + } +}; - pub fn hash(ctx: @This(), key: String) u64 { - return std.hash_map.hashString(mem.sliceTo(ctx.bytes[@intFromEnum(key)..], 0)); - } - }; +/// Index into `Wasm.functions`. +pub const FunctionIndex = enum(u32) { + _, - const TableIndexAdapter = struct { - bytes: []const u8, + pub fn ptr(index: FunctionIndex, wasm: *const Wasm) *FunctionImport.Resolution { + return &wasm.functions.keys()[@intFromEnum(index)]; + } - pub fn eql(ctx: @This(), a: []const u8, b: String) bool { - return mem.eql(u8, a, mem.sliceTo(ctx.bytes[@intFromEnum(b)..], 0)); - } + pub fn fromIpNav(wasm: *const Wasm, nav_index: InternPool.Nav.Index) ?FunctionIndex { + return fromResolution(wasm, .fromIpNav(wasm, nav_index)); + } - pub fn hash(_: @This(), adapted_key: []const u8) u64 { - assert(mem.indexOfScalar(u8, adapted_key, 0) == null); - return std.hash_map.hashString(adapted_key); + pub fn fromTagNameType(wasm: *const Wasm, tag_type: InternPool.Index) ?FunctionIndex { + const zcu_func: ZcuFunc.Index = @enumFromInt(wasm.zcu_funcs.getIndex(tag_type) orelse return null); + return fromResolution(wasm, .pack(wasm, .{ .zcu_func = zcu_func })); + } + + pub fn fromSymbolName(wasm: *const Wasm, name: String) ?FunctionIndex { + if (wasm.object_function_imports.getPtr(name)) |import| { + return fromResolution(wasm, import.resolution); } - }; + if (wasm.function_exports.get(name)) |index| return index; + if (wasm.hidden_function_exports.get(name)) |index| return index; + return null; + } - pub fn toOptional(i: String) OptionalString { - const result: OptionalString = @enumFromInt(@intFromEnum(i)); - assert(result != .none); - return result; + pub fn fromResolution(wasm: *const Wasm, resolution: FunctionImport.Resolution) ?FunctionIndex { + const i = wasm.functions.getIndex(resolution) orelse return null; + return @enumFromInt(i); } }; -pub const OptionalString = enum(u32) { - none = std.math.maxInt(u32), +pub const GlobalExport = extern struct { + name: String, + global_index: GlobalIndex, +}; + +/// 0. Index into `Flush.function_imports` +/// 1. Index into `functions`. +/// +/// Note that function_imports indexes are subject to swap removals during +/// `flush`. 
+pub const OutputFunctionIndex = enum(u32) { _, - pub fn unwrap(i: OptionalString) ?String { - if (i == .none) return null; - return @enumFromInt(@intFromEnum(i)); + pub fn fromResolution(wasm: *const Wasm, resolution: FunctionImport.Resolution) ?OutputFunctionIndex { + return fromFunctionIndex(wasm, FunctionIndex.fromResolution(wasm, resolution) orelse return null); + } + + pub fn fromFunctionIndex(wasm: *const Wasm, index: FunctionIndex) OutputFunctionIndex { + return @enumFromInt(wasm.flush_buffer.function_imports.entries.len + @intFromEnum(index)); + } + + pub fn fromObjectFunction(wasm: *const Wasm, index: ObjectFunctionIndex) OutputFunctionIndex { + return fromResolution(wasm, .fromObjectFunction(wasm, index)).?; + } + + pub fn fromObjectFunctionHandlingWeak(wasm: *const Wasm, index: ObjectFunctionIndex) OutputFunctionIndex { + const ptr = index.ptr(wasm); + if (ptr.flags.binding == .weak) { + const name = ptr.name.unwrap().?; + const import = wasm.object_function_imports.getPtr(name).?; + assert(import.resolution != .unresolved); + return fromResolution(wasm, import.resolution).?; + } + return fromResolution(wasm, .fromObjectFunction(wasm, index)).?; + } + + pub fn fromIpIndex(wasm: *const Wasm, ip_index: InternPool.Index) OutputFunctionIndex { + const zcu = wasm.base.comp.zcu.?; + const ip = &zcu.intern_pool; + return switch (ip.indexToKey(ip_index)) { + .@"extern" => |ext| { + const name = wasm.getExistingString(ext.name.toSlice(ip)).?; + return fromSymbolName(wasm, name); + }, + else => fromResolution(wasm, .fromIpIndex(wasm, ip_index)).?, + }; + } + + pub fn fromIpNav(wasm: *const Wasm, nav_index: InternPool.Nav.Index) OutputFunctionIndex { + const zcu = wasm.base.comp.zcu.?; + const ip = &zcu.intern_pool; + const nav = ip.getNav(nav_index); + return fromIpIndex(wasm, nav.status.fully_resolved.val); + } + + pub fn fromTagNameType(wasm: *const Wasm, tag_type: InternPool.Index) OutputFunctionIndex { + return fromFunctionIndex(wasm, FunctionIndex.fromTagNameType(wasm, tag_type).?); + } + + pub fn fromSymbolName(wasm: *const Wasm, name: String) OutputFunctionIndex { + if (wasm.flush_buffer.function_imports.getIndex(name)) |i| return @enumFromInt(i); + return fromFunctionIndex(wasm, FunctionIndex.fromSymbolName(wasm, name).?); } }; -/// Index into objects array or the zig object. -pub const ObjectId = enum(u16) { - zig_object = std.math.maxInt(u16) - 1, +/// Index into `Wasm.globals`. +pub const GlobalIndex = enum(u32) { _, - pub fn toOptional(i: ObjectId) OptionalObjectId { - const result: OptionalObjectId = @enumFromInt(@intFromEnum(i)); - assert(result != .none); - return result; + /// This is only accurate when not emitting an object and there is a Zcu. + pub const stack_pointer: GlobalIndex = @enumFromInt(0); + + /// Same as `stack_pointer` but with a safety assertion. 
+ pub fn stackPointer(wasm: *const Wasm) ObjectGlobal.Index { + const comp = wasm.base.comp; + assert(comp.config.output_mode != .Obj); + assert(comp.zcu != null); + return .stack_pointer; + } + + pub fn ptr(index: GlobalIndex, f: *const Flush) *Wasm.GlobalImport.Resolution { + return &f.globals.items[@intFromEnum(index)]; + } + + pub fn fromIpNav(wasm: *const Wasm, nav_index: InternPool.Nav.Index) ?GlobalIndex { + const i = wasm.globals.getIndex(.fromIpNav(wasm, nav_index)) orelse return null; + return @enumFromInt(i); + } + + pub fn fromObjectGlobal(wasm: *const Wasm, i: ObjectGlobalIndex) GlobalIndex { + return @enumFromInt(wasm.globals.getIndex(.fromObjectGlobal(wasm, i)).?); + } + + pub fn fromObjectGlobalHandlingWeak(wasm: *const Wasm, index: ObjectGlobalIndex) GlobalIndex { + const global = index.ptr(wasm); + return if (global.flags.binding == .weak) + fromSymbolName(wasm, global.name.unwrap().?) + else + fromObjectGlobal(wasm, index); + } + + pub fn fromSymbolName(wasm: *const Wasm, name: String) GlobalIndex { + const import = wasm.object_global_imports.getPtr(name).?; + return @enumFromInt(wasm.globals.getIndex(import.resolution).?); } }; -/// Optional index into objects array or the zig object. -pub const OptionalObjectId = enum(u16) { - zig_object = std.math.maxInt(u16) - 1, - none = std.math.maxInt(u16), +/// Index into `tables`. +pub const TableIndex = enum(u32) { _, - pub fn unwrap(i: OptionalObjectId) ?ObjectId { - if (i == .none) return null; - return @enumFromInt(@intFromEnum(i)); + pub fn ptr(index: TableIndex, f: *const Flush) *Wasm.TableImport.Resolution { + return &f.tables.items[@intFromEnum(index)]; } -}; -/// None of this data is serialized since it can be re-loaded from disk, or if -/// it has been changed, the data must be discarded. -const LazyArchive = struct { - path: Path, - file_contents: []const u8, - archive: Archive, + pub fn fromObjectTable(wasm: *const Wasm, i: ObjectTableIndex) TableIndex { + return @enumFromInt(wasm.tables.getIndex(.fromObjectTable(i)).?); + } - fn deinit(la: *LazyArchive, gpa: Allocator) void { - la.archive.deinit(gpa); - gpa.free(la.path.sub_path); - gpa.free(la.file_contents); - la.* = undefined; + pub fn fromSymbolName(wasm: *const Wasm, name: String) TableIndex { + const import = wasm.object_table_imports.getPtr(name).?; + return @enumFromInt(wasm.tables.getIndex(import.resolution).?); } }; -pub const Segment = struct { - alignment: Alignment, - size: u32, - offset: u32, - flags: u32, +/// The first N indexes correspond to input objects (`objects`) array. +/// After that, the indexes correspond to the `source_locations` array, +/// representing a location in a Zig source file that can be pinpointed +/// precisely via AST node and token. +pub const SourceLocation = enum(u32) { + /// From the Zig compilation unit but no precise source location. + zig_object_nofile = std.math.maxInt(u32) - 1, + none = std.math.maxInt(u32), + _, - const Index = enum(u32) { + /// Index into `source_locations`. 
+ pub const Index = enum(u32) { _, - - pub fn toOptional(i: Index) OptionalIndex { - const result: OptionalIndex = @enumFromInt(@intFromEnum(i)); - assert(result != .none); - return result; - } }; - const OptionalIndex = enum(u32) { - none = std.math.maxInt(u32), - _, - - pub fn unwrap(i: OptionalIndex) ?Index { - if (i == .none) return null; - return @enumFromInt(@intFromEnum(i)); - } + pub const Unpacked = union(enum) { + none, + zig_object_nofile, + object_index: ObjectIndex, + source_location_index: Index, }; - pub const Flag = enum(u32) { - WASM_DATA_SEGMENT_IS_PASSIVE = 0x01, - WASM_DATA_SEGMENT_HAS_MEMINDEX = 0x02, - }; + pub fn pack(unpacked: Unpacked, wasm: *const Wasm) SourceLocation { + _ = wasm; + return switch (unpacked) { + .zig_object_nofile => .zig_object_nofile, + .none => .none, + .object_index => |object_index| @enumFromInt(@intFromEnum(object_index)), + .source_location_index => @panic("TODO"), + }; + } + + pub fn unpack(sl: SourceLocation, wasm: *const Wasm) Unpacked { + return switch (sl) { + .zig_object_nofile => .zig_object_nofile, + .none => .none, + _ => { + const i = @intFromEnum(sl); + if (i < wasm.objects.items.len) return .{ .object_index = @enumFromInt(i) }; + const sl_index = i - wasm.objects.items.len; + _ = sl_index; + @panic("TODO"); + }, + }; + } - pub fn isPassive(segment: Segment) bool { - return segment.flags & @intFromEnum(Flag.WASM_DATA_SEGMENT_IS_PASSIVE) != 0; + pub fn fromObject(object_index: ObjectIndex, wasm: *const Wasm) SourceLocation { + return pack(.{ .object_index = object_index }, wasm); } - /// For a given segment, determines if it needs passive initialization - fn needsPassiveInitialization(segment: Segment, import_mem: bool, name: []const u8) bool { - if (import_mem and !std.mem.eql(u8, name, ".bss")) { - return true; + pub fn addError(sl: SourceLocation, wasm: *Wasm, comptime f: []const u8, args: anytype) void { + const diags = &wasm.base.comp.link_diags; + switch (sl.unpack(wasm)) { + .none => unreachable, + .zig_object_nofile => diags.addError("zig compilation unit: " ++ f, args), + .object_index => |i| diags.addError("{}: " ++ f, .{i.ptr(wasm).path} ++ args), + .source_location_index => @panic("TODO"), } - return segment.isPassive(); } -}; -pub const SymbolLoc = struct { - /// The index of the symbol within the specified file - index: Symbol.Index, - /// The index of the object file where the symbol resides. 
- file: OptionalObjectId, -}; + pub fn addNote( + sl: SourceLocation, + err: *link.Diags.ErrorWithNotes, + comptime f: []const u8, + args: anytype, + ) void { + err.addNote(f, args); + const err_msg = &err.diags.msgs.items[err.index]; + err_msg.notes[err.note_slot - 1].source_location = .{ .wasm = sl }; + } -/// From a given location, returns the corresponding symbol in the wasm binary -pub fn symbolLocSymbol(wasm: *const Wasm, loc: SymbolLoc) *Symbol { - if (wasm.discarded.get(loc)) |new_loc| { - return symbolLocSymbol(wasm, new_loc); + pub fn fail(sl: SourceLocation, diags: *link.Diags, comptime format: []const u8, args: anytype) error{LinkFailure} { + return diags.failSourceLocation(.{ .wasm = sl }, format, args); } - return switch (loc.file) { - .none => &wasm.synthetic_symbols.items[@intFromEnum(loc.index)], - .zig_object => wasm.zig_object.?.symbol(loc.index), - _ => &wasm.objects.items[@intFromEnum(loc.file)].symtable[@intFromEnum(loc.index)], + + pub fn string( + sl: SourceLocation, + msg: []const u8, + bundle: *std.zig.ErrorBundle.Wip, + wasm: *const Wasm, + ) Allocator.Error!std.zig.ErrorBundle.String { + return switch (sl.unpack(wasm)) { + .none => try bundle.addString(msg), + .zig_object_nofile => try bundle.printString("zig compilation unit: {s}", .{msg}), + .object_index => |i| { + const obj = i.ptr(wasm); + return if (obj.archive_member_name.slice(wasm)) |obj_name| + try bundle.printString("{} ({s}): {s}", .{ obj.path, std.fs.path.basename(obj_name), msg }) + else + try bundle.printString("{}: {s}", .{ obj.path, msg }); + }, + .source_location_index => @panic("TODO"), + }; + } +}; + +/// The lower bits of this ABI-match the flags here: +/// https://github.com/WebAssembly/tool-conventions/blob/df8d737539eb8a8f446ba5eab9dc670c40dfb81e/Linking.md#symbol-table-subsection +/// The upper bits are used for nefarious purposes. +pub const SymbolFlags = packed struct(u32) { + binding: Binding = .strong, + /// Indicating that this is a hidden symbol. Hidden symbols are not to be + /// exported when performing the final link, but may be linked to other + /// modules. + visibility_hidden: bool = false, + padding0: u1 = 0, + /// For non-data symbols, this must match whether the symbol is an import + /// or is defined; for data symbols, determines whether a segment is + /// specified. + undefined: bool = false, + /// The symbol is intended to be exported from the wasm module to the host + /// environment. This differs from the visibility flags in that it affects + /// static linking. + exported: bool = false, + /// The symbol uses an explicit symbol name, rather than reusing the name + /// from a wasm import. This allows it to remap imports from foreign + /// WebAssembly modules into local symbols with different names. + explicit_name: bool = false, + /// The symbol is intended to be included in the linker output, regardless + /// of whether it is used by the program. Same meaning as `retain`. + no_strip: bool = false, + /// The symbol resides in thread local storage. + tls: bool = false, + /// The symbol represents an absolute address. This means its offset is + /// relative to the start of the wasm memory as opposed to being relative + /// to a data segment. + absolute: bool = false, + + // Above here matches the tooling conventions ABI. + + padding1: u13 = 0, + /// Zig-specific. Dead things are allowed to be garbage collected. + alive: bool = false, + /// Zig-specific. This symbol comes from an object that must be included in + /// the final link. 
+ must_link: bool = false, + /// Zig-specific. + global_type: GlobalType4 = .zero, + /// Zig-specific. + limits_has_max: bool = false, + /// Zig-specific. + limits_is_shared: bool = false, + /// Zig-specific. + ref_type: RefType1 = .funcref, + + pub const Binding = enum(u2) { + strong = 0, + /// Indicating that this is a weak symbol. When linking multiple modules + /// defining the same symbol, all weak definitions are discarded if any + /// strong definitions exist; then if multiple weak definitions exist all + /// but one (unspecified) are discarded; and finally it is an error if more + /// than one definition remains. + weak = 1, + /// Indicating that this is a local symbol. Local symbols are not to be + /// exported, or linked to other modules/sections. The names of all + /// non-local symbols must be unique, but the names of local symbols + /// are not considered for uniqueness. A local function or global + /// symbol cannot reference an import. + local = 2, }; -} -/// From a given location, returns the name of the symbol. -pub fn symbolLocName(wasm: *const Wasm, loc: SymbolLoc) [:0]const u8 { - return wasm.stringSlice(wasm.symbolLocSymbol(loc).name); -} + pub fn initZigSpecific(flags: *SymbolFlags, must_link: bool, no_strip: bool) void { + flags.no_strip = no_strip; + flags.alive = false; + flags.must_link = must_link; + flags.global_type = .zero; + flags.limits_has_max = false; + flags.limits_is_shared = false; + flags.ref_type = .funcref; + } -/// From a given symbol location, returns the final location. -/// e.g. when a symbol was resolved and replaced by the symbol -/// in a different file, this will return said location. -/// If the symbol wasn't replaced by another, this will return -/// the given location itwasm. -pub fn symbolLocFinalLoc(wasm: *const Wasm, loc: SymbolLoc) SymbolLoc { - if (wasm.discarded.get(loc)) |new_loc| { - return symbolLocFinalLoc(wasm, new_loc); + pub fn isIncluded(flags: SymbolFlags, is_dynamic: bool) bool { + return flags.exported or + (is_dynamic and !flags.visibility_hidden) or + (flags.no_strip and flags.must_link); } - return loc; -} -// Contains the location of the function symbol, as well as -/// the priority itself of the initialization function. -pub const InitFuncLoc = struct { - /// object file index in the list of objects. - /// Unlike `SymbolLoc` this cannot be `null` as we never define - /// our own ctors. - file: ObjectId, - /// Symbol index within the corresponding object file. - index: Symbol.Index, - /// The priority in which the constructor must be called. - priority: u32, + pub fn isExported(flags: SymbolFlags, is_dynamic: bool) bool { + if (flags.undefined or flags.binding == .local) return false; + if (is_dynamic and !flags.visibility_hidden) return true; + return flags.exported; + } - /// From a given `InitFuncLoc` returns the corresponding function symbol - fn getSymbol(loc: InitFuncLoc, wasm: *const Wasm) *Symbol { - return wasm.symbolLocSymbol(getSymbolLoc(loc)); + /// Returns the name as how it will be output into the final object + /// file or binary. When `merge` is true, this will return the + /// short name. i.e. ".rodata". When false, it returns the entire name instead. 
+ pub fn outputName(flags: SymbolFlags, name: []const u8, merge: bool) []const u8 { + if (flags.tls) return ".tdata"; + if (!merge) return name; + if (mem.startsWith(u8, name, ".rodata.")) return ".rodata"; + if (mem.startsWith(u8, name, ".text.")) return ".text"; + if (mem.startsWith(u8, name, ".data.")) return ".data"; + if (mem.startsWith(u8, name, ".bss.")) return ".bss"; + return name; + } + + /// Masks off the Zig-specific stuff. + pub fn toAbiInteger(flags: SymbolFlags) u32 { + var copy = flags; + copy.initZigSpecific(false, false); + return @bitCast(copy); } +}; - /// Turns the given `InitFuncLoc` into a `SymbolLoc` - fn getSymbolLoc(loc: InitFuncLoc) SymbolLoc { +pub const GlobalType4 = packed struct(u4) { + valtype: Valtype3, + mutable: bool, + + pub const zero: GlobalType4 = @bitCast(@as(u4, 0)); + + pub fn to(gt: GlobalType4) ObjectGlobal.Type { return .{ - .file = loc.file.toOptional(), - .index = loc.index, + .valtype = gt.valtype.to(), + .mutable = gt.mutable, }; } +}; - /// Returns true when `lhs` has a higher priority (e.i. value closer to 0) than `rhs`. - fn lessThan(ctx: void, lhs: InitFuncLoc, rhs: InitFuncLoc) bool { - _ = ctx; - return lhs.priority < rhs.priority; +pub const Valtype3 = enum(u3) { + i32, + i64, + f32, + f64, + v128, + + pub fn from(v: std.wasm.Valtype) Valtype3 { + return switch (v) { + .i32 => .i32, + .i64 => .i64, + .f32 => .f32, + .f64 => .f64, + .v128 => .v128, + }; + } + + pub fn to(v: Valtype3) std.wasm.Valtype { + return switch (v) { + .i32 => .i32, + .i64 => .i64, + .f32 => .f32, + .f64 => .f64, + .v128 => .v128, + }; } }; -pub fn open( - arena: Allocator, - comp: *Compilation, - emit: Path, - options: link.File.OpenOptions, -) !*Wasm { - // TODO: restore saved linker state, don't truncate the file, and - // participate in incremental compilation. - return createEmpty(arena, comp, emit, options); -} +/// Index into `Wasm.navs_obj`. +pub const NavsObjIndex = enum(u32) { + _, -pub fn createEmpty( - arena: Allocator, - comp: *Compilation, - emit: Path, - options: link.File.OpenOptions, -) !*Wasm { - const gpa = comp.gpa; - const target = comp.root_mod.resolved_target.result; - assert(target.ofmt == .wasm); + pub fn key(i: @This(), wasm: *const Wasm) *InternPool.Nav.Index { + return &wasm.navs_obj.keys()[@intFromEnum(i)]; + } - const use_lld = build_options.have_llvm and comp.config.use_lld; - const use_llvm = comp.config.use_llvm; - const output_mode = comp.config.output_mode; - const shared_memory = comp.config.shared_memory; - const wasi_exec_model = comp.config.wasi_exec_model; + pub fn value(i: @This(), wasm: *const Wasm) *ZcuDataObj { + return &wasm.navs_obj.values()[@intFromEnum(i)]; + } - // If using LLD to link, this code should produce an object file so that it - // can be passed to LLD. - // If using LLVM to generate the object file for the zig compilation unit, - // we need a place to put the object file so that it can be subsequently - // handled. 
- const zcu_object_sub_path = if (!use_lld and !use_llvm) - null - else - try std.fmt.allocPrint(arena, "{s}.o", .{emit.sub_path}); + pub fn name(i: @This(), wasm: *const Wasm) [:0]const u8 { + const zcu = wasm.base.comp.zcu.?; + const ip = &zcu.intern_pool; + const nav = ip.getNav(i.key(wasm).*); + return nav.fqn.toSlice(ip); + } +}; - const wasm = try arena.create(Wasm); - wasm.* = .{ - .base = .{ - .tag = .wasm, - .comp = comp, - .emit = emit, - .zcu_object_sub_path = zcu_object_sub_path, - .gc_sections = options.gc_sections orelse (output_mode != .Obj), - .print_gc_sections = options.print_gc_sections, - .stack_size = options.stack_size orelse switch (target.os.tag) { - .freestanding => 1 * 1024 * 1024, // 1 MiB - else => 16 * 1024 * 1024, // 16 MiB - }, - .allow_shlib_undefined = options.allow_shlib_undefined orelse false, - .file = null, - .disable_lld_caching = options.disable_lld_caching, - .build_id = options.build_id, - }, - .name = undefined, - .string_table = .empty, - .string_bytes = .empty, - .import_table = options.import_table, - .export_table = options.export_table, - .import_symbols = options.import_symbols, - .export_symbol_names = options.export_symbol_names, - .global_base = options.global_base, - .initial_memory = options.initial_memory, - .max_memory = options.max_memory, +/// Index into `Wasm.navs_exe`. +pub const NavsExeIndex = enum(u32) { + _, - .entry_name = undefined, - .zig_object = null, - .dump_argv_list = .empty, - .host_name = undefined, - .custom_sections = undefined, - .preloaded_strings = undefined, - }; - if (use_llvm and comp.config.have_zcu) { - wasm.llvm_object = try LlvmObject.create(arena, comp); + pub fn key(i: @This(), wasm: *const Wasm) *InternPool.Nav.Index { + return &wasm.navs_exe.keys()[@intFromEnum(i)]; } - errdefer wasm.base.destroy(); - wasm.host_name = try wasm.internString("env"); - - inline for (@typeInfo(CustomSections).@"struct".fields) |field| { - @field(wasm.custom_sections, field.name) = .{ - .index = .none, - .name = try wasm.internString(field.name), - }; + pub fn value(i: @This(), wasm: *const Wasm) *ZcuDataExe { + return &wasm.navs_exe.values()[@intFromEnum(i)]; } - inline for (@typeInfo(PreloadedStrings).@"struct".fields) |field| { - @field(wasm.preloaded_strings, field.name) = try wasm.internString(field.name); + pub fn name(i: @This(), wasm: *const Wasm) [:0]const u8 { + const zcu = wasm.base.comp.zcu.?; + const ip = &zcu.intern_pool; + const nav = ip.getNav(i.key(wasm).*); + return nav.fqn.toSlice(ip); } +}; - wasm.entry_name = switch (options.entry) { - .disabled => .none, - .default => if (output_mode != .Exe) .none else defaultEntrySymbolName(&wasm.preloaded_strings, wasi_exec_model).toOptional(), - .enabled => defaultEntrySymbolName(&wasm.preloaded_strings, wasi_exec_model).toOptional(), - .named => |name| (try wasm.internString(name)).toOptional(), - }; +/// Index into `Wasm.uavs_obj`. +pub const UavsObjIndex = enum(u32) { + _, - if (use_lld and (use_llvm or !comp.config.have_zcu)) { - // LLVM emits the object file (if any); LLD links it into the final product. - return wasm; + pub fn key(i: @This(), wasm: *const Wasm) *InternPool.Index { + return &wasm.uavs_obj.keys()[@intFromEnum(i)]; } - // What path should this Wasm linker code output to? - // If using LLD to link, this code should produce an object file so that it - // can be passed to LLD. - const sub_path = if (use_lld) zcu_object_sub_path.? 
else emit.sub_path; + pub fn value(i: @This(), wasm: *const Wasm) *ZcuDataObj { + return &wasm.uavs_obj.values()[@intFromEnum(i)]; + } +}; - wasm.base.file = try emit.root_dir.handle.createFile(sub_path, .{ - .truncate = true, - .read = true, - .mode = if (fs.has_executable_bit) - if (target.os.tag == .wasi and output_mode == .Exe) - fs.File.default_mode | 0b001_000_000 - else - fs.File.default_mode - else - 0, - }); - wasm.name = sub_path; +/// Index into `Wasm.uavs_exe`. +pub const UavsExeIndex = enum(u32) { + _, - // create stack pointer symbol - { - const loc = try wasm.createSyntheticSymbol(wasm.preloaded_strings.__stack_pointer, .global); - const symbol = wasm.symbolLocSymbol(loc); - // For object files we will import the stack pointer symbol - if (output_mode == .Obj) { - symbol.setUndefined(true); - symbol.index = @intCast(wasm.imported_globals_count); - wasm.imported_globals_count += 1; - try wasm.imports.putNoClobber(gpa, loc, .{ - .module_name = wasm.host_name, - .name = symbol.name, - .kind = .{ .global = .{ .valtype = .i32, .mutable = true } }, - }); - } else { - symbol.index = @intCast(wasm.imported_globals_count + wasm.wasm_globals.items.len); - symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN); - const global = try wasm.wasm_globals.addOne(gpa); - global.* = .{ - .global_type = .{ - .valtype = .i32, - .mutable = true, - }, - .init = .{ .i32_const = 0 }, - }; - } + pub fn key(i: @This(), wasm: *const Wasm) *InternPool.Index { + return &wasm.uavs_exe.keys()[@intFromEnum(i)]; + } + + pub fn value(i: @This(), wasm: *const Wasm) *ZcuDataExe { + return &wasm.uavs_exe.values()[@intFromEnum(i)]; + } +}; + +/// Used when emitting a relocatable object. +pub const ZcuDataObj = extern struct { + code: DataPayload, + relocs: OutReloc.Slice, +}; + +/// Used when not emitting a relocatable object. +pub const ZcuDataExe = extern struct { + code: DataPayload, + /// Tracks how many references there are for the purposes of sorting data segments. + count: u32, +}; + +/// An abstraction for calling `lowerZcuData` repeatedly until all data entries +/// are populated. 
+const ZcuDataStarts = struct { + uavs_i: u32, + + fn init(wasm: *const Wasm) ZcuDataStarts { + const comp = wasm.base.comp; + const is_obj = comp.config.output_mode == .Obj; + return if (is_obj) initObj(wasm) else initExe(wasm); } - // create indirect function pointer symbol - { - const loc = try wasm.createSyntheticSymbol(wasm.preloaded_strings.__indirect_function_table, .table); - const symbol = wasm.symbolLocSymbol(loc); - const table: std.wasm.Table = .{ - .limits = .{ .flags = 0, .min = 0, .max = undefined }, // will be overwritten during `mapFunctionTable` - .reftype = .funcref, + fn initObj(wasm: *const Wasm) ZcuDataStarts { + return .{ + .uavs_i = @intCast(wasm.uavs_obj.entries.len), }; - if (output_mode == .Obj or options.import_table) { - symbol.setUndefined(true); - symbol.index = @intCast(wasm.imported_tables_count); - wasm.imported_tables_count += 1; - try wasm.imports.put(gpa, loc, .{ - .module_name = wasm.host_name, - .name = symbol.name, - .kind = .{ .table = table }, - }); - } else { - symbol.index = @as(u32, @intCast(wasm.imported_tables_count + wasm.tables.items.len)); - try wasm.tables.append(gpa, table); - if (wasm.export_table) { - symbol.setFlag(.WASM_SYM_EXPORTED); - } else { - symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN); - } - } } - // create __wasm_call_ctors - { - const loc = try wasm.createSyntheticSymbol(wasm.preloaded_strings.__wasm_call_ctors, .function); - const symbol = wasm.symbolLocSymbol(loc); - symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN); - // we do not know the function index until after we merged all sections. - // Therefore we set `symbol.index` and create its corresponding references - // at the end during `initializeCallCtorsFunction`. + fn initExe(wasm: *const Wasm) ZcuDataStarts { + return .{ + .uavs_i = @intCast(wasm.uavs_exe.entries.len), + }; } - // shared-memory symbols for TLS support - if (shared_memory) { - { - const loc = try wasm.createSyntheticSymbol(wasm.preloaded_strings.__tls_base, .global); - const symbol = wasm.symbolLocSymbol(loc); - symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN); - symbol.index = @intCast(wasm.imported_globals_count + wasm.wasm_globals.items.len); - symbol.mark(); - try wasm.wasm_globals.append(gpa, .{ - .global_type = .{ .valtype = .i32, .mutable = true }, - .init = .{ .i32_const = undefined }, - }); - } - { - const loc = try wasm.createSyntheticSymbol(wasm.preloaded_strings.__tls_size, .global); - const symbol = wasm.symbolLocSymbol(loc); - symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN); - symbol.index = @intCast(wasm.imported_globals_count + wasm.wasm_globals.items.len); - symbol.mark(); - try wasm.wasm_globals.append(gpa, .{ - .global_type = .{ .valtype = .i32, .mutable = false }, - .init = .{ .i32_const = undefined }, - }); - } - { - const loc = try wasm.createSyntheticSymbol(wasm.preloaded_strings.__tls_align, .global); - const symbol = wasm.symbolLocSymbol(loc); - symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN); - symbol.index = @intCast(wasm.imported_globals_count + wasm.wasm_globals.items.len); - symbol.mark(); - try wasm.wasm_globals.append(gpa, .{ - .global_type = .{ .valtype = .i32, .mutable = false }, - .init = .{ .i32_const = undefined }, - }); - } - { - const loc = try wasm.createSyntheticSymbol(wasm.preloaded_strings.__wasm_init_tls, .function); - const symbol = wasm.symbolLocSymbol(loc); - symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN); + fn finish(zds: ZcuDataStarts, wasm: *Wasm, pt: Zcu.PerThread) !void { + const comp = wasm.base.comp; + const is_obj = comp.config.output_mode == .Obj; + return if (is_obj) 
finishObj(zds, wasm, pt) else finishExe(zds, wasm, pt); + } + + fn finishObj(zds: ZcuDataStarts, wasm: *Wasm, pt: Zcu.PerThread) !void { + var uavs_i = zds.uavs_i; + while (uavs_i < wasm.uavs_obj.entries.len) : (uavs_i += 1) { + // Call to `lowerZcuData` here possibly creates more entries in these tables. + wasm.uavs_obj.values()[uavs_i] = try lowerZcuData(wasm, pt, wasm.uavs_obj.keys()[uavs_i]); } } - if (comp.zcu) |zcu| { - if (!use_llvm) { - const zig_object = try arena.create(ZigObject); - wasm.zig_object = zig_object; - zig_object.* = .{ - .path = .{ - .root_dir = std.Build.Cache.Directory.cwd(), - .sub_path = try std.fmt.allocPrint(gpa, "{s}.o", .{fs.path.stem(zcu.main_mod.root_src_path)}), - }, - .stack_pointer_sym = .null, - }; - try zig_object.init(wasm); + fn finishExe(zds: ZcuDataStarts, wasm: *Wasm, pt: Zcu.PerThread) !void { + var uavs_i = zds.uavs_i; + while (uavs_i < wasm.uavs_exe.entries.len) : (uavs_i += 1) { + // Call to `lowerZcuData` here possibly creates more entries in these tables. + const zcu_data = try lowerZcuData(wasm, pt, wasm.uavs_exe.keys()[uavs_i]); + wasm.uavs_exe.values()[uavs_i].code = zcu_data.code; } } +}; - return wasm; -} +pub const ZcuFunc = union { + function: CodeGen.Function, + tag_name: TagName, -pub fn getTypeIndex(wasm: *const Wasm, func_type: std.wasm.Type) ?u32 { - var index: u32 = 0; - while (index < wasm.func_types.items.len) : (index += 1) { - if (wasm.func_types.items[index].eql(func_type)) return index; - } - return null; -} + pub const TagName = extern struct { + symbol_name: String, + type_index: FunctionType.Index, + /// Index into `Wasm.tag_name_offs`. + table_index: u32, + }; -/// Either creates a new import, or updates one if existing. -/// When `type_index` is non-null, we assume an external function. -/// In all other cases, a data-symbol will be created instead. -pub fn addOrUpdateImport( - wasm: *Wasm, - /// Name of the import - name: []const u8, - /// Symbol index that is external - symbol_index: Symbol.Index, - /// Optional library name (i.e. `extern "c" fn foo() void` - lib_name: ?[:0]const u8, - /// The index of the type that represents the function signature - /// when the extern is a function. When this is null, a data-symbol - /// is asserted instead. - type_index: ?u32, -) !void { - return wasm.zig_object.?.addOrUpdateImport(wasm, name, symbol_index, lib_name, type_index); -} + /// Index into `Wasm.zcu_funcs`. + /// Note that swapRemove is sometimes performed on `zcu_funcs`. + pub const Index = enum(u32) { + _, -/// For a given name, creates a new global synthetic symbol. -/// Leaves index undefined and the default flags (0). 
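The `ZcuDataStarts` helper above relies on one property: lowering an entry may append further entries to the same table, so `finishObj`/`finishExe` re-read the table length on every iteration until nothing new shows up. A minimal, self-contained sketch of that pattern follows, using a plain `std.AutoArrayHashMapUnmanaged` and a hypothetical `lowerOne` standing in for `lowerZcuData`; it illustrates the loop shape only, not the linker's actual data types.

const std = @import("std");

// Hypothetical stand-in for `lowerZcuData`: lowering one entry may discover
// further entries and append them to the same table before returning.
fn lowerOne(gpa: std.mem.Allocator, table: *std.AutoArrayHashMapUnmanaged(u32, u32), key: u32) !u32 {
    if (key < 3) try table.put(gpa, key + 10, 0);
    return key * 2;
}

test "drain entries appended during lowering" {
    const gpa = std.testing.allocator;
    var table: std.AutoArrayHashMapUnmanaged(u32, u32) = .{};
    defer table.deinit(gpa);

    try table.put(gpa, 1, 0);
    try table.put(gpa, 2, 0);

    // Same shape as `ZcuDataStarts.finishObj`/`finishExe`: the loop bound is
    // re-read every iteration, so entries appended by `lowerOne` are also
    // processed before the loop terminates. (The real code additionally
    // records a starting index so already-lowered entries are skipped.)
    var i: u32 = 0;
    while (i < table.count()) : (i += 1) {
        const lowered = try lowerOne(gpa, &table, table.keys()[i]);
        table.values()[i] = lowered;
    }

    try std.testing.expectEqual(@as(usize, 4), table.count());
}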
-fn createSyntheticSymbol(wasm: *Wasm, name: String, tag: Symbol.Tag) !SymbolLoc { - return wasm.createSyntheticSymbolOffset(name, tag); -} + pub fn key(i: @This(), wasm: *const Wasm) *InternPool.Index { + return &wasm.zcu_funcs.keys()[@intFromEnum(i)]; + } -fn createSyntheticSymbolOffset(wasm: *Wasm, name_offset: String, tag: Symbol.Tag) !SymbolLoc { - const sym_index: Symbol.Index = @enumFromInt(wasm.synthetic_symbols.items.len); - const loc: SymbolLoc = .{ .index = sym_index, .file = .none }; - const gpa = wasm.base.comp.gpa; - try wasm.synthetic_symbols.append(gpa, .{ - .name = name_offset, - .flags = 0, - .tag = tag, - .index = undefined, - .virtual_address = undefined, - }); - try wasm.resolved_symbols.putNoClobber(gpa, loc, {}); - try wasm.globals.put(gpa, name_offset, loc); - return loc; -} + pub fn value(i: @This(), wasm: *const Wasm) *ZcuFunc { + return &wasm.zcu_funcs.values()[@intFromEnum(i)]; + } -fn openParseObjectReportingFailure(wasm: *Wasm, path: Path) void { - const diags = &wasm.base.comp.link_diags; - const obj = link.openObject(path, false, false) catch |err| { - switch (diags.failParse(path, "failed to open object: {s}", .{@errorName(err)})) { - error.LinkFailure => return, + pub fn name(i: @This(), wasm: *const Wasm) [:0]const u8 { + const zcu = wasm.base.comp.zcu.?; + const ip = &zcu.intern_pool; + const ip_index = i.key(wasm).*; + switch (ip.indexToKey(ip_index)) { + .func => |func| { + const nav = ip.getNav(func.owner_nav); + return nav.fqn.toSlice(ip); + }, + .enum_type => { + return i.value(wasm).tag_name.symbol_name.slice(wasm); + }, + else => unreachable, + } } - }; - wasm.parseObject(obj) catch |err| { - switch (diags.failParse(path, "failed to parse object: {s}", .{@errorName(err)})) { - error.LinkFailure => return, + + pub fn typeIndex(i: @This(), wasm: *Wasm) FunctionType.Index { + const comp = wasm.base.comp; + const zcu = comp.zcu.?; + const target = &comp.root_mod.resolved_target.result; + const ip = &zcu.intern_pool; + switch (ip.indexToKey(i.key(wasm).*)) { + .func => |func| { + const fn_ty = zcu.navValue(func.owner_nav).typeOf(zcu); + const fn_info = zcu.typeToFunc(fn_ty).?; + return wasm.getExistingFunctionType(fn_info.cc, fn_info.param_types.get(ip), .fromInterned(fn_info.return_type), target).?; + }, + .enum_type => { + return i.value(wasm).tag_name.type_index; + }, + else => unreachable, + } } }; -} - -fn parseObject(wasm: *Wasm, obj: link.Input.Object) !void { - defer obj.file.close(); - const gpa = wasm.base.comp.gpa; - try wasm.objects.ensureUnusedCapacity(gpa, 1); - const stat = try obj.file.stat(); - const size = std.math.cast(usize, stat.size) orelse return error.FileTooBig; +}; - const file_contents = try gpa.alloc(u8, size); - defer gpa.free(file_contents); +pub const NavExport = extern struct { + name: String, + nav_index: InternPool.Nav.Index, +}; - const n = try obj.file.preadAll(file_contents, 0); - if (n != file_contents.len) return error.UnexpectedEndOfFile; +pub const UavExport = extern struct { + name: String, + uav_index: InternPool.Index, +}; - wasm.objects.appendAssumeCapacity(try Object.create(wasm, file_contents, obj.path, null)); -} +pub const FunctionImport = extern struct { + flags: SymbolFlags, + module_name: OptionalString, + /// May be different than the key which is a symbol name. + name: String, + source_location: SourceLocation, + resolution: Resolution, + type: FunctionType.Index, + + /// Represents a synthetic function, a function from an object, or a + /// function from the Zcu. 
+ pub const Resolution = enum(u32) { + unresolved, + __wasm_apply_global_tls_relocs, + __wasm_call_ctors, + __wasm_init_memory, + __wasm_init_tls, + // Next, index into `object_functions`. + // Next, index into `zcu_funcs`. + _, -/// Creates a new empty `Atom` and returns its `Atom.Index` -pub fn createAtom(wasm: *Wasm, sym_index: Symbol.Index, object_index: OptionalObjectId) !Atom.Index { - const gpa = wasm.base.comp.gpa; - const index: Atom.Index = @enumFromInt(wasm.managed_atoms.items.len); - const atom = try wasm.managed_atoms.addOne(gpa); - atom.* = .{ - .file = object_index, - .sym_index = sym_index, - }; - try wasm.symbol_atom.putNoClobber(gpa, atom.symbolLoc(), index); + const first_object_function = @intFromEnum(Resolution.__wasm_init_tls) + 1; - return index; -} + pub const Unpacked = union(enum) { + unresolved, + __wasm_apply_global_tls_relocs, + __wasm_call_ctors, + __wasm_init_memory, + __wasm_init_tls, + object_function: ObjectFunctionIndex, + zcu_func: ZcuFunc.Index, + }; -pub fn getAtom(wasm: *const Wasm, index: Atom.Index) Atom { - return wasm.managed_atoms.items[@intFromEnum(index)]; -} + pub fn unpack(r: Resolution, wasm: *const Wasm) Unpacked { + return switch (r) { + .unresolved => .unresolved, + .__wasm_apply_global_tls_relocs => .__wasm_apply_global_tls_relocs, + .__wasm_call_ctors => .__wasm_call_ctors, + .__wasm_init_memory => .__wasm_init_memory, + .__wasm_init_tls => .__wasm_init_tls, + _ => { + const object_function_index = @intFromEnum(r) - first_object_function; -pub fn getAtomPtr(wasm: *Wasm, index: Atom.Index) *Atom { - return &wasm.managed_atoms.items[@intFromEnum(index)]; -} + const zcu_func_index = if (object_function_index < wasm.object_functions.items.len) + return .{ .object_function = @enumFromInt(object_function_index) } + else + object_function_index - wasm.object_functions.items.len; -fn parseArchive(wasm: *Wasm, obj: link.Input.Object) !void { - const gpa = wasm.base.comp.gpa; + return .{ .zcu_func = @enumFromInt(zcu_func_index) }; + }, + }; + } - defer obj.file.close(); + pub fn pack(wasm: *const Wasm, unpacked: Unpacked) Resolution { + return switch (unpacked) { + .unresolved => .unresolved, + .__wasm_apply_global_tls_relocs => .__wasm_apply_global_tls_relocs, + .__wasm_call_ctors => .__wasm_call_ctors, + .__wasm_init_memory => .__wasm_init_memory, + .__wasm_init_tls => .__wasm_init_tls, + .object_function => |i| @enumFromInt(first_object_function + @intFromEnum(i)), + .zcu_func => |i| @enumFromInt(first_object_function + wasm.object_functions.items.len + @intFromEnum(i)), + }; + } - const stat = try obj.file.stat(); - const size = std.math.cast(usize, stat.size) orelse return error.FileTooBig; + pub fn fromIpNav(wasm: *const Wasm, nav_index: InternPool.Nav.Index) Resolution { + const zcu = wasm.base.comp.zcu.?; + const ip = &zcu.intern_pool; + return fromIpIndex(wasm, ip.getNav(nav_index).status.fully_resolved.val); + } - const file_contents = try gpa.alloc(u8, size); - var keep_file_contents = false; - defer if (!keep_file_contents) gpa.free(file_contents); + pub fn fromZcuFunc(wasm: *const Wasm, i: ZcuFunc.Index) Resolution { + return pack(wasm, .{ .zcu_func = i }); + } - const n = try obj.file.preadAll(file_contents, 0); - if (n != file_contents.len) return error.UnexpectedEndOfFile; + pub fn fromIpIndex(wasm: *const Wasm, ip_index: InternPool.Index) Resolution { + return fromZcuFunc(wasm, @enumFromInt(wasm.zcu_funcs.getIndex(ip_index).?)); + } - var archive = try Archive.parse(gpa, file_contents); + pub fn fromObjectFunction(wasm: *const 
Wasm, object_function: ObjectFunctionIndex) Resolution { + return pack(wasm, .{ .object_function = object_function }); + } - if (!obj.must_link) { - errdefer archive.deinit(gpa); - try wasm.lazy_archives.append(gpa, .{ - .path = .{ - .root_dir = obj.path.root_dir, - .sub_path = try gpa.dupe(u8, obj.path.sub_path), - }, - .file_contents = file_contents, - .archive = archive, - }); - keep_file_contents = true; - return; - } + pub fn isNavOrUnresolved(r: Resolution, wasm: *const Wasm) bool { + return switch (r.unpack(wasm)) { + .unresolved, .zcu_func => true, + else => false, + }; + } - defer archive.deinit(gpa); + pub fn typeIndex(r: Resolution, wasm: *Wasm) FunctionType.Index { + return switch (unpack(r, wasm)) { + .unresolved => unreachable, + .__wasm_apply_global_tls_relocs, + .__wasm_call_ctors, + .__wasm_init_memory, + => getExistingFuncType2(wasm, &.{}, &.{}), + .__wasm_init_tls => getExistingFuncType2(wasm, &.{.i32}, &.{}), + .object_function => |i| i.ptr(wasm).type_index, + .zcu_func => |i| i.typeIndex(wasm), + }; + } - // In this case we must force link all embedded object files within the archive - // We loop over all symbols, and then group them by offset as the offset - // notates where the object file starts. - var offsets = std.AutoArrayHashMap(u32, void).init(gpa); - defer offsets.deinit(); - for (archive.toc.values()) |symbol_offsets| { - for (symbol_offsets.items) |sym_offset| { - try offsets.put(sym_offset, {}); + pub fn name(r: Resolution, wasm: *const Wasm) ?[]const u8 { + return switch (unpack(r, wasm)) { + .unresolved => unreachable, + .__wasm_apply_global_tls_relocs => @tagName(Unpacked.__wasm_apply_global_tls_relocs), + .__wasm_call_ctors => @tagName(Unpacked.__wasm_call_ctors), + .__wasm_init_memory => @tagName(Unpacked.__wasm_init_memory), + .__wasm_init_tls => @tagName(Unpacked.__wasm_init_tls), + .object_function => |i| i.ptr(wasm).name.slice(wasm), + .zcu_func => |i| i.name(wasm), + }; } - } + }; - for (offsets.keys()) |file_offset| { - const object = try archive.parseObject(wasm, file_contents[file_offset..], obj.path); - try wasm.objects.append(gpa, object); - } -} + /// Index into `object_function_imports`. 
+ pub const Index = enum(u32) { + _, -fn requiresTLSReloc(wasm: *const Wasm) bool { - for (wasm.got_symbols.items) |loc| { - if (wasm.symbolLocSymbol(loc).isTLS()) { - return true; + pub fn key(index: Index, wasm: *const Wasm) *String { + return &wasm.object_function_imports.keys()[@intFromEnum(index)]; } - } - return false; -} - -fn objectPath(wasm: *const Wasm, object_id: ObjectId) Path { - const obj = wasm.objectById(object_id) orelse return wasm.zig_object.?.path; - return obj.path; -} -fn objectSymbols(wasm: *const Wasm, object_id: ObjectId) []const Symbol { - const obj = wasm.objectById(object_id) orelse return wasm.zig_object.?.symbols.items; - return obj.symtable; -} + pub fn value(index: Index, wasm: *const Wasm) *FunctionImport { + return &wasm.object_function_imports.values()[@intFromEnum(index)]; + } -fn objectSymbol(wasm: *const Wasm, object_id: ObjectId, index: Symbol.Index) *Symbol { - const obj = wasm.objectById(object_id) orelse return &wasm.zig_object.?.symbols.items[@intFromEnum(index)]; - return &obj.symtable[@intFromEnum(index)]; -} + pub fn symbolName(index: Index, wasm: *const Wasm) String { + return index.key(wasm).*; + } -fn objectFunction(wasm: *const Wasm, object_id: ObjectId, sym_index: Symbol.Index) std.wasm.Func { - const obj = wasm.objectById(object_id) orelse { - const zo = wasm.zig_object.?; - const sym = zo.symbols.items[@intFromEnum(sym_index)]; - return zo.functions.items[sym.index]; - }; - const sym = obj.symtable[@intFromEnum(sym_index)]; - return obj.functions[sym.index - obj.imported_functions_count]; -} + pub fn importName(index: Index, wasm: *const Wasm) String { + return index.value(wasm).name; + } -fn objectImportedFunctions(wasm: *const Wasm, object_id: ObjectId) u32 { - const obj = wasm.objectById(object_id) orelse return wasm.zig_object.?.imported_functions_count; - return obj.imported_functions_count; -} + pub fn moduleName(index: Index, wasm: *const Wasm) OptionalString { + return index.value(wasm).module_name; + } -fn objectGlobals(wasm: *const Wasm, object_id: ObjectId) []const std.wasm.Global { - const obj = wasm.objectById(object_id) orelse return wasm.zig_object.?.globals.items; - return obj.globals; -} + pub fn functionType(index: Index, wasm: *const Wasm) FunctionType.Index { + return value(index, wasm).type; + } + }; +}; -fn objectFuncTypes(wasm: *const Wasm, object_id: ObjectId) []const std.wasm.Type { - const obj = wasm.objectById(object_id) orelse return wasm.zig_object.?.func_types.items; - return obj.func_types; -} +pub const ObjectFunction = extern struct { + flags: SymbolFlags, + /// `none` if this function has no symbol describing it. + name: OptionalString, + type_index: FunctionType.Index, + code: Code, + /// The offset within the code section where the data starts. + offset: u32, + /// The object file whose code section contains this function. + object_index: ObjectIndex, -fn objectSegmentInfo(wasm: *const Wasm, object_id: ObjectId) []const NamedSegment { - const obj = wasm.objectById(object_id) orelse return wasm.zig_object.?.segment_info.items; - return obj.segment_info; -} + pub const Code = DataPayload; -/// For a given symbol index, find its corresponding import. -/// Asserts import exists. 
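`FunctionImport.Resolution` above (and the similar `GlobalImport.Resolution` and `TableImport.Resolution` later in this hunk) packs several kinds of referent into one `enum(u32)`: a few named synthetic values come first, and every remaining integer is an index into one of two tables, told apart by comparing against the first table's length. The sketch below reproduces only that encoding, with hypothetical names (`synthetic_thing`, `a_index`, `b_index`), to show how `pack` and `unpack` stay in agreement.

const std = @import("std");

// Toy version of the packed resolution encoding: named synthetic values first,
// then indices into table A, then indices into table B, all in one
// non-exhaustive enum(u32).
const Resolution = enum(u32) {
    unresolved,
    synthetic_thing,
    // Every value >= first_table_entry is an index into table A or table B.
    _,

    const first_table_entry = @intFromEnum(Resolution.synthetic_thing) + 1;

    const Unpacked = union(enum) {
        unresolved,
        synthetic_thing,
        a_index: u32,
        b_index: u32,
    };

    fn unpack(r: Resolution, a_len: u32) Unpacked {
        return switch (r) {
            .unresolved => .unresolved,
            .synthetic_thing => .synthetic_thing,
            _ => {
                const i = @intFromEnum(r) - first_table_entry;
                return if (i < a_len) .{ .a_index = i } else .{ .b_index = i - a_len };
            },
        };
    }

    fn pack(unpacked: Unpacked, a_len: u32) Resolution {
        return switch (unpacked) {
            .unresolved => .unresolved,
            .synthetic_thing => .synthetic_thing,
            .a_index => |i| @enumFromInt(first_table_entry + i),
            .b_index => |i| @enumFromInt(first_table_entry + a_len + i),
        };
    }
};

test "pack and unpack agree on the same table length" {
    const a_len: u32 = 4;
    const r = Resolution.pack(.{ .b_index = 2 }, a_len);
    try std.testing.expectEqual(@as(u32, 2), r.unpack(a_len).b_index);
}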
-fn objectImport(wasm: *const Wasm, object_id: ObjectId, symbol_index: Symbol.Index) Import { - const obj = wasm.objectById(object_id) orelse return wasm.zig_object.?.imports.get(symbol_index).?; - return obj.findImport(obj.symtable[@intFromEnum(symbol_index)]); -} + pub fn relocations(of: *const ObjectFunction, wasm: *const Wasm) ObjectRelocation.IterableSlice { + const code_section_index = of.object_index.ptr(wasm).code_section_index.?; + const relocs = wasm.object_relocations_table.get(code_section_index) orelse return .empty; + return .init(relocs, of.offset, of.code.len, wasm); + } +}; -/// Returns the object element pointer, or null if it is the ZigObject. -fn objectById(wasm: *const Wasm, object_id: ObjectId) ?*Object { - if (object_id == .zig_object) return null; - return &wasm.objects.items[@intFromEnum(object_id)]; -} +pub const GlobalImport = extern struct { + flags: SymbolFlags, + module_name: OptionalString, + /// May be different than the key which is a symbol name. + name: String, + source_location: SourceLocation, + resolution: Resolution, + + /// Represents a synthetic global, a global from an object, or a global + /// from the Zcu. + pub const Resolution = enum(u32) { + unresolved, + __heap_base, + __heap_end, + __stack_pointer, + __tls_align, + __tls_base, + __tls_size, + // Next, index into `object_globals`. + // Next, index into `navs_obj` or `navs_exe` depending on whether emitting an object. + _, -fn resolveSymbolsInObject(wasm: *Wasm, object_id: ObjectId) !void { - const gpa = wasm.base.comp.gpa; - const diags = &wasm.base.comp.link_diags; - const obj_path = objectPath(wasm, object_id); - log.debug("Resolving symbols in object: '{'}'", .{obj_path}); - const symbols = objectSymbols(wasm, object_id); - - for (symbols, 0..) |symbol, i| { - const sym_index: Symbol.Index = @enumFromInt(i); - const location: SymbolLoc = .{ - .file = object_id.toOptional(), - .index = sym_index, + const first_object_global = @intFromEnum(Resolution.__tls_size) + 1; + + pub const Unpacked = union(enum) { + unresolved, + __heap_base, + __heap_end, + __stack_pointer, + __tls_align, + __tls_base, + __tls_size, + object_global: ObjectGlobalIndex, + nav_exe: NavsExeIndex, + nav_obj: NavsObjIndex, }; - if (symbol.name == wasm.preloaded_strings.__indirect_function_table) continue; - if (symbol.isLocal()) { - if (symbol.isUndefined()) { - diags.addParseError(obj_path, "local symbol '{s}' references import", .{ - wasm.stringSlice(symbol.name), - }); - } - try wasm.resolved_symbols.putNoClobber(gpa, location, {}); - continue; + pub fn unpack(r: Resolution, wasm: *const Wasm) Unpacked { + return switch (r) { + .unresolved => .unresolved, + .__heap_base => .__heap_base, + .__heap_end => .__heap_end, + .__stack_pointer => .__stack_pointer, + .__tls_align => .__tls_align, + .__tls_base => .__tls_base, + .__tls_size => .__tls_size, + _ => { + const i: u32 = @intFromEnum(r); + const object_global_index = i - first_object_global; + if (object_global_index < wasm.object_globals.items.len) + return .{ .object_global = @enumFromInt(object_global_index) }; + const comp = wasm.base.comp; + const is_obj = comp.config.output_mode == .Obj; + const nav_index = object_global_index - wasm.object_globals.items.len; + return if (is_obj) .{ + .nav_obj = @enumFromInt(nav_index), + } else .{ + .nav_exe = @enumFromInt(nav_index), + }; + }, + }; } - const maybe_existing = try wasm.globals.getOrPut(gpa, symbol.name); - if (!maybe_existing.found_existing) { - maybe_existing.value_ptr.* = location; - try 
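`ObjectFunction.relocations` above (and the matching helpers on globals and data segments) hands back only the relocations whose offsets fall inside one item's byte range within its section. Below is a small sketch of that range-slicing idea under the assumption that a section's relocations are kept sorted by offset; `Reloc` and `relocsInRange` are hypothetical names, and the real `ObjectRelocation.IterableSlice` may be implemented differently.

const std = @import("std");

// Hypothetical relocation record; only the field needed for range slicing.
const Reloc = struct { offset: u32 };

// Given relocations sorted by offset, the relocations that apply to one
// function (or data segment) are the contiguous subrange whose offsets fall
// inside that item's byte range within the section.
fn relocsInRange(sorted: []const Reloc, offset: u32, len: u32) []const Reloc {
    var start: usize = 0;
    while (start < sorted.len and sorted[start].offset < offset) start += 1;
    var end = start;
    while (end < sorted.len and sorted[end].offset < offset + len) end += 1;
    return sorted[start..end];
}

test "relocations inside a byte range" {
    const relocs = [_]Reloc{ .{ .offset = 4 }, .{ .offset = 9 }, .{ .offset = 20 } };
    try std.testing.expectEqual(@as(usize, 2), relocsInRange(&relocs, 0, 16).len);
}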
wasm.resolved_symbols.putNoClobber(gpa, location, {}); - - if (symbol.isUndefined()) { - try wasm.undefs.putNoClobber(gpa, symbol.name, location); - } - continue; + pub fn pack(wasm: *const Wasm, unpacked: Unpacked) Resolution { + return switch (unpacked) { + .unresolved => .unresolved, + .__heap_base => .__heap_base, + .__heap_end => .__heap_end, + .__stack_pointer => .__stack_pointer, + .__tls_align => .__tls_align, + .__tls_base => .__tls_base, + .__tls_size => .__tls_size, + .object_global => |i| @enumFromInt(first_object_global + @intFromEnum(i)), + .nav_obj => |i| @enumFromInt(first_object_global + wasm.object_globals.items.len + @intFromEnum(i)), + .nav_exe => |i| @enumFromInt(first_object_global + wasm.object_globals.items.len + @intFromEnum(i)), + }; } - const existing_loc = maybe_existing.value_ptr.*; - const existing_sym: *Symbol = wasm.symbolLocSymbol(existing_loc); - const existing_file_path: Path = if (existing_loc.file.unwrap()) |id| objectPath(wasm, id) else .{ - .root_dir = std.Build.Cache.Directory.cwd(), - .sub_path = wasm.name, - }; - - if (!existing_sym.isUndefined()) outer: { - if (!symbol.isUndefined()) inner: { - if (symbol.isWeak()) { - break :inner; // ignore the new symbol (discard it) - } - if (existing_sym.isWeak()) { - break :outer; // existing is weak, while new one isn't. Replace it. - } - // both are defined and weak, we have a symbol collision. - var err = try diags.addErrorWithNotes(2); - try err.addMsg("symbol '{s}' defined multiple times", .{wasm.stringSlice(symbol.name)}); - try err.addNote("first definition in '{'}'", .{existing_file_path}); - try err.addNote("next definition in '{'}'", .{obj_path}); - } + pub fn fromIpNav(wasm: *const Wasm, ip_nav: InternPool.Nav.Index) Resolution { + const comp = wasm.base.comp; + const is_obj = comp.config.output_mode == .Obj; + return pack(wasm, if (is_obj) .{ + .nav_obj = @enumFromInt(wasm.navs_obj.getIndex(ip_nav).?), + } else .{ + .nav_exe = @enumFromInt(wasm.navs_exe.getIndex(ip_nav).?), + }); + } - try wasm.discarded.put(gpa, location, existing_loc); - continue; // Do not overwrite defined symbols with undefined symbols + pub fn fromObjectGlobal(wasm: *const Wasm, object_global: ObjectGlobalIndex) Resolution { + return pack(wasm, .{ .object_global = object_global }); } - if (symbol.tag != existing_sym.tag) { - var err = try diags.addErrorWithNotes(2); - try err.addMsg("symbol '{s}' mismatching types '{s}' and '{s}'", .{ - wasm.stringSlice(symbol.name), @tagName(symbol.tag), @tagName(existing_sym.tag), - }); - try err.addNote("first definition in '{'}'", .{existing_file_path}); - try err.addNote("next definition in '{'}'", .{obj_path}); + pub fn name(r: Resolution, wasm: *const Wasm) ?[]const u8 { + return switch (unpack(r, wasm)) { + .unresolved => unreachable, + .__heap_base => @tagName(Unpacked.__heap_base), + .__heap_end => @tagName(Unpacked.__heap_end), + .__stack_pointer => @tagName(Unpacked.__stack_pointer), + .__tls_align => @tagName(Unpacked.__tls_align), + .__tls_base => @tagName(Unpacked.__tls_base), + .__tls_size => @tagName(Unpacked.__tls_size), + .object_global => |i| i.name(wasm).slice(wasm), + .nav_obj => |i| i.name(wasm), + .nav_exe => |i| i.name(wasm), + }; } + }; - if (existing_sym.isUndefined() and symbol.isUndefined()) { - // only verify module/import name for function symbols - if (symbol.tag == .function) { - const existing_name = if (existing_loc.file.unwrap()) |existing_obj_id| - objectImport(wasm, existing_obj_id, existing_loc.index).module_name - else - 
wasm.imports.get(existing_loc).?.module_name; - - const module_name = objectImport(wasm, object_id, sym_index).module_name; - if (existing_name != module_name) { - var err = try diags.addErrorWithNotes(2); - try err.addMsg("symbol '{s}' module name mismatch. Expected '{s}', but found '{s}'", .{ - wasm.stringSlice(symbol.name), - wasm.stringSlice(existing_name), - wasm.stringSlice(module_name), - }); - try err.addNote("first definition in '{'}'", .{existing_file_path}); - try err.addNote("next definition in '{'}'", .{obj_path}); - } - } + /// Index into `Wasm.object_global_imports`. + pub const Index = enum(u32) { + _, - // both undefined so skip overwriting existing symbol and discard the new symbol - try wasm.discarded.put(gpa, location, existing_loc); - continue; + pub fn key(index: Index, wasm: *const Wasm) *String { + return &wasm.object_global_imports.keys()[@intFromEnum(index)]; } - if (existing_sym.tag == .global) { - const existing_ty = wasm.getGlobalType(existing_loc); - const new_ty = wasm.getGlobalType(location); - if (existing_ty.mutable != new_ty.mutable or existing_ty.valtype != new_ty.valtype) { - var err = try diags.addErrorWithNotes(2); - try err.addMsg("symbol '{s}' mismatching global types", .{wasm.stringSlice(symbol.name)}); - try err.addNote("first definition in '{'}'", .{existing_file_path}); - try err.addNote("next definition in '{'}'", .{obj_path}); - } + pub fn value(index: Index, wasm: *const Wasm) *GlobalImport { + return &wasm.object_global_imports.values()[@intFromEnum(index)]; } - if (existing_sym.tag == .function) { - const existing_ty = wasm.getFunctionSignature(existing_loc); - const new_ty = wasm.getFunctionSignature(location); - if (!existing_ty.eql(new_ty)) { - var err = try diags.addErrorWithNotes(3); - try err.addMsg("symbol '{s}' mismatching function signatures.", .{wasm.stringSlice(symbol.name)}); - try err.addNote("expected signature {}, but found signature {}", .{ existing_ty, new_ty }); - try err.addNote("first definition in '{'}'", .{existing_file_path}); - try err.addNote("next definition in '{'}'", .{obj_path}); - } + pub fn symbolName(index: Index, wasm: *const Wasm) String { + return index.key(wasm).*; } - // when both symbols are weak, we skip overwriting unless the existing - // symbol is weak and the new one isn't, in which case we *do* overwrite it. 
- if (existing_sym.isWeak() and symbol.isWeak()) blk: { - if (existing_sym.isUndefined() and !symbol.isUndefined()) break :blk; - try wasm.discarded.put(gpa, location, existing_loc); - continue; + pub fn importName(index: Index, wasm: *const Wasm) String { + return index.value(wasm).name; } - // simply overwrite with the new symbol - log.debug("Overwriting symbol '{s}'", .{wasm.stringSlice(symbol.name)}); - log.debug(" old definition in '{'}'", .{existing_file_path}); - log.debug(" new definition in '{'}'", .{obj_path}); - try wasm.discarded.putNoClobber(gpa, existing_loc, location); - maybe_existing.value_ptr.* = location; - try wasm.globals.put(gpa, symbol.name, location); - try wasm.resolved_symbols.put(gpa, location, {}); - assert(wasm.resolved_symbols.swapRemove(existing_loc)); - if (existing_sym.isUndefined()) { - _ = wasm.undefs.swapRemove(symbol.name); + pub fn moduleName(index: Index, wasm: *const Wasm) OptionalString { + return index.value(wasm).module_name; } - } -} -fn resolveSymbolsInArchives(wasm: *Wasm) !void { - if (wasm.lazy_archives.items.len == 0) return; - const gpa = wasm.base.comp.gpa; - const diags = &wasm.base.comp.link_diags; + pub fn globalType(index: Index, wasm: *const Wasm) ObjectGlobal.Type { + return value(index, wasm).type(); + } + }; - log.debug("Resolving symbols in lazy_archives", .{}); - var index: u32 = 0; - undef_loop: while (index < wasm.undefs.count()) { - const sym_name_index = wasm.undefs.keys()[index]; + pub fn @"type"(gi: *const GlobalImport) ObjectGlobal.Type { + return gi.flags.global_type.to(); + } +}; - for (wasm.lazy_archives.items) |lazy_archive| { - const sym_name = wasm.stringSlice(sym_name_index); - log.debug("Detected symbol '{s}' in archive '{'}', parsing objects..", .{ - sym_name, lazy_archive.path, - }); - const offset = lazy_archive.archive.toc.get(sym_name) orelse continue; // symbol does not exist in this archive - - // Symbol is found in unparsed object file within current archive. - // Parse object and and resolve symbols again before we check remaining - // undefined symbols. - const file_contents = lazy_archive.file_contents[offset.items[0]..]; - const object = lazy_archive.archive.parseObject(wasm, file_contents, lazy_archive.path) catch |err| { - // TODO this fails to include information to identify which object failed - return diags.failParse(lazy_archive.path, "failed to parse object in archive: {s}", .{@errorName(err)}); - }; - try wasm.objects.append(gpa, object); - try wasm.resolveSymbolsInObject(@enumFromInt(wasm.objects.items.len - 1)); +pub const ObjectGlobal = extern struct { + /// `none` if this function has no symbol describing it. + name: OptionalString, + flags: SymbolFlags, + expr: Expr, + /// The object file whose global section contains this global. + object_index: ObjectIndex, + offset: u32, + size: u32, - // continue loop for any remaining undefined symbols that still exist - // after resolving last object file - continue :undef_loop; - } - index += 1; + pub fn @"type"(og: *const ObjectGlobal) Type { + return og.flags.global_type.to(); } -} -/// Writes an unsigned 32-bit integer as a LEB128-encoded 'i32.const' value. 
-fn writeI32Const(writer: anytype, val: u32) !void { - try writer.writeByte(std.wasm.opcode(.i32_const)); - try leb.writeIleb128(writer, @as(i32, @bitCast(val))); -} - -fn setupInitMemoryFunction(wasm: *Wasm) !void { - const comp = wasm.base.comp; - const gpa = comp.gpa; - const shared_memory = comp.config.shared_memory; - const import_memory = comp.config.import_memory; + pub const Type = struct { + valtype: std.wasm.Valtype, + mutable: bool, + }; - // Passive segments are used to avoid memory being reinitialized on each - // thread's instantiation. These passive segments are initialized and - // dropped in __wasm_init_memory, which is registered as the start function - // We also initialize bss segments (using memory.fill) as part of this - // function. - if (!wasm.hasPassiveInitializationSegments()) { - return; + pub fn relocations(og: *const ObjectGlobal, wasm: *const Wasm) ObjectRelocation.IterableSlice { + const global_section_index = og.object_index.ptr(wasm).global_section_index.?; + const relocs = wasm.object_relocations_table.get(global_section_index) orelse return .empty; + return .init(relocs, og.offset, og.size, wasm); } - const sym_loc = try wasm.createSyntheticSymbol(wasm.preloaded_strings.__wasm_init_memory, .function); - wasm.symbolLocSymbol(sym_loc).mark(); - - const flag_address: u32 = if (shared_memory) address: { - // when we have passive initialization segments and shared memory - // `setupMemory` will create this symbol and set its virtual address. - const loc = wasm.globals.get(wasm.preloaded_strings.__wasm_init_memory_flag).?; - break :address wasm.symbolLocSymbol(loc).virtual_address; - } else 0; - - var function_body = std.ArrayList(u8).init(gpa); - defer function_body.deinit(); - const writer = function_body.writer(); - - // we have 0 locals - try leb.writeUleb128(writer, @as(u32, 0)); - - if (shared_memory) { - // destination blocks - // based on values we jump to corresponding label - try writer.writeByte(std.wasm.opcode(.block)); // $drop - try writer.writeByte(std.wasm.block_empty); // block type - - try writer.writeByte(std.wasm.opcode(.block)); // $wait - try writer.writeByte(std.wasm.block_empty); // block type - - try writer.writeByte(std.wasm.opcode(.block)); // $init - try writer.writeByte(std.wasm.block_empty); // block type - - // atomically check - try writeI32Const(writer, flag_address); - try writeI32Const(writer, 0); - try writeI32Const(writer, 1); - try writer.writeByte(std.wasm.opcode(.atomics_prefix)); - try leb.writeUleb128(writer, std.wasm.atomicsOpcode(.i32_atomic_rmw_cmpxchg)); - try leb.writeUleb128(writer, @as(u32, 2)); // alignment - try leb.writeUleb128(writer, @as(u32, 0)); // offset - - // based on the value from the atomic check, jump to the label. - try writer.writeByte(std.wasm.opcode(.br_table)); - try leb.writeUleb128(writer, @as(u32, 2)); // length of the table (we have 3 blocks but because of the mandatory default the length is 2). - try leb.writeUleb128(writer, @as(u32, 0)); // $init - try leb.writeUleb128(writer, @as(u32, 1)); // $wait - try leb.writeUleb128(writer, @as(u32, 2)); // $drop - try writer.writeByte(std.wasm.opcode(.end)); - } - - for (wasm.data_segments.keys(), wasm.data_segments.values(), 0..) |key, value, segment_index_usize| { - const segment_index: u32 = @intCast(segment_index_usize); - const segment = wasm.segmentPtr(value); - if (segment.needsPassiveInitialization(import_memory, key)) { - // For passive BSS segments we can simple issue a memory.fill(0). - // For non-BSS segments we do a memory.init. 
Both these - // instructions take as their first argument the destination - // address. - try writeI32Const(writer, segment.offset); - - if (shared_memory and std.mem.eql(u8, key, ".tdata")) { - // When we initialize the TLS segment we also set the `__tls_base` - // global. This allows the runtime to use this static copy of the - // TLS data for the first/main thread. - try writeI32Const(writer, segment.offset); - try writer.writeByte(std.wasm.opcode(.global_set)); - const loc = wasm.globals.get(wasm.preloaded_strings.__tls_base).?; - try leb.writeUleb128(writer, wasm.symbolLocSymbol(loc).index); - } +}; - try writeI32Const(writer, 0); - try writeI32Const(writer, segment.size); - try writer.writeByte(std.wasm.opcode(.misc_prefix)); - if (std.mem.eql(u8, key, ".bss")) { - // fill bss segment with zeroes - try leb.writeUleb128(writer, std.wasm.miscOpcode(.memory_fill)); - } else { - // initialize the segment - try leb.writeUleb128(writer, std.wasm.miscOpcode(.memory_init)); - try leb.writeUleb128(writer, segment_index); - } - try writer.writeByte(0); // memory index immediate - } - } - - if (shared_memory) { - // we set the init memory flag to value '2' - try writeI32Const(writer, flag_address); - try writeI32Const(writer, 2); - try writer.writeByte(std.wasm.opcode(.atomics_prefix)); - try leb.writeUleb128(writer, std.wasm.atomicsOpcode(.i32_atomic_store)); - try leb.writeUleb128(writer, @as(u32, 2)); // alignment - try leb.writeUleb128(writer, @as(u32, 0)); // offset - - // notify any waiters for segment initialization completion - try writeI32Const(writer, flag_address); - try writer.writeByte(std.wasm.opcode(.i32_const)); - try leb.writeIleb128(writer, @as(i32, -1)); // number of waiters - try writer.writeByte(std.wasm.opcode(.atomics_prefix)); - try leb.writeUleb128(writer, std.wasm.atomicsOpcode(.memory_atomic_notify)); - try leb.writeUleb128(writer, @as(u32, 2)); // alignment - try leb.writeUleb128(writer, @as(u32, 0)); // offset - try writer.writeByte(std.wasm.opcode(.drop)); - - // branch and drop segments - try writer.writeByte(std.wasm.opcode(.br)); - try leb.writeUleb128(writer, @as(u32, 1)); - - // wait for thread to initialize memory segments - try writer.writeByte(std.wasm.opcode(.end)); // end $wait - try writeI32Const(writer, flag_address); - try writeI32Const(writer, 1); // expected flag value - try writer.writeByte(std.wasm.opcode(.i64_const)); - try leb.writeIleb128(writer, @as(i64, -1)); // timeout - try writer.writeByte(std.wasm.opcode(.atomics_prefix)); - try leb.writeUleb128(writer, std.wasm.atomicsOpcode(.memory_atomic_wait32)); - try leb.writeUleb128(writer, @as(u32, 2)); // alignment - try leb.writeUleb128(writer, @as(u32, 0)); // offset - try writer.writeByte(std.wasm.opcode(.drop)); - - try writer.writeByte(std.wasm.opcode(.end)); // end $drop - } - - for (wasm.data_segments.keys(), wasm.data_segments.values(), 0..) |name, value, segment_index_usize| { - const segment_index: u32 = @intCast(segment_index_usize); - const segment = wasm.segmentPtr(value); - if (segment.needsPassiveInitialization(import_memory, name) and - !std.mem.eql(u8, name, ".bss")) - { - // The TLS region should not be dropped since its is needed - // during the initialization of each thread (__wasm_init_tls). 
- if (shared_memory and std.mem.eql(u8, name, ".tdata")) { - continue; - } +pub const RefType1 = enum(u1) { + funcref, + externref, - try writer.writeByte(std.wasm.opcode(.misc_prefix)); - try leb.writeUleb128(writer, std.wasm.miscOpcode(.data_drop)); - try leb.writeUleb128(writer, segment_index); - } + pub fn from(rt: std.wasm.RefType) RefType1 { + return switch (rt) { + .funcref => .funcref, + .externref => .externref, + }; } - // End of the function body - try writer.writeByte(std.wasm.opcode(.end)); + pub fn to(rt: RefType1) std.wasm.RefType { + return switch (rt) { + .funcref => .funcref, + .externref => .externref, + }; + } +}; - try wasm.createSyntheticFunction( - wasm.preloaded_strings.__wasm_init_memory, - std.wasm.Type{ .params = &.{}, .returns = &.{} }, - &function_body, - ); -} +pub const TableImport = extern struct { + flags: SymbolFlags, + module_name: String, + /// May be different than the key which is a symbol name. + name: String, + source_location: SourceLocation, + resolution: Resolution, + limits_min: u32, + limits_max: u32, + + /// Represents a synthetic table, or a table from an object. + pub const Resolution = enum(u32) { + unresolved, + __indirect_function_table, + // Next, index into `object_tables`. + _, -/// Constructs a synthetic function that performs runtime relocations for -/// TLS symbols. This function is called by `__wasm_init_tls`. -fn setupTLSRelocationsFunction(wasm: *Wasm) !void { - const comp = wasm.base.comp; - const gpa = comp.gpa; - const shared_memory = comp.config.shared_memory; + const first_object_table = @intFromEnum(Resolution.__indirect_function_table) + 1; - // When we have TLS GOT entries and shared memory is enabled, - // we must perform runtime relocations or else we don't create the function. - if (!shared_memory or !wasm.requiresTLSReloc()) { - return; - } + pub const Unpacked = union(enum) { + unresolved, + __indirect_function_table, + object_table: ObjectTableIndex, + }; - const loc = try wasm.createSyntheticSymbol(wasm.preloaded_strings.__wasm_apply_global_tls_relocs, .function); - wasm.symbolLocSymbol(loc).mark(); - var function_body = std.ArrayList(u8).init(gpa); - defer function_body.deinit(); - const writer = function_body.writer(); - - // locals (we have none) - try writer.writeByte(0); - for (wasm.got_symbols.items, 0..) 
|got_loc, got_index| { - const sym: *Symbol = wasm.symbolLocSymbol(got_loc); - if (!sym.isTLS()) continue; // only relocate TLS symbols - if (sym.tag == .data and sym.isDefined()) { - // get __tls_base - try writer.writeByte(std.wasm.opcode(.global_get)); - try leb.writeUleb128(writer, wasm.symbolLocSymbol(wasm.globals.get(wasm.preloaded_strings.__tls_base).?).index); - - // add the virtual address of the symbol - try writer.writeByte(std.wasm.opcode(.i32_const)); - try leb.writeUleb128(writer, sym.virtual_address); - } else if (sym.tag == .function) { - @panic("TODO: relocate GOT entry of function"); - } else continue; - - try writer.writeByte(std.wasm.opcode(.i32_add)); - try writer.writeByte(std.wasm.opcode(.global_set)); - try leb.writeUleb128(writer, wasm.imported_globals_count + @as(u32, @intCast(wasm.wasm_globals.items.len + got_index))); - } - try writer.writeByte(std.wasm.opcode(.end)); - - try wasm.createSyntheticFunction( - wasm.preloaded_strings.__wasm_apply_global_tls_relocs, - std.wasm.Type{ .params = &.{}, .returns = &.{} }, - &function_body, - ); -} + pub fn unpack(r: Resolution) Unpacked { + return switch (r) { + .unresolved => .unresolved, + .__indirect_function_table => .__indirect_function_table, + _ => .{ .object_table = @enumFromInt(@intFromEnum(r) - first_object_table) }, + }; + } -fn validateFeatures( - wasm: *const Wasm, - to_emit: *[@typeInfo(Feature.Tag).@"enum".fields.len]bool, - emit_features_count: *u32, -) !void { - const comp = wasm.base.comp; - const diags = &wasm.base.comp.link_diags; - const target = comp.root_mod.resolved_target.result; - const shared_memory = comp.config.shared_memory; - const cpu_features = target.cpu.features; - const infer = cpu_features.isEmpty(); // when the user did not define any features, we infer them from linked objects. - const known_features_count = @typeInfo(Feature.Tag).@"enum".fields.len; - - var allowed = [_]bool{false} ** known_features_count; - var used = [_]u17{0} ** known_features_count; - var disallowed = [_]u17{0} ** known_features_count; - var required = [_]u17{0} ** known_features_count; - - // when false, we fail linking. We only verify this after a loop to catch all invalid features. - var valid_feature_set = true; - // will be set to true when there's any TLS segment found in any of the object files - var has_tls = false; - - // When the user has given an explicit list of features to enable, - // we extract them and insert each into the 'allowed' list. - if (!infer) { - inline for (@typeInfo(std.Target.wasm.Feature).@"enum".fields) |feature_field| { - if (cpu_features.isEnabled(feature_field.value)) { - allowed[feature_field.value] = true; - emit_features_count.* += 1; - } + fn pack(unpacked: Unpacked) Resolution { + return switch (unpacked) { + .unresolved => .unresolved, + .__indirect_function_table => .__indirect_function_table, + .object_table => |i| @enumFromInt(first_object_table + @intFromEnum(i)), + }; } - } - // extract all the used, disallowed and required features from each - // linked object file so we can test them. - for (wasm.objects.items, 0..) 
|*object, file_index| { - for (object.features) |feature| { - const value = (@as(u16, @intCast(file_index)) << 1) | 1; - switch (feature.prefix) { - .used => { - used[@intFromEnum(feature.tag)] = value; - }, - .disallowed => { - disallowed[@intFromEnum(feature.tag)] = value; - }, - .required => { - required[@intFromEnum(feature.tag)] = value; - used[@intFromEnum(feature.tag)] = value; - }, - } + fn fromObjectTable(object_table: ObjectTableIndex) Resolution { + return pack(.{ .object_table = object_table }); } - for (object.segment_info) |segment| { - if (segment.isTLS()) { - has_tls = true; - } + pub fn refType(r: Resolution, wasm: *const Wasm) std.wasm.RefType { + return switch (unpack(r)) { + .unresolved => unreachable, + .__indirect_function_table => .funcref, + .object_table => |i| i.ptr(wasm).flags.ref_type.to(), + }; } - } - // when we infer the features, we allow each feature found in the 'used' set - // and insert it into the 'allowed' set. When features are not inferred, - // we validate that a used feature is allowed. - for (used, 0..) |used_set, used_index| { - const is_enabled = @as(u1, @truncate(used_set)) != 0; - if (infer) { - allowed[used_index] = is_enabled; - emit_features_count.* += @intFromBool(is_enabled); - } else if (is_enabled and !allowed[used_index]) { - diags.addParseError( - wasm.objects.items[used_set >> 1].path, - "feature '{}' not allowed, but used by linked object", - .{@as(Feature.Tag, @enumFromInt(used_index))}, - ); - valid_feature_set = false; + pub fn limits(r: Resolution, wasm: *const Wasm) std.wasm.Limits { + return switch (unpack(r)) { + .unresolved => unreachable, + .__indirect_function_table => .{ + .flags = .{ .has_max = true, .is_shared = false }, + .min = @intCast(wasm.flush_buffer.indirect_function_table.entries.len + 1), + .max = @intCast(wasm.flush_buffer.indirect_function_table.entries.len + 1), + }, + .object_table => |i| i.ptr(wasm).limits(), + }; } - } + }; - if (!valid_feature_set) { - return error.FlushFailure; - } + /// Index into `object_table_imports`. 
+ pub const Index = enum(u32) { + _, - if (shared_memory) { - const disallowed_feature = disallowed[@intFromEnum(Feature.Tag.shared_mem)]; - if (@as(u1, @truncate(disallowed_feature)) != 0) { - diags.addParseError( - wasm.objects.items[disallowed_feature >> 1].path, - "shared-memory is disallowed because it wasn't compiled with 'atomics' and 'bulk-memory' features enabled", - .{}, - ); - valid_feature_set = false; + pub fn key(index: Index, wasm: *const Wasm) *String { + return &wasm.object_table_imports.keys()[@intFromEnum(index)]; } - for ([_]Feature.Tag{ .atomics, .bulk_memory }) |feature| { - if (!allowed[@intFromEnum(feature)]) { - var err = try diags.addErrorWithNotes(0); - try err.addMsg("feature '{}' is not used but is required for shared-memory", .{feature}); - } + pub fn value(index: Index, wasm: *const Wasm) *TableImport { + return &wasm.object_table_imports.values()[@intFromEnum(index)]; } - } - if (has_tls) { - for ([_]Feature.Tag{ .atomics, .bulk_memory }) |feature| { - if (!allowed[@intFromEnum(feature)]) { - var err = try diags.addErrorWithNotes(0); - try err.addMsg("feature '{}' is not used but is required for thread-local storage", .{feature}); - } + pub fn name(index: Index, wasm: *const Wasm) String { + return index.key(wasm).*; } - } - // For each linked object, validate the required and disallowed features - for (wasm.objects.items) |*object| { - var object_used_features = [_]bool{false} ** known_features_count; - for (object.features) |feature| { - if (feature.prefix == .disallowed) continue; // already defined in 'disallowed' set. - // from here a feature is always used - const disallowed_feature = disallowed[@intFromEnum(feature.tag)]; - if (@as(u1, @truncate(disallowed_feature)) != 0) { - var err = try diags.addErrorWithNotes(2); - try err.addMsg("feature '{}' is disallowed, but used by linked object", .{feature.tag}); - try err.addNote("disallowed by '{'}'", .{wasm.objects.items[disallowed_feature >> 1].path}); - try err.addNote("used in '{'}'", .{object.path}); - valid_feature_set = false; - } - object_used_features[@intFromEnum(feature.tag)] = true; + pub fn moduleName(index: Index, wasm: *const Wasm) OptionalString { + return index.value(wasm).module_name; } + }; - // validate the linked object file has each required feature - for (required, 0..) 
|required_feature, feature_index| { - const is_required = @as(u1, @truncate(required_feature)) != 0; - if (is_required and !object_used_features[feature_index]) { - var err = try diags.addErrorWithNotes(2); - try err.addMsg("feature '{}' is required but not used in linked object", .{@as(Feature.Tag, @enumFromInt(feature_index))}); - try err.addNote("required by '{'}'", .{wasm.objects.items[required_feature >> 1].path}); - try err.addNote("missing in '{'}'", .{object.path}); - valid_feature_set = false; - } - } + pub fn limits(ti: *const TableImport) std.wasm.Limits { + return .{ + .flags = .{ + .has_max = ti.flags.limits_has_max, + .is_shared = ti.flags.limits_is_shared, + }, + .min = ti.limits_min, + .max = ti.limits_max, + }; } +}; - if (!valid_feature_set) { - return error.FlushFailure; - } +pub const Table = extern struct { + module_name: OptionalString, + name: OptionalString, + flags: SymbolFlags, + limits_min: u32, + limits_max: u32, - to_emit.* = allowed; -} + pub fn limits(t: *const Table) std.wasm.Limits { + return .{ + .flags = .{ + .has_max = t.flags.limits_has_max, + .is_shared = t.flags.limits_is_shared, + }, + .min = t.limits_min, + .max = t.limits_max, + }; + } +}; -/// Creates synthetic linker-symbols, but only if they are being referenced from -/// any object file. For instance, the `__heap_base` symbol will only be created, -/// if one or multiple undefined references exist. When none exist, the symbol will -/// not be created, ensuring we don't unnecessarily emit unreferenced symbols. -fn resolveLazySymbols(wasm: *Wasm) !void { - const comp = wasm.base.comp; - const gpa = comp.gpa; - const shared_memory = comp.config.shared_memory; +/// Uniquely identifies a section across all objects. By subtracting +/// `Object.local_section_index_base` from this one, the Object section index +/// is obtained. +pub const ObjectSectionIndex = enum(u32) { + _, +}; - if (wasm.getExistingString("__heap_base")) |name_offset| { - if (wasm.undefs.fetchSwapRemove(name_offset)) |kv| { - const loc = try wasm.createSyntheticSymbolOffset(name_offset, .data); - try wasm.discarded.putNoClobber(gpa, kv.value, loc); - _ = wasm.resolved_symbols.swapRemove(loc); // we don't want to emit this symbol, only use it for relocations. - } - } +/// Index into `object_tables`. 
+pub const ObjectTableIndex = enum(u32) { + _, - if (wasm.getExistingString("__heap_end")) |name_offset| { - if (wasm.undefs.fetchSwapRemove(name_offset)) |kv| { - const loc = try wasm.createSyntheticSymbolOffset(name_offset, .data); - try wasm.discarded.putNoClobber(gpa, kv.value, loc); - _ = wasm.resolved_symbols.swapRemove(loc); - } + pub fn ptr(index: ObjectTableIndex, wasm: *const Wasm) *Table { + return &wasm.object_tables.items[@intFromEnum(index)]; } - if (!shared_memory) { - if (wasm.getExistingString("__tls_base")) |name_offset| { - if (wasm.undefs.fetchSwapRemove(name_offset)) |kv| { - const loc = try wasm.createSyntheticSymbolOffset(name_offset, .global); - try wasm.discarded.putNoClobber(gpa, kv.value, loc); - _ = wasm.resolved_symbols.swapRemove(kv.value); - const symbol = wasm.symbolLocSymbol(loc); - symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN); - symbol.index = @intCast(wasm.imported_globals_count + wasm.wasm_globals.items.len); - try wasm.wasm_globals.append(gpa, .{ - .global_type = .{ .valtype = .i32, .mutable = true }, - .init = .{ .i32_const = undefined }, - }); - } - } + pub fn chaseWeak(i: ObjectTableIndex, wasm: *const Wasm) ObjectTableIndex { + const table = ptr(i, wasm); + if (table.flags.binding != .weak) return i; + const name = table.name.unwrap().?; + const import = wasm.object_table_imports.getPtr(name).?; + assert(import.resolution != .unresolved); // otherwise it should resolve to this one. + return import.resolution.unpack().object_table; } -} +}; -pub fn findGlobalSymbol(wasm: *const Wasm, name: []const u8) ?SymbolLoc { - const name_index = wasm.getExistingString(name) orelse return null; - return wasm.globals.get(name_index); -} +/// Index into `Wasm.object_globals`. +pub const ObjectGlobalIndex = enum(u32) { + _, -fn checkUndefinedSymbols(wasm: *const Wasm) !void { - const comp = wasm.base.comp; - const diags = &wasm.base.comp.link_diags; - if (comp.config.output_mode == .Obj) return; - if (wasm.import_symbols) return; - - var found_undefined_symbols = false; - for (wasm.undefs.values()) |undef| { - const symbol = wasm.symbolLocSymbol(undef); - if (symbol.tag == .data) { - found_undefined_symbols = true; - const symbol_name = wasm.symbolLocName(undef); - switch (undef.file) { - .zig_object => { - // TODO: instead of saying the zig compilation unit, attach an actual source location - // to this diagnostic - diags.addError("unresolved symbol in Zig compilation unit: {s}", .{symbol_name}); - }, - .none => { - diags.addError("internal linker bug: unresolved synthetic symbol: {s}", .{symbol_name}); - }, - _ => { - const path = wasm.objects.items[@intFromEnum(undef.file)].path; - diags.addParseError(path, "unresolved symbol: {s}", .{symbol_name}); - }, - } - } - } - if (found_undefined_symbols) { - return error.LinkFailure; + pub fn ptr(index: ObjectGlobalIndex, wasm: *const Wasm) *ObjectGlobal { + return &wasm.object_globals.items[@intFromEnum(index)]; } -} - -pub fn deinit(wasm: *Wasm) void { - const gpa = wasm.base.comp.gpa; - if (wasm.llvm_object) |llvm_object| llvm_object.deinit(); - for (wasm.func_types.items) |*func_type| { - func_type.deinit(gpa); - } - for (wasm.segment_info.values()) |segment_info| { - gpa.free(segment_info.name); - } - if (wasm.zig_object) |zig_obj| { - zig_obj.deinit(wasm); - } - for (wasm.objects.items) |*object| { - object.deinit(gpa); + pub fn name(index: ObjectGlobalIndex, wasm: *const Wasm) OptionalString { + return index.ptr(wasm).name; } - for (wasm.lazy_archives.items) |*lazy_archive| lazy_archive.deinit(gpa); - 
wasm.lazy_archives.deinit(gpa);
-
-    if (wasm.globals.get(wasm.preloaded_strings.__wasm_init_tls)) |loc| {
-        const atom = wasm.symbol_atom.get(loc).?;
-        wasm.getAtomPtr(atom).deinit(gpa);
+    pub fn chaseWeak(i: ObjectGlobalIndex, wasm: *const Wasm) ObjectGlobalIndex {
+        const global = ptr(i, wasm);
+        if (global.flags.binding != .weak) return i;
+        const import_name = global.name.unwrap().?;
+        const import = wasm.object_global_imports.getPtr(import_name).?;
+        assert(import.resolution != .unresolved); // otherwise it should resolve to this one.
+        return import.resolution.unpack(wasm).object_global;
     }
+};
-    wasm.synthetic_symbols.deinit(gpa);
-    wasm.globals.deinit(gpa);
-    wasm.resolved_symbols.deinit(gpa);
-    wasm.undefs.deinit(gpa);
-    wasm.discarded.deinit(gpa);
-    wasm.symbol_atom.deinit(gpa);
-    wasm.atoms.deinit(gpa);
-    wasm.managed_atoms.deinit(gpa);
-    wasm.segments.deinit(gpa);
-    wasm.data_segments.deinit(gpa);
-    wasm.segment_info.deinit(gpa);
-    wasm.objects.deinit(gpa);
+pub const ObjectMemory = extern struct {
+    flags: SymbolFlags,
+    name: OptionalString,
+    limits_min: u32,
+    limits_max: u32,
-    // free output sections
-    wasm.imports.deinit(gpa);
-    wasm.func_types.deinit(gpa);
-    wasm.functions.deinit(gpa);
-    wasm.wasm_globals.deinit(gpa);
-    wasm.function_table.deinit(gpa);
-    wasm.tables.deinit(gpa);
-    wasm.init_funcs.deinit(gpa);
-    wasm.exports.deinit(gpa);
+    /// Index into `Wasm.object_memories`.
+    pub const Index = enum(u32) {
+        _,
-    wasm.string_bytes.deinit(gpa);
-    wasm.string_table.deinit(gpa);
-    wasm.dump_argv_list.deinit(gpa);
-}
+        pub fn ptr(index: Index, wasm: *const Wasm) *ObjectMemory {
+            return &wasm.object_memories.items[@intFromEnum(index)];
+        }
+    };
-pub fn updateFunc(wasm: *Wasm, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
-    if (build_options.skip_non_native and builtin.object_format != .wasm) {
-        @panic("Attempted to compile for object format that was disabled by build configuration");
+    pub fn limits(om: *const ObjectMemory) std.wasm.Limits {
+        return .{
+            .flags = .{
+                .has_max = om.flags.limits_has_max,
+                .is_shared = om.flags.limits_is_shared,
+            },
+            .min = om.limits_min,
+            .max = om.limits_max,
+        };
     }
-    if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(pt, func_index, air, liveness);
-    try wasm.zig_object.?.updateFunc(wasm, pt, func_index, air, liveness);
-}
+};
-// Generate code for the "Nav", storing it in memory to be later written to
-// the file on flush().
-pub fn updateNav(wasm: *Wasm, pt: Zcu.PerThread, nav: InternPool.Nav.Index) !void {
-    if (build_options.skip_non_native and builtin.object_format != .wasm) {
-        @panic("Attempted to compile for object format that was disabled by build configuration");
-    }
-    if (wasm.llvm_object) |llvm_object| return llvm_object.updateNav(pt, nav);
-    try wasm.zig_object.?.updateNav(wasm, pt, nav);
-}
+/// Index into `Wasm.object_functions`.
+pub const ObjectFunctionIndex = enum(u32) {
+    _,
-pub fn updateLineNumber(wasm: *Wasm, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) !void {
-    if (wasm.llvm_object) |_| return;
-    try wasm.zig_object.?.updateLineNumber(pt, ti_id);
-}
+    pub fn ptr(index: ObjectFunctionIndex, wasm: *const Wasm) *ObjectFunction {
+        return &wasm.object_functions.items[@intFromEnum(index)];
+    }
-/// From a given symbol location, returns its `wasm.GlobalType`.
-/// Asserts the Symbol represents a global.
-fn getGlobalType(wasm: *const Wasm, loc: SymbolLoc) std.wasm.GlobalType { - const symbol = wasm.symbolLocSymbol(loc); - assert(symbol.tag == .global); - const is_undefined = symbol.isUndefined(); - switch (loc.file) { - .zig_object => { - const zo = wasm.zig_object.?; - return if (is_undefined) - zo.imports.get(loc.index).?.kind.global - else - zo.globals.items[symbol.index - zo.imported_globals_count].global_type; - }, - .none => { - return if (is_undefined) - wasm.imports.get(loc).?.kind.global - else - wasm.wasm_globals.items[symbol.index].global_type; - }, - _ => { - const obj = &wasm.objects.items[@intFromEnum(loc.file)]; - return if (is_undefined) - obj.findImport(obj.symtable[@intFromEnum(loc.index)]).kind.global - else - obj.globals[symbol.index - obj.imported_globals_count].global_type; - }, + pub fn toOptional(i: ObjectFunctionIndex) OptionalObjectFunctionIndex { + const result: OptionalObjectFunctionIndex = @enumFromInt(@intFromEnum(i)); + assert(result != .none); + return result; } -} -/// From a given symbol location, returns its `wasm.Type`. -/// Asserts the Symbol represents a function. -fn getFunctionSignature(wasm: *const Wasm, loc: SymbolLoc) std.wasm.Type { - const symbol = wasm.symbolLocSymbol(loc); - assert(symbol.tag == .function); - const is_undefined = symbol.isUndefined(); - switch (loc.file) { - .zig_object => { - const zo = wasm.zig_object.?; - if (is_undefined) { - const type_index = zo.imports.get(loc.index).?.kind.function; - return zo.func_types.items[type_index]; - } - const sym = zo.symbols.items[@intFromEnum(loc.index)]; - const type_index = zo.functions.items[sym.index].type_index; - return zo.func_types.items[type_index]; - }, - .none => { - if (is_undefined) { - const type_index = wasm.imports.get(loc).?.kind.function; - return wasm.func_types.items[type_index]; - } - return wasm.func_types.items[ - wasm.functions.get(.{ - .file = .none, - .index = symbol.index, - }).?.func.type_index - ]; - }, - _ => { - const obj = &wasm.objects.items[@intFromEnum(loc.file)]; - if (is_undefined) { - const type_index = obj.findImport(obj.symtable[@intFromEnum(loc.index)]).kind.function; - return obj.func_types[type_index]; - } - const sym = obj.symtable[@intFromEnum(loc.index)]; - const type_index = obj.functions[sym.index - obj.imported_functions_count].type_index; - return obj.func_types[type_index]; - }, + pub fn chaseWeak(i: ObjectFunctionIndex, wasm: *const Wasm) ObjectFunctionIndex { + const func = ptr(i, wasm); + if (func.flags.binding != .weak) return i; + const name = func.name.unwrap().?; + const import = wasm.object_function_imports.getPtr(name).?; + assert(import.resolution != .unresolved); // otherwise it should resolve to this one. + return import.resolution.unpack(wasm).object_function; } -} +}; -/// Returns the symbol index from a symbol of which its flag is set global, -/// such as an exported or imported symbol. -/// If the symbol does not yet exist, creates a new one symbol instead -/// and then returns the index to it. -pub fn getGlobalSymbol(wasm: *Wasm, name: []const u8, lib_name: ?[]const u8) !Symbol.Index { - _ = lib_name; - const name_index = try wasm.internString(name); - return wasm.zig_object.?.getGlobalSymbol(wasm.base.comp.gpa, name_index); -} +/// Index into `object_functions`, or null. +pub const OptionalObjectFunctionIndex = enum(u32) { + none = std.math.maxInt(u32), + _, -/// For a given `Nav`, find the given symbol index's atom, and create a relocation for the type. 
-/// Returns the given pointer address -pub fn getNavVAddr( - wasm: *Wasm, - pt: Zcu.PerThread, - nav: InternPool.Nav.Index, - reloc_info: link.File.RelocInfo, -) !u64 { - return wasm.zig_object.?.getNavVAddr(wasm, pt, nav, reloc_info); -} + pub fn unwrap(i: OptionalObjectFunctionIndex) ?ObjectFunctionIndex { + if (i == .none) return null; + return @enumFromInt(@intFromEnum(i)); + } +}; -pub fn lowerUav( - wasm: *Wasm, - pt: Zcu.PerThread, - uav: InternPool.Index, - explicit_alignment: Alignment, - src_loc: Zcu.LazySrcLoc, -) !codegen.GenResult { - return wasm.zig_object.?.lowerUav(wasm, pt, uav, explicit_alignment, src_loc); -} +pub const ObjectDataSegment = extern struct { + /// `none` if segment info custom subsection is missing. + name: OptionalString, + flags: Flags, + payload: DataPayload, + offset: u32, + object_index: ObjectIndex, + + pub const Flags = packed struct(u32) { + alive: bool = false, + is_passive: bool = false, + alignment: Alignment = .none, + /// Signals that the segment contains only null terminated strings allowing + /// the linker to perform merging. + strings: bool = false, + /// The segment contains thread-local data. This means that a unique copy + /// of this segment will be created for each thread. + tls: bool = false, + /// If the object file is included in the final link, the segment should be + /// retained in the final output regardless of whether it is used by the + /// program. + retain: bool = false, + + _: u21 = 0, + }; -pub fn getUavVAddr(wasm: *Wasm, uav: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 { - return wasm.zig_object.?.getUavVAddr(wasm, uav, reloc_info); -} + /// Index into `Wasm.object_data_segments`. + pub const Index = enum(u32) { + _, -pub fn deleteExport( - wasm: *Wasm, - exported: Zcu.Exported, - name: InternPool.NullTerminatedString, -) void { - if (wasm.llvm_object) |_| return; - return wasm.zig_object.?.deleteExport(wasm, exported, name); -} + pub fn ptr(i: Index, wasm: *const Wasm) *ObjectDataSegment { + return &wasm.object_data_segments.items[@intFromEnum(i)]; + } + }; -pub fn updateExports( - wasm: *Wasm, - pt: Zcu.PerThread, - exported: Zcu.Exported, - export_indices: []const u32, -) !void { - if (build_options.skip_non_native and builtin.object_format != .wasm) { - @panic("Attempted to compile for object format that was disabled by build configuration"); + pub fn relocations(ods: *const ObjectDataSegment, wasm: *const Wasm) ObjectRelocation.IterableSlice { + const data_section_index = ods.object_index.ptr(wasm).data_section_index.?; + const relocs = wasm.object_relocations_table.get(data_section_index) orelse return .empty; + return .init(relocs, ods.offset, ods.payload.len, wasm); } - if (wasm.llvm_object) |llvm_object| return llvm_object.updateExports(pt, exported, export_indices); - return wasm.zig_object.?.updateExports(wasm, pt, exported, export_indices); -} +}; -pub fn freeDecl(wasm: *Wasm, decl_index: InternPool.DeclIndex) void { - if (wasm.llvm_object) |llvm_object| return llvm_object.freeDecl(decl_index); - return wasm.zig_object.?.freeDecl(wasm, decl_index); -} +/// A local or exported global const from an object file. +pub const ObjectData = extern struct { + segment: ObjectDataSegment.Index, + /// Index into the object segment payload. Must be <= the segment's size. + offset: u32, + /// May be zero. `offset + size` must be <= the segment's size. + size: u32, + name: String, + flags: SymbolFlags, -/// Assigns indexes to all indirect functions. 
-/// Starts at offset 1, where the value `0` represents an unresolved function pointer -/// or null-pointer -fn mapFunctionTable(wasm: *Wasm) void { - var it = wasm.function_table.iterator(); - var index: u32 = 1; - while (it.next()) |entry| { - const symbol = wasm.symbolLocSymbol(entry.key_ptr.*); - if (symbol.isAlive()) { - entry.value_ptr.* = index; - index += 1; - } else { - wasm.function_table.removeByPtr(entry.key_ptr); + /// Index into `Wasm.object_datas`. + pub const Index = enum(u32) { + _, + + pub fn ptr(i: Index, wasm: *const Wasm) *ObjectData { + return &wasm.object_datas.items[@intFromEnum(i)]; } - } + }; +}; - if (wasm.import_table or wasm.base.comp.config.output_mode == .Obj) { - const sym_loc = wasm.globals.get(wasm.preloaded_strings.__indirect_function_table).?; - const import = wasm.imports.getPtr(sym_loc).?; - import.kind.table.limits.min = index - 1; // we start at index 1. - } else if (index > 1) { - log.debug("Appending indirect function table", .{}); - const sym_loc = wasm.globals.get(wasm.preloaded_strings.__indirect_function_table).?; - const symbol = wasm.symbolLocSymbol(sym_loc); - const table = &wasm.tables.items[symbol.index - wasm.imported_tables_count]; - table.limits = .{ .min = index, .max = index, .flags = 0x1 }; - } -} +pub const ObjectDataImport = extern struct { + resolution: Resolution, + flags: SymbolFlags, + source_location: SourceLocation, + + pub const Resolution = enum(u32) { + unresolved, + __zig_error_names, + __zig_error_name_table, + __heap_base, + __heap_end, + /// Next, an `ObjectData.Index`. + /// Next, index into `uavs_obj` or `uavs_exe` depending on whether emitting an object. + /// Next, index into `navs_obj` or `navs_exe` depending on whether emitting an object. + _, -/// From a given index, append the given `Atom` at the back of the linked list. -/// Simply inserts it into the map of atoms when it doesn't exist yet. -pub fn appendAtomAtIndex(wasm: *Wasm, index: Segment.Index, atom_index: Atom.Index) !void { - const gpa = wasm.base.comp.gpa; - const atom = wasm.getAtomPtr(atom_index); - if (wasm.atoms.getPtr(index)) |last_index_ptr| { - atom.prev = last_index_ptr.*; - last_index_ptr.* = atom_index; - } else { - try wasm.atoms.putNoClobber(gpa, index, atom_index); - } -} + const first_object = @intFromEnum(Resolution.__heap_end) + 1; + + pub const Unpacked = union(enum) { + unresolved, + __zig_error_names, + __zig_error_name_table, + __heap_base, + __heap_end, + object: ObjectData.Index, + uav_exe: UavsExeIndex, + uav_obj: UavsObjIndex, + nav_exe: NavsExeIndex, + nav_obj: NavsObjIndex, + }; -fn allocateAtoms(wasm: *Wasm) !void { - // first sort the data segments - try sortDataSegments(wasm); - - var it = wasm.atoms.iterator(); - while (it.next()) |entry| { - const segment = wasm.segmentPtr(entry.key_ptr.*); - var atom_index = entry.value_ptr.*; - if (entry.key_ptr.toOptional() == wasm.code_section_index) { - // Code section is allocated upon writing as they are required to be ordered - // to synchronise with the function section. - continue; - } - var offset: u32 = 0; - while (true) { - const atom = wasm.getAtomPtr(atom_index); - const symbol_loc = atom.symbolLoc(); - // Ensure we get the original symbol, so we verify the correct symbol on whether - // it is dead or not and ensure an atom is removed when dead. - // This is required as we may have parsed aliases into atoms. 
- const sym = switch (symbol_loc.file) { - .zig_object => wasm.zig_object.?.symbols.items[@intFromEnum(symbol_loc.index)], - .none => wasm.synthetic_symbols.items[@intFromEnum(symbol_loc.index)], - _ => wasm.objects.items[@intFromEnum(symbol_loc.file)].symtable[@intFromEnum(symbol_loc.index)], + pub fn unpack(r: Resolution, wasm: *const Wasm) Unpacked { + return switch (r) { + .unresolved => .unresolved, + .__zig_error_names => .__zig_error_names, + .__zig_error_name_table => .__zig_error_name_table, + .__heap_base => .__heap_base, + .__heap_end => .__heap_end, + _ => { + const object_index = @intFromEnum(r) - first_object; + + const uav_index = if (object_index < wasm.object_datas.items.len) + return .{ .object = @enumFromInt(object_index) } + else + object_index - wasm.object_datas.items.len; + + const comp = wasm.base.comp; + const is_obj = comp.config.output_mode == .Obj; + if (is_obj) { + const nav_index = if (uav_index < wasm.uavs_obj.entries.len) + return .{ .uav_obj = @enumFromInt(uav_index) } + else + uav_index - wasm.uavs_obj.entries.len; + + return .{ .nav_obj = @enumFromInt(nav_index) }; + } else { + const nav_index = if (uav_index < wasm.uavs_exe.entries.len) + return .{ .uav_exe = @enumFromInt(uav_index) } + else + uav_index - wasm.uavs_exe.entries.len; + + return .{ .nav_exe = @enumFromInt(nav_index) }; + } + }, }; + } - // Dead symbols must be unlinked from the linked-list to prevent them - // from being emit into the binary. - if (sym.isDead()) { - if (entry.value_ptr.* == atom_index and atom.prev != .null) { - // When the atom is dead and is also the first atom retrieved from wasm.atoms(index) we update - // the entry to point it to the previous atom to ensure we do not start with a dead symbol that - // was removed and therefore do not emit any code at all. - entry.value_ptr.* = atom.prev; - } - if (atom.prev == .null) break; - atom_index = atom.prev; - atom.prev = .null; - continue; - } - offset = @intCast(atom.alignment.forward(offset)); - atom.offset = offset; - log.debug("Atom '{s}' allocated from 0x{x:0>8} to 0x{x:0>8} size={d}", .{ - wasm.symbolLocName(symbol_loc), - offset, - offset + atom.size, - atom.size, - }); - offset += atom.size; - if (atom.prev == .null) break; - atom_index = atom.prev; + pub fn pack(wasm: *const Wasm, unpacked: Unpacked) Resolution { + return switch (unpacked) { + .unresolved => .unresolved, + .__zig_error_names => .__zig_error_names, + .__zig_error_name_table => .__zig_error_name_table, + .__heap_base => .__heap_base, + .__heap_end => .__heap_end, + .object => |i| @enumFromInt(first_object + @intFromEnum(i)), + inline .uav_exe, .uav_obj => |i| @enumFromInt(first_object + wasm.object_datas.items.len + @intFromEnum(i)), + .nav_exe => |i| @enumFromInt(first_object + wasm.object_datas.items.len + wasm.uavs_exe.entries.len + @intFromEnum(i)), + .nav_obj => |i| @enumFromInt(first_object + wasm.object_datas.items.len + wasm.uavs_obj.entries.len + @intFromEnum(i)), + }; } - segment.size = @intCast(segment.alignment.forward(offset)); - } -} -/// For each data symbol, sets the virtual address. -fn allocateVirtualAddresses(wasm: *Wasm) void { - for (wasm.resolved_symbols.keys()) |loc| { - const symbol = wasm.symbolLocSymbol(loc); - if (symbol.tag != .data or symbol.isDead()) { - // Only data symbols have virtual addresses. - // Dead symbols do not get allocated, so we don't need to set their virtual address either. 
- continue; - } - const atom_index = wasm.symbol_atom.get(loc) orelse { - // synthetic symbol that does not contain an atom - continue; - }; + pub fn fromObjectDataIndex(wasm: *const Wasm, object_data_index: ObjectData.Index) Resolution { + return pack(wasm, .{ .object = object_data_index }); + } - const atom = wasm.getAtom(atom_index); - const merge_segment = wasm.base.comp.config.output_mode != .Obj; - const segment_info = switch (atom.file) { - .zig_object => wasm.zig_object.?.segment_info.items, - .none => wasm.segment_info.values(), - _ => wasm.objects.items[@intFromEnum(atom.file)].segment_info, - }; - const segment_name = segment_info[symbol.index].outputName(merge_segment); - const segment_index = wasm.data_segments.get(segment_name).?; - const segment = wasm.segmentPtr(segment_index); - - // TLS symbols have their virtual address set relative to their own TLS segment, - // rather than the entire Data section. - if (symbol.hasFlag(.WASM_SYM_TLS)) { - symbol.virtual_address = atom.offset; - } else { - symbol.virtual_address = atom.offset + segment.offset; + pub fn objectDataSegment(r: Resolution, wasm: *const Wasm) ?ObjectDataSegment.Index { + return switch (unpack(r, wasm)) { + .unresolved => unreachable, + .object => |i| i.ptr(wasm).segment, + .__zig_error_names, + .__zig_error_name_table, + .__heap_base, + .__heap_end, + .uav_exe, + .uav_obj, + .nav_exe, + .nav_obj, + => null, + }; } - } -} -fn sortDataSegments(wasm: *Wasm) !void { - const gpa = wasm.base.comp.gpa; - var new_mapping: std.StringArrayHashMapUnmanaged(Segment.Index) = .empty; - try new_mapping.ensureUnusedCapacity(gpa, wasm.data_segments.count()); - errdefer new_mapping.deinit(gpa); + pub fn dataLoc(r: Resolution, wasm: *const Wasm) DataLoc { + return switch (unpack(r, wasm)) { + .unresolved => unreachable, + .object => |i| { + const ptr = i.ptr(wasm); + return .{ + .segment = .fromObjectDataSegment(wasm, ptr.segment), + .offset = ptr.offset, + }; + }, + .__zig_error_names => .{ .segment = .__zig_error_names, .offset = 0 }, + .__zig_error_name_table => .{ .segment = .__zig_error_name_table, .offset = 0 }, + .__heap_base => .{ .segment = .__heap_base, .offset = 0 }, + .__heap_end => .{ .segment = .__heap_end, .offset = 0 }, + .uav_exe => @panic("TODO"), + .uav_obj => @panic("TODO"), + .nav_exe => @panic("TODO"), + .nav_obj => @panic("TODO"), + }; + } + }; - const keys = try gpa.dupe([]const u8, wasm.data_segments.keys()); - defer gpa.free(keys); + /// Points into `Wasm.object_data_imports`. + pub const Index = enum(u32) { + _, - const SortContext = struct { - fn sort(_: void, lhs: []const u8, rhs: []const u8) bool { - return order(lhs) < order(rhs); + pub fn value(i: @This(), wasm: *const Wasm) *ObjectDataImport { + return &wasm.object_data_imports.values()[@intFromEnum(i)]; } - fn order(name: []const u8) u8 { - if (mem.startsWith(u8, name, ".rodata")) return 0; - if (mem.startsWith(u8, name, ".data")) return 1; - if (mem.startsWith(u8, name, ".text")) return 2; - return 3; + pub fn fromSymbolName(wasm: *const Wasm, name: String) ?Index { + return @enumFromInt(wasm.object_data_imports.getIndex(name) orelse return null); } }; +}; - mem.sort([]const u8, keys, {}, SortContext.sort); - for (keys) |key| { - const segment_index = wasm.data_segments.get(key).?; - new_mapping.putAssumeCapacity(key, segment_index); - } - wasm.data_segments.deinit(gpa); - wasm.data_segments = new_mapping; -} +pub const DataPayload = extern struct { + off: Off, + /// The size in bytes of the data representing the segment within the section. 
+ len: u32, -/// Obtains all initfuncs from each object file, verifies its function signature, -/// and then appends it to our final `init_funcs` list. -/// After all functions have been inserted, the functions will be ordered based -/// on their priority. -/// NOTE: This function must be called before we merged any other section. -/// This is because all init funcs in the object files contain references to the -/// original functions and their types. We need to know the type to verify it doesn't -/// contain any parameters. -fn setupInitFunctions(wasm: *Wasm) !void { - const gpa = wasm.base.comp.gpa; - const diags = &wasm.base.comp.link_diags; - // There's no constructors for Zig so we can simply search through linked object files only. - for (wasm.objects.items, 0..) |*object, object_index| { - try wasm.init_funcs.ensureUnusedCapacity(gpa, object.init_funcs.len); - for (object.init_funcs) |init_func| { - const symbol = object.symtable[init_func.symbol_index]; - const ty: std.wasm.Type = if (symbol.isUndefined()) ty: { - const imp: Import = object.findImport(symbol); - break :ty object.func_types[imp.kind.function]; - } else ty: { - const func_index = symbol.index - object.imported_functions_count; - const func = object.functions[func_index]; - break :ty object.func_types[func.type_index]; - }; - if (ty.params.len != 0) { - var err = try diags.addErrorWithNotes(0); - try err.addMsg("constructor functions cannot take arguments: '{s}'", .{wasm.stringSlice(symbol.name)}); - } - log.debug("appended init func '{s}'\n", .{wasm.stringSlice(symbol.name)}); - wasm.init_funcs.appendAssumeCapacity(.{ - .index = @enumFromInt(init_func.symbol_index), - .file = @enumFromInt(object_index), - .priority = init_func.priority, - }); - try wasm.mark(.{ - .index = @enumFromInt(init_func.symbol_index), - .file = @enumFromInt(object_index), - }); - } - } + pub const Off = enum(u32) { + /// The payload is all zeroes (bss section). + none = std.math.maxInt(u32), + /// Points into string_bytes. No corresponding string_table entry. + _, - // sort the initfunctions based on their priority - mem.sort(InitFuncLoc, wasm.init_funcs.items, {}, InitFuncLoc.lessThan); + pub fn unwrap(off: Off) ?u32 { + return if (off == .none) null else @intFromEnum(off); + } + }; - if (wasm.init_funcs.items.len > 0) { - const loc = wasm.globals.get(wasm.preloaded_strings.__wasm_call_ctors).?; - try wasm.mark(loc); + pub fn slice(p: DataPayload, wasm: *const Wasm) []const u8 { + return wasm.string_bytes.items[p.off.unwrap().?..][0..p.len]; } -} +}; -/// Creates a function body for the `__wasm_call_ctors` symbol. -/// Loops over all constructors found in `init_funcs` and calls them -/// respectively based on their priority which was sorted by `setupInitFunctions`. -/// NOTE: This function must be called after we merged all sections to ensure the -/// references to the function stored in the symbol have been finalized so we end -/// up calling the resolved function. -fn initializeCallCtorsFunction(wasm: *Wasm) !void { - const gpa = wasm.base.comp.gpa; - // No code to emit, so also no ctors to call - if (wasm.code_section_index == .none) { - // Make sure to remove it from the resolved symbols so we do not emit - // it within any section. TODO: Remove this once we implement garbage collection. - const loc = wasm.globals.get(wasm.preloaded_strings.__wasm_call_ctors).?; - assert(wasm.resolved_symbols.swapRemove(loc)); - return; - } +/// A reference to a local or exported global const. 
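+/// (Illustrative, non-normative example of the index encoding defined below,
+/// assuming an executable output and three entries in `object_data_segments`:
+/// `pack(.{ .object = 1 })` yields `first_object + 1`,
+/// `pack(.{ .uav_exe = 0 })` yields `first_object + 3`, and
+/// `pack(.{ .nav_exe = 0 })` yields `first_object + 3 + uavs_exe.entries.len`;
+/// `unpack` reverses this by subtracting the same lengths in the same order.)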
+pub const DataSegmentId = enum(u32) { + __zig_error_names, + __zig_error_name_table, + /// All name string bytes for all `@tagName` implementations, concatenated together. + __zig_tag_names, + /// All tag name slices for all `@tagName` implementations, concatenated together. + __zig_tag_name_table, + /// This and `__heap_end` are better retrieved via a global, but there is + /// some suboptimal code out there (wasi libc) that additionally needs them + /// as data symbols. + __heap_base, + __heap_end, + /// First, an `ObjectDataSegment.Index`. + /// Next, index into `uavs_obj` or `uavs_exe` depending on whether emitting an object. + /// Next, index into `navs_obj` or `navs_exe` depending on whether emitting an object. + _, - var function_body = std.ArrayList(u8).init(gpa); - defer function_body.deinit(); - const writer = function_body.writer(); + const first_object = @intFromEnum(DataSegmentId.__heap_end) + 1; - // Create the function body - { - // Write locals count (we have none) - try leb.writeUleb128(writer, @as(u32, 0)); + pub const Category = enum { + /// Thread-local variables. + tls, + /// Data that is not zero initialized and not threadlocal. + data, + /// Zero-initialized. Does not require corresponding bytes in the + /// output file. + zero, + }; - // call constructors - for (wasm.init_funcs.items) |init_func_loc| { - const symbol = init_func_loc.getSymbol(wasm); - const func = wasm.functions.values()[symbol.index - wasm.imported_functions_count].func; - const ty = wasm.func_types.items[func.type_index]; + pub const Unpacked = union(enum) { + __zig_error_names, + __zig_error_name_table, + __zig_tag_names, + __zig_tag_name_table, + __heap_base, + __heap_end, + object: ObjectDataSegment.Index, + uav_exe: UavsExeIndex, + uav_obj: UavsObjIndex, + nav_exe: NavsExeIndex, + nav_obj: NavsObjIndex, + }; - // Call function by its function index - try writer.writeByte(std.wasm.opcode(.call)); - try leb.writeUleb128(writer, symbol.index); + pub fn pack(wasm: *const Wasm, unpacked: Unpacked) DataSegmentId { + return switch (unpacked) { + .__zig_error_names => .__zig_error_names, + .__zig_error_name_table => .__zig_error_name_table, + .__zig_tag_names => .__zig_tag_names, + .__zig_tag_name_table => .__zig_tag_name_table, + .__heap_base => .__heap_base, + .__heap_end => .__heap_end, + .object => |i| @enumFromInt(first_object + @intFromEnum(i)), + inline .uav_exe, .uav_obj => |i| @enumFromInt(first_object + wasm.object_data_segments.items.len + @intFromEnum(i)), + .nav_exe => |i| @enumFromInt(first_object + wasm.object_data_segments.items.len + wasm.uavs_exe.entries.len + @intFromEnum(i)), + .nav_obj => |i| @enumFromInt(first_object + wasm.object_data_segments.items.len + wasm.uavs_obj.entries.len + @intFromEnum(i)), + }; + } - // drop all returned values from the stack as __wasm_call_ctors has no return value - for (ty.returns) |_| { - try writer.writeByte(std.wasm.opcode(.drop)); - } - } + pub fn unpack(id: DataSegmentId, wasm: *const Wasm) Unpacked { + return switch (id) { + .__zig_error_names => .__zig_error_names, + .__zig_error_name_table => .__zig_error_name_table, + .__zig_tag_names => .__zig_tag_names, + .__zig_tag_name_table => .__zig_tag_name_table, + .__heap_base => .__heap_base, + .__heap_end => .__heap_end, + _ => { + const object_index = @intFromEnum(id) - first_object; - // End function body - try writer.writeByte(std.wasm.opcode(.end)); - } + const uav_index = if (object_index < wasm.object_data_segments.items.len) + return .{ .object = @enumFromInt(object_index) } + else + 
object_index - wasm.object_data_segments.items.len; - try wasm.createSyntheticFunction( - wasm.preloaded_strings.__wasm_call_ctors, - std.wasm.Type{ .params = &.{}, .returns = &.{} }, - &function_body, - ); -} + const comp = wasm.base.comp; + const is_obj = comp.config.output_mode == .Obj; + if (is_obj) { + const nav_index = if (uav_index < wasm.uavs_obj.entries.len) + return .{ .uav_obj = @enumFromInt(uav_index) } + else + uav_index - wasm.uavs_obj.entries.len; -fn createSyntheticFunction( - wasm: *Wasm, - symbol_name: String, - func_ty: std.wasm.Type, - function_body: *std.ArrayList(u8), -) !void { - const gpa = wasm.base.comp.gpa; - const loc = wasm.globals.get(symbol_name).?; - const symbol = wasm.symbolLocSymbol(loc); - if (symbol.isDead()) { - return; + return .{ .nav_obj = @enumFromInt(nav_index) }; + } else { + const nav_index = if (uav_index < wasm.uavs_exe.entries.len) + return .{ .uav_exe = @enumFromInt(uav_index) } + else + uav_index - wasm.uavs_exe.entries.len; + + return .{ .nav_exe = @enumFromInt(nav_index) }; + } + }, + }; } - const ty_index = try wasm.putOrGetFuncType(func_ty); - // create function with above type - const func_index = wasm.imported_functions_count + @as(u32, @intCast(wasm.functions.count())); - try wasm.functions.putNoClobber( - gpa, - .{ .file = .none, .index = func_index }, - .{ .func = .{ .type_index = ty_index }, .sym_index = loc.index }, - ); - symbol.index = func_index; - - // create the atom that will be output into the final binary - const atom_index = try wasm.createAtom(loc.index, .none); - const atom = wasm.getAtomPtr(atom_index); - atom.size = @intCast(function_body.items.len); - atom.code = function_body.moveToUnmanaged(); - try wasm.appendAtomAtIndex(wasm.code_section_index.unwrap().?, atom_index); -} -/// Unlike `createSyntheticFunction` this function is to be called by -/// the codegeneration backend. This will not allocate the created Atom yet. -/// Returns the index of the symbol. -pub fn createFunction( - wasm: *Wasm, - symbol_name: []const u8, - func_ty: std.wasm.Type, - function_body: *std.ArrayList(u8), - relocations: *std.ArrayList(Relocation), -) !Symbol.Index { - return wasm.zig_object.?.createFunction(wasm, symbol_name, func_ty, function_body, relocations); -} + pub fn fromNav(wasm: *const Wasm, nav_index: InternPool.Nav.Index) DataSegmentId { + const comp = wasm.base.comp; + const is_obj = comp.config.output_mode == .Obj; + return pack(wasm, if (is_obj) .{ + .nav_obj = @enumFromInt(wasm.navs_obj.getIndex(nav_index).?), + } else .{ + .nav_exe = @enumFromInt(wasm.navs_exe.getIndex(nav_index).?), + }); + } -/// If required, sets the function index in the `start` section. 
-fn setupStartSection(wasm: *Wasm) !void { - if (wasm.globals.get(wasm.preloaded_strings.__wasm_init_memory)) |loc| { - wasm.entry = wasm.symbolLocSymbol(loc).index; + pub fn fromObjectDataSegment(wasm: *const Wasm, object_data_segment: ObjectDataSegment.Index) DataSegmentId { + return pack(wasm, .{ .object = object_data_segment }); } -} -fn initializeTLSFunction(wasm: *Wasm) !void { - const comp = wasm.base.comp; - const gpa = comp.gpa; - const shared_memory = comp.config.shared_memory; + pub fn category(id: DataSegmentId, wasm: *const Wasm) Category { + return switch (unpack(id, wasm)) { + .__zig_error_names, + .__zig_error_name_table, + .__zig_tag_names, + .__zig_tag_name_table, + .__heap_base, + .__heap_end, + => .data, - if (!shared_memory) return; + .object => |i| { + const ptr = i.ptr(wasm); + if (ptr.flags.tls) return .tls; + if (wasm.isBss(ptr.name)) return .zero; + return .data; + }, + inline .uav_exe, .uav_obj => |i| if (i.value(wasm).code.off == .none) .zero else .data, + inline .nav_exe, .nav_obj => |i| { + const zcu = wasm.base.comp.zcu.?; + const ip = &zcu.intern_pool; + const nav = ip.getNav(i.key(wasm).*); + if (nav.isThreadlocal(ip)) return .tls; + const code = i.value(wasm).code; + return if (code.off == .none) .zero else .data; + }, + }; + } - // ensure function is marked as we must emit it - wasm.symbolLocSymbol(wasm.globals.get(wasm.preloaded_strings.__wasm_init_tls).?).mark(); + pub fn isTls(id: DataSegmentId, wasm: *const Wasm) bool { + return switch (unpack(id, wasm)) { + .__zig_error_names, + .__zig_error_name_table, + .__zig_tag_names, + .__zig_tag_name_table, + .__heap_base, + .__heap_end, + => false, + + .object => |i| i.ptr(wasm).flags.tls, + .uav_exe, .uav_obj => false, + inline .nav_exe, .nav_obj => |i| { + const zcu = wasm.base.comp.zcu.?; + const ip = &zcu.intern_pool; + const nav = ip.getNav(i.key(wasm).*); + return nav.isThreadlocal(ip); + }, + }; + } - var function_body = std.ArrayList(u8).init(gpa); - defer function_body.deinit(); - const writer = function_body.writer(); + pub fn isBss(id: DataSegmentId, wasm: *const Wasm) bool { + return id.category(wasm) == .zero; + } + + pub fn name(id: DataSegmentId, wasm: *const Wasm) []const u8 { + return switch (unpack(id, wasm)) { + .__zig_error_names, + .__zig_error_name_table, + .__zig_tag_names, + .__zig_tag_name_table, + .uav_exe, + .uav_obj, + .__heap_base, + .__heap_end, + => ".data", + + .object => |i| i.ptr(wasm).name.unwrap().?.slice(wasm), + inline .nav_exe, .nav_obj => |i| { + const zcu = wasm.base.comp.zcu.?; + const ip = &zcu.intern_pool; + const nav = ip.getNav(i.key(wasm).*); + return nav.getLinkSection().toSlice(ip) orelse switch (category(id, wasm)) { + .tls => ".tdata", + .data => ".data", + .zero => ".bss", + }; + }, + }; + } - // locals - try writer.writeByte(0); + pub fn alignment(id: DataSegmentId, wasm: *const Wasm) Alignment { + return switch (unpack(id, wasm)) { + .__zig_error_names, .__zig_tag_names => .@"1", + .__zig_error_name_table, .__zig_tag_name_table, .__heap_base, .__heap_end => wasm.pointerAlignment(), + .object => |i| i.ptr(wasm).flags.alignment, + inline .uav_exe, .uav_obj => |i| { + const zcu = wasm.base.comp.zcu.?; + const ip = &zcu.intern_pool; + const ip_index = i.key(wasm).*; + if (wasm.overaligned_uavs.get(ip_index)) |a| return a; + const ty: Zcu.Type = .fromInterned(ip.typeOf(ip_index)); + const result = ty.abiAlignment(zcu); + assert(result != .none); + return result; + }, + inline .nav_exe, .nav_obj => |i| { + const zcu = wasm.base.comp.zcu.?; + const ip = 
&zcu.intern_pool; + const nav = ip.getNav(i.key(wasm).*); + const explicit = nav.getAlignment(); + if (explicit != .none) return explicit; + const ty: Zcu.Type = .fromInterned(nav.typeOf(ip)); + const result = ty.abiAlignment(zcu); + assert(result != .none); + return result; + }, + }; + } - // If there's a TLS segment, initialize it during runtime using the bulk-memory feature - if (wasm.data_segments.getIndex(".tdata")) |data_index| { - const segment_index = wasm.data_segments.entries.items(.value)[data_index]; - const segment = wasm.segmentPtr(segment_index); + pub fn refCount(id: DataSegmentId, wasm: *const Wasm) u32 { + return switch (unpack(id, wasm)) { + .__zig_error_names => @intCast(wasm.error_name_offs.items.len), + .__zig_error_name_table => wasm.error_name_table_ref_count, + .__zig_tag_names => @intCast(wasm.tag_name_offs.items.len), + .__zig_tag_name_table => wasm.tag_name_table_ref_count, + .object, .uav_obj, .nav_obj, .__heap_base, .__heap_end => 0, + inline .uav_exe, .nav_exe => |i| i.value(wasm).count, + }; + } - const param_local: u32 = 0; + pub fn isPassive(id: DataSegmentId, wasm: *const Wasm) bool { + const comp = wasm.base.comp; + if (comp.config.import_memory) return true; + return switch (unpack(id, wasm)) { + .__zig_error_names, + .__zig_error_name_table, + .__zig_tag_names, + .__zig_tag_name_table, + .__heap_base, + .__heap_end, + => false, + + .object => |i| i.ptr(wasm).flags.is_passive, + .uav_exe, .uav_obj, .nav_exe, .nav_obj => false, + }; + } - try writer.writeByte(std.wasm.opcode(.local_get)); - try leb.writeUleb128(writer, param_local); + pub fn isEmpty(id: DataSegmentId, wasm: *const Wasm) bool { + return switch (unpack(id, wasm)) { + .__zig_error_names, + .__zig_error_name_table, + .__zig_tag_names, + .__zig_tag_name_table, + .__heap_base, + .__heap_end, + => false, - const tls_base_loc = wasm.globals.get(wasm.preloaded_strings.__tls_base).?; - try writer.writeByte(std.wasm.opcode(.global_set)); - try leb.writeUleb128(writer, wasm.symbolLocSymbol(tls_base_loc).index); + .object => |i| i.ptr(wasm).payload.off == .none, + inline .uav_exe, .uav_obj, .nav_exe, .nav_obj => |i| i.value(wasm).code.off == .none, + }; + } - // load stack values for the bulk-memory operation - { - try writer.writeByte(std.wasm.opcode(.local_get)); - try leb.writeUleb128(writer, param_local); + pub fn size(id: DataSegmentId, wasm: *const Wasm) u32 { + return switch (unpack(id, wasm)) { + .__zig_error_names => @intCast(wasm.error_name_bytes.items.len), + .__zig_error_name_table => { + const comp = wasm.base.comp; + const zcu = comp.zcu.?; + const errors_len = wasm.error_name_offs.items.len; + const elem_size = Zcu.Type.slice_const_u8_sentinel_0.abiSize(zcu); + return @intCast(errors_len * elem_size); + }, + .__zig_tag_names => @intCast(wasm.tag_name_bytes.items.len), + .__zig_tag_name_table => { + const comp = wasm.base.comp; + const zcu = comp.zcu.?; + const table_len = wasm.tag_name_offs.items.len; + const elem_size = Zcu.Type.slice_const_u8_sentinel_0.abiSize(zcu); + return @intCast(table_len * elem_size); + }, + .__heap_base, .__heap_end => wasm.pointerSize(), + .object => |i| i.ptr(wasm).payload.len, + inline .uav_exe, .uav_obj, .nav_exe, .nav_obj => |i| i.value(wasm).code.len, + }; + } +}; - try writer.writeByte(std.wasm.opcode(.i32_const)); - try leb.writeUleb128(writer, @as(u32, 0)); //segment offset +pub const DataLoc = struct { + segment: Wasm.DataSegmentId, + offset: u32, - try writer.writeByte(std.wasm.opcode(.i32_const)); - try leb.writeUleb128(writer, @as(u32, 
segment.size)); //segment offset - } + pub fn fromObjectDataIndex(wasm: *const Wasm, i: Wasm.ObjectData.Index) DataLoc { + const ptr = i.ptr(wasm); + return .{ + .segment = .fromObjectDataSegment(wasm, ptr.segment), + .offset = ptr.offset, + }; + } - // perform the bulk-memory operation to initialize the data segment - try writer.writeByte(std.wasm.opcode(.misc_prefix)); - try leb.writeUleb128(writer, std.wasm.miscOpcode(.memory_init)); - // segment immediate - try leb.writeUleb128(writer, @as(u32, @intCast(data_index))); - // memory index immediate (always 0) - try leb.writeUleb128(writer, @as(u32, 0)); + pub fn fromDataImportId(wasm: *const Wasm, id: Wasm.DataImportId) DataLoc { + return switch (id.unpack(wasm)) { + .object_data_import => |i| .fromObjectDataImportIndex(wasm, i), + .zcu_import => |i| .fromZcuImport(wasm, i), + }; } - // If we have to perform any TLS relocations, call the corresponding function - // which performs all runtime TLS relocations. This is a synthetic function, - // generated by the linker. - if (wasm.globals.get(wasm.preloaded_strings.__wasm_apply_global_tls_relocs)) |loc| { - try writer.writeByte(std.wasm.opcode(.call)); - try leb.writeUleb128(writer, wasm.symbolLocSymbol(loc).index); - wasm.symbolLocSymbol(loc).mark(); + pub fn fromObjectDataImportIndex(wasm: *const Wasm, i: Wasm.ObjectDataImport.Index) DataLoc { + return i.value(wasm).resolution.dataLoc(wasm); } - try writer.writeByte(std.wasm.opcode(.end)); + pub fn fromZcuImport(wasm: *const Wasm, zcu_import: ZcuImportIndex) DataLoc { + const nav_index = zcu_import.ptr(wasm).*; + return .{ + .segment = .fromNav(wasm, nav_index), + .offset = 0, + }; + } +}; - try wasm.createSyntheticFunction( - wasm.preloaded_strings.__wasm_init_tls, - std.wasm.Type{ .params = &.{.i32}, .returns = &.{} }, - &function_body, - ); -} +/// Index into `Wasm.uavs`. +pub const UavIndex = enum(u32) { + _, +}; -fn setupImports(wasm: *Wasm) !void { - const gpa = wasm.base.comp.gpa; - log.debug("Merging imports", .{}); - for (wasm.resolved_symbols.keys()) |symbol_loc| { - const object_id = symbol_loc.file.unwrap() orelse { - // Synthetic symbols will already exist in the `import` section - continue; - }; +pub const CustomSegment = extern struct { + payload: Payload, + flags: SymbolFlags, + section_name: String, - const symbol = wasm.symbolLocSymbol(symbol_loc); - if (symbol.isDead()) continue; - if (!symbol.requiresImport()) continue; - if (symbol.name == wasm.preloaded_strings.__indirect_function_table) continue; + pub const Payload = DataPayload; +}; + +/// An index into string_bytes where a wasm expression is found. +pub const Expr = enum(u32) { + _, - log.debug("Symbol '{s}' will be imported from the host", .{wasm.stringSlice(symbol.name)}); - const import = objectImport(wasm, object_id, symbol_loc.index); + pub const end = @intFromEnum(std.wasm.Opcode.end); - // We copy the import to a new import to ensure the names contain references - // to the internal string table, rather than of the object file. 
- const new_imp: Import = .{ - .module_name = import.module_name, - .name = import.name, - .kind = import.kind, + pub fn slice(index: Expr, wasm: *const Wasm) [:end]const u8 { + const start_slice = wasm.string_bytes.items[@intFromEnum(index)..]; + const end_pos = Object.exprEndPos(start_slice, 0) catch |err| switch (err) { + error.InvalidInitOpcode => unreachable, }; - // TODO: De-duplicate imports when they contain the same names and type - try wasm.imports.putNoClobber(gpa, symbol_loc, new_imp); - } - - // Assign all indexes of the imports to their representing symbols - var function_index: u32 = 0; - var global_index: u32 = 0; - var table_index: u32 = 0; - var it = wasm.imports.iterator(); - while (it.next()) |entry| { - const symbol = wasm.symbolLocSymbol(entry.key_ptr.*); - const import: Import = entry.value_ptr.*; - switch (import.kind) { - .function => { - symbol.index = function_index; - function_index += 1; - }, - .global => { - symbol.index = global_index; - global_index += 1; - }, - .table => { - symbol.index = table_index; - table_index += 1; - }, - else => unreachable, - } + return start_slice[0..end_pos :end]; } - wasm.imported_functions_count = function_index; - wasm.imported_globals_count = global_index; - wasm.imported_tables_count = table_index; +}; - log.debug("Merged ({d}) functions, ({d}) globals, and ({d}) tables into import section", .{ - function_index, - global_index, - table_index, - }); -} +pub const FunctionType = extern struct { + params: ValtypeList, + returns: ValtypeList, -/// Takes the global, function and table section from each linked object file -/// and merges it into a single section for each. -fn mergeSections(wasm: *Wasm) !void { - const gpa = wasm.base.comp.gpa; + /// Index into func_types + pub const Index = enum(u32) { + _, - var removed_duplicates = std.ArrayList(SymbolLoc).init(gpa); - defer removed_duplicates.deinit(); + pub fn ptr(i: Index, wasm: *const Wasm) *FunctionType { + return &wasm.func_types.keys()[@intFromEnum(i)]; + } - for (wasm.resolved_symbols.keys()) |sym_loc| { - const object_id = sym_loc.file.unwrap() orelse { - // Synthetic symbols already live in the corresponding sections. - continue; - }; + pub fn fmt(i: Index, wasm: *const Wasm) Formatter { + return i.ptr(wasm).fmt(wasm); + } + }; - const symbol = objectSymbol(wasm, object_id, sym_loc.index); - if (symbol.isDead() or symbol.isUndefined()) { - // Skip undefined symbols as they go in the `import` section - continue; - } - - switch (symbol.tag) { - .function => { - const gop = try wasm.functions.getOrPut( - gpa, - .{ .file = sym_loc.file, .index = symbol.index }, - ); - if (gop.found_existing) { - // We found an alias to the same function, discard this symbol in favor of - // the original symbol and point the discard function to it. This ensures - // we only emit a single function, instead of duplicates. - // we favor keeping the global over a local. 
- const original_loc: SymbolLoc = .{ .file = gop.key_ptr.file, .index = gop.value_ptr.sym_index }; - const original_sym = wasm.symbolLocSymbol(original_loc); - if (original_sym.isLocal() and symbol.isGlobal()) { - original_sym.unmark(); - try wasm.discarded.put(gpa, original_loc, sym_loc); - try removed_duplicates.append(original_loc); - } else { - symbol.unmark(); - try wasm.discarded.putNoClobber(gpa, sym_loc, original_loc); - try removed_duplicates.append(sym_loc); - continue; + pub const format = @compileError("can't format without *Wasm reference"); + + pub fn eql(a: FunctionType, b: FunctionType) bool { + return a.params == b.params and a.returns == b.returns; + } + + pub fn fmt(ft: FunctionType, wasm: *const Wasm) Formatter { + return .{ .wasm = wasm, .ft = ft }; + } + + const Formatter = struct { + wasm: *const Wasm, + ft: FunctionType, + + pub fn format( + self: Formatter, + comptime format_string: []const u8, + options: std.fmt.FormatOptions, + writer: anytype, + ) !void { + if (format_string.len != 0) std.fmt.invalidFmtError(format_string, self); + _ = options; + const params = self.ft.params.slice(self.wasm); + const returns = self.ft.returns.slice(self.wasm); + + try writer.writeByte('('); + for (params, 0..) |param, i| { + try writer.print("{s}", .{@tagName(param)}); + if (i + 1 != params.len) { + try writer.writeAll(", "); + } + } + try writer.writeAll(") -> "); + if (returns.len == 0) { + try writer.writeAll("nil"); + } else { + for (returns, 0..) |return_ty, i| { + try writer.print("{s}", .{@tagName(return_ty)}); + if (i + 1 != returns.len) { + try writer.writeAll(", "); } } - gop.value_ptr.* = .{ - .func = objectFunction(wasm, object_id, sym_loc.index), - .sym_index = sym_loc.index, - }; - symbol.index = @as(u32, @intCast(gop.index)) + wasm.imported_functions_count; - }, - .global => { - const index = symbol.index - objectImportedFunctions(wasm, object_id); - const original_global = objectGlobals(wasm, object_id)[index]; - symbol.index = @as(u32, @intCast(wasm.wasm_globals.items.len)) + wasm.imported_globals_count; - try wasm.wasm_globals.append(gpa, original_global); - }, - .table => { - const index = symbol.index - objectImportedFunctions(wasm, object_id); - // assert it's a regular relocatable object file as `ZigObject` will never - // contain a table. - const original_table = wasm.objectById(object_id).?.tables[index]; - symbol.index = @as(u32, @intCast(wasm.tables.items.len)) + wasm.imported_tables_count; - try wasm.tables.append(gpa, original_table); - }, - .dead, .undefined => unreachable, - else => {}, + } } - } + }; +}; - // For any removed duplicates, remove them from the resolved symbols list - for (removed_duplicates.items) |sym_loc| { - assert(wasm.resolved_symbols.swapRemove(sym_loc)); - gc_log.debug("Removed duplicate for function '{s}'", .{wasm.symbolLocName(sym_loc)}); - } +/// Represents a function entry, holding the index to its type +pub const Func = extern struct { + type_index: FunctionType.Index, +}; - log.debug("Merged ({d}) functions", .{wasm.functions.count()}); - log.debug("Merged ({d}) globals", .{wasm.wasm_globals.items.len}); - log.debug("Merged ({d}) tables", .{wasm.tables.items.len}); -} +/// Type reflection is used on the field names to autopopulate each field +/// during initialization. 
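+/// A minimal sketch of that initialization, assuming a `wasm: *Wasm` in scope
+/// (illustrative only; the actual call site lives elsewhere in this file and
+/// may differ):
+///
+///     inline for (@typeInfo(PreloadedStrings).@"struct".fields) |field| {
+///         @field(wasm.preloaded_strings, field.name) = try wasm.internString(field.name);
+///     }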
+const PreloadedStrings = struct { + __heap_base: String, + __heap_end: String, + __indirect_function_table: String, + __linear_memory: String, + __stack_pointer: String, + __tls_align: String, + __tls_base: String, + __tls_size: String, + __wasm_apply_global_tls_relocs: String, + __wasm_call_ctors: String, + __wasm_init_memory: String, + __wasm_init_memory_flag: String, + __wasm_init_tls: String, + __zig_error_names: String, + __zig_error_name_table: String, + __zig_errors_len: String, + _initialize: String, + _start: String, + memory: String, +}; -/// Merges function types of all object files into the final -/// 'types' section, while assigning the type index to the representing -/// section (import, export, function). -fn mergeTypes(wasm: *Wasm) !void { - const gpa = wasm.base.comp.gpa; - // A map to track which functions have already had their - // type inserted. If we do this for the same function multiple times, - // it will be overwritten with the incorrect type. - var dirty = std.AutoHashMap(u32, void).init(gpa); - try dirty.ensureUnusedCapacity(@as(u32, @intCast(wasm.functions.count()))); - defer dirty.deinit(); - - for (wasm.resolved_symbols.keys()) |sym_loc| { - const object_id = sym_loc.file.unwrap() orelse { - // zig code-generated symbols are already present in final type section - continue; - }; +/// Index into string_bytes +pub const String = enum(u32) { + _, + + const Table = std.HashMapUnmanaged(String, void, TableContext, std.hash_map.default_max_load_percentage); + + const TableContext = struct { + bytes: []const u8, - const symbol = objectSymbol(wasm, object_id, sym_loc.index); - if (symbol.tag != .function or symbol.isDead()) { - // Only functions have types. Only retrieve the type of referenced functions. - continue; + pub fn eql(_: @This(), a: String, b: String) bool { + return a == b; } - if (symbol.isUndefined()) { - log.debug("Adding type from extern function '{s}'", .{wasm.symbolLocName(sym_loc)}); - const import: *Import = wasm.imports.getPtr(sym_loc) orelse continue; - const original_type = objectFuncTypes(wasm, object_id)[import.kind.function]; - import.kind.function = try wasm.putOrGetFuncType(original_type); - } else if (!dirty.contains(symbol.index)) { - log.debug("Adding type from function '{s}'", .{wasm.symbolLocName(sym_loc)}); - const func = &wasm.functions.values()[symbol.index - wasm.imported_functions_count].func; - func.type_index = try wasm.putOrGetFuncType(objectFuncTypes(wasm, object_id)[func.type_index]); - dirty.putAssumeCapacityNoClobber(symbol.index, {}); + pub fn hash(ctx: @This(), key: String) u64 { + return std.hash_map.hashString(mem.sliceTo(ctx.bytes[@intFromEnum(key)..], 0)); } - } - log.debug("Completed merging and deduplicating types. 
Total count: ({d})", .{wasm.func_types.items.len}); -} + }; -fn checkExportNames(wasm: *Wasm) !void { - const force_exp_names = wasm.export_symbol_names; - const diags = &wasm.base.comp.link_diags; - if (force_exp_names.len > 0) { - var failed_exports = false; - - for (force_exp_names) |exp_name| { - const exp_name_interned = try wasm.internString(exp_name); - const loc = wasm.globals.get(exp_name_interned) orelse { - var err = try diags.addErrorWithNotes(0); - try err.addMsg("could not export '{s}', symbol not found", .{exp_name}); - failed_exports = true; - continue; - }; + const TableIndexAdapter = struct { + bytes: []const u8, - const symbol = wasm.symbolLocSymbol(loc); - symbol.setFlag(.WASM_SYM_EXPORTED); + pub fn eql(ctx: @This(), a: []const u8, b: String) bool { + return mem.eql(u8, a, mem.sliceTo(ctx.bytes[@intFromEnum(b)..], 0)); } - if (failed_exports) { - return error.FlushFailure; + pub fn hash(_: @This(), adapted_key: []const u8) u64 { + assert(mem.indexOfScalar(u8, adapted_key, 0) == null); + return std.hash_map.hashString(adapted_key); } + }; + + pub fn slice(index: String, wasm: *const Wasm) [:0]const u8 { + const start_slice = wasm.string_bytes.items[@intFromEnum(index)..]; + return start_slice[0..mem.indexOfScalar(u8, start_slice, 0).? :0]; } -} -fn setupExports(wasm: *Wasm) !void { - const comp = wasm.base.comp; - const gpa = comp.gpa; - if (comp.config.output_mode == .Obj) return; - log.debug("Building exports from symbols", .{}); - - for (wasm.resolved_symbols.keys()) |sym_loc| { - const symbol = wasm.symbolLocSymbol(sym_loc); - if (!symbol.isExported(comp.config.rdynamic)) continue; - - const exp: Export = if (symbol.tag == .data) exp: { - const global_index = @as(u32, @intCast(wasm.imported_globals_count + wasm.wasm_globals.items.len)); - try wasm.wasm_globals.append(gpa, .{ - .global_type = .{ .valtype = .i32, .mutable = false }, - .init = .{ .i32_const = @as(i32, @intCast(symbol.virtual_address)) }, - }); - break :exp .{ - .name = symbol.name, - .kind = .global, - .index = global_index, - }; - } else .{ - .name = symbol.name, - .kind = symbol.tag.externalType(), - .index = symbol.index, - }; - log.debug("Exporting symbol '{s}' as '{s}' at index: ({d})", .{ - wasm.stringSlice(symbol.name), - wasm.stringSlice(exp.name), - exp.index, - }); - try wasm.exports.append(gpa, exp); + pub fn toOptional(i: String) OptionalString { + const result: OptionalString = @enumFromInt(@intFromEnum(i)); + assert(result != .none); + return result; } +}; - log.debug("Completed building exports. Total count: ({d})", .{wasm.exports.items.len}); -} +pub const OptionalString = enum(u32) { + none = std.math.maxInt(u32), + _, -fn setupStart(wasm: *Wasm) !void { - const comp = wasm.base.comp; - const diags = &wasm.base.comp.link_diags; - // do not export entry point if user set none or no default was set. 
- const entry_name = wasm.entry_name.unwrap() orelse return; - - const symbol_loc = wasm.globals.get(entry_name) orelse { - var err = try diags.addErrorWithNotes(1); - try err.addMsg("entry symbol '{s}' missing", .{wasm.stringSlice(entry_name)}); - try err.addNote("'-fno-entry' suppresses this error", .{}); - return error.LinkFailure; - }; + pub fn unwrap(i: OptionalString) ?String { + if (i == .none) return null; + return @enumFromInt(@intFromEnum(i)); + } + + pub fn slice(index: OptionalString, wasm: *const Wasm) ?[:0]const u8 { + return (index.unwrap() orelse return null).slice(wasm); + } +}; - const symbol = wasm.symbolLocSymbol(symbol_loc); - if (symbol.tag != .function) - return diags.fail("entry symbol '{s}' is not a function", .{wasm.stringSlice(entry_name)}); +/// Stored identically to `String`. The bytes are reinterpreted as +/// `std.wasm.Valtype` elements. +pub const ValtypeList = enum(u32) { + _, - // Ensure the symbol is exported so host environment can access it - if (comp.config.output_mode != .Obj) { - symbol.setFlag(.WASM_SYM_EXPORTED); + pub fn fromString(s: String) ValtypeList { + return @enumFromInt(@intFromEnum(s)); } -} -/// Sets up the memory section of the wasm module, as well as the stack. -fn setupMemory(wasm: *Wasm) !void { - const comp = wasm.base.comp; - const diags = &wasm.base.comp.link_diags; - const shared_memory = comp.config.shared_memory; - log.debug("Setting up memory layout", .{}); - const page_size = std.wasm.page_size; // 64kb - const stack_alignment: Alignment = .@"16"; // wasm's stack alignment as specified by tool-convention - const heap_alignment: Alignment = .@"16"; // wasm's heap alignment as specified by tool-convention - - // Always place the stack at the start by default - // unless the user specified the global-base flag - var place_stack_first = true; - var memory_ptr: u64 = if (wasm.global_base) |base| blk: { - place_stack_first = false; - break :blk base; - } else 0; + pub fn slice(index: ValtypeList, wasm: *const Wasm) []const std.wasm.Valtype { + return @ptrCast(String.slice(@enumFromInt(@intFromEnum(index)), wasm)); + } +}; - const is_obj = comp.config.output_mode == .Obj; +/// Index into `Wasm.imports`. 
+pub const ZcuImportIndex = enum(u32) { + _, - const stack_ptr = if (wasm.globals.get(wasm.preloaded_strings.__stack_pointer)) |loc| index: { - const sym = wasm.symbolLocSymbol(loc); - break :index sym.index - wasm.imported_globals_count; - } else null; + pub fn ptr(index: ZcuImportIndex, wasm: *const Wasm) *InternPool.Nav.Index { + return &wasm.imports.keys()[@intFromEnum(index)]; + } - if (place_stack_first and !is_obj) { - memory_ptr = stack_alignment.forward(memory_ptr); - memory_ptr += wasm.base.stack_size; - // We always put the stack pointer global at index 0 - if (stack_ptr) |index| { - wasm.wasm_globals.items[index].init.i32_const = @as(i32, @bitCast(@as(u32, @intCast(memory_ptr)))); - } + pub fn importName(index: ZcuImportIndex, wasm: *const Wasm) String { + const zcu = wasm.base.comp.zcu.?; + const ip = &zcu.intern_pool; + const nav_index = index.ptr(wasm).*; + const ext = ip.getNav(nav_index).getResolvedExtern(ip).?; + const name_slice = ext.name.toSlice(ip); + return wasm.getExistingString(name_slice).?; } - var offset: u32 = @as(u32, @intCast(memory_ptr)); - var data_seg_it = wasm.data_segments.iterator(); - while (data_seg_it.next()) |entry| { - const segment = wasm.segmentPtr(entry.value_ptr.*); - memory_ptr = segment.alignment.forward(memory_ptr); + pub fn moduleName(index: ZcuImportIndex, wasm: *const Wasm) OptionalString { + const zcu = wasm.base.comp.zcu.?; + const ip = &zcu.intern_pool; + const nav_index = index.ptr(wasm).*; + const ext = ip.getNav(nav_index).getResolvedExtern(ip).?; + const lib_name = ext.lib_name.toSlice(ip) orelse return .none; + return wasm.getExistingString(lib_name).?.toOptional(); + } - // set TLS-related symbols - if (mem.eql(u8, entry.key_ptr.*, ".tdata")) { - if (wasm.globals.get(wasm.preloaded_strings.__tls_size)) |loc| { - const sym = wasm.symbolLocSymbol(loc); - wasm.wasm_globals.items[sym.index - wasm.imported_globals_count].init.i32_const = @intCast(segment.size); - } - if (wasm.globals.get(wasm.preloaded_strings.__tls_align)) |loc| { - const sym = wasm.symbolLocSymbol(loc); - wasm.wasm_globals.items[sym.index - wasm.imported_globals_count].init.i32_const = @intCast(segment.alignment.toByteUnits().?); - } - if (wasm.globals.get(wasm.preloaded_strings.__tls_base)) |loc| { - const sym = wasm.symbolLocSymbol(loc); - wasm.wasm_globals.items[sym.index - wasm.imported_globals_count].init.i32_const = if (shared_memory) - @as(i32, 0) - else - @as(i32, @intCast(memory_ptr)); - } - } + pub fn functionType(index: ZcuImportIndex, wasm: *Wasm) FunctionType.Index { + const comp = wasm.base.comp; + const target = &comp.root_mod.resolved_target.result; + const zcu = comp.zcu.?; + const ip = &zcu.intern_pool; + const nav_index = index.ptr(wasm).*; + const ext = ip.getNav(nav_index).getResolvedExtern(ip).?; + const fn_info = zcu.typeToFunc(.fromInterned(ext.ty)).?; + return getExistingFunctionType(wasm, fn_info.cc, fn_info.param_types.get(ip), .fromInterned(fn_info.return_type), target).?; + } - memory_ptr += segment.size; - segment.offset = offset; - offset += segment.size; + pub fn globalType(index: ZcuImportIndex, wasm: *const Wasm) ObjectGlobal.Type { + _ = index; + _ = wasm; + unreachable; // Zig has no way to create Wasm globals yet. 
} +}; - // create the memory init flag which is used by the init memory function - if (shared_memory and wasm.hasPassiveInitializationSegments()) { - // align to pointer size - memory_ptr = mem.alignForward(u64, memory_ptr, 4); - const loc = try wasm.createSyntheticSymbol(wasm.preloaded_strings.__wasm_init_memory_flag, .data); - const sym = wasm.symbolLocSymbol(loc); - sym.mark(); - sym.virtual_address = @as(u32, @intCast(memory_ptr)); - memory_ptr += 4; +/// 0. Index into `Wasm.object_function_imports`. +/// 1. Index into `Wasm.imports`. +pub const FunctionImportId = enum(u32) { + _, + + pub const Unpacked = union(enum) { + object_function_import: FunctionImport.Index, + zcu_import: ZcuImportIndex, + }; + + pub fn pack(unpacked: Unpacked, wasm: *const Wasm) FunctionImportId { + return switch (unpacked) { + .object_function_import => |i| @enumFromInt(@intFromEnum(i)), + .zcu_import => |i| @enumFromInt(@intFromEnum(i) + wasm.object_function_imports.entries.len), + }; } - if (!place_stack_first and !is_obj) { - memory_ptr = stack_alignment.forward(memory_ptr); - memory_ptr += wasm.base.stack_size; - if (stack_ptr) |index| { - wasm.wasm_globals.items[index].init.i32_const = @as(i32, @bitCast(@as(u32, @intCast(memory_ptr)))); - } + pub fn unpack(id: FunctionImportId, wasm: *const Wasm) Unpacked { + const i = @intFromEnum(id); + if (i < wasm.object_function_imports.entries.len) return .{ .object_function_import = @enumFromInt(i) }; + const zcu_import_i = i - wasm.object_function_imports.entries.len; + return .{ .zcu_import = @enumFromInt(zcu_import_i) }; } - // One of the linked object files has a reference to the __heap_base symbol. - // We must set its virtual address so it can be used in relocations. - if (wasm.globals.get(wasm.preloaded_strings.__heap_base)) |loc| { - const symbol = wasm.symbolLocSymbol(loc); - symbol.virtual_address = @intCast(heap_alignment.forward(memory_ptr)); + pub fn fromObject(function_import_index: FunctionImport.Index, wasm: *const Wasm) FunctionImportId { + return pack(.{ .object_function_import = function_import_index }, wasm); } - // Setup the max amount of pages - // For now we only support wasm32 by setting the maximum allowed memory size 2^32-1 - const max_memory_allowed: u64 = (1 << 32) - 1; + pub fn fromZcuImport(zcu_import: ZcuImportIndex, wasm: *const Wasm) FunctionImportId { + return pack(.{ .zcu_import = zcu_import }, wasm); + } - if (wasm.initial_memory) |initial_memory| { - if (!std.mem.isAlignedGeneric(u64, initial_memory, page_size)) { - var err = try diags.addErrorWithNotes(0); - try err.addMsg("Initial memory must be {d}-byte aligned", .{page_size}); - } - if (memory_ptr > initial_memory) { - var err = try diags.addErrorWithNotes(0); - try err.addMsg("Initial memory too small, must be at least {d} bytes", .{memory_ptr}); - } - if (initial_memory > max_memory_allowed) { - var err = try diags.addErrorWithNotes(0); - try err.addMsg("Initial memory exceeds maximum memory {d}", .{max_memory_allowed}); + /// This function is allowed O(N) lookup because it is only called during + /// diagnostic generation. + pub fn sourceLocation(id: FunctionImportId, wasm: *const Wasm) SourceLocation { + switch (id.unpack(wasm)) { + .object_function_import => |obj_func_index| { + // TODO binary search + for (wasm.objects.items, 0..) 
|o, i| { + if (o.function_imports.off <= @intFromEnum(obj_func_index) and + o.function_imports.off + o.function_imports.len > @intFromEnum(obj_func_index)) + { + return .pack(.{ .object_index = @enumFromInt(i) }, wasm); + } + } else unreachable; + }, + .zcu_import => return .zig_object_nofile, // TODO give a better source location } - memory_ptr = initial_memory; } - memory_ptr = mem.alignForward(u64, memory_ptr, std.wasm.page_size); - // In case we do not import memory, but define it ourselves, - // set the minimum amount of pages on the memory section. - wasm.memories.limits.min = @as(u32, @intCast(memory_ptr / page_size)); - log.debug("Total memory pages: {d}", .{wasm.memories.limits.min}); - if (wasm.globals.get(wasm.preloaded_strings.__heap_end)) |loc| { - const symbol = wasm.symbolLocSymbol(loc); - symbol.virtual_address = @as(u32, @intCast(memory_ptr)); + pub fn importName(id: FunctionImportId, wasm: *const Wasm) String { + return switch (unpack(id, wasm)) { + inline .object_function_import, .zcu_import => |i| i.importName(wasm), + }; } - if (wasm.max_memory) |max_memory| { - if (!std.mem.isAlignedGeneric(u64, max_memory, page_size)) { - var err = try diags.addErrorWithNotes(0); - try err.addMsg("Maximum memory must be {d}-byte aligned", .{page_size}); - } - if (memory_ptr > max_memory) { - var err = try diags.addErrorWithNotes(0); - try err.addMsg("Maximum memory too small, must be at least {d} bytes", .{memory_ptr}); - } - if (max_memory > max_memory_allowed) { - var err = try diags.addErrorWithNotes(0); - try err.addMsg("Maximum memory exceeds maximum amount {d}", .{max_memory_allowed}); - } - wasm.memories.limits.max = @as(u32, @intCast(max_memory / page_size)); - wasm.memories.limits.setFlag(.WASM_LIMITS_FLAG_HAS_MAX); - if (shared_memory) { - wasm.memories.limits.setFlag(.WASM_LIMITS_FLAG_IS_SHARED); + pub fn moduleName(id: FunctionImportId, wasm: *const Wasm) OptionalString { + return switch (unpack(id, wasm)) { + inline .object_function_import, .zcu_import => |i| i.moduleName(wasm), + }; + } + + pub fn functionType(id: FunctionImportId, wasm: *Wasm) FunctionType.Index { + return switch (unpack(id, wasm)) { + inline .object_function_import, .zcu_import => |i| i.functionType(wasm), + }; + } + + /// Asserts not emitting an object, and `Wasm.import_symbols` is false. + pub fn undefinedAllowed(id: FunctionImportId, wasm: *const Wasm) bool { + assert(!wasm.import_symbols); + assert(wasm.base.comp.config.output_mode != .Obj); + return switch (unpack(id, wasm)) { + .object_function_import => |i| { + const import = i.value(wasm); + return import.flags.binding == .strong and import.module_name != .none; + }, + .zcu_import => |i| { + const zcu = wasm.base.comp.zcu.?; + const ip = &zcu.intern_pool; + const ext = ip.getNav(i.ptr(wasm).*).getResolvedExtern(ip).?; + return !ext.is_weak_linkage and ext.lib_name != .none; + }, + }; + } +}; + +/// 0. Index into `object_global_imports`. +/// 1. Index into `imports`. 
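// Illustrative sketch, not part of the patch: ZcuImportIndex and FunctionImportId
// above (and GlobalImportId/DataImportId below) share one packing scheme: a single
// u32 namespace in which values below the object-import table length index that
// table, and everything at or above it indexes `Wasm.imports` after subtracting
// that length. The stand-in names here (PackedId, object_import_count) are
// hypothetical; only the offset trick matches the patch.
const std = @import("std");

const PackedId = enum(u32) {
    _,

    const Unpacked = union(enum) {
        object_import: u32,
        zcu_import: u32,
    };

    fn pack(unpacked: Unpacked, object_import_count: u32) PackedId {
        return switch (unpacked) {
            .object_import => |i| @enumFromInt(i),
            .zcu_import => |i| @enumFromInt(i + object_import_count),
        };
    }

    fn unpack(id: PackedId, object_import_count: u32) Unpacked {
        const i = @intFromEnum(id);
        if (i < object_import_count) return .{ .object_import = i };
        return .{ .zcu_import = i - object_import_count };
    }
};

test "packed import id round trip" {
    const n: u32 = 3; // pretend the objects contributed 3 function imports
    const obj = PackedId.pack(.{ .object_import = 2 }, n);
    const zcu = PackedId.pack(.{ .zcu_import = 0 }, n);
    try std.testing.expectEqual(@as(u32, 2), obj.unpack(n).object_import);
    try std.testing.expectEqual(@as(u32, 0), zcu.unpack(n).zcu_import);
    // Zcu imports occupy the id range immediately after the object imports.
    try std.testing.expectEqual(@as(u32, 3), @intFromEnum(zcu));
}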
+pub const GlobalImportId = enum(u32) { + _, + + pub const Unpacked = union(enum) { + object_global_import: GlobalImport.Index, + zcu_import: ZcuImportIndex, + }; + + pub fn pack(unpacked: Unpacked, wasm: *const Wasm) GlobalImportId { + return switch (unpacked) { + .object_global_import => |i| @enumFromInt(@intFromEnum(i)), + .zcu_import => |i| @enumFromInt(@intFromEnum(i) + wasm.object_global_imports.entries.len), + }; + } + + pub fn unpack(id: GlobalImportId, wasm: *const Wasm) Unpacked { + const i = @intFromEnum(id); + if (i < wasm.object_global_imports.entries.len) return .{ .object_global_import = @enumFromInt(i) }; + const zcu_import_i = i - wasm.object_global_imports.entries.len; + return .{ .zcu_import = @enumFromInt(zcu_import_i) }; + } + + pub fn fromObject(object_global_import: GlobalImport.Index, wasm: *const Wasm) GlobalImportId { + return pack(.{ .object_global_import = object_global_import }, wasm); + } + + /// This function is allowed O(N) lookup because it is only called during + /// diagnostic generation. + pub fn sourceLocation(id: GlobalImportId, wasm: *const Wasm) SourceLocation { + switch (id.unpack(wasm)) { + .object_global_import => |obj_global_index| { + // TODO binary search + for (wasm.objects.items, 0..) |o, i| { + if (o.global_imports.off <= @intFromEnum(obj_global_index) and + o.global_imports.off + o.global_imports.len > @intFromEnum(obj_global_index)) + { + return .pack(.{ .object_index = @enumFromInt(i) }, wasm); + } + } else unreachable; + }, + .zcu_import => return .zig_object_nofile, // TODO give a better source location } - log.debug("Maximum memory pages: {?d}", .{wasm.memories.limits.max}); } -} -/// From a given object's index and the index of the segment, returns the corresponding -/// index of the segment within the final data section. When the segment does not yet -/// exist, a new one will be initialized and appended. The new index will be returned in that case. 
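// Illustrative sketch, not part of the patch: the sourceLocation helpers in these
// Id types scan every object and test whether the import index falls inside that
// object's contiguous `off .. off + len` range (marked "TODO binary search").
// Because the ranges are laid out back to back in index order, the owner can also
// be found by a binary search over the range starts; `Range` and `owningRange`
// below are hypothetical stand-ins.
const std = @import("std");

const Range = struct { off: u32, len: u32 };

/// Returns the index of the range containing `index`, assuming the ranges are
/// contiguous and sorted by `off`.
fn owningRange(ranges: []const Range, index: u32) ?usize {
    var lo: usize = 0;
    var hi: usize = ranges.len;
    while (lo < hi) {
        const mid = lo + (hi - lo) / 2;
        const r = ranges[mid];
        if (index < r.off) {
            hi = mid;
        } else if (index >= r.off + r.len) {
            lo = mid + 1;
        } else {
            return mid;
        }
    }
    return null;
}

test "find the object owning an import index" {
    const ranges = [_]Range{
        .{ .off = 0, .len = 4 }, // object 0 owns import indexes 0..3
        .{ .off = 4, .len = 2 }, // object 1 owns 4..5
        .{ .off = 6, .len = 3 }, // object 2 owns 6..8
    };
    try std.testing.expectEqual(@as(?usize, 1), owningRange(&ranges, 5));
    try std.testing.expectEqual(@as(?usize, 2), owningRange(&ranges, 6));
    try std.testing.expectEqual(@as(?usize, null), owningRange(&ranges, 9));
}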
-pub fn getMatchingSegment(wasm: *Wasm, object_id: ObjectId, symbol_index: Symbol.Index) !Segment.Index { - const comp = wasm.base.comp; - const gpa = comp.gpa; - const diags = &wasm.base.comp.link_diags; - const symbol = objectSymbols(wasm, object_id)[@intFromEnum(symbol_index)]; - const index: Segment.Index = @enumFromInt(wasm.segments.items.len); - const shared_memory = comp.config.shared_memory; + pub fn importName(id: GlobalImportId, wasm: *const Wasm) String { + return switch (unpack(id, wasm)) { + inline .object_global_import, .zcu_import => |i| i.importName(wasm), + }; + } - switch (symbol.tag) { - .data => { - const segment_info = objectSegmentInfo(wasm, object_id)[symbol.index]; - const merge_segment = comp.config.output_mode != .Obj; - const result = try wasm.data_segments.getOrPut(gpa, segment_info.outputName(merge_segment)); - if (!result.found_existing) { - result.value_ptr.* = index; - var flags: u32 = 0; - if (shared_memory) { - flags |= @intFromEnum(Segment.Flag.WASM_DATA_SEGMENT_IS_PASSIVE); - } - try wasm.segments.append(gpa, .{ - .alignment = .@"1", - .size = 0, - .offset = 0, - .flags = flags, - }); - try wasm.segment_info.putNoClobber(gpa, index, .{ - .name = try gpa.dupe(u8, segment_info.name), - .alignment = segment_info.alignment, - .flags = segment_info.flags, - }); - return index; - } else return result.value_ptr.*; - }, - .function => return wasm.code_section_index.unwrap() orelse blk: { - wasm.code_section_index = index.toOptional(); - try wasm.appendDummySegment(); - break :blk index; - }, - .section => { - const section_name = wasm.objectSymbol(object_id, symbol_index).name; - - inline for (@typeInfo(CustomSections).@"struct".fields) |field| { - if (@field(wasm.custom_sections, field.name).name == section_name) { - const field_ptr = &@field(wasm.custom_sections, field.name).index; - return field_ptr.unwrap() orelse { - field_ptr.* = index.toOptional(); - try wasm.appendDummySegment(); - return index; - }; - } - } else { - return diags.failParse(objectPath(wasm, object_id), "unknown section: {s}", .{ - wasm.stringSlice(section_name), - }); - } + pub fn moduleName(id: GlobalImportId, wasm: *const Wasm) OptionalString { + return switch (unpack(id, wasm)) { + inline .object_global_import, .zcu_import => |i| i.moduleName(wasm), + }; + } + + pub fn globalType(id: GlobalImportId, wasm: *Wasm) ObjectGlobal.Type { + return switch (unpack(id, wasm)) { + inline .object_global_import, .zcu_import => |i| i.globalType(wasm), + }; + } +}; + +/// 0. Index into `Wasm.object_data_imports`. +/// 1. Index into `Wasm.imports`. 
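// Illustrative sketch, not part of the patch: the importName/moduleName/functionType
// wrappers above dispatch with `inline` switch prongs so that a single prong body
// can call the same-named method on payloads of different types; the switch is
// unrolled at compile time, once per prong type. The toy types below are hypothetical.
const std = @import("std");

const ObjectSide = struct {
    fn importName(_: ObjectSide) []const u8 {
        return "from object";
    }
};
const ZcuSide = struct {
    fn importName(_: ZcuSide) []const u8 {
        return "from zcu";
    }
};
const ToyId = union(enum) {
    object: ObjectSide,
    zcu: ZcuSide,

    fn importName(id: ToyId) []const u8 {
        return switch (id) {
            // Each inline prong is instantiated with the payload's concrete type,
            // so `importName` resolves to ObjectSide.importName or ZcuSide.importName.
            inline .object, .zcu => |payload| payload.importName(),
        };
    }
};

test "inline prongs dispatch per payload type" {
    try std.testing.expectEqualStrings("from object", ToyId.importName(.{ .object = .{} }));
    try std.testing.expectEqualStrings("from zcu", ToyId.importName(.{ .zcu = .{} }));
}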
+pub const DataImportId = enum(u32) {
+    _,
+
+    pub const Unpacked = union(enum) {
+        object_data_import: ObjectDataImport.Index,
+        zcu_import: ZcuImportIndex,
+    };
+
+    pub fn pack(unpacked: Unpacked, wasm: *const Wasm) DataImportId {
+        return switch (unpacked) {
+            .object_data_import => |i| @enumFromInt(@intFromEnum(i)),
+            .zcu_import => |i| @enumFromInt(@intFromEnum(i) + wasm.object_data_imports.entries.len),
+        };
+    }
+
+    pub fn unpack(id: DataImportId, wasm: *const Wasm) Unpacked {
+        const i = @intFromEnum(id);
+        if (i < wasm.object_data_imports.entries.len) return .{ .object_data_import = @enumFromInt(i) };
+        const zcu_import_i = i - wasm.object_data_imports.entries.len;
+        return .{ .zcu_import = @enumFromInt(zcu_import_i) };
+    }
+
+    pub fn fromZcuImport(zcu_import: ZcuImportIndex, wasm: *const Wasm) DataImportId {
+        return pack(.{ .zcu_import = zcu_import }, wasm);
+    }
+
+    pub fn fromObject(object_data_import: ObjectDataImport.Index, wasm: *const Wasm) DataImportId {
+        return pack(.{ .object_data_import = object_data_import }, wasm);
+    }
+
+    pub fn sourceLocation(id: DataImportId, wasm: *const Wasm) SourceLocation {
+        switch (id.unpack(wasm)) {
+            .object_data_import => |obj_data_index| {
+                // TODO binary search
+                for (wasm.objects.items, 0..) |o, i| {
+                    if (o.data_imports.off <= @intFromEnum(obj_data_index) and
+                        o.data_imports.off + o.data_imports.len > @intFromEnum(obj_data_index))
+                    {
+                        return .pack(.{ .object_index = @enumFromInt(i) }, wasm);
+                    }
+                } else unreachable;
+            },
+            .zcu_import => return .zig_object_nofile, // TODO give a better source location
+        }
+    }
+};
+
+/// Index into `Wasm.symbol_table`.
+pub const SymbolTableIndex = enum(u32) {
+    _,
+
+    pub fn key(i: @This(), wasm: *const Wasm) *String {
+        return &wasm.symbol_table.keys()[@intFromEnum(i)];
+    }
+};
+
+pub const OutReloc = struct {
+    tag: Object.RelocationType,
+    offset: u32,
+    pointee: Pointee,
+    addend: i32,
+
+    pub const Pointee = union {
+        symbol_index: SymbolTableIndex,
+        type_index: FunctionType.Index,
+    };
+
+    pub const Slice = extern struct {
+        /// Index into `out_relocs`.
+        off: u32,
+        len: u32,
+
+        pub fn slice(s: Slice, wasm: *const Wasm) []OutReloc {
+            return wasm.relocations.items[s.off..][0..s.len];
+        }
+    };
+};
+
+pub const ObjectRelocation = struct {
+    tag: Tag,
+    /// Offset of the value to rewrite relative to the relevant section's contents.
+    /// When `offset` is zero, its position is immediately after the id and size of the section.
+    offset: u32,
+    pointee: Pointee,
+    /// Populated only for `memory_addr_*`, `function_offset_i32` and `section_offset_i32`.
+    addend: i32,
+
+    pub const Tag = enum(u8) {
+        // These use `Pointee.function`.
+        function_index_i32,
+        function_index_leb,
+        function_offset_i32,
+        function_offset_i64,
+        table_index_i32,
+        table_index_i64,
+        table_index_rel_sleb,
+        table_index_rel_sleb64,
+        table_index_sleb,
+        table_index_sleb64,
+        // These use `Pointee.symbol_name`.
+        function_import_index_i32,
+        function_import_index_leb,
+        function_import_offset_i32,
+        function_import_offset_i64,
+        table_import_index_i32,
+        table_import_index_i64,
+        table_import_index_rel_sleb,
+        table_import_index_rel_sleb64,
+        table_import_index_sleb,
+        table_import_index_sleb64,
+        // These use `Pointee.global`.
+        global_index_i32,
+        global_index_leb,
+        // These use `Pointee.symbol_name`.
+        global_import_index_i32,
+        global_import_index_leb,
+        // These use `Pointee.data`.
+ memory_addr_i32, + memory_addr_i64, + memory_addr_leb, + memory_addr_leb64, + memory_addr_locrel_i32, + memory_addr_rel_sleb, + memory_addr_rel_sleb64, + memory_addr_sleb, + memory_addr_sleb64, + memory_addr_tls_sleb, + memory_addr_tls_sleb64, + // These use `Pointee.symbol_name`. + memory_addr_import_i32, + memory_addr_import_i64, + memory_addr_import_leb, + memory_addr_import_leb64, + memory_addr_import_locrel_i32, + memory_addr_import_rel_sleb, + memory_addr_import_rel_sleb64, + memory_addr_import_sleb, + memory_addr_import_sleb64, + memory_addr_import_tls_sleb, + memory_addr_import_tls_sleb64, + /// Uses `Pointee.section`. + section_offset_i32, + /// Uses `Pointee.table`. + table_number_leb, + /// Uses `Pointee.symbol_name`. + table_import_number_leb, + /// Uses `Pointee.type_index`. + type_index_leb, + + pub fn fromType(t: Object.RelocationType) Tag { + return switch (t) { + .event_index_leb => unreachable, + .function_index_i32 => .function_index_i32, + .function_index_leb => .function_index_leb, + .function_offset_i32 => .function_offset_i32, + .function_offset_i64 => .function_offset_i64, + .global_index_i32 => .global_index_i32, + .global_index_leb => .global_index_leb, + .memory_addr_i32 => .memory_addr_i32, + .memory_addr_i64 => .memory_addr_i64, + .memory_addr_leb => .memory_addr_leb, + .memory_addr_leb64 => .memory_addr_leb64, + .memory_addr_locrel_i32 => .memory_addr_locrel_i32, + .memory_addr_rel_sleb => .memory_addr_rel_sleb, + .memory_addr_rel_sleb64 => .memory_addr_rel_sleb64, + .memory_addr_sleb => .memory_addr_sleb, + .memory_addr_sleb64 => .memory_addr_sleb64, + .memory_addr_tls_sleb => .memory_addr_tls_sleb, + .memory_addr_tls_sleb64 => .memory_addr_tls_sleb64, + .section_offset_i32 => .section_offset_i32, + .table_index_i32 => .table_index_i32, + .table_index_i64 => .table_index_i64, + .table_index_rel_sleb => .table_index_rel_sleb, + .table_index_rel_sleb64 => .table_index_rel_sleb64, + .table_index_sleb => .table_index_sleb, + .table_index_sleb64 => .table_index_sleb64, + .table_number_leb => .table_number_leb, + .type_index_leb => .type_index_leb, + }; + } + + pub fn fromTypeImport(t: Object.RelocationType) Tag { + return switch (t) { + .event_index_leb => unreachable, + .function_index_i32 => .function_import_index_i32, + .function_index_leb => .function_import_index_leb, + .function_offset_i32 => .function_import_offset_i32, + .function_offset_i64 => .function_import_offset_i64, + .global_index_i32 => .global_import_index_i32, + .global_index_leb => .global_import_index_leb, + .memory_addr_i32 => .memory_addr_import_i32, + .memory_addr_i64 => .memory_addr_import_i64, + .memory_addr_leb => .memory_addr_import_leb, + .memory_addr_leb64 => .memory_addr_import_leb64, + .memory_addr_locrel_i32 => .memory_addr_import_locrel_i32, + .memory_addr_rel_sleb => .memory_addr_import_rel_sleb, + .memory_addr_rel_sleb64 => .memory_addr_import_rel_sleb64, + .memory_addr_sleb => .memory_addr_import_sleb, + .memory_addr_sleb64 => .memory_addr_import_sleb64, + .memory_addr_tls_sleb => .memory_addr_import_tls_sleb, + .memory_addr_tls_sleb64 => .memory_addr_import_tls_sleb64, + .section_offset_i32 => unreachable, + .table_index_i32 => .table_import_index_i32, + .table_index_i64 => .table_import_index_i64, + .table_index_rel_sleb => .table_import_index_rel_sleb, + .table_index_rel_sleb64 => .table_import_index_rel_sleb64, + .table_index_sleb => .table_import_index_sleb, + .table_index_sleb64 => .table_import_index_sleb64, + .table_number_leb => .table_import_number_leb, + 
.type_index_leb => unreachable, + }; + } + }; + + pub const Pointee = union { + symbol_name: String, + data: ObjectData.Index, + type_index: FunctionType.Index, + section: ObjectSectionIndex, + function: ObjectFunctionIndex, + global: ObjectGlobalIndex, + table: ObjectTableIndex, + }; + + pub const Slice = extern struct { + /// Index into `relocations`. + off: u32, + len: u32, + + const empty: Slice = .{ .off = 0, .len = 0 }; + + pub fn tags(s: Slice, wasm: *const Wasm) []const ObjectRelocation.Tag { + return wasm.object_relocations.items(.tag)[s.off..][0..s.len]; + } + + pub fn offsets(s: Slice, wasm: *const Wasm) []const u32 { + return wasm.object_relocations.items(.offset)[s.off..][0..s.len]; + } + + pub fn pointees(s: Slice, wasm: *const Wasm) []const Pointee { + return wasm.object_relocations.items(.pointee)[s.off..][0..s.len]; + } + + pub fn addends(s: Slice, wasm: *const Wasm) []const i32 { + return wasm.object_relocations.items(.addend)[s.off..][0..s.len]; + } + }; + + pub const IterableSlice = struct { + slice: Slice, + /// Offset at which point to stop iterating. + end: u32, + + const empty: IterableSlice = .{ .slice = .empty, .end = 0 }; + + fn init(relocs: Slice, offset: u32, size: u32, wasm: *const Wasm) IterableSlice { + const offsets = relocs.offsets(wasm); + const start = std.sort.lowerBound(u32, offsets, offset, order); + return .{ + .slice = .{ + .off = @intCast(relocs.off + start), + .len = @intCast(relocs.len - start), + }, + .end = offset + size, + }; + } + + fn order(lhs: u32, rhs: u32) std.math.Order { + return std.math.order(lhs, rhs); + } + }; +}; + +pub const MemoryImport = extern struct { + module_name: String, + limits_min: u32, + limits_max: u32, + source_location: SourceLocation, + limits_has_max: bool, + limits_is_shared: bool, + padding: [2]u8 = .{ 0, 0 }, + + pub fn limits(mi: *const MemoryImport) std.wasm.Limits { + return .{ + .flags = .{ + .has_max = mi.limits_has_max, + .is_shared = mi.limits_is_shared, + }, + .min = mi.limits_min, + .max = mi.limits_max, + }; + } +}; + +pub const Alignment = InternPool.Alignment; + +pub const InitFunc = extern struct { + priority: u32, + function_index: ObjectFunctionIndex, + + pub fn lessThan(ctx: void, lhs: InitFunc, rhs: InitFunc) bool { + _ = ctx; + if (lhs.priority == rhs.priority) { + return @intFromEnum(lhs.function_index) < @intFromEnum(rhs.function_index); + } else { + return lhs.priority < rhs.priority; + } + } +}; + +pub const Comdat = struct { + name: String, + /// Must be zero, no flags are currently defined by the tool-convention. + flags: u32, + symbols: Comdat.Symbol.Slice, + + pub const Symbol = struct { + kind: Comdat.Symbol.Type, + /// Index of the data segment/function/global/event/table within a WASM module. + /// The object must not be an import. + index: u32, + + pub const Slice = struct { + /// Index into Wasm object_comdat_symbols + off: u32, + len: u32, + }; + + pub const Type = enum(u8) { + data = 0, + function = 1, + global = 2, + event = 3, + table = 4, + section = 5, + }; + }; +}; + +/// Stored as a u8 so it can reuse the string table mechanism. +pub const Feature = packed struct(u8) { + prefix: Prefix, + /// Type of the feature, must be unique in the sequence of features. + tag: Tag, + + pub const sentinel: Feature = @bitCast(@as(u8, 0)); + + /// Stored identically to `String`. The bytes are reinterpreted as `Feature` + /// elements. Elements must be sorted before string-interning. 
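// Illustrative sketch, not part of the patch: `Feature` above is a packed
// struct(u8) precisely so a sorted run of features can be viewed as raw bytes and
// deduplicated through the same interning machinery used for strings (a
// `Feature.Set` is just a `String` whose bytes are reinterpreted). `MiniFeature`
// below is a hypothetical stand-in with a truncated tag set.
const std = @import("std");

const MiniFeature = packed struct(u8) {
    prefix: enum(u2) { invalid, @"+", @"-", @"=" },
    tag: enum(u6) { atomics, @"bulk-memory", simd128 },

    fn asByte(f: MiniFeature) u8 {
        return @bitCast(f);
    }

    fn lessThan(_: void, a: MiniFeature, b: MiniFeature) bool {
        return a.asByte() < b.asByte();
    }
};

test "features sort and reinterpret as bytes for interning" {
    var features = [_]MiniFeature{
        .{ .prefix = .@"+", .tag = .simd128 },
        .{ .prefix = .@"+", .tag = .atomics },
    };
    // Elements must be sorted before string-interning, as the doc comment says.
    std.mem.sort(MiniFeature, &features, {}, MiniFeature.lessThan);
    const bytes = std.mem.sliceAsBytes(&features);
    try std.testing.expectEqual(@as(usize, 2), bytes.len);
    try std.testing.expect(bytes[0] < bytes[1]);
}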
+ pub const Set = enum(u32) { + _, + + pub fn fromString(s: String) Set { + return @enumFromInt(@intFromEnum(s)); + } + + pub fn string(s: Set) String { + return @enumFromInt(@intFromEnum(s)); + } + + pub fn slice(s: Set, wasm: *const Wasm) [:sentinel]const Feature { + return @ptrCast(string(s).slice(wasm)); + } + }; + + /// Unlike `std.Target.wasm.Feature` this also contains linker-features such as shared-mem. + /// Additionally the name uses convention matching the wasm binary format. + pub const Tag = enum(u6) { + atomics, + @"bulk-memory", + @"exception-handling", + @"extended-const", + @"half-precision", + multimemory, + multivalue, + @"mutable-globals", + @"nontrapping-fptoint", + @"reference-types", + @"relaxed-simd", + @"sign-ext", + simd128, + @"tail-call", + @"shared-mem", + + pub fn fromCpuFeature(feature: std.Target.wasm.Feature) Tag { + return @enumFromInt(@intFromEnum(feature)); + } + + pub fn toCpuFeature(tag: Tag) ?std.Target.wasm.Feature { + return if (@intFromEnum(tag) < @typeInfo(std.Target.wasm.Feature).@"enum".fields.len) + @enumFromInt(@intFromEnum(tag)) + else + null; + } + + pub const format = @compileError("use @tagName instead"); + }; + + /// Provides information about the usage of the feature. + pub const Prefix = enum(u2) { + /// Reserved so that a 0-byte Feature is invalid and therefore can be a sentinel. + invalid, + /// Object uses this feature, and the link fails if feature is not in + /// the allowed set. + @"+", + /// Object does not use this feature, and the link fails if this + /// feature is in the allowed set. + @"-", + /// Object uses this feature, and the link fails if this feature is not + /// in the allowed set, or if any object does not use this feature. + @"=", + }; + + pub fn format(feature: Feature, comptime fmt: []const u8, opt: std.fmt.FormatOptions, writer: anytype) !void { + _ = opt; + _ = fmt; + try writer.print("{s} {s}", .{ @tagName(feature.prefix), @tagName(feature.tag) }); + } + + pub fn lessThan(_: void, a: Feature, b: Feature) bool { + assert(a != b); + const a_int: u8 = @bitCast(a); + const b_int: u8 = @bitCast(b); + return a_int < b_int; + } +}; + +pub fn open( + arena: Allocator, + comp: *Compilation, + emit: Path, + options: link.File.OpenOptions, +) !*Wasm { + // TODO: restore saved linker state, don't truncate the file, and + // participate in incremental compilation. + return createEmpty(arena, comp, emit, options); +} + +pub fn createEmpty( + arena: Allocator, + comp: *Compilation, + emit: Path, + options: link.File.OpenOptions, +) !*Wasm { + const target = comp.root_mod.resolved_target.result; + assert(target.ofmt == .wasm); + + const use_lld = build_options.have_llvm and comp.config.use_lld; + const use_llvm = comp.config.use_llvm; + const output_mode = comp.config.output_mode; + const wasi_exec_model = comp.config.wasi_exec_model; + + // If using LLD to link, this code should produce an object file so that it + // can be passed to LLD. + // If using LLVM to generate the object file for the zig compilation unit, + // we need a place to put the object file so that it can be subsequently + // handled. 
+ const zcu_object_sub_path = if (!use_lld and !use_llvm) + null + else + try std.fmt.allocPrint(arena, "{s}.o", .{emit.sub_path}); + + const wasm = try arena.create(Wasm); + wasm.* = .{ + .base = .{ + .tag = .wasm, + .comp = comp, + .emit = emit, + .zcu_object_sub_path = zcu_object_sub_path, + // Garbage collection is so crucial to WebAssembly that we design + // the linker around the assumption that it will be on in the vast + // majority of cases, and therefore express "no garbage collection" + // in terms of setting the no_strip and must_link flags on all + // symbols. + .gc_sections = options.gc_sections orelse (output_mode != .Obj), + .print_gc_sections = options.print_gc_sections, + .stack_size = options.stack_size orelse switch (target.os.tag) { + .freestanding => 1 * 1024 * 1024, // 1 MiB + else => 16 * 1024 * 1024, // 16 MiB + }, + .allow_shlib_undefined = options.allow_shlib_undefined orelse false, + .file = null, + .disable_lld_caching = options.disable_lld_caching, + .build_id = options.build_id, + }, + .name = undefined, + .string_table = .empty, + .string_bytes = .empty, + .import_table = options.import_table, + .export_table = options.export_table, + .import_symbols = options.import_symbols, + .export_symbol_names = options.export_symbol_names, + .global_base = options.global_base, + .initial_memory = options.initial_memory, + .max_memory = options.max_memory, + + .entry_name = undefined, + .dump_argv_list = .empty, + .object_host_name = .none, + .preloaded_strings = undefined, + }; + if (use_llvm and comp.config.have_zcu) { + wasm.llvm_object = try LlvmObject.create(arena, comp); + } + errdefer wasm.base.destroy(); + + if (options.object_host_name) |name| wasm.object_host_name = (try wasm.internString(name)).toOptional(); + + inline for (@typeInfo(PreloadedStrings).@"struct".fields) |field| { + @field(wasm.preloaded_strings, field.name) = try wasm.internString(field.name); + } + + wasm.entry_name = switch (options.entry) { + .disabled => .none, + .default => if (output_mode != .Exe) .none else defaultEntrySymbolName(&wasm.preloaded_strings, wasi_exec_model).toOptional(), + .enabled => defaultEntrySymbolName(&wasm.preloaded_strings, wasi_exec_model).toOptional(), + .named => |name| (try wasm.internString(name)).toOptional(), + }; + + if (use_lld and (use_llvm or !comp.config.have_zcu)) { + // LLVM emits the object file (if any); LLD links it into the final product. + return wasm; + } + + // What path should this Wasm linker code output to? + // If using LLD to link, this code should produce an object file so that it + // can be passed to LLD. + const sub_path = if (use_lld) zcu_object_sub_path.? 
else emit.sub_path; + + wasm.base.file = try emit.root_dir.handle.createFile(sub_path, .{ + .truncate = true, + .read = true, + .mode = if (fs.has_executable_bit) + if (target.os.tag == .wasi and output_mode == .Exe) + fs.File.default_mode | 0b001_000_000 + else + fs.File.default_mode + else + 0, + }); + wasm.name = sub_path; + + return wasm; +} + +fn openParseObjectReportingFailure(wasm: *Wasm, path: Path) void { + const diags = &wasm.base.comp.link_diags; + const obj = link.openObject(path, false, false) catch |err| { + switch (diags.failParse(path, "failed to open object: {s}", .{@errorName(err)})) { + error.LinkFailure => return, + } + }; + wasm.parseObject(obj) catch |err| { + switch (diags.failParse(path, "failed to parse object: {s}", .{@errorName(err)})) { + error.LinkFailure => return, + } + }; +} + +fn parseObject(wasm: *Wasm, obj: link.Input.Object) !void { + log.debug("parseObject {}", .{obj.path}); + const gpa = wasm.base.comp.gpa; + const gc_sections = wasm.base.gc_sections; + + defer obj.file.close(); + + try wasm.objects.ensureUnusedCapacity(gpa, 1); + const stat = try obj.file.stat(); + const size = std.math.cast(usize, stat.size) orelse return error.FileTooBig; + + const file_contents = try gpa.alloc(u8, size); + defer gpa.free(file_contents); + + const n = try obj.file.preadAll(file_contents, 0); + if (n != file_contents.len) return error.UnexpectedEndOfFile; + + var ss: Object.ScratchSpace = .{}; + defer ss.deinit(gpa); + + const object = try Object.parse(wasm, file_contents, obj.path, null, wasm.object_host_name, &ss, obj.must_link, gc_sections); + wasm.objects.appendAssumeCapacity(object); +} + +fn parseArchive(wasm: *Wasm, obj: link.Input.Object) !void { + log.debug("parseArchive {}", .{obj.path}); + const gpa = wasm.base.comp.gpa; + const gc_sections = wasm.base.gc_sections; + + defer obj.file.close(); + + const stat = try obj.file.stat(); + const size = std.math.cast(usize, stat.size) orelse return error.FileTooBig; + + const file_contents = try gpa.alloc(u8, size); + defer gpa.free(file_contents); + + const n = try obj.file.preadAll(file_contents, 0); + if (n != file_contents.len) return error.UnexpectedEndOfFile; + + var archive = try Archive.parse(gpa, file_contents); + defer archive.deinit(gpa); + + // In this case we must force link all embedded object files within the archive + // We loop over all symbols, and then group them by offset as the offset + // notates where the object file starts. 
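// Illustrative sketch, not part of the patch: an archive's table of contents maps
// each symbol name to the offsets of the members that define it, and force-linking
// means parsing every member exactly once, so parseArchive collects the distinct
// offsets (each offset marks where an embedded object file starts). The toy TOC
// below is hypothetical.
const std = @import("std");

test "deduplicate archive member offsets" {
    const gpa = std.testing.allocator;
    var offsets = std.AutoArrayHashMap(u32, void).init(gpa);
    defer offsets.deinit();

    // Two symbols live in the member at offset 64, one in the member at 4096.
    const toc = [_]struct { name: []const u8, offset: u32 }{
        .{ .name = "foo", .offset = 64 },
        .{ .name = "bar", .offset = 64 },
        .{ .name = "baz", .offset = 4096 },
    };
    for (toc) |entry| try offsets.put(entry.offset, {});

    // Each unique offset is then parsed as one object file.
    try std.testing.expectEqual(@as(usize, 2), offsets.count());
}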
+ var offsets = std.AutoArrayHashMap(u32, void).init(gpa); + defer offsets.deinit(); + for (archive.toc.values()) |symbol_offsets| { + for (symbol_offsets.items) |sym_offset| { + try offsets.put(sym_offset, {}); + } + } + + var ss: Object.ScratchSpace = .{}; + defer ss.deinit(gpa); + + try wasm.objects.ensureUnusedCapacity(gpa, offsets.count()); + for (offsets.keys()) |file_offset| { + const object = try archive.parseObject(wasm, file_contents, file_offset, obj.path, wasm.object_host_name, &ss, obj.must_link, gc_sections); + wasm.objects.appendAssumeCapacity(object); + } +} + +pub fn deinit(wasm: *Wasm) void { + const gpa = wasm.base.comp.gpa; + if (wasm.llvm_object) |llvm_object| llvm_object.deinit(); + + wasm.navs_exe.deinit(gpa); + wasm.navs_obj.deinit(gpa); + wasm.uavs_exe.deinit(gpa); + wasm.uavs_obj.deinit(gpa); + wasm.overaligned_uavs.deinit(gpa); + wasm.zcu_funcs.deinit(gpa); + wasm.nav_exports.deinit(gpa); + wasm.uav_exports.deinit(gpa); + wasm.imports.deinit(gpa); + + wasm.flush_buffer.deinit(gpa); + + wasm.mir_instructions.deinit(gpa); + wasm.mir_extra.deinit(gpa); + wasm.all_zcu_locals.deinit(gpa); + + if (wasm.dwarf) |*dwarf| dwarf.deinit(); + + wasm.object_function_imports.deinit(gpa); + wasm.object_functions.deinit(gpa); + wasm.object_global_imports.deinit(gpa); + wasm.object_globals.deinit(gpa); + wasm.object_table_imports.deinit(gpa); + wasm.object_tables.deinit(gpa); + wasm.object_memory_imports.deinit(gpa); + wasm.object_memories.deinit(gpa); + wasm.object_relocations.deinit(gpa); + wasm.object_data_imports.deinit(gpa); + wasm.object_data_segments.deinit(gpa); + wasm.object_datas.deinit(gpa); + wasm.object_custom_segments.deinit(gpa); + wasm.object_init_funcs.deinit(gpa); + wasm.object_comdats.deinit(gpa); + wasm.object_relocations_table.deinit(gpa); + wasm.object_comdat_symbols.deinit(gpa); + wasm.objects.deinit(gpa); + + wasm.func_types.deinit(gpa); + wasm.function_exports.deinit(gpa); + wasm.hidden_function_exports.deinit(gpa); + wasm.function_imports.deinit(gpa); + wasm.functions.deinit(gpa); + wasm.globals.deinit(gpa); + wasm.global_exports.deinit(gpa); + wasm.global_imports.deinit(gpa); + wasm.table_imports.deinit(gpa); + wasm.tables.deinit(gpa); + wasm.data_imports.deinit(gpa); + wasm.data_segments.deinit(gpa); + wasm.symbol_table.deinit(gpa); + wasm.out_relocs.deinit(gpa); + wasm.uav_fixups.deinit(gpa); + wasm.nav_fixups.deinit(gpa); + wasm.func_table_fixups.deinit(gpa); + + wasm.zcu_indirect_function_set.deinit(gpa); + wasm.object_indirect_function_import_set.deinit(gpa); + wasm.object_indirect_function_set.deinit(gpa); + + wasm.string_bytes.deinit(gpa); + wasm.string_table.deinit(gpa); + wasm.dump_argv_list.deinit(gpa); + + wasm.params_scratch.deinit(gpa); + wasm.returns_scratch.deinit(gpa); + + wasm.error_name_bytes.deinit(gpa); + wasm.error_name_offs.deinit(gpa); + wasm.tag_name_bytes.deinit(gpa); + wasm.tag_name_offs.deinit(gpa); + + wasm.missing_exports.deinit(gpa); +} + +pub fn updateFunc(wasm: *Wasm, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .wasm) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(pt, func_index, air, liveness); + + dev.check(.wasm_backend); + + const zcu = pt.zcu; + const gpa = zcu.gpa; + try wasm.functions.ensureUnusedCapacity(gpa, 1); + try wasm.zcu_funcs.ensureUnusedCapacity(gpa, 1); + + const ip = 
&zcu.intern_pool; + const owner_nav = zcu.funcInfo(func_index).owner_nav; + log.debug("updateFunc {}", .{ip.getNav(owner_nav).fqn.fmt(ip)}); + + const zds: ZcuDataStarts = .init(wasm); + + // This converts AIR to MIR but does not yet lower to wasm code. + // That lowering happens during `flush`, after garbage collection, which + // can affect function and global indexes, which affects the LEB integer + // encoding, which affects the output binary size. + const function = try CodeGen.function(wasm, pt, func_index, air, liveness); + wasm.zcu_funcs.putAssumeCapacity(func_index, .{ .function = function }); + wasm.functions.putAssumeCapacity(.pack(wasm, .{ .zcu_func = @enumFromInt(wasm.zcu_funcs.entries.len - 1) }), {}); + + try zds.finish(wasm, pt); +} + +// Generate code for the "Nav", storing it in memory to be later written to +// the file on flush(). +pub fn updateNav(wasm: *Wasm, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void { + if (build_options.skip_non_native and builtin.object_format != .wasm) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (wasm.llvm_object) |llvm_object| return llvm_object.updateNav(pt, nav_index); + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const nav = ip.getNav(nav_index); + const comp = wasm.base.comp; + const gpa = comp.gpa; + const is_obj = comp.config.output_mode == .Obj; + const target = &comp.root_mod.resolved_target.result; + + const nav_init, const chased_nav_index = switch (ip.indexToKey(nav.status.fully_resolved.val)) { + .func => return, // global const which is a function alias + .@"extern" => |ext| { + if (is_obj) { + assert(!wasm.navs_obj.contains(ext.owner_nav)); + } else { + assert(!wasm.navs_exe.contains(ext.owner_nav)); + } + const name = try wasm.internString(ext.name.toSlice(ip)); + if (ext.lib_name.toSlice(ip)) |ext_name| _ = try wasm.internString(ext_name); + try wasm.imports.ensureUnusedCapacity(gpa, 1); + try wasm.function_imports.ensureUnusedCapacity(gpa, 1); + try wasm.data_imports.ensureUnusedCapacity(gpa, 1); + const zcu_import = wasm.addZcuImportReserved(ext.owner_nav); + if (ip.isFunctionType(nav.typeOf(ip))) { + wasm.function_imports.putAssumeCapacity(name, .fromZcuImport(zcu_import, wasm)); + // Ensure there is a corresponding function type table entry. 
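// Illustrative sketch, not part of the patch: updateFunc above keeps functions as
// MIR and defers emitting wasm bytes until flush because garbage collection can
// renumber functions and globals, and a ULEB128-encoded operand changes byte width
// with the value it encodes, which shifts every later offset in the code section.
const std = @import("std");

test "uleb128 operand width depends on the index value" {
    var small_buf: [8]u8 = undefined;
    var big_buf: [8]u8 = undefined;
    var small_fbs = std.io.fixedBufferStream(&small_buf);
    var big_fbs = std.io.fixedBufferStream(&big_buf);
    try std.leb.writeUleb128(small_fbs.writer(), @as(u32, 100)); // index 100 -> 1 byte
    try std.leb.writeUleb128(big_fbs.writer(), @as(u32, 100_000)); // index 100_000 -> 3 bytes
    try std.testing.expectEqual(@as(usize, 1), small_fbs.getWritten().len);
    try std.testing.expectEqual(@as(usize, 3), big_fbs.getWritten().len);
}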
+ const fn_info = zcu.typeToFunc(.fromInterned(ext.ty)).?; + _ = try internFunctionType(wasm, fn_info.cc, fn_info.param_types.get(ip), .fromInterned(fn_info.return_type), target); + } else { + wasm.data_imports.putAssumeCapacity(name, .fromZcuImport(zcu_import, wasm)); + } + return; + }, + .variable => |variable| .{ variable.init, variable.owner_nav }, + else => .{ nav.status.fully_resolved.val, nav_index }, + }; + //log.debug("updateNav {} {d}", .{ nav.fqn.fmt(ip), chased_nav_index }); + assert(!wasm.imports.contains(chased_nav_index)); + + if (nav_init != .none and !Value.fromInterned(nav_init).typeOf(zcu).hasRuntimeBits(zcu)) { + if (is_obj) { + assert(!wasm.navs_obj.contains(chased_nav_index)); + } else { + assert(!wasm.navs_exe.contains(chased_nav_index)); + } + return; + } + + if (is_obj) { + const zcu_data_starts: ZcuDataStarts = .initObj(wasm); + const navs_i = try refNavObj(wasm, chased_nav_index); + const zcu_data = try lowerZcuData(wasm, pt, nav_init); + navs_i.value(wasm).* = zcu_data; + try zcu_data_starts.finishObj(wasm, pt); + } else { + const zcu_data_starts: ZcuDataStarts = .initExe(wasm); + const navs_i = try refNavExe(wasm, chased_nav_index); + const zcu_data = try lowerZcuData(wasm, pt, nav_init); + navs_i.value(wasm).code = zcu_data.code; + try zcu_data_starts.finishExe(wasm, pt); + } +} + +pub fn updateLineNumber(wasm: *Wasm, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) !void { + const comp = wasm.base.comp; + const diags = &comp.link_diags; + if (wasm.dwarf) |*dw| { + dw.updateLineNumber(pt.zcu, ti_id) catch |err| switch (err) { + error.Overflow => return error.Overflow, + error.OutOfMemory => return error.OutOfMemory, + else => |e| return diags.fail("failed to update dwarf line numbers: {s}", .{@errorName(e)}), + }; + } +} + +pub fn deleteExport( + wasm: *Wasm, + exported: Zcu.Exported, + name: InternPool.NullTerminatedString, +) void { + if (wasm.llvm_object != null) return; + + const zcu = wasm.base.comp.zcu.?; + const ip = &zcu.intern_pool; + const name_slice = name.toSlice(ip); + const export_name = wasm.getExistingString(name_slice).?; + switch (exported) { + .nav => |nav_index| { + log.debug("deleteExport '{s}' nav={d}", .{ name_slice, @intFromEnum(nav_index) }); + assert(wasm.nav_exports.swapRemove(.{ .nav_index = nav_index, .name = export_name })); }, - else => unreachable, + .uav => |uav_index| assert(wasm.uav_exports.swapRemove(.{ .uav_index = uav_index, .name = export_name })), } } -/// Appends a new segment with default field values -fn appendDummySegment(wasm: *Wasm) !void { - const gpa = wasm.base.comp.gpa; - try wasm.segments.append(gpa, .{ - .alignment = .@"1", - .size = 0, - .offset = 0, - .flags = 0, - }); +pub fn updateExports( + wasm: *Wasm, + pt: Zcu.PerThread, + exported: Zcu.Exported, + export_indices: []const Zcu.Export.Index, +) !void { + if (build_options.skip_non_native and builtin.object_format != .wasm) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (wasm.llvm_object) |llvm_object| return llvm_object.updateExports(pt, exported, export_indices); + + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + for (export_indices) |export_idx| { + const exp = export_idx.ptr(zcu); + const name_slice = exp.opts.name.toSlice(ip); + const name = try wasm.internString(name_slice); + switch (exported) { + .nav => |nav_index| { + log.debug("updateExports '{s}' nav={d}", .{ name_slice, @intFromEnum(nav_index) }); + try wasm.nav_exports.put(gpa, .{ .nav_index = 
nav_index, .name = name }, export_idx); + }, + .uav => |uav_index| try wasm.uav_exports.put(gpa, .{ .uav_index = uav_index, .name = name }, export_idx), + } + } } pub fn loadInput(wasm: *Wasm, input: link.Input) !void { @@ -2596,7 +3323,9 @@ pub fn loadInput(wasm: *Wasm, input: link.Input) !void { .res => unreachable, .dso_exact => unreachable, .dso => unreachable, - .object, .archive => |obj| try argv.append(gpa, try obj.path.toString(comp.arena)), + .object, .archive => |obj| { + try argv.append(gpa, try obj.path.toString(comp.arena)); + }, } } @@ -2612,791 +3341,472 @@ pub fn loadInput(wasm: *Wasm, input: link.Input) !void { pub fn flush(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { const comp = wasm.base.comp; const use_lld = build_options.have_llvm and comp.config.use_lld; + const diags = &comp.link_diags; if (use_lld) { - return wasm.linkWithLLD(arena, tid, prog_node); + return wasm.linkWithLLD(arena, tid, prog_node) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.LinkFailure => return error.LinkFailure, + else => |e| return diags.fail("failed to link with LLD: {s}", .{@errorName(e)}), + }; } return wasm.flushModule(arena, tid, prog_node); } -pub fn flushModule(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { +pub fn prelink(wasm: *Wasm, prog_node: std.Progress.Node) link.File.FlushError!void { const tracy = trace(@src()); defer tracy.end(); - const comp = wasm.base.comp; - const diags = &comp.link_diags; - if (wasm.llvm_object) |llvm_object| { - try wasm.base.emitLlvmObject(arena, llvm_object, prog_node); - const use_lld = build_options.have_llvm and comp.config.use_lld; - if (use_lld) return; - } - - if (comp.verbose_link) Compilation.dump_argv(wasm.dump_argv_list.items); - - const sub_prog_node = prog_node.start("Wasm Flush", 0); + const sub_prog_node = prog_node.start("Wasm Prelink", 0); defer sub_prog_node.end(); - const module_obj_path: ?Path = if (wasm.base.zcu_object_sub_path) |path| .{ - .root_dir = wasm.base.emit.root_dir, - .sub_path = if (fs.path.dirname(wasm.base.emit.sub_path)) |dirname| - try fs.path.join(arena, &.{ dirname, path }) - else - path, - } else null; - - if (wasm.zig_object) |zig_object| try zig_object.flushModule(wasm, tid); - - if (module_obj_path) |path| openParseObjectReportingFailure(wasm, path); - - if (wasm.zig_object != null) { - try wasm.resolveSymbolsInObject(.zig_object); - } - if (diags.hasErrors()) return error.FlushFailure; - for (0..wasm.objects.items.len) |object_index| { - try wasm.resolveSymbolsInObject(@enumFromInt(object_index)); - } - if (diags.hasErrors()) return error.FlushFailure; - - var emit_features_count: u32 = 0; - var enabled_features: [@typeInfo(Feature.Tag).@"enum".fields.len]bool = undefined; - try wasm.validateFeatures(&enabled_features, &emit_features_count); - try wasm.resolveSymbolsInArchives(); - if (diags.hasErrors()) return error.FlushFailure; - try wasm.resolveLazySymbols(); - try wasm.checkUndefinedSymbols(); - try wasm.checkExportNames(); - - try wasm.setupInitFunctions(); - if (diags.hasErrors()) return error.FlushFailure; - try wasm.setupStart(); - - try wasm.markReferences(); - try wasm.setupImports(); - try wasm.mergeSections(); - try wasm.mergeTypes(); - try wasm.allocateAtoms(); - try wasm.setupMemory(); - if (diags.hasErrors()) return error.FlushFailure; - wasm.allocateVirtualAddresses(); - wasm.mapFunctionTable(); - try 
wasm.initializeCallCtorsFunction(); - try wasm.setupInitMemoryFunction(); - try wasm.setupTLSRelocationsFunction(); - try wasm.initializeTLSFunction(); - try wasm.setupStartSection(); - try wasm.setupExports(); - try wasm.writeToFile(enabled_features, emit_features_count, arena); - if (diags.hasErrors()) return error.FlushFailure; -} - -/// Writes the WebAssembly in-memory module to the file -fn writeToFile( - wasm: *Wasm, - enabled_features: [@typeInfo(Feature.Tag).@"enum".fields.len]bool, - feature_count: u32, - arena: Allocator, -) !void { const comp = wasm.base.comp; - const diags = &comp.link_diags; const gpa = comp.gpa; - const use_llvm = comp.config.use_llvm; - const use_lld = build_options.have_llvm and comp.config.use_lld; - const shared_memory = comp.config.shared_memory; - const import_memory = comp.config.import_memory; - const export_memory = comp.config.export_memory; + const rdynamic = comp.config.rdynamic; + const is_obj = comp.config.output_mode == .Obj; - // Size of each section header - const header_size = 5 + 1; - // The amount of sections that will be written - var section_count: u32 = 0; - // Index of the code section. Used to tell relocation table where the section lives. - var code_section_index: ?u32 = null; - // Index of the data section. Used to tell relocation table where the section lives. - var data_section_index: ?u32 = null; - const is_obj = comp.config.output_mode == .Obj or (!use_llvm and use_lld); - - var binary_bytes = std.ArrayList(u8).init(gpa); - defer binary_bytes.deinit(); - const binary_writer = binary_bytes.writer(); - - // We write the magic bytes at the end so they will only be written - // if everything succeeded as expected. So populate with 0's for now. - try binary_writer.writeAll(&[_]u8{0} ** 8); - // (Re)set file pointer to 0 - try wasm.base.file.?.setEndPos(0); - try wasm.base.file.?.seekTo(0); - - // Type section - if (wasm.func_types.items.len != 0) { - const header_offset = try reserveVecSectionHeader(&binary_bytes); - log.debug("Writing type section. 
Count: ({d})", .{wasm.func_types.items.len}); - for (wasm.func_types.items) |func_type| { - try leb.writeUleb128(binary_writer, std.wasm.function_type); - try leb.writeUleb128(binary_writer, @as(u32, @intCast(func_type.params.len))); - for (func_type.params) |param_ty| { - try leb.writeUleb128(binary_writer, std.wasm.valtype(param_ty)); + assert(wasm.missing_exports.entries.len == 0); + for (wasm.export_symbol_names) |exp_name| { + const exp_name_interned = try wasm.internString(exp_name); + if (wasm.object_function_imports.getPtr(exp_name_interned)) |import| { + if (import.resolution != .unresolved) { + import.flags.exported = true; + continue; } - try leb.writeUleb128(binary_writer, @as(u32, @intCast(func_type.returns.len))); - for (func_type.returns) |ret_ty| { - try leb.writeUleb128(binary_writer, std.wasm.valtype(ret_ty)); + } + if (wasm.object_global_imports.getPtr(exp_name_interned)) |import| { + if (import.resolution != .unresolved) { + import.flags.exported = true; + continue; } } - - try writeVecSectionHeader( - binary_bytes.items, - header_offset, - .type, - @intCast(binary_bytes.items.len - header_offset - header_size), - @intCast(wasm.func_types.items.len), - ); - section_count += 1; + if (wasm.object_table_imports.getPtr(exp_name_interned)) |import| { + if (import.resolution != .unresolved) { + import.flags.exported = true; + continue; + } + } + try wasm.missing_exports.put(gpa, exp_name_interned, {}); } - // Import section - if (wasm.imports.count() != 0 or import_memory) { - const header_offset = try reserveVecSectionHeader(&binary_bytes); - - var it = wasm.imports.iterator(); - while (it.next()) |entry| { - assert(wasm.symbolLocSymbol(entry.key_ptr.*).isUndefined()); - const import = entry.value_ptr.*; - try wasm.emitImport(binary_writer, import); + if (wasm.entry_name.unwrap()) |entry_name| { + if (wasm.object_function_imports.getPtr(entry_name)) |import| { + if (import.resolution != .unresolved) { + import.flags.exported = true; + wasm.entry_resolution = import.resolution; + } } + } - if (import_memory) { - const mem_imp: Import = .{ - .module_name = wasm.host_name, - .name = if (is_obj) wasm.preloaded_strings.__linear_memory else wasm.preloaded_strings.memory, - .kind = .{ .memory = wasm.memories.limits }, - }; - try wasm.emitImport(binary_writer, mem_imp); + if (comp.zcu != null) { + // Zig always depends on a stack pointer global. + // If emitting an object, it's an import. Otherwise, the linker synthesizes it. + if (is_obj) { + @panic("TODO"); + } else { + try wasm.globals.put(gpa, .__stack_pointer, {}); + assert(wasm.globals.entries.len - 1 == @intFromEnum(GlobalIndex.stack_pointer)); } - - try writeVecSectionHeader( - binary_bytes.items, - header_offset, - .import, - @intCast(binary_bytes.items.len - header_offset - header_size), - @intCast(wasm.imports.count() + @intFromBool(import_memory)), - ); - section_count += 1; } - // Function section - if (wasm.functions.count() != 0) { - const header_offset = try reserveVecSectionHeader(&binary_bytes); - for (wasm.functions.values()) |function| { - try leb.writeUleb128(binary_writer, function.func.type_index); + // These loops do both recursive marking of alive symbols well as checking for undefined symbols. + // At the end, output functions and globals will be populated. + for (wasm.object_function_imports.keys(), wasm.object_function_imports.values(), 0..) 
|name, *import, i| { + if (import.flags.isIncluded(rdynamic)) { + try markFunctionImport(wasm, name, import, @enumFromInt(i)); } - - try writeVecSectionHeader( - binary_bytes.items, - header_offset, - .function, - @intCast(binary_bytes.items.len - header_offset - header_size), - @intCast(wasm.functions.count()), - ); - section_count += 1; } - - // Table section - if (wasm.tables.items.len > 0) { - const header_offset = try reserveVecSectionHeader(&binary_bytes); - - for (wasm.tables.items) |table| { - try leb.writeUleb128(binary_writer, std.wasm.reftype(table.reftype)); - try emitLimits(binary_writer, table.limits); + // Also treat init functions as roots. + for (wasm.object_init_funcs.items) |init_func| { + const func = init_func.function_index.ptr(wasm); + if (func.object_index.ptr(wasm).is_included) { + try markFunction(wasm, init_func.function_index, false); } - - try writeVecSectionHeader( - binary_bytes.items, - header_offset, - .table, - @intCast(binary_bytes.items.len - header_offset - header_size), - @intCast(wasm.tables.items.len), - ); - section_count += 1; } + wasm.functions_end_prelink = @intCast(wasm.functions.entries.len); - // Memory section - if (!import_memory) { - const header_offset = try reserveVecSectionHeader(&binary_bytes); - - try emitLimits(binary_writer, wasm.memories.limits); - try writeVecSectionHeader( - binary_bytes.items, - header_offset, - .memory, - @intCast(binary_bytes.items.len - header_offset - header_size), - 1, // wasm currently only supports 1 linear memory segment - ); - section_count += 1; + for (wasm.object_global_imports.keys(), wasm.object_global_imports.values(), 0..) |name, *import, i| { + if (import.flags.isIncluded(rdynamic)) { + try markGlobalImport(wasm, name, import, @enumFromInt(i)); + } } + wasm.globals_end_prelink = @intCast(wasm.globals.entries.len); + wasm.global_exports_len = @intCast(wasm.global_exports.items.len); - // Global section (used to emit stack pointer) - if (wasm.wasm_globals.items.len > 0) { - const header_offset = try reserveVecSectionHeader(&binary_bytes); - - for (wasm.wasm_globals.items) |global| { - try binary_writer.writeByte(std.wasm.valtype(global.global_type.valtype)); - try binary_writer.writeByte(@intFromBool(global.global_type.mutable)); - try emitInit(binary_writer, global.init); + for (wasm.object_table_imports.keys(), wasm.object_table_imports.values(), 0..) |name, *import, i| { + if (import.flags.isIncluded(rdynamic)) { + try markTableImport(wasm, name, import, @enumFromInt(i)); } - - try writeVecSectionHeader( - binary_bytes.items, - header_offset, - .global, - @intCast(binary_bytes.items.len - header_offset - header_size), - @intCast(wasm.wasm_globals.items.len), - ); - section_count += 1; } - // Export section - if (wasm.exports.items.len != 0 or export_memory) { - const header_offset = try reserveVecSectionHeader(&binary_bytes); - - for (wasm.exports.items) |exp| { - const name = wasm.stringSlice(exp.name); - try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len))); - try binary_writer.writeAll(name); - try leb.writeUleb128(binary_writer, @intFromEnum(exp.kind)); - try leb.writeUleb128(binary_writer, exp.index); + for (wasm.object_data_imports.keys(), wasm.object_data_imports.values(), 0..) 
|name, *import, i| { + if (import.flags.isIncluded(rdynamic)) { + try markDataImport(wasm, name, import, @enumFromInt(i)); } - - if (export_memory) { - try leb.writeUleb128(binary_writer, @as(u32, @intCast("memory".len))); - try binary_writer.writeAll("memory"); - try binary_writer.writeByte(std.wasm.externalKind(.memory)); - try leb.writeUleb128(binary_writer, @as(u32, 0)); - } - - try writeVecSectionHeader( - binary_bytes.items, - header_offset, - .@"export", - @intCast(binary_bytes.items.len - header_offset - header_size), - @intCast(wasm.exports.items.len + @intFromBool(export_memory)), - ); - section_count += 1; } - if (wasm.entry) |entry_index| { - const header_offset = try reserveVecSectionHeader(&binary_bytes); - try writeVecSectionHeader( - binary_bytes.items, - header_offset, - .start, - @intCast(binary_bytes.items.len - header_offset - header_size), - entry_index, - ); + // This is a wild ass guess at how to merge memories, haven't checked yet + // what the proper way to do this is. + for (wasm.object_memory_imports.values()) |*memory_import| { + wasm.memories.limits.min = @min(wasm.memories.limits.min, memory_import.limits_min); + wasm.memories.limits.max = @max(wasm.memories.limits.max, memory_import.limits_max); + wasm.memories.limits.flags.has_max = wasm.memories.limits.flags.has_max or memory_import.limits_has_max; } - // element section (function table) - if (wasm.function_table.count() > 0) { - const header_offset = try reserveVecSectionHeader(&binary_bytes); - - const table_loc = wasm.globals.get(wasm.preloaded_strings.__indirect_function_table).?; - const table_sym = wasm.symbolLocSymbol(table_loc); - - const flags: u32 = if (table_sym.index == 0) 0x0 else 0x02; // passive with implicit 0-index table or set table index manually - try leb.writeUleb128(binary_writer, flags); - if (flags == 0x02) { - try leb.writeUleb128(binary_writer, table_sym.index); - } - try emitInit(binary_writer, .{ .i32_const = 1 }); // We start at index 1, so unresolved function pointers are invalid - if (flags == 0x02) { - try leb.writeUleb128(binary_writer, @as(u8, 0)); // represents funcref - } - try leb.writeUleb128(binary_writer, @as(u32, @intCast(wasm.function_table.count()))); - var symbol_it = wasm.function_table.keyIterator(); - while (symbol_it.next()) |symbol_loc_ptr| { - const sym = wasm.symbolLocSymbol(symbol_loc_ptr.*); - std.debug.assert(sym.isAlive()); - std.debug.assert(sym.index < wasm.functions.count() + wasm.imported_functions_count); - try leb.writeUleb128(binary_writer, sym.index); - } - - try writeVecSectionHeader( - binary_bytes.items, - header_offset, - .element, - @intCast(binary_bytes.items.len - header_offset - header_size), - 1, - ); - section_count += 1; - } - - // When the shared-memory option is enabled, we *must* emit the 'data count' section. 
- const data_segments_count = wasm.data_segments.count() - @intFromBool(wasm.data_segments.contains(".bss") and !import_memory); - if (data_segments_count != 0 and shared_memory) { - const header_offset = try reserveVecSectionHeader(&binary_bytes); - try writeVecSectionHeader( - binary_bytes.items, - header_offset, - .data_count, - @intCast(binary_bytes.items.len - header_offset - header_size), - @intCast(data_segments_count), - ); - } + wasm.function_imports_len_prelink = @intCast(wasm.function_imports.entries.len); + wasm.data_imports_len_prelink = @intCast(wasm.data_imports.entries.len); +} - // Code section - if (wasm.code_section_index != .none) { - const header_offset = try reserveVecSectionHeader(&binary_bytes); - const start_offset = binary_bytes.items.len - 5; // minus 5 so start offset is 5 to include entry count +pub fn markFunctionImport( + wasm: *Wasm, + name: String, + import: *FunctionImport, + func_index: FunctionImport.Index, +) link.File.FlushError!void { + if (import.flags.alive) return; + import.flags.alive = true; - var func_it = wasm.functions.iterator(); - while (func_it.next()) |entry| { - const sym_loc: SymbolLoc = .{ .index = entry.value_ptr.sym_index, .file = entry.key_ptr.file }; - const atom_index = wasm.symbol_atom.get(sym_loc).?; - const atom = wasm.getAtomPtr(atom_index); + const comp = wasm.base.comp; + const gpa = comp.gpa; - if (!is_obj) { - atom.resolveRelocs(wasm); - } - atom.offset = @intCast(binary_bytes.items.len - start_offset); - try leb.writeUleb128(binary_writer, atom.size); - try binary_writer.writeAll(atom.code.items); + try wasm.functions.ensureUnusedCapacity(gpa, 1); + + if (import.resolution == .unresolved) { + if (name == wasm.preloaded_strings.__wasm_init_memory) { + try wasm.resolveFunctionSynthetic(import, .__wasm_init_memory, &.{}, &.{}); + } else if (name == wasm.preloaded_strings.__wasm_apply_global_tls_relocs) { + try wasm.resolveFunctionSynthetic(import, .__wasm_apply_global_tls_relocs, &.{}, &.{}); + } else if (name == wasm.preloaded_strings.__wasm_call_ctors) { + try wasm.resolveFunctionSynthetic(import, .__wasm_call_ctors, &.{}, &.{}); + } else if (name == wasm.preloaded_strings.__wasm_init_tls) { + try wasm.resolveFunctionSynthetic(import, .__wasm_init_tls, &.{.i32}, &.{}); + } else { + try wasm.function_imports.put(gpa, name, .fromObject(func_index, wasm)); } + } else { + try markFunction(wasm, import.resolution.unpack(wasm).object_function, import.flags.exported); + } +} - try writeVecSectionHeader( - binary_bytes.items, - header_offset, - .code, - @intCast(binary_bytes.items.len - header_offset - header_size), - @intCast(wasm.functions.count()), - ); - code_section_index = section_count; - section_count += 1; - } - - // Data section - if (data_segments_count != 0) { - const header_offset = try reserveVecSectionHeader(&binary_bytes); - - var it = wasm.data_segments.iterator(); - var segment_count: u32 = 0; - while (it.next()) |entry| { - // do not output 'bss' section unless we import memory and therefore - // want to guarantee the data is zero initialized - if (!import_memory and std.mem.eql(u8, entry.key_ptr.*, ".bss")) continue; - const segment_index = entry.value_ptr.*; - const segment = wasm.segmentPtr(segment_index); - if (segment.size == 0) continue; // do not emit empty segments - segment_count += 1; - var atom_index = wasm.atoms.get(segment_index).?; - - try leb.writeUleb128(binary_writer, segment.flags); - if (segment.flags & @intFromEnum(Wasm.Segment.Flag.WASM_DATA_SEGMENT_HAS_MEMINDEX) != 0) { - try 
leb.writeUleb128(binary_writer, @as(u32, 0)); // memory is always index 0 as we only have 1 memory entry - } - // when a segment is passive, it's initialized during runtime. - if (!segment.isPassive()) { - try emitInit(binary_writer, .{ .i32_const = @as(i32, @bitCast(segment.offset)) }); - } - // offset into data section - try leb.writeUleb128(binary_writer, segment.size); - - // fill in the offset table and the data segments - var current_offset: u32 = 0; - while (true) { - const atom = wasm.getAtomPtr(atom_index); - if (!is_obj) { - atom.resolveRelocs(wasm); - } +/// Recursively mark alive everything referenced by the function. +fn markFunction(wasm: *Wasm, i: ObjectFunctionIndex, override_export: bool) link.File.FlushError!void { + const comp = wasm.base.comp; + const gpa = comp.gpa; + const gop = try wasm.functions.getOrPut(gpa, .fromObjectFunction(wasm, i)); + if (gop.found_existing) return; - // Pad with zeroes to ensure all segments are aligned - if (current_offset != atom.offset) { - const diff = atom.offset - current_offset; - try binary_writer.writeByteNTimes(0, diff); - current_offset += diff; - } - assert(current_offset == atom.offset); - assert(atom.code.items.len == atom.size); - try binary_writer.writeAll(atom.code.items); + const rdynamic = comp.config.rdynamic; + const is_obj = comp.config.output_mode == .Obj; + const function = i.ptr(wasm); + markObject(wasm, function.object_index); - current_offset += atom.size; - if (atom.prev != .null) { - atom_index = atom.prev; - } else { - // also pad with zeroes when last atom to ensure - // segments are aligned. - if (current_offset != segment.size) { - try binary_writer.writeByteNTimes(0, segment.size - current_offset); - current_offset += segment.size - current_offset; - } - break; - } - } - assert(current_offset == segment.size); + if (!is_obj and (override_export or function.flags.isExported(rdynamic))) { + const symbol_name = function.name.unwrap().?; + if (!override_export and function.flags.visibility_hidden) { + try wasm.hidden_function_exports.put(gpa, symbol_name, @enumFromInt(gop.index)); + } else { + try wasm.function_exports.put(gpa, symbol_name, @enumFromInt(gop.index)); } - - try writeVecSectionHeader( - binary_bytes.items, - header_offset, - .data, - @intCast(binary_bytes.items.len - header_offset - header_size), - @intCast(segment_count), - ); - data_section_index = section_count; - section_count += 1; } - if (is_obj) { - // relocations need to point to the index of a symbol in the final symbol table. To save memory, - // we never store all symbols in a single table, but store a location reference instead. - // This means that for a relocatable object file, we need to generate one and provide it to the relocation sections. - var symbol_table = std.AutoArrayHashMap(SymbolLoc, u32).init(arena); - try wasm.emitLinkSection(&binary_bytes, &symbol_table); - if (code_section_index) |code_index| { - try wasm.emitCodeRelocations(&binary_bytes, code_index, symbol_table); - } - if (data_section_index) |data_index| { - try wasm.emitDataRelocations(&binary_bytes, data_index, symbol_table); - } - } else if (comp.config.debug_format != .strip) { - try wasm.emitNameSection(&binary_bytes, arena); - } - - if (comp.config.debug_format != .strip) { - // The build id must be computed on the main sections only, - // so we have to do it now, before the debug sections. 
- switch (wasm.base.build_id) { - .none => {}, - .fast => { - var id: [16]u8 = undefined; - std.crypto.hash.sha3.TurboShake128(null).hash(binary_bytes.items, &id, .{}); - var uuid: [36]u8 = undefined; - _ = try std.fmt.bufPrint(&uuid, "{s}-{s}-{s}-{s}-{s}", .{ - std.fmt.fmtSliceHexLower(id[0..4]), - std.fmt.fmtSliceHexLower(id[4..6]), - std.fmt.fmtSliceHexLower(id[6..8]), - std.fmt.fmtSliceHexLower(id[8..10]), - std.fmt.fmtSliceHexLower(id[10..]), - }); - try emitBuildIdSection(&binary_bytes, &uuid); - }, - .hexstring => |hs| { - var buffer: [32 * 2]u8 = undefined; - const str = std.fmt.bufPrint(&buffer, "{s}", .{ - std.fmt.fmtSliceHexLower(hs.toSlice()), - }) catch unreachable; - try emitBuildIdSection(&binary_bytes, str); - }, - else => |mode| { - var err = try diags.addErrorWithNotes(0); - try err.addMsg("build-id '{s}' is not supported for WebAssembly", .{@tagName(mode)}); - }, - } + try wasm.markRelocations(function.relocations(wasm)); +} - var debug_bytes = std.ArrayList(u8).init(gpa); - defer debug_bytes.deinit(); +fn markObject(wasm: *Wasm, i: ObjectIndex) void { + i.ptr(wasm).is_included = true; +} - inline for (@typeInfo(CustomSections).@"struct".fields) |field| { - if (@field(wasm.custom_sections, field.name).index.unwrap()) |index| { - var atom = wasm.getAtomPtr(wasm.atoms.get(index).?); - while (true) { - atom.resolveRelocs(wasm); - try debug_bytes.appendSlice(atom.code.items); - if (atom.prev == .null) break; - atom = wasm.getAtomPtr(atom.prev); - } - try emitDebugSection(&binary_bytes, debug_bytes.items, field.name); - debug_bytes.clearRetainingCapacity(); - } - } +/// Recursively mark alive everything referenced by the global. +fn markGlobalImport( + wasm: *Wasm, + name: String, + import: *GlobalImport, + global_index: GlobalImport.Index, +) link.File.FlushError!void { + if (import.flags.alive) return; + import.flags.alive = true; - try emitProducerSection(&binary_bytes); - if (feature_count > 0) { - try emitFeaturesSection(&binary_bytes, &enabled_features, feature_count); - } - } + const comp = wasm.base.comp; + const gpa = comp.gpa; - // Only when writing all sections executed properly we write the magic - // bytes. This allows us to easily detect what went wrong while generating - // the final binary. 
- { - const src = std.wasm.magic ++ std.wasm.version; - binary_bytes.items[0..src.len].* = src; + try wasm.globals.ensureUnusedCapacity(gpa, 1); + + if (import.resolution == .unresolved) { + if (name == wasm.preloaded_strings.__heap_base) { + import.resolution = .__heap_base; + wasm.globals.putAssumeCapacity(.__heap_base, {}); + } else if (name == wasm.preloaded_strings.__heap_end) { + import.resolution = .__heap_end; + wasm.globals.putAssumeCapacity(.__heap_end, {}); + } else if (name == wasm.preloaded_strings.__stack_pointer) { + import.resolution = .__stack_pointer; + wasm.globals.putAssumeCapacity(.__stack_pointer, {}); + } else if (name == wasm.preloaded_strings.__tls_align) { + import.resolution = .__tls_align; + wasm.globals.putAssumeCapacity(.__tls_align, {}); + } else if (name == wasm.preloaded_strings.__tls_base) { + import.resolution = .__tls_base; + wasm.globals.putAssumeCapacity(.__tls_base, {}); + } else if (name == wasm.preloaded_strings.__tls_size) { + import.resolution = .__tls_size; + wasm.globals.putAssumeCapacity(.__tls_size, {}); + } else { + try wasm.global_imports.put(gpa, name, .fromObject(global_index, wasm)); + } + } else { + try markGlobal(wasm, import.resolution.unpack(wasm).object_global, import.flags.exported); } - - // finally, write the entire binary into the file. - var iovec = [_]std.posix.iovec_const{.{ - .base = binary_bytes.items.ptr, - .len = binary_bytes.items.len, - }}; - try wasm.base.file.?.writevAll(&iovec); -} - -fn emitDebugSection(binary_bytes: *std.ArrayList(u8), data: []const u8, name: []const u8) !void { - if (data.len == 0) return; - const header_offset = try reserveCustomSectionHeader(binary_bytes); - const writer = binary_bytes.writer(); - try leb.writeUleb128(writer, @as(u32, @intCast(name.len))); - try writer.writeAll(name); - - const start = binary_bytes.items.len - header_offset; - log.debug("Emit debug section: '{s}' start=0x{x:0>8} end=0x{x:0>8}", .{ name, start, start + data.len }); - try writer.writeAll(data); - - try writeCustomSectionHeader( - binary_bytes.items, - header_offset, - @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)), - ); } -fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void { - const header_offset = try reserveCustomSectionHeader(binary_bytes); +fn markGlobal(wasm: *Wasm, i: ObjectGlobalIndex, override_export: bool) link.File.FlushError!void { + const comp = wasm.base.comp; + const gpa = comp.gpa; + const gop = try wasm.globals.getOrPut(gpa, .fromObjectGlobal(wasm, i)); + if (gop.found_existing) return; - const writer = binary_bytes.writer(); - const producers = "producers"; - try leb.writeUleb128(writer, @as(u32, @intCast(producers.len))); - try writer.writeAll(producers); + const rdynamic = comp.config.rdynamic; + const is_obj = comp.config.output_mode == .Obj; + const global = i.ptr(wasm); - try leb.writeUleb128(writer, @as(u32, 2)); // 2 fields: Language + processed-by + if (!is_obj and (override_export or global.flags.isExported(rdynamic))) try wasm.global_exports.append(gpa, .{ + .name = global.name.unwrap().?, + .global_index = @enumFromInt(gop.index), + }); - // used for the Zig version - var version_buf: [100]u8 = undefined; - const version = try std.fmt.bufPrint(&version_buf, "{}", .{build_options.semver}); + try wasm.markRelocations(global.relocations(wasm)); +} - // language field - { - const language = "language"; - try leb.writeUleb128(writer, @as(u32, @intCast(language.len))); - try writer.writeAll(language); +fn markTableImport( + wasm: *Wasm, + name: String, + import: 
*TableImport, + table_index: TableImport.Index, +) link.File.FlushError!void { + if (import.flags.alive) return; + import.flags.alive = true; - // field_value_count (TODO: Parse object files for producer sections to detect their language) - try leb.writeUleb128(writer, @as(u32, 1)); + const comp = wasm.base.comp; + const gpa = comp.gpa; - // versioned name - { - try leb.writeUleb128(writer, @as(u32, 3)); // len of "Zig" - try writer.writeAll("Zig"); + try wasm.tables.ensureUnusedCapacity(gpa, 1); - try leb.writeUleb128(writer, @as(u32, @intCast(version.len))); - try writer.writeAll(version); + if (import.resolution == .unresolved) { + if (name == wasm.preloaded_strings.__indirect_function_table) { + import.resolution = .__indirect_function_table; + wasm.tables.putAssumeCapacity(.__indirect_function_table, {}); + } else { + try wasm.table_imports.put(gpa, name, table_index); } + } else { + wasm.tables.putAssumeCapacity(import.resolution, {}); + // Tables have no relocations. } +} - // processed-by field - { - const processed_by = "processed-by"; - try leb.writeUleb128(writer, @as(u32, @intCast(processed_by.len))); - try writer.writeAll(processed_by); - - // field_value_count (TODO: Parse object files for producer sections to detect other used tools) - try leb.writeUleb128(writer, @as(u32, 1)); - - // versioned name - { - try leb.writeUleb128(writer, @as(u32, 3)); // len of "Zig" - try writer.writeAll("Zig"); +fn markDataSegment(wasm: *Wasm, segment_index: ObjectDataSegment.Index) link.File.FlushError!void { + const comp = wasm.base.comp; + const segment = segment_index.ptr(wasm); + if (segment.flags.alive) return; + segment.flags.alive = true; - try leb.writeUleb128(writer, @as(u32, @intCast(version.len))); - try writer.writeAll(version); - } - } + wasm.any_passive_inits = wasm.any_passive_inits or segment.flags.is_passive or + (comp.config.import_memory and !wasm.isBss(segment.name)); - try writeCustomSectionHeader( - binary_bytes.items, - header_offset, - @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)), - ); + try wasm.data_segments.put(comp.gpa, .pack(wasm, .{ .object = segment_index }), {}); + try wasm.markRelocations(segment.relocations(wasm)); } -fn emitBuildIdSection(binary_bytes: *std.ArrayList(u8), build_id: []const u8) !void { - const header_offset = try reserveCustomSectionHeader(binary_bytes); - - const writer = binary_bytes.writer(); - const hdr_build_id = "build_id"; - try leb.writeUleb128(writer, @as(u32, @intCast(hdr_build_id.len))); - try writer.writeAll(hdr_build_id); +pub fn markDataImport( + wasm: *Wasm, + name: String, + import: *ObjectDataImport, + data_index: ObjectDataImport.Index, +) link.File.FlushError!void { + if (import.flags.alive) return; + import.flags.alive = true; - try leb.writeUleb128(writer, @as(u32, 1)); - try leb.writeUleb128(writer, @as(u32, @intCast(build_id.len))); - try writer.writeAll(build_id); + const comp = wasm.base.comp; + const gpa = comp.gpa; - try writeCustomSectionHeader( - binary_bytes.items, - header_offset, - @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)), - ); + if (import.resolution == .unresolved) { + if (name == wasm.preloaded_strings.__heap_base) { + import.resolution = .__heap_base; + wasm.data_segments.putAssumeCapacity(.__heap_base, {}); + } else if (name == wasm.preloaded_strings.__heap_end) { + import.resolution = .__heap_end; + wasm.data_segments.putAssumeCapacity(.__heap_end, {}); + } else { + try wasm.data_imports.put(gpa, name, .fromObject(data_index, wasm)); + } + } else if 
(import.resolution.objectDataSegment(wasm)) |segment_index| { + try markDataSegment(wasm, segment_index); + } } -fn emitFeaturesSection(binary_bytes: *std.ArrayList(u8), enabled_features: []const bool, features_count: u32) !void { - const header_offset = try reserveCustomSectionHeader(binary_bytes); +fn markRelocations(wasm: *Wasm, relocs: ObjectRelocation.IterableSlice) link.File.FlushError!void { + const gpa = wasm.base.comp.gpa; + for (relocs.slice.tags(wasm), relocs.slice.pointees(wasm), relocs.slice.offsets(wasm)) |tag, pointee, offset| { + if (offset >= relocs.end) break; + switch (tag) { + .function_import_index_leb, + .function_import_index_i32, + .function_import_offset_i32, + .function_import_offset_i64, + => { + const name = pointee.symbol_name; + const i: FunctionImport.Index = @enumFromInt(wasm.object_function_imports.getIndex(name).?); + try markFunctionImport(wasm, name, i.value(wasm), i); + }, + .table_import_index_sleb, + .table_import_index_i32, + .table_import_index_sleb64, + .table_import_index_i64, + .table_import_index_rel_sleb, + .table_import_index_rel_sleb64, + => { + const name = pointee.symbol_name; + try wasm.object_indirect_function_import_set.put(gpa, name, {}); + const i: FunctionImport.Index = @enumFromInt(wasm.object_function_imports.getIndex(name).?); + try markFunctionImport(wasm, name, i.value(wasm), i); + }, + .global_import_index_leb, .global_import_index_i32 => { + const name = pointee.symbol_name; + const i: GlobalImport.Index = @enumFromInt(wasm.object_global_imports.getIndex(name).?); + try markGlobalImport(wasm, name, i.value(wasm), i); + }, + .table_import_number_leb => { + const name = pointee.symbol_name; + const i: TableImport.Index = @enumFromInt(wasm.object_table_imports.getIndex(name).?); + try markTableImport(wasm, name, i.value(wasm), i); + }, + .memory_addr_import_leb, + .memory_addr_import_sleb, + .memory_addr_import_i32, + .memory_addr_import_rel_sleb, + .memory_addr_import_leb64, + .memory_addr_import_sleb64, + .memory_addr_import_i64, + .memory_addr_import_rel_sleb64, + .memory_addr_import_tls_sleb, + .memory_addr_import_locrel_i32, + .memory_addr_import_tls_sleb64, + => { + const name = pointee.symbol_name; + const i = ObjectDataImport.Index.fromSymbolName(wasm, name).?; + try markDataImport(wasm, name, i.value(wasm), i); + }, + + .function_index_leb, + .function_index_i32, + .function_offset_i32, + .function_offset_i64, + => try markFunction(wasm, pointee.function.chaseWeak(wasm), false), + .table_index_sleb, + .table_index_i32, + .table_index_sleb64, + .table_index_i64, + .table_index_rel_sleb, + .table_index_rel_sleb64, + => { + const function = pointee.function; + try wasm.object_indirect_function_set.put(gpa, function, {}); + try markFunction(wasm, function.chaseWeak(wasm), false); + }, + .global_index_leb, + .global_index_i32, + => try markGlobal(wasm, pointee.global.chaseWeak(wasm), false), + .table_number_leb, + => try markTable(wasm, pointee.table.chaseWeak(wasm)), + + .section_offset_i32 => { + log.warn("TODO: ensure section {d} is included in output", .{pointee.section}); + }, - const writer = binary_bytes.writer(); - const target_features = "target_features"; - try leb.writeUleb128(writer, @as(u32, @intCast(target_features.len))); - try writer.writeAll(target_features); + .memory_addr_leb, + .memory_addr_sleb, + .memory_addr_i32, + .memory_addr_rel_sleb, + .memory_addr_leb64, + .memory_addr_sleb64, + .memory_addr_i64, + .memory_addr_rel_sleb64, + .memory_addr_tls_sleb, + .memory_addr_locrel_i32, + 
.memory_addr_tls_sleb64, + => try markDataSegment(wasm, pointee.data.ptr(wasm).segment), - try leb.writeUleb128(writer, features_count); - for (enabled_features, 0..) |enabled, feature_index| { - if (enabled) { - const feature: Feature = .{ .prefix = .used, .tag = @as(Feature.Tag, @enumFromInt(feature_index)) }; - try leb.writeUleb128(writer, @intFromEnum(feature.prefix)); - var buf: [100]u8 = undefined; - const string = try std.fmt.bufPrint(&buf, "{}", .{feature.tag}); - try leb.writeUleb128(writer, @as(u32, @intCast(string.len))); - try writer.writeAll(string); + .type_index_leb => continue, } } +} - try writeCustomSectionHeader( - binary_bytes.items, - header_offset, - @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)), - ); +fn markTable(wasm: *Wasm, i: ObjectTableIndex) link.File.FlushError!void { + try wasm.tables.put(wasm.base.comp.gpa, .fromObjectTable(i), {}); } -fn emitNameSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), arena: std.mem.Allocator) !void { +pub fn flushModule( + wasm: *Wasm, + arena: Allocator, + tid: Zcu.PerThread.Id, + prog_node: std.Progress.Node, +) link.File.FlushError!void { + // The goal is to never use this because it's only needed if we need to + // write to InternPool, but flushModule is too late to be writing to the + // InternPool. + _ = tid; const comp = wasm.base.comp; - const import_memory = comp.config.import_memory; - const Name = struct { - index: u32, - name: []const u8, - - fn lessThan(context: void, lhs: @This(), rhs: @This()) bool { - _ = context; - return lhs.index < rhs.index; - } - }; + const use_lld = build_options.have_llvm and comp.config.use_lld; + const diags = &comp.link_diags; + const gpa = comp.gpa; - // we must de-duplicate symbols that point to the same function - var funcs = std.AutoArrayHashMap(u32, Name).init(arena); - try funcs.ensureUnusedCapacity(wasm.functions.count() + wasm.imported_functions_count); - var globals = try std.ArrayList(Name).initCapacity(arena, wasm.wasm_globals.items.len + wasm.imported_globals_count); - var segments = try std.ArrayList(Name).initCapacity(arena, wasm.data_segments.count()); - - for (wasm.resolved_symbols.keys()) |sym_loc| { - const symbol = wasm.symbolLocSymbol(sym_loc).*; - if (symbol.isDead()) { - continue; - } - const name = wasm.symbolLocName(sym_loc); - switch (symbol.tag) { - .function => { - const gop = funcs.getOrPutAssumeCapacity(symbol.index); - if (!gop.found_existing) { - gop.value_ptr.* = .{ .index = symbol.index, .name = name }; - } - }, - .global => globals.appendAssumeCapacity(.{ .index = symbol.index, .name = name }), - else => {}, - } - } - // data segments are already 'ordered' - var data_segment_index: u32 = 0; - for (wasm.data_segments.keys()) |key| { - // bss section is not emitted when this condition holds true, so we also - // do not output a name for it. 
- if (!import_memory and std.mem.eql(u8, key, ".bss")) continue; - segments.appendAssumeCapacity(.{ .index = data_segment_index, .name = key }); - data_segment_index += 1; + if (wasm.llvm_object) |llvm_object| { + try wasm.base.emitLlvmObject(arena, llvm_object, prog_node); + if (use_lld) return; } - mem.sort(Name, funcs.values(), {}, Name.lessThan); - mem.sort(Name, globals.items, {}, Name.lessThan); - - const header_offset = try reserveCustomSectionHeader(binary_bytes); - const writer = binary_bytes.writer(); - try leb.writeUleb128(writer, @as(u32, @intCast("name".len))); - try writer.writeAll("name"); + if (comp.verbose_link) Compilation.dump_argv(wasm.dump_argv_list.items); - try wasm.emitNameSubsection(.function, funcs.values(), writer); - try wasm.emitNameSubsection(.global, globals.items, writer); - try wasm.emitNameSubsection(.data_segment, segments.items, writer); + if (wasm.base.zcu_object_sub_path) |path| { + const module_obj_path: Path = .{ + .root_dir = wasm.base.emit.root_dir, + .sub_path = if (fs.path.dirname(wasm.base.emit.sub_path)) |dirname| + try fs.path.join(arena, &.{ dirname, path }) + else + path, + }; + openParseObjectReportingFailure(wasm, module_obj_path); + try prelink(wasm, prog_node); + } - try writeCustomSectionHeader( - binary_bytes.items, - header_offset, - @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)), - ); -} + const tracy = trace(@src()); + defer tracy.end(); -fn emitNameSubsection(wasm: *Wasm, section_id: std.wasm.NameSubsection, names: anytype, writer: anytype) !void { - const gpa = wasm.base.comp.gpa; + const sub_prog_node = prog_node.start("Wasm Flush", 0); + defer sub_prog_node.end(); - // We must emit subsection size, so first write to a temporary list - var section_list = std.ArrayList(u8).init(gpa); - defer section_list.deinit(); - const sub_writer = section_list.writer(); + const functions_end_zcu: u32 = @intCast(wasm.functions.entries.len); + defer wasm.functions.shrinkRetainingCapacity(functions_end_zcu); - try leb.writeUleb128(sub_writer, @as(u32, @intCast(names.len))); - for (names) |name| { - log.debug("Emit symbol '{s}' type({s})", .{ name.name, @tagName(section_id) }); - try leb.writeUleb128(sub_writer, name.index); - try leb.writeUleb128(sub_writer, @as(u32, @intCast(name.name.len))); - try sub_writer.writeAll(name.name); - } + const globals_end_zcu: u32 = @intCast(wasm.globals.entries.len); + defer wasm.globals.shrinkRetainingCapacity(globals_end_zcu); - // From now, write to the actual writer - try leb.writeUleb128(writer, @intFromEnum(section_id)); - try leb.writeUleb128(writer, @as(u32, @intCast(section_list.items.len))); - try writer.writeAll(section_list.items); -} + const function_exports_end_zcu: u32 = @intCast(wasm.function_exports.entries.len); + defer wasm.function_exports.shrinkRetainingCapacity(function_exports_end_zcu); -fn emitLimits(writer: anytype, limits: std.wasm.Limits) !void { - try writer.writeByte(limits.flags); - try leb.writeUleb128(writer, limits.min); - if (limits.hasFlag(.WASM_LIMITS_FLAG_HAS_MAX)) { - try leb.writeUleb128(writer, limits.max); - } -} + const hidden_function_exports_end_zcu: u32 = @intCast(wasm.hidden_function_exports.entries.len); + defer wasm.hidden_function_exports.shrinkRetainingCapacity(hidden_function_exports_end_zcu); -fn emitInit(writer: anytype, init_expr: std.wasm.InitExpression) !void { - switch (init_expr) { - .i32_const => |val| { - try writer.writeByte(std.wasm.opcode(.i32_const)); - try leb.writeIleb128(writer, val); - }, - .i64_const => |val| { - try 
writer.writeByte(std.wasm.opcode(.i64_const)); - try leb.writeIleb128(writer, val); - }, - .f32_const => |val| { - try writer.writeByte(std.wasm.opcode(.f32_const)); - try writer.writeInt(u32, @bitCast(val), .little); - }, - .f64_const => |val| { - try writer.writeByte(std.wasm.opcode(.f64_const)); - try writer.writeInt(u64, @bitCast(val), .little); - }, - .global_get => |val| { - try writer.writeByte(std.wasm.opcode(.global_get)); - try leb.writeUleb128(writer, val); - }, - } - try writer.writeByte(std.wasm.opcode(.end)); -} + wasm.flush_buffer.clear(); + try wasm.flush_buffer.missing_exports.reinit(gpa, wasm.missing_exports.keys(), &.{}); + try wasm.flush_buffer.function_imports.reinit(gpa, wasm.function_imports.keys(), wasm.function_imports.values()); + try wasm.flush_buffer.global_imports.reinit(gpa, wasm.global_imports.keys(), wasm.global_imports.values()); + try wasm.flush_buffer.data_imports.reinit(gpa, wasm.data_imports.keys(), wasm.data_imports.values()); -fn emitImport(wasm: *Wasm, writer: anytype, import: Import) !void { - const module_name = wasm.stringSlice(import.module_name); - try leb.writeUleb128(writer, @as(u32, @intCast(module_name.len))); - try writer.writeAll(module_name); - - const name = wasm.stringSlice(import.name); - try leb.writeUleb128(writer, @as(u32, @intCast(name.len))); - try writer.writeAll(name); - - try writer.writeByte(@intFromEnum(import.kind)); - switch (import.kind) { - .function => |type_index| try leb.writeUleb128(writer, type_index), - .global => |global_type| { - try leb.writeUleb128(writer, std.wasm.valtype(global_type.valtype)); - try writer.writeByte(@intFromBool(global_type.mutable)); - }, - .table => |table| { - try leb.writeUleb128(writer, std.wasm.reftype(table.reftype)); - try emitLimits(writer, table.limits); - }, - .memory => |limits| { - try emitLimits(writer, limits); - }, - } + return wasm.flush_buffer.finish(wasm) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.LinkFailure => return error.LinkFailure, + else => |e| return diags.fail("failed to flush wasm: {s}", .{@errorName(e)}), + }; } fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void { @@ -3406,6 +3816,7 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: defer tracy.end(); const comp = wasm.base.comp; + const diags = &comp.link_diags; const shared_memory = comp.config.shared_memory; const export_memory = comp.config.export_memory; const import_memory = comp.config.import_memory; @@ -3459,7 +3870,7 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: } try man.addOptionalFile(module_obj_path); try man.addOptionalFilePath(compiler_rt_path); - man.hash.addOptionalBytes(wasm.optionalStringSlice(wasm.entry_name)); + man.hash.addOptionalBytes(wasm.entry_name.slice(wasm)); man.hash.add(wasm.base.stack_size); man.hash.add(wasm.base.build_id); man.hash.add(import_memory); @@ -3608,7 +4019,7 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: try argv.append("--export-dynamic"); } - if (wasm.optionalStringSlice(wasm.entry_name)) |entry_name| { + if (wasm.entry_name.slice(wasm)) |entry_name| { try argv.appendSlice(&.{ "--entry", entry_name }); } else { try argv.append("--no-entry"); @@ -3750,14 +4161,12 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: switch (term) { .Exited => |code| { if (code != 0) { - const diags = &comp.link_diags; 
                diags.lockAndParseLldStderr(linker_command, stderr);
-                return error.LLDReportedFailure;
+                return error.LinkFailure;
            }
        },
        else => {
-            log.err("{s} terminated with stderr:\n{s}", .{ argv.items[0], stderr });
-            return error.LLDCrashed;
+            return diags.fail("{s} terminated with stderr:\n{s}", .{ argv.items[0], stderr });
        },
    }

@@ -3771,7 +4180,7 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
            if (comp.clang_passthrough_mode) {
                std.process.exit(exit_code);
            } else {
-                return error.LLDReportedFailure;
+                return diags.fail("{s} returned exit code {d}", .{ argv.items[0], exit_code });
            }
        }
    }
@@ -3811,969 +4220,507 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
    }
}

-fn reserveVecSectionHeader(bytes: *std.ArrayList(u8)) !u32 {
-    // section id + fixed leb contents size + fixed leb vector length
-    const header_size = 1 + 5 + 5;
-    const offset = @as(u32, @intCast(bytes.items.len));
-    try bytes.appendSlice(&[_]u8{0} ** header_size);
-    return offset;
+fn defaultEntrySymbolName(
+    preloaded_strings: *const PreloadedStrings,
+    wasi_exec_model: std.builtin.WasiExecModel,
+) String {
+    return switch (wasi_exec_model) {
+        .reactor => preloaded_strings._initialize,
+        .command => preloaded_strings._start,
+    };
+}
+
+pub fn internOptionalString(wasm: *Wasm, optional_bytes: ?[]const u8) Allocator.Error!OptionalString {
+    const bytes = optional_bytes orelse return .none;
+    const string = try internString(wasm, bytes);
+    return string.toOptional();
+}
+
+pub fn internString(wasm: *Wasm, bytes: []const u8) Allocator.Error!String {
+    assert(mem.indexOfScalar(u8, bytes, 0) == null);
+    wasm.string_bytes_lock.lock();
+    defer wasm.string_bytes_lock.unlock();
+    const gpa = wasm.base.comp.gpa;
+    const gop = try wasm.string_table.getOrPutContextAdapted(
+        gpa,
+        @as([]const u8, bytes),
+        @as(String.TableIndexAdapter, .{ .bytes = wasm.string_bytes.items }),
+        @as(String.TableContext, .{ .bytes = wasm.string_bytes.items }),
+    );
+    if (gop.found_existing) return gop.key_ptr.*;
+
+    try wasm.string_bytes.ensureUnusedCapacity(gpa, bytes.len + 1);
+    const new_off: String = @enumFromInt(wasm.string_bytes.items.len);
+
+    wasm.string_bytes.appendSliceAssumeCapacity(bytes);
+    wasm.string_bytes.appendAssumeCapacity(0);
+
+    gop.key_ptr.* = new_off;
+
+    return new_off;
 }

-fn reserveCustomSectionHeader(bytes: *std.ArrayList(u8)) !u32 {
-    // unlike regular section, we don't emit the count
-    const header_size = 1 + 5;
-    const offset = @as(u32, @intCast(bytes.items.len));
-    try bytes.appendSlice(&[_]u8{0} ** header_size);
-    return offset;
+// TODO implement instead by appending to string_bytes
+pub fn internStringFmt(wasm: *Wasm, comptime format: []const u8, args: anytype) Allocator.Error!String {
+    var buffer: [32]u8 = undefined;
+    const slice = std.fmt.bufPrint(&buffer, format, args) catch unreachable;
+    return internString(wasm, slice);
 }

-fn writeVecSectionHeader(buffer: []u8, offset: u32, section: std.wasm.Section, size: u32, items: u32) !void {
-    var buf: [1 + 5 + 5]u8 = undefined;
-    buf[0] = @intFromEnum(section);
-    leb.writeUnsignedFixed(5, buf[1..6], size);
-    leb.writeUnsignedFixed(5, buf[6..], items);
-    buffer[offset..][0..buf.len].* = buf;
+pub fn getExistingString(wasm: *const Wasm, bytes: []const u8) ?String {
+    assert(mem.indexOfScalar(u8, bytes, 0) == null);
+    return wasm.string_table.getKeyAdapted(bytes, @as(String.TableIndexAdapter, .{
+        .bytes = wasm.string_bytes.items,
+    }));
}

-fn writeCustomSectionHeader(buffer: []u8, offset: u32, size: u32) !void {
- var buf: [1 + 5]u8 = undefined; - buf[0] = 0; // 0 = 'custom' section - leb.writeUnsignedFixed(5, buf[1..6], size); - buffer[offset..][0..buf.len].* = buf; +pub fn internValtypeList(wasm: *Wasm, valtype_list: []const std.wasm.Valtype) Allocator.Error!ValtypeList { + return .fromString(try internString(wasm, @ptrCast(valtype_list))); } -fn emitLinkSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table: *std.AutoArrayHashMap(SymbolLoc, u32)) !void { - const offset = try reserveCustomSectionHeader(binary_bytes); - const writer = binary_bytes.writer(); - // emit "linking" custom section name - const section_name = "linking"; - try leb.writeUleb128(writer, section_name.len); - try writer.writeAll(section_name); - - // meta data version, which is currently '2' - try leb.writeUleb128(writer, @as(u32, 2)); - - // For each subsection type (found in Subsection) we can emit a section. - // Currently, we only support emitting segment info and the symbol table. - try wasm.emitSymbolTable(binary_bytes, symbol_table); - try wasm.emitSegmentInfo(binary_bytes); - - const size: u32 = @intCast(binary_bytes.items.len - offset - 6); - try writeCustomSectionHeader(binary_bytes.items, offset, size); +pub fn getExistingValtypeList(wasm: *const Wasm, valtype_list: []const std.wasm.Valtype) ?ValtypeList { + return .fromString(getExistingString(wasm, @ptrCast(valtype_list)) orelse return null); } -fn emitSymbolTable(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table: *std.AutoArrayHashMap(SymbolLoc, u32)) !void { - const writer = binary_bytes.writer(); - - try leb.writeUleb128(writer, @intFromEnum(SubsectionType.WASM_SYMBOL_TABLE)); - const table_offset = binary_bytes.items.len; - - var symbol_count: u32 = 0; - for (wasm.resolved_symbols.keys()) |sym_loc| { - const symbol = wasm.symbolLocSymbol(sym_loc).*; - if (symbol.tag == .dead) continue; // Do not emit dead symbols - try symbol_table.putNoClobber(sym_loc, symbol_count); - symbol_count += 1; - log.debug("Emit symbol: {}", .{symbol}); - try leb.writeUleb128(writer, @intFromEnum(symbol.tag)); - try leb.writeUleb128(writer, symbol.flags); - - const sym_name = wasm.symbolLocName(sym_loc); - switch (symbol.tag) { - .data => { - try leb.writeUleb128(writer, @as(u32, @intCast(sym_name.len))); - try writer.writeAll(sym_name); - - if (symbol.isDefined()) { - try leb.writeUleb128(writer, symbol.index); - const atom_index = wasm.symbol_atom.get(sym_loc).?; - const atom = wasm.getAtom(atom_index); - try leb.writeUleb128(writer, @as(u32, atom.offset)); - try leb.writeUleb128(writer, @as(u32, atom.size)); - } - }, - .section => { - try leb.writeUleb128(writer, symbol.index); - }, - else => { - try leb.writeUleb128(writer, symbol.index); - if (symbol.isDefined()) { - try leb.writeUleb128(writer, @as(u32, @intCast(sym_name.len))); - try writer.writeAll(sym_name); - } - }, - } - } - - var buf: [10]u8 = undefined; - leb.writeUnsignedFixed(5, buf[0..5], @intCast(binary_bytes.items.len - table_offset + 5)); - leb.writeUnsignedFixed(5, buf[5..], symbol_count); - try binary_bytes.insertSlice(table_offset, &buf); +pub fn addFuncType(wasm: *Wasm, ft: FunctionType) Allocator.Error!FunctionType.Index { + const gpa = wasm.base.comp.gpa; + const gop = try wasm.func_types.getOrPut(gpa, ft); + return @enumFromInt(gop.index); } -fn emitSegmentInfo(wasm: *Wasm, binary_bytes: *std.ArrayList(u8)) !void { - const writer = binary_bytes.writer(); - try leb.writeUleb128(writer, @intFromEnum(SubsectionType.WASM_SEGMENT_INFO)); - const segment_offset = binary_bytes.items.len; - 
- try leb.writeUleb128(writer, @as(u32, @intCast(wasm.segment_info.count()))); - for (wasm.segment_info.values()) |segment_info| { - log.debug("Emit segment: {s} align({d}) flags({b})", .{ - segment_info.name, - segment_info.alignment, - segment_info.flags, - }); - try leb.writeUleb128(writer, @as(u32, @intCast(segment_info.name.len))); - try writer.writeAll(segment_info.name); - try leb.writeUleb128(writer, segment_info.alignment.toLog2Units()); - try leb.writeUleb128(writer, segment_info.flags); - } - - var buf: [5]u8 = undefined; - leb.writeUnsignedFixed(5, &buf, @as(u32, @intCast(binary_bytes.items.len - segment_offset))); - try binary_bytes.insertSlice(segment_offset, &buf); +pub fn getExistingFuncType(wasm: *const Wasm, ft: FunctionType) ?FunctionType.Index { + const index = wasm.func_types.getIndex(ft) orelse return null; + return @enumFromInt(index); } -pub fn getUleb128Size(uint_value: anytype) u32 { - const T = @TypeOf(uint_value); - const U = if (@typeInfo(T).int.bits < 8) u8 else T; - var value = @as(U, @intCast(uint_value)); - - var size: u32 = 0; - while (value != 0) : (size += 1) { - value >>= 7; - } - return size; +pub fn getExistingFuncType2(wasm: *const Wasm, params: []const std.wasm.Valtype, returns: []const std.wasm.Valtype) FunctionType.Index { + return getExistingFuncType(wasm, .{ + .params = getExistingValtypeList(wasm, params).?, + .returns = getExistingValtypeList(wasm, returns).?, + }).?; } -/// For each relocatable section, emits a custom "relocation." section -fn emitCodeRelocations( +pub fn internFunctionType( wasm: *Wasm, - binary_bytes: *std.ArrayList(u8), - section_index: u32, - symbol_table: std.AutoArrayHashMap(SymbolLoc, u32), -) !void { - const code_index = wasm.code_section_index.unwrap() orelse return; - const writer = binary_bytes.writer(); - const header_offset = try reserveCustomSectionHeader(binary_bytes); - - // write custom section information - const name = "reloc.CODE"; - try leb.writeUleb128(writer, @as(u32, @intCast(name.len))); - try writer.writeAll(name); - try leb.writeUleb128(writer, section_index); - const reloc_start = binary_bytes.items.len; - - var count: u32 = 0; - var atom: *Atom = wasm.getAtomPtr(wasm.atoms.get(code_index).?); - // for each atom, we calculate the uleb size and append that - var size_offset: u32 = 5; // account for code section size leb128 - while (true) { - size_offset += getUleb128Size(atom.size); - for (atom.relocs.items) |relocation| { - count += 1; - const sym_loc: SymbolLoc = .{ .file = atom.file, .index = @enumFromInt(relocation.index) }; - const symbol_index = symbol_table.get(sym_loc).?; - try leb.writeUleb128(writer, @intFromEnum(relocation.relocation_type)); - const offset = atom.offset + relocation.offset + size_offset; - try leb.writeUleb128(writer, offset); - try leb.writeUleb128(writer, symbol_index); - if (relocation.relocation_type.addendIsPresent()) { - try leb.writeIleb128(writer, relocation.addend); - } - log.debug("Emit relocation: {}", .{relocation}); - } - if (atom.prev == .null) break; - atom = wasm.getAtomPtr(atom.prev); - } - if (count == 0) return; - var buf: [5]u8 = undefined; - leb.writeUnsignedFixed(5, &buf, count); - try binary_bytes.insertSlice(reloc_start, &buf); - const size: u32 = @intCast(binary_bytes.items.len - header_offset - 6); - try writeCustomSectionHeader(binary_bytes.items, header_offset, size); + cc: std.builtin.CallingConvention, + params: []const InternPool.Index, + return_type: Zcu.Type, + target: *const std.Target, +) Allocator.Error!FunctionType.Index { + try 
convertZcuFnType(wasm.base.comp, cc, params, return_type, target, &wasm.params_scratch, &wasm.returns_scratch); + return wasm.addFuncType(.{ + .params = try wasm.internValtypeList(wasm.params_scratch.items), + .returns = try wasm.internValtypeList(wasm.returns_scratch.items), + }); } -fn emitDataRelocations( +pub fn getExistingFunctionType( wasm: *Wasm, - binary_bytes: *std.ArrayList(u8), - section_index: u32, - symbol_table: std.AutoArrayHashMap(SymbolLoc, u32), -) !void { - if (wasm.data_segments.count() == 0) return; - const writer = binary_bytes.writer(); - const header_offset = try reserveCustomSectionHeader(binary_bytes); - - // write custom section information - const name = "reloc.DATA"; - try leb.writeUleb128(writer, @as(u32, @intCast(name.len))); - try writer.writeAll(name); - try leb.writeUleb128(writer, section_index); - const reloc_start = binary_bytes.items.len; - - var count: u32 = 0; - // for each atom, we calculate the uleb size and append that - var size_offset: u32 = 5; // account for code section size leb128 - for (wasm.data_segments.values()) |segment_index| { - var atom: *Atom = wasm.getAtomPtr(wasm.atoms.get(segment_index).?); - while (true) { - size_offset += getUleb128Size(atom.size); - for (atom.relocs.items) |relocation| { - count += 1; - const sym_loc: SymbolLoc = .{ .file = atom.file, .index = @enumFromInt(relocation.index) }; - const symbol_index = symbol_table.get(sym_loc).?; - try leb.writeUleb128(writer, @intFromEnum(relocation.relocation_type)); - const offset = atom.offset + relocation.offset + size_offset; - try leb.writeUleb128(writer, offset); - try leb.writeUleb128(writer, symbol_index); - if (relocation.relocation_type.addendIsPresent()) { - try leb.writeIleb128(writer, relocation.addend); - } - log.debug("Emit relocation: {}", .{relocation}); - } - if (atom.prev == .null) break; - atom = wasm.getAtomPtr(atom.prev); - } - } - if (count == 0) return; - - var buf: [5]u8 = undefined; - leb.writeUnsignedFixed(5, &buf, count); - try binary_bytes.insertSlice(reloc_start, &buf); - const size = @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)); - try writeCustomSectionHeader(binary_bytes.items, header_offset, size); + cc: std.builtin.CallingConvention, + params: []const InternPool.Index, + return_type: Zcu.Type, + target: *const std.Target, +) ?FunctionType.Index { + convertZcuFnType(wasm.base.comp, cc, params, return_type, target, &wasm.params_scratch, &wasm.returns_scratch) catch |err| switch (err) { + error.OutOfMemory => return null, + }; + return wasm.getExistingFuncType(.{ + .params = wasm.getExistingValtypeList(wasm.params_scratch.items) orelse return null, + .returns = wasm.getExistingValtypeList(wasm.returns_scratch.items) orelse return null, + }); } -fn hasPassiveInitializationSegments(wasm: *const Wasm) bool { - const comp = wasm.base.comp; - const import_memory = comp.config.import_memory; - - var it = wasm.data_segments.iterator(); - while (it.next()) |entry| { - const segment = wasm.segmentPtr(entry.value_ptr.*); - if (segment.needsPassiveInitialization(import_memory, entry.key_ptr.*)) { - return true; - } - } - return false; +pub fn addExpr(wasm: *Wasm, bytes: []const u8) Allocator.Error!Expr { + const gpa = wasm.base.comp.gpa; + // We can't use string table deduplication here since these expressions can + // have null bytes in them however it may be interesting to explore since + // it is likely for globals to share initialization values. Then again + // there may not be very many globals in total. 
+ try wasm.string_bytes.appendSlice(gpa, bytes); + return @enumFromInt(wasm.string_bytes.items.len - bytes.len); } -/// Searches for a matching function signature. When no matching signature is found, -/// a new entry will be made. The value returned is the index of the type within `wasm.func_types`. -pub fn putOrGetFuncType(wasm: *Wasm, func_type: std.wasm.Type) !u32 { - if (wasm.getTypeIndex(func_type)) |index| { - return index; - } - - // functype does not exist. +pub fn addRelocatableDataPayload(wasm: *Wasm, bytes: []const u8) Allocator.Error!DataPayload { const gpa = wasm.base.comp.gpa; - const index: u32 = @intCast(wasm.func_types.items.len); - const params = try gpa.dupe(std.wasm.Valtype, func_type.params); - errdefer gpa.free(params); - const returns = try gpa.dupe(std.wasm.Valtype, func_type.returns); - errdefer gpa.free(returns); - try wasm.func_types.append(gpa, .{ - .params = params, - .returns = returns, - }); - return index; + try wasm.string_bytes.appendSlice(gpa, bytes); + return .{ + .off = @enumFromInt(wasm.string_bytes.items.len - bytes.len), + .len = @intCast(bytes.len), + }; } -/// For the given `nav`, stores the corresponding type representing the function signature. -/// Asserts declaration has an associated `Atom`. -/// Returns the index into the list of types. -pub fn storeNavType(wasm: *Wasm, nav: InternPool.Nav.Index, func_type: std.wasm.Type) !u32 { - return wasm.zig_object.?.storeDeclType(wasm.base.comp.gpa, nav, func_type); +pub fn uavSymbolIndex(wasm: *Wasm, ip_index: InternPool.Index) Allocator.Error!SymbolTableIndex { + const comp = wasm.base.comp; + assert(comp.config.output_mode == .Obj); + const gpa = comp.gpa; + const name = try wasm.internStringFmt("__anon_{d}", .{@intFromEnum(ip_index)}); + const gop = try wasm.symbol_table.getOrPut(gpa, name); + gop.value_ptr.* = {}; + return @enumFromInt(gop.index); } -/// Returns the symbol index of the error name table. -/// -/// When the symbol does not yet exist, it will create a new one instead. -pub fn getErrorTableSymbol(wasm: *Wasm, pt: Zcu.PerThread) !u32 { - const sym_index = try wasm.zig_object.?.getErrorTableSymbol(wasm, pt); - return @intFromEnum(sym_index); +pub fn navSymbolIndex(wasm: *Wasm, nav_index: InternPool.Nav.Index) Allocator.Error!SymbolTableIndex { + const comp = wasm.base.comp; + assert(comp.config.output_mode == .Obj); + const zcu = comp.zcu.?; + const ip = &zcu.intern_pool; + const gpa = comp.gpa; + const nav = ip.getNav(nav_index); + const name = try wasm.internString(nav.fqn.toSlice(ip)); + const gop = try wasm.symbol_table.getOrPut(gpa, name); + gop.value_ptr.* = {}; + return @enumFromInt(gop.index); } -/// For a given `InternPool.DeclIndex` returns its corresponding `Atom.Index`. -/// When the index was not found, a new `Atom` will be created, and its index will be returned. -/// The newly created Atom is empty with default fields as specified by `Atom.empty`. -pub fn getOrCreateAtomForNav(wasm: *Wasm, pt: Zcu.PerThread, nav: InternPool.Nav.Index) !Atom.Index { - return wasm.zig_object.?.getOrCreateAtomForNav(wasm, pt, nav); +pub fn errorNameTableSymbolIndex(wasm: *Wasm) Allocator.Error!SymbolTableIndex { + const comp = wasm.base.comp; + assert(comp.config.output_mode == .Obj); + const gpa = comp.gpa; + const gop = try wasm.symbol_table.getOrPut(gpa, wasm.preloaded_strings.__zig_error_name_table); + gop.value_ptr.* = {}; + return @enumFromInt(gop.index); } -/// Verifies all resolved symbols and checks whether itself needs to be marked alive, -/// as well as any of its references. 
-fn markReferences(wasm: *Wasm) !void { - const tracy = trace(@src()); - defer tracy.end(); - - const do_garbage_collect = wasm.base.gc_sections; +pub fn stackPointerSymbolIndex(wasm: *Wasm) Allocator.Error!SymbolTableIndex { const comp = wasm.base.comp; - - for (wasm.resolved_symbols.keys()) |sym_loc| { - const sym = wasm.symbolLocSymbol(sym_loc); - if (sym.isExported(comp.config.rdynamic) or sym.isNoStrip() or !do_garbage_collect) { - try wasm.mark(sym_loc); - continue; - } - - // Debug sections may require to be parsed and marked when it contains - // relocations to alive symbols. - if (sym.tag == .section and comp.config.debug_format != .strip) { - const object_id = sym_loc.file.unwrap() orelse continue; // Incremental debug info is done independently - _ = try wasm.parseSymbolIntoAtom(object_id, sym_loc.index); - sym.mark(); - } - } + assert(comp.config.output_mode == .Obj); + const gpa = comp.gpa; + const gop = try wasm.symbol_table.getOrPut(gpa, wasm.preloaded_strings.__stack_pointer); + gop.value_ptr.* = {}; + return @enumFromInt(gop.index); } -/// Marks a symbol as 'alive' recursively so itself and any references it contains to -/// other symbols will not be omit from the binary. -fn mark(wasm: *Wasm, loc: SymbolLoc) !void { - const symbol = wasm.symbolLocSymbol(loc); - if (symbol.isAlive()) { - // Symbol is already marked alive, including its references. - // This means we can skip it so we don't end up marking the same symbols - // multiple times. - return; - } - symbol.mark(); - gc_log.debug("Marked symbol '{s}'", .{wasm.symbolLocName(loc)}); - if (symbol.isUndefined()) { - // undefined symbols do not have an associated `Atom` and therefore also - // do not contain relocations. - return; - } - - const atom_index = if (loc.file.unwrap()) |object_id| - try wasm.parseSymbolIntoAtom(object_id, loc.index) - else - wasm.symbol_atom.get(loc) orelse return; - - const atom = wasm.getAtom(atom_index); - for (atom.relocs.items) |reloc| { - const target_loc: SymbolLoc = .{ .index = @enumFromInt(reloc.index), .file = loc.file }; - try wasm.mark(wasm.symbolLocFinalLoc(target_loc)); - } +pub fn tagNameSymbolIndex(wasm: *Wasm, ip_index: InternPool.Index) Allocator.Error!SymbolTableIndex { + const comp = wasm.base.comp; + assert(comp.config.output_mode == .Obj); + const gpa = comp.gpa; + const name = try wasm.internStringFmt("__zig_tag_name_{d}", .{@intFromEnum(ip_index)}); + const gop = try wasm.symbol_table.getOrPut(gpa, name); + gop.value_ptr.* = {}; + return @enumFromInt(gop.index); } -fn defaultEntrySymbolName( - preloaded_strings: *const PreloadedStrings, - wasi_exec_model: std.builtin.WasiExecModel, -) String { - return switch (wasi_exec_model) { - .reactor => preloaded_strings._initialize, - .command => preloaded_strings._start, - }; +pub fn symbolNameIndex(wasm: *Wasm, name: String) Allocator.Error!SymbolTableIndex { + const comp = wasm.base.comp; + assert(comp.config.output_mode == .Obj); + const gpa = comp.gpa; + const gop = try wasm.symbol_table.getOrPut(gpa, name); + gop.value_ptr.* = {}; + return @enumFromInt(gop.index); } -pub const Atom = struct { - /// Represents the index of the file this atom was generated from. - /// This is `none` when the atom was generated by a synthetic linker symbol. 
- file: OptionalObjectId, - /// symbol index of the symbol representing this atom - sym_index: Symbol.Index, - /// Size of the atom, used to calculate section sizes in the final binary - size: u32 = 0, - /// List of relocations belonging to this atom - relocs: std.ArrayListUnmanaged(Relocation) = .empty, - /// Contains the binary data of an atom, which can be non-relocated - code: std.ArrayListUnmanaged(u8) = .empty, - /// For code this is 1, for data this is set to the highest value of all segments - alignment: Wasm.Alignment = .@"1", - /// Offset into the section where the atom lives, this already accounts - /// for alignment. - offset: u32 = 0, - /// The original offset within the object file. This value is subtracted from - /// relocation offsets to determine where in the `data` to rewrite the value - original_offset: u32 = 0, - /// Previous atom in relation to this atom. - /// is null when this atom is the first in its order - prev: Atom.Index = .null, - /// Contains atoms local to a decl, all managed by this `Atom`. - /// When the parent atom is being freed, it will also do so for all local atoms. - locals: std.ArrayListUnmanaged(Atom.Index) = .empty, - - /// Represents the index of an Atom where `null` is considered - /// an invalid atom. - pub const Index = enum(u32) { - null = std.math.maxInt(u32), - _, - }; - - /// Frees all resources owned by this `Atom`. - pub fn deinit(atom: *Atom, gpa: std.mem.Allocator) void { - atom.relocs.deinit(gpa); - atom.code.deinit(gpa); - atom.locals.deinit(gpa); - atom.* = undefined; - } - - /// Sets the length of relocations and code to '0', - /// effectively resetting them and allowing them to be re-populated. - pub fn clear(atom: *Atom) void { - atom.relocs.clearRetainingCapacity(); - atom.code.clearRetainingCapacity(); - } - - pub fn format(atom: Atom, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { - _ = fmt; - _ = options; - try writer.print("Atom{{ .sym_index = {d}, .alignment = {d}, .size = {d}, .offset = 0x{x:0>8} }}", .{ - @intFromEnum(atom.sym_index), - atom.alignment, - atom.size, - atom.offset, - }); - } +pub fn refUavObj(wasm: *Wasm, ip_index: InternPool.Index, orig_ptr_ty: InternPool.Index) !UavsObjIndex { + const comp = wasm.base.comp; + const zcu = comp.zcu.?; + const ip = &zcu.intern_pool; + const gpa = comp.gpa; + assert(comp.config.output_mode == .Obj); - /// Returns the location of the symbol that represents this `Atom` - pub fn symbolLoc(atom: Atom) Wasm.SymbolLoc { - return .{ - .file = atom.file, - .index = atom.sym_index, - }; + if (orig_ptr_ty != .none) { + const abi_alignment = Zcu.Type.fromInterned(ip.typeOf(ip_index)).abiAlignment(zcu); + const explicit_alignment = ip.indexToKey(orig_ptr_ty).ptr_type.flags.alignment; + if (explicit_alignment.compare(.gt, abi_alignment)) { + const gop = try wasm.overaligned_uavs.getOrPut(gpa, ip_index); + gop.value_ptr.* = if (gop.found_existing) gop.value_ptr.maxStrict(explicit_alignment) else explicit_alignment; + } } - /// Resolves the relocations within the atom, writing the new value - /// at the calculated offset. 
- pub fn resolveRelocs(atom: *Atom, wasm: *const Wasm) void { - if (atom.relocs.items.len == 0) return; - const symbol_name = wasm.symbolLocName(atom.symbolLoc()); - log.debug("Resolving relocs in atom '{s}' count({d})", .{ - symbol_name, - atom.relocs.items.len, - }); - - for (atom.relocs.items) |reloc| { - const value = atom.relocationValue(reloc, wasm); - log.debug("Relocating '{s}' referenced in '{s}' offset=0x{x:0>8} value={d}", .{ - wasm.symbolLocName(.{ - .file = atom.file, - .index = @enumFromInt(reloc.index), - }), - symbol_name, - reloc.offset, - value, - }); + const gop = try wasm.uavs_obj.getOrPut(gpa, ip_index); + if (!gop.found_existing) gop.value_ptr.* = .{ + // Lowering the value is delayed to avoid recursion. + .code = undefined, + .relocs = undefined, + }; + return @enumFromInt(gop.index); +} - switch (reloc.relocation_type) { - .R_WASM_TABLE_INDEX_I32, - .R_WASM_FUNCTION_OFFSET_I32, - .R_WASM_GLOBAL_INDEX_I32, - .R_WASM_MEMORY_ADDR_I32, - .R_WASM_SECTION_OFFSET_I32, - => std.mem.writeInt(u32, atom.code.items[reloc.offset - atom.original_offset ..][0..4], @as(u32, @truncate(value)), .little), - .R_WASM_TABLE_INDEX_I64, - .R_WASM_MEMORY_ADDR_I64, - => std.mem.writeInt(u64, atom.code.items[reloc.offset - atom.original_offset ..][0..8], value, .little), - .R_WASM_GLOBAL_INDEX_LEB, - .R_WASM_EVENT_INDEX_LEB, - .R_WASM_FUNCTION_INDEX_LEB, - .R_WASM_MEMORY_ADDR_LEB, - .R_WASM_MEMORY_ADDR_SLEB, - .R_WASM_TABLE_INDEX_SLEB, - .R_WASM_TABLE_NUMBER_LEB, - .R_WASM_TYPE_INDEX_LEB, - .R_WASM_MEMORY_ADDR_TLS_SLEB, - => leb.writeUnsignedFixed(5, atom.code.items[reloc.offset - atom.original_offset ..][0..5], @as(u32, @truncate(value))), - .R_WASM_MEMORY_ADDR_LEB64, - .R_WASM_MEMORY_ADDR_SLEB64, - .R_WASM_TABLE_INDEX_SLEB64, - .R_WASM_MEMORY_ADDR_TLS_SLEB64, - => leb.writeUnsignedFixed(10, atom.code.items[reloc.offset - atom.original_offset ..][0..10], value), - } - } - } +pub fn refUavExe(wasm: *Wasm, ip_index: InternPool.Index, orig_ptr_ty: InternPool.Index) !UavsExeIndex { + const comp = wasm.base.comp; + const zcu = comp.zcu.?; + const ip = &zcu.intern_pool; + const gpa = comp.gpa; + assert(comp.config.output_mode != .Obj); - /// From a given `relocation` will return the new value to be written. - /// All values will be represented as a `u64` as all values can fit within it. - /// The final value must be casted to the correct size. 
- fn relocationValue(atom: Atom, relocation: Relocation, wasm: *const Wasm) u64 { - const target_loc = wasm.symbolLocFinalLoc(.{ - .file = atom.file, - .index = @enumFromInt(relocation.index), - }); - const symbol = wasm.symbolLocSymbol(target_loc); - if (relocation.relocation_type != .R_WASM_TYPE_INDEX_LEB and - symbol.tag != .section and - symbol.isDead()) - { - const val = atom.tombstone(wasm) orelse relocation.addend; - return @bitCast(val); - } - switch (relocation.relocation_type) { - .R_WASM_FUNCTION_INDEX_LEB => return symbol.index, - .R_WASM_TABLE_NUMBER_LEB => return symbol.index, - .R_WASM_TABLE_INDEX_I32, - .R_WASM_TABLE_INDEX_I64, - .R_WASM_TABLE_INDEX_SLEB, - .R_WASM_TABLE_INDEX_SLEB64, - => return wasm.function_table.get(.{ .file = atom.file, .index = @enumFromInt(relocation.index) }) orelse 0, - .R_WASM_TYPE_INDEX_LEB => { - const object_id = atom.file.unwrap() orelse return relocation.index; - const original_type = objectFuncTypes(wasm, object_id)[relocation.index]; - return wasm.getTypeIndex(original_type).?; - }, - .R_WASM_GLOBAL_INDEX_I32, - .R_WASM_GLOBAL_INDEX_LEB, - => return symbol.index, - .R_WASM_MEMORY_ADDR_I32, - .R_WASM_MEMORY_ADDR_I64, - .R_WASM_MEMORY_ADDR_LEB, - .R_WASM_MEMORY_ADDR_LEB64, - .R_WASM_MEMORY_ADDR_SLEB, - .R_WASM_MEMORY_ADDR_SLEB64, - => { - std.debug.assert(symbol.tag == .data); - if (symbol.isUndefined()) { - return 0; - } - const va: i33 = @intCast(symbol.virtual_address); - return @intCast(va + relocation.addend); - }, - .R_WASM_EVENT_INDEX_LEB => return symbol.index, - .R_WASM_SECTION_OFFSET_I32 => { - const target_atom_index = wasm.symbol_atom.get(target_loc).?; - const target_atom = wasm.getAtom(target_atom_index); - const rel_value: i33 = @intCast(target_atom.offset); - return @intCast(rel_value + relocation.addend); - }, - .R_WASM_FUNCTION_OFFSET_I32 => { - if (symbol.isUndefined()) { - const val = atom.tombstone(wasm) orelse relocation.addend; - return @bitCast(val); - } - const target_atom_index = wasm.symbol_atom.get(target_loc).?; - const target_atom = wasm.getAtom(target_atom_index); - const rel_value: i33 = @intCast(target_atom.offset); - return @intCast(rel_value + relocation.addend); - }, - .R_WASM_MEMORY_ADDR_TLS_SLEB, - .R_WASM_MEMORY_ADDR_TLS_SLEB64, - => { - const va: i33 = @intCast(symbol.virtual_address); - return @intCast(va + relocation.addend); - }, + if (orig_ptr_ty != .none) { + const abi_alignment = Zcu.Type.fromInterned(ip.typeOf(ip_index)).abiAlignment(zcu); + const explicit_alignment = ip.indexToKey(orig_ptr_ty).ptr_type.flags.alignment; + if (explicit_alignment.compare(.gt, abi_alignment)) { + const gop = try wasm.overaligned_uavs.getOrPut(gpa, ip_index); + gop.value_ptr.* = if (gop.found_existing) gop.value_ptr.maxStrict(explicit_alignment) else explicit_alignment; } } - // For a given `Atom` returns whether it has a tombstone value or not. - /// This defines whether we want a specific value when a section is dead. - fn tombstone(atom: Atom, wasm: *const Wasm) ?i64 { - const atom_name = wasm.symbolLocSymbol(atom.symbolLoc()).name; - if (atom_name == wasm.custom_sections.@".debug_ranges".name or - atom_name == wasm.custom_sections.@".debug_loc".name) - { - return -2; - } else if (std.mem.startsWith(u8, wasm.stringSlice(atom_name), ".debug_")) { - return -1; - } else { - return null; - } + const gop = try wasm.uavs_exe.getOrPut(gpa, ip_index); + if (gop.found_existing) { + gop.value_ptr.count += 1; + } else { + gop.value_ptr.* = .{ + // Lowering the value is delayed to avoid recursion. 
+ .code = undefined, + .count = 1, + }; } -}; + return @enumFromInt(gop.index); +} -pub const Relocation = struct { - /// Represents the type of the `Relocation` - relocation_type: RelocationType, - /// Offset of the value to rewrite relative to the relevant section's contents. - /// When `offset` is zero, its position is immediately after the id and size of the section. - offset: u32, - /// The index of the symbol used. - /// When the type is `R_WASM_TYPE_INDEX_LEB`, it represents the index of the type. - index: u32, - /// Addend to add to the address. - /// This field is only non-zero for `R_WASM_MEMORY_ADDR_*`, `R_WASM_FUNCTION_OFFSET_I32` and `R_WASM_SECTION_OFFSET_I32`. - addend: i32 = 0, - - /// All possible relocation types currently existing. - /// This enum is exhaustive as the spec is WIP and new types - /// can be added which means that a generated binary will be invalid, - /// so instead we will show an error in such cases. - pub const RelocationType = enum(u8) { - R_WASM_FUNCTION_INDEX_LEB = 0, - R_WASM_TABLE_INDEX_SLEB = 1, - R_WASM_TABLE_INDEX_I32 = 2, - R_WASM_MEMORY_ADDR_LEB = 3, - R_WASM_MEMORY_ADDR_SLEB = 4, - R_WASM_MEMORY_ADDR_I32 = 5, - R_WASM_TYPE_INDEX_LEB = 6, - R_WASM_GLOBAL_INDEX_LEB = 7, - R_WASM_FUNCTION_OFFSET_I32 = 8, - R_WASM_SECTION_OFFSET_I32 = 9, - R_WASM_EVENT_INDEX_LEB = 10, - R_WASM_GLOBAL_INDEX_I32 = 13, - R_WASM_MEMORY_ADDR_LEB64 = 14, - R_WASM_MEMORY_ADDR_SLEB64 = 15, - R_WASM_MEMORY_ADDR_I64 = 16, - R_WASM_TABLE_INDEX_SLEB64 = 18, - R_WASM_TABLE_INDEX_I64 = 19, - R_WASM_TABLE_NUMBER_LEB = 20, - R_WASM_MEMORY_ADDR_TLS_SLEB = 21, - R_WASM_MEMORY_ADDR_TLS_SLEB64 = 25, - - /// Returns true for relocation types where the `addend` field is present. - pub fn addendIsPresent(self: RelocationType) bool { - return switch (self) { - .R_WASM_MEMORY_ADDR_LEB, - .R_WASM_MEMORY_ADDR_SLEB, - .R_WASM_MEMORY_ADDR_I32, - .R_WASM_MEMORY_ADDR_LEB64, - .R_WASM_MEMORY_ADDR_SLEB64, - .R_WASM_MEMORY_ADDR_I64, - .R_WASM_MEMORY_ADDR_TLS_SLEB, - .R_WASM_MEMORY_ADDR_TLS_SLEB64, - .R_WASM_FUNCTION_OFFSET_I32, - .R_WASM_SECTION_OFFSET_I32, - => true, - else => false, - }; - } +pub fn refNavObj(wasm: *Wasm, nav_index: InternPool.Nav.Index) !NavsObjIndex { + const comp = wasm.base.comp; + const gpa = comp.gpa; + assert(comp.config.output_mode != .Obj); + const gop = try wasm.navs_obj.getOrPut(gpa, nav_index); + if (!gop.found_existing) gop.value_ptr.* = .{ + // Lowering the value is delayed to avoid recursion. + .code = undefined, + .relocs = undefined, }; + return @enumFromInt(gop.index); +} - /// Verifies the relocation type of a given `Relocation` and returns - /// true when the relocation references a function call or address to a function. - pub fn isFunction(self: Relocation) bool { - return switch (self.relocation_type) { - .R_WASM_FUNCTION_INDEX_LEB, - .R_WASM_TABLE_INDEX_SLEB, - => true, - else => false, +pub fn refNavExe(wasm: *Wasm, nav_index: InternPool.Nav.Index) !NavsExeIndex { + const comp = wasm.base.comp; + const gpa = comp.gpa; + assert(comp.config.output_mode != .Obj); + const gop = try wasm.navs_exe.getOrPut(gpa, nav_index); + if (gop.found_existing) { + gop.value_ptr.count += 1; + } else { + gop.value_ptr.* = .{ + // Lowering the value is delayed to avoid recursion. 
+ .code = undefined, + .count = 0, }; } + return @enumFromInt(gop.index); +} - pub fn format(self: Relocation, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { - _ = fmt; - _ = options; - try writer.print("{s} offset=0x{x:0>6} symbol={d}", .{ - @tagName(self.relocation_type), - self.offset, - self.index, - }); - } -}; - -/// Unlike the `Import` object defined by the wasm spec, and existing -/// in the std.wasm namespace, this construct saves the 'module name' and 'name' -/// of the import using offsets into a string table, rather than the slices itself. -/// This saves us (potentially) 24 bytes per import on 64bit machines. -pub const Import = struct { - module_name: String, - name: String, - kind: std.wasm.Import.Kind, -}; - -/// Unlike the `Export` object defined by the wasm spec, and existing -/// in the std.wasm namespace, this construct saves the 'name' -/// of the export using offsets into a string table, rather than the slice itself. -/// This saves us (potentially) 12 bytes per export on 64bit machines. -pub const Export = struct { - name: String, - index: u32, - kind: std.wasm.ExternalKind, -}; - -pub const SubsectionType = enum(u8) { - WASM_SEGMENT_INFO = 5, - WASM_INIT_FUNCS = 6, - WASM_COMDAT_INFO = 7, - WASM_SYMBOL_TABLE = 8, -}; - -pub const Alignment = @import("../InternPool.zig").Alignment; - -pub const NamedSegment = struct { - /// Segment's name, encoded as UTF-8 bytes. - name: []const u8, - /// The required alignment of the segment, encoded as a power of 2 - alignment: Alignment, - /// Bitfield containing flags for a segment - flags: u32, - - pub fn isTLS(segment: NamedSegment) bool { - return segment.flags & @intFromEnum(Flags.WASM_SEG_FLAG_TLS) != 0; - } - - /// Returns the name as how it will be output into the final object - /// file or binary. When `merge_segments` is true, this will return the - /// short name. i.e. ".rodata". When false, it returns the entire name instead. - pub fn outputName(segment: NamedSegment, merge_segments: bool) []const u8 { - if (segment.isTLS()) { - return ".tdata"; - } else if (!merge_segments) { - return segment.name; - } else if (std.mem.startsWith(u8, segment.name, ".rodata.")) { - return ".rodata"; - } else if (std.mem.startsWith(u8, segment.name, ".text.")) { - return ".text"; - } else if (std.mem.startsWith(u8, segment.name, ".data.")) { - return ".data"; - } else if (std.mem.startsWith(u8, segment.name, ".bss.")) { - return ".bss"; - } - return segment.name; - } - - pub const Flags = enum(u32) { - WASM_SEG_FLAG_STRINGS = 0x1, - WASM_SEG_FLAG_TLS = 0x2, - }; -}; - -pub const InitFunc = struct { - /// Priority of the init function - priority: u32, - /// The symbol index of init function (not the function index). - symbol_index: u32, -}; - -pub const Comdat = struct { - name: []const u8, - /// Must be zero, no flags are currently defined by the tool-convention. - flags: u32, - symbols: []const ComdatSym, -}; - -pub const ComdatSym = struct { - kind: @This().Type, - /// Index of the data segment/function/global/event/table within a WASM module. - /// The object must not be an import. - index: u32, - - pub const Type = enum(u8) { - WASM_COMDAT_DATA = 0, - WASM_COMDAT_FUNCTION = 1, - WASM_COMDAT_GLOBAL = 2, - WASM_COMDAT_EVENT = 3, - WASM_COMDAT_TABLE = 4, - WASM_COMDAT_SECTION = 5, - }; -}; - -pub const Feature = struct { - /// Provides information about the usage of the feature. - /// - '0x2b' (+): Object uses this feature, and the link fails if feature is not in the allowed set. 
- /// - '0x2d' (-): Object does not use this feature, and the link fails if this feature is in the allowed set. - /// - '0x3d' (=): Object uses this feature, and the link fails if this feature is not in the allowed set, - /// or if any object does not use this feature. - prefix: Prefix, - /// Type of the feature, must be unique in the sequence of features. - tag: Tag, - - /// Unlike `std.Target.wasm.Feature` this also contains linker-features such as shared-mem - pub const Tag = enum { - atomics, - bulk_memory, - exception_handling, - extended_const, - half_precision, - multimemory, - multivalue, - mutable_globals, - nontrapping_fptoint, - reference_types, - relaxed_simd, - sign_ext, - simd128, - tail_call, - shared_mem, - - /// From a given cpu feature, returns its linker feature - pub fn fromCpuFeature(feature: std.Target.wasm.Feature) Tag { - return @as(Tag, @enumFromInt(@intFromEnum(feature))); - } - - pub fn format(tag: Tag, comptime fmt: []const u8, opt: std.fmt.FormatOptions, writer: anytype) !void { - _ = fmt; - _ = opt; - try writer.writeAll(switch (tag) { - .atomics => "atomics", - .bulk_memory => "bulk-memory", - .exception_handling => "exception-handling", - .extended_const => "extended-const", - .half_precision => "half-precision", - .multimemory => "multimemory", - .multivalue => "multivalue", - .mutable_globals => "mutable-globals", - .nontrapping_fptoint => "nontrapping-fptoint", - .reference_types => "reference-types", - .relaxed_simd => "relaxed-simd", - .sign_ext => "sign-ext", - .simd128 => "simd128", - .tail_call => "tail-call", - .shared_mem => "shared-mem", - }); - } - }; - - pub const Prefix = enum(u8) { - used = '+', - disallowed = '-', - required = '=', - }; - - pub fn format(feature: Feature, comptime fmt: []const u8, opt: std.fmt.FormatOptions, writer: anytype) !void { - _ = opt; - _ = fmt; - try writer.print("{c} {}", .{ feature.prefix, feature.tag }); - } -}; +/// Asserts it is called after `Flush.data_segments` is fully populated and sorted. +pub fn uavAddr(wasm: *Wasm, uav_index: UavsExeIndex) u32 { + assert(wasm.flush_buffer.memory_layout_finished); + const comp = wasm.base.comp; + assert(comp.config.output_mode != .Obj); + const ds_id: DataSegmentId = .pack(wasm, .{ .uav_exe = uav_index }); + return wasm.flush_buffer.data_segments.get(ds_id).?; +} -pub const known_features = std.StaticStringMap(Feature.Tag).initComptime(.{ - .{ "atomics", .atomics }, - .{ "bulk-memory", .bulk_memory }, - .{ "exception-handling", .exception_handling }, - .{ "extended-const", .extended_const }, - .{ "half-precision", .half_precision }, - .{ "multimemory", .multimemory }, - .{ "multivalue", .multivalue }, - .{ "mutable-globals", .mutable_globals }, - .{ "nontrapping-fptoint", .nontrapping_fptoint }, - .{ "reference-types", .reference_types }, - .{ "relaxed-simd", .relaxed_simd }, - .{ "sign-ext", .sign_ext }, - .{ "simd128", .simd128 }, - .{ "tail-call", .tail_call }, - .{ "shared-mem", .shared_mem }, -}); - -/// Parses an object file into atoms, for code and data sections -fn parseSymbolIntoAtom(wasm: *Wasm, object_id: ObjectId, symbol_index: Symbol.Index) !Atom.Index { - const object = wasm.objectById(object_id) orelse - return wasm.zig_object.?.parseSymbolIntoAtom(wasm, symbol_index); +/// Asserts it is called after `Flush.data_segments` is fully populated and sorted. 
+pub fn navAddr(wasm: *Wasm, nav_index: InternPool.Nav.Index) u32 { + assert(wasm.flush_buffer.memory_layout_finished); const comp = wasm.base.comp; - const gpa = comp.gpa; - const symbol = &object.symtable[@intFromEnum(symbol_index)]; - const relocatable_data: Object.RelocatableData = switch (symbol.tag) { - .function => object.relocatable_data.get(.code).?[symbol.index - object.imported_functions_count], - .data => object.relocatable_data.get(.data).?[symbol.index], - .section => blk: { - const data = object.relocatable_data.get(.custom).?; - for (data) |dat| { - if (dat.section_index == symbol.index) { - break :blk dat; + assert(comp.config.output_mode != .Obj); + if (wasm.navs_exe.getIndex(nav_index)) |i| { + const navs_exe_index: NavsExeIndex = @enumFromInt(i); + log.debug("navAddr {s} {}", .{ navs_exe_index.name(wasm), nav_index }); + const ds_id: DataSegmentId = .pack(wasm, .{ .nav_exe = navs_exe_index }); + return wasm.flush_buffer.data_segments.get(ds_id).?; + } + const zcu = comp.zcu.?; + const ip = &zcu.intern_pool; + const nav = ip.getNav(nav_index); + if (nav.getResolvedExtern(ip)) |ext| { + if (wasm.getExistingString(ext.name.toSlice(ip))) |symbol_name| { + if (wasm.object_data_imports.getPtr(symbol_name)) |import| { + switch (import.resolution.unpack(wasm)) { + .unresolved => unreachable, + .object => |object_data_index| { + const object_data = object_data_index.ptr(wasm); + const ds_id: DataSegmentId = .fromObjectDataSegment(wasm, object_data.segment); + const segment_base_addr = wasm.flush_buffer.data_segments.get(ds_id).?; + return segment_base_addr + object_data.offset; + }, + .__zig_error_names => @panic("TODO"), + .__zig_error_name_table => @panic("TODO"), + .__heap_base => @panic("TODO"), + .__heap_end => @panic("TODO"), + .uav_exe => @panic("TODO"), + .uav_obj => @panic("TODO"), + .nav_exe => @panic("TODO"), + .nav_obj => @panic("TODO"), } } - unreachable; - }, - else => unreachable, - }; - const final_index = try wasm.getMatchingSegment(object_id, symbol_index); - const atom_index = try wasm.createAtom(symbol_index, object_id.toOptional()); - try wasm.appendAtomAtIndex(final_index, atom_index); - - const atom = wasm.getAtomPtr(atom_index); - atom.size = relocatable_data.size; - atom.alignment = relocatable_data.getAlignment(object); - atom.code = std.ArrayListUnmanaged(u8).fromOwnedSlice(relocatable_data.data[0..relocatable_data.size]); - atom.original_offset = relocatable_data.offset; - - const segment = wasm.segmentPtr(final_index); - if (relocatable_data.type == .data) { //code section and custom sections are 1-byte aligned - segment.alignment = segment.alignment.max(atom.alignment); - } - - if (object.relocations.get(relocatable_data.section_index)) |relocations| { - const start = searchRelocStart(relocations, relocatable_data.offset); - const len = searchRelocEnd(relocations[start..], relocatable_data.offset + atom.size); - atom.relocs = std.ArrayListUnmanaged(Wasm.Relocation).fromOwnedSlice(relocations[start..][0..len]); - for (atom.relocs.items) |reloc| { - switch (reloc.relocation_type) { - .R_WASM_TABLE_INDEX_I32, - .R_WASM_TABLE_INDEX_I64, - .R_WASM_TABLE_INDEX_SLEB, - .R_WASM_TABLE_INDEX_SLEB64, - => { - try wasm.function_table.put(gpa, .{ - .file = object_id.toOptional(), - .index = @enumFromInt(reloc.index), - }, 0); - }, - .R_WASM_GLOBAL_INDEX_I32, - .R_WASM_GLOBAL_INDEX_LEB, - => { - const sym = object.symtable[reloc.index]; - if (sym.tag != .global) { - try wasm.got_symbols.append(gpa, .{ - .file = object_id.toOptional(), - .index = 
@enumFromInt(reloc.index), - }); - } - }, - else => {}, - } } } + // Otherwise it's a zero bit type; any address will do. + return 0; +} - return atom_index; +/// Asserts it is called after `Flush.data_segments` is fully populated and sorted. +pub fn errorNameTableAddr(wasm: *Wasm) u32 { + assert(wasm.flush_buffer.memory_layout_finished); + const comp = wasm.base.comp; + assert(comp.config.output_mode != .Obj); + return wasm.flush_buffer.data_segments.get(.__zig_error_name_table).?; } -fn searchRelocStart(relocs: []const Wasm.Relocation, address: u32) usize { - var min: usize = 0; - var max: usize = relocs.len; - while (min < max) { - const index = (min + max) / 2; - const curr = relocs[index]; - if (curr.offset < address) { - min = index + 1; +fn convertZcuFnType( + comp: *Compilation, + cc: std.builtin.CallingConvention, + params: []const InternPool.Index, + return_type: Zcu.Type, + target: *const std.Target, + params_buffer: *std.ArrayListUnmanaged(std.wasm.Valtype), + returns_buffer: *std.ArrayListUnmanaged(std.wasm.Valtype), +) Allocator.Error!void { + params_buffer.clearRetainingCapacity(); + returns_buffer.clearRetainingCapacity(); + + const gpa = comp.gpa; + const zcu = comp.zcu.?; + + if (CodeGen.firstParamSRet(cc, return_type, zcu, target)) { + try params_buffer.append(gpa, .i32); // memory address is always a 32-bit handle + } else if (return_type.hasRuntimeBitsIgnoreComptime(zcu)) { + if (cc == .wasm_watc) { + const res_classes = abi.classifyType(return_type, zcu); + assert(res_classes[0] == .direct and res_classes[1] == .none); + const scalar_type = abi.scalarType(return_type, zcu); + try returns_buffer.append(gpa, CodeGen.typeToValtype(scalar_type, zcu, target)); } else { - max = index; + try returns_buffer.append(gpa, CodeGen.typeToValtype(return_type, zcu, target)); } + } else if (return_type.isError(zcu)) { + try returns_buffer.append(gpa, .i32); } - return min; -} -fn searchRelocEnd(relocs: []const Wasm.Relocation, address: u32) usize { - for (relocs, 0..relocs.len) |reloc, index| { - if (reloc.offset > address) { - return index; + // param types + for (params) |param_type_ip| { + const param_type = Zcu.Type.fromInterned(param_type_ip); + if (!param_type.hasRuntimeBitsIgnoreComptime(zcu)) continue; + + switch (cc) { + .wasm_watc => { + const param_classes = abi.classifyType(param_type, zcu); + if (param_classes[1] == .none) { + if (param_classes[0] == .direct) { + const scalar_type = abi.scalarType(param_type, zcu); + try params_buffer.append(gpa, CodeGen.typeToValtype(scalar_type, zcu, target)); + } else { + try params_buffer.append(gpa, CodeGen.typeToValtype(param_type, zcu, target)); + } + } else { + // i128/f128 + try params_buffer.append(gpa, .i64); + try params_buffer.append(gpa, .i64); + } + }, + else => try params_buffer.append(gpa, CodeGen.typeToValtype(param_type, zcu, target)), } } - return relocs.len; } -pub fn internString(wasm: *Wasm, bytes: []const u8) error{OutOfMemory}!String { - const gpa = wasm.base.comp.gpa; - const gop = try wasm.string_table.getOrPutContextAdapted( - gpa, - @as([]const u8, bytes), - @as(String.TableIndexAdapter, .{ .bytes = wasm.string_bytes.items }), - @as(String.TableContext, .{ .bytes = wasm.string_bytes.items }), - ); - if (gop.found_existing) return gop.key_ptr.*; - - try wasm.string_bytes.ensureUnusedCapacity(gpa, bytes.len + 1); - const new_off: String = @enumFromInt(wasm.string_bytes.items.len); +pub fn isBss(wasm: *const Wasm, optional_name: OptionalString) bool { + const s = optional_name.slice(wasm) orelse return false; + 
return mem.eql(u8, s, ".bss") or mem.startsWith(u8, s, ".bss."); +} - wasm.string_bytes.appendSliceAssumeCapacity(bytes); - wasm.string_bytes.appendAssumeCapacity(0); +/// After this function is called, there may be additional entries in +/// `Wasm.uavs_obj`, `Wasm.uavs_exe`, `Wasm.navs_obj`, and `Wasm.navs_exe` +/// which have uninitialized code and relocations. This function is +/// non-recursive, so callers must coordinate additional calls to populate +/// those entries. +fn lowerZcuData(wasm: *Wasm, pt: Zcu.PerThread, ip_index: InternPool.Index) !ZcuDataObj { + const code_start: u32 = @intCast(wasm.string_bytes.items.len); + const relocs_start: u32 = @intCast(wasm.out_relocs.len); + const uav_fixups_start: u32 = @intCast(wasm.uav_fixups.items.len); + const nav_fixups_start: u32 = @intCast(wasm.nav_fixups.items.len); + const func_table_fixups_start: u32 = @intCast(wasm.func_table_fixups.items.len); + wasm.string_bytes_lock.lock(); + + try codegen.generateSymbol(&wasm.base, pt, .unneeded, .fromInterned(ip_index), &wasm.string_bytes, .none); + + const code_len: u32 = @intCast(wasm.string_bytes.items.len - code_start); + const relocs_len: u32 = @intCast(wasm.out_relocs.len - relocs_start); + const any_fixups = + uav_fixups_start != wasm.uav_fixups.items.len or + nav_fixups_start != wasm.nav_fixups.items.len or + func_table_fixups_start != wasm.func_table_fixups.items.len; + wasm.string_bytes_lock.unlock(); + + const naive_code: DataPayload = .{ + .off = @enumFromInt(code_start), + .len = code_len, + }; - gop.key_ptr.* = new_off; + // Only nonzero init values need to take up space in the output. + // If any fixups are present, we still need the string bytes allocated since + // that is the staging area for the fixups. + const code: DataPayload = if (!any_fixups and std.mem.allEqual(u8, naive_code.slice(wasm), 0)) c: { + wasm.string_bytes.shrinkRetainingCapacity(code_start); + // Indicate empty by making off and len the same value, however, still + // transmit the data size by using the size as that value. + break :c .{ + .off = .none, + .len = naive_code.len, + }; + } else c: { + wasm.any_passive_inits = wasm.any_passive_inits or wasm.base.comp.config.import_memory; + break :c naive_code; + }; - return new_off; + return .{ + .code = code, + .relocs = .{ + .off = relocs_start, + .len = relocs_len, + }, + }; } -pub fn getExistingString(wasm: *const Wasm, bytes: []const u8) ?String { - return wasm.string_table.getKeyAdapted(bytes, @as(String.TableIndexAdapter, .{ - .bytes = wasm.string_bytes.items, - })); +fn pointerAlignment(wasm: *const Wasm) Alignment { + const target = &wasm.base.comp.root_mod.resolved_target.result; + return switch (target.cpu.arch) { + .wasm32 => .@"4", + .wasm64 => .@"8", + else => unreachable, + }; } -pub fn stringSlice(wasm: *const Wasm, index: String) [:0]const u8 { - const slice = wasm.string_bytes.items[@intFromEnum(index)..]; - return slice[0..mem.indexOfScalar(u8, slice, 0).? 
:0]; +fn pointerSize(wasm: *const Wasm) u32 { + const target = &wasm.base.comp.root_mod.resolved_target.result; + return switch (target.cpu.arch) { + .wasm32 => 4, + .wasm64 => 8, + else => unreachable, + }; } -pub fn optionalStringSlice(wasm: *const Wasm, index: OptionalString) ?[:0]const u8 { - return stringSlice(wasm, index.unwrap() orelse return null); +fn addZcuImportReserved(wasm: *Wasm, nav_index: InternPool.Nav.Index) ZcuImportIndex { + const gop = wasm.imports.getOrPutAssumeCapacity(nav_index); + gop.value_ptr.* = {}; + return @enumFromInt(gop.index); } -pub fn castToString(wasm: *const Wasm, index: u32) String { - assert(index == 0 or wasm.string_bytes.items[index - 1] == 0); - return @enumFromInt(index); +fn resolveFunctionSynthetic( + wasm: *Wasm, + import: *FunctionImport, + res: FunctionImport.Resolution, + params: []const std.wasm.Valtype, + returns: []const std.wasm.Valtype, +) link.File.FlushError!void { + import.resolution = res; + wasm.functions.putAssumeCapacity(res, {}); + // This is not only used for type-checking but also ensures the function + // type index is interned so that it is guaranteed to exist during `flush`. + const correct_func_type = try addFuncType(wasm, .{ + .params = try internValtypeList(wasm, params), + .returns = try internValtypeList(wasm, returns), + }); + if (import.type != correct_func_type) { + const diags = &wasm.base.comp.link_diags; + return import.source_location.fail(diags, "synthetic function {s} {} imported with incorrect signature {}", .{ + @tagName(res), correct_func_type.fmt(wasm), import.type.fmt(wasm), + }); + } } -fn segmentPtr(wasm: *const Wasm, index: Segment.Index) *Segment { - return &wasm.segments.items[@intFromEnum(index)]; +pub fn addFunction( + wasm: *Wasm, + resolution: FunctionImport.Resolution, + params: []const std.wasm.Valtype, + returns: []const std.wasm.Valtype, +) Allocator.Error!void { + wasm.functions.putAssumeCapacity(resolution, {}); + _ = try wasm.addFuncType(.{ + .params = try wasm.internValtypeList(params), + .returns = try wasm.internValtypeList(returns), + }); } diff --git a/src/link/Wasm/Archive.zig b/src/link/Wasm/Archive.zig index c2078fa5252e..3ecdedce8a6e 100644 --- a/src/link/Wasm/Archive.zig +++ b/src/link/Wasm/Archive.zig @@ -142,8 +142,18 @@ pub fn parse(gpa: Allocator, file_contents: []const u8) !Archive { /// From a given file offset, starts reading for a file header. /// When found, parses the object file into an `Object` and returns it. 
-pub fn parseObject(archive: Archive, wasm: *Wasm, file_contents: []const u8, path: Path) !Object { - const header = mem.bytesAsValue(Header, file_contents[0..@sizeOf(Header)]); +pub fn parseObject( + archive: Archive, + wasm: *Wasm, + file_contents: []const u8, + object_offset: u32, + path: Path, + host_name: Wasm.OptionalString, + scratch_space: *Object.ScratchSpace, + must_link: bool, + gc_sections: bool, +) !Object { + const header = mem.bytesAsValue(Header, file_contents[object_offset..][0..@sizeOf(Header)]); if (!mem.eql(u8, &header.fmag, ARFMAG)) return error.BadHeaderDelimiter; const name_or_index = try header.nameOrIndex(); @@ -157,8 +167,9 @@ pub fn parseObject(archive: Archive, wasm: *Wasm, file_contents: []const u8, pat }; const object_file_size = try header.parsedSize(); + const contents = file_contents[object_offset + @sizeOf(Header) ..][0..object_file_size]; - return Object.create(wasm, file_contents[@sizeOf(Header)..][0..object_file_size], path, object_name); + return Object.parse(wasm, contents, path, object_name, host_name, scratch_space, must_link, gc_sections); } const Archive = @This(); diff --git a/src/link/Wasm/Flush.zig b/src/link/Wasm/Flush.zig new file mode 100644 index 000000000000..f862bcf42725 --- /dev/null +++ b/src/link/Wasm/Flush.zig @@ -0,0 +1,1975 @@ +//! Temporary, dynamically allocated structures used only during flush. +//! Could be constructed fresh each time, or kept around between updates to reduce heap allocations. + +const Flush = @This(); +const Wasm = @import("../Wasm.zig"); +const Object = @import("Object.zig"); +const Zcu = @import("../../Zcu.zig"); +const Alignment = Wasm.Alignment; +const String = Wasm.String; +const Relocation = Wasm.Relocation; +const InternPool = @import("../../InternPool.zig"); + +const build_options = @import("build_options"); + +const std = @import("std"); +const Allocator = std.mem.Allocator; +const mem = std.mem; +const leb = std.leb; +const log = std.log.scoped(.link); +const assert = std.debug.assert; + +/// Ordered list of data segments that will appear in the final binary. +/// When sorted, to-be-merged segments will be made adjacent. +/// Values are virtual address. +data_segments: std.AutoArrayHashMapUnmanaged(Wasm.DataSegmentId, u32) = .empty, +/// Each time a `data_segment` offset equals zero it indicates a new group, and +/// the next element in this array will contain the total merged segment size. +/// Value is the virtual memory address of the end of the segment. +data_segment_groups: std.ArrayListUnmanaged(DataSegmentGroup) = .empty, + +binary_bytes: std.ArrayListUnmanaged(u8) = .empty, +missing_exports: std.AutoArrayHashMapUnmanaged(String, void) = .empty, +function_imports: std.AutoArrayHashMapUnmanaged(String, Wasm.FunctionImportId) = .empty, +global_imports: std.AutoArrayHashMapUnmanaged(String, Wasm.GlobalImportId) = .empty, +data_imports: std.AutoArrayHashMapUnmanaged(String, Wasm.DataImportId) = .empty, + +indirect_function_table: std.AutoArrayHashMapUnmanaged(Wasm.OutputFunctionIndex, void) = .empty, + +/// A subset of the full interned function type list created only during flush. +func_types: std.AutoArrayHashMapUnmanaged(Wasm.FunctionType.Index, void) = .empty, + +/// For debug purposes only. +memory_layout_finished: bool = false, + +/// Index into `func_types`. +pub const FuncTypeIndex = enum(u32) { + _, + + pub fn fromTypeIndex(i: Wasm.FunctionType.Index, f: *const Flush) FuncTypeIndex { + return @enumFromInt(f.func_types.getIndex(i).?); + } +}; + +/// Index into `indirect_function_table`. 
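+/// Zero-based position in the table; the ABI-visible element index is this value
+/// plus one (see `toAbi`), so that 0 stays invalid as a function pointer.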
+const IndirectFunctionTableIndex = enum(u32) { + _, + + fn fromObjectFunctionHandlingWeak(wasm: *const Wasm, index: Wasm.ObjectFunctionIndex) IndirectFunctionTableIndex { + return fromOutputFunctionIndex(&wasm.flush_buffer, .fromObjectFunctionHandlingWeak(wasm, index)); + } + + fn fromSymbolName(wasm: *const Wasm, name: String) IndirectFunctionTableIndex { + return fromOutputFunctionIndex(&wasm.flush_buffer, .fromSymbolName(wasm, name)); + } + + fn fromOutputFunctionIndex(f: *const Flush, i: Wasm.OutputFunctionIndex) IndirectFunctionTableIndex { + return @enumFromInt(f.indirect_function_table.getIndex(i).?); + } + + fn fromZcuIndirectFunctionSetIndex(i: Wasm.ZcuIndirectFunctionSetIndex) IndirectFunctionTableIndex { + // These are the same since those are added to the table first. + return @enumFromInt(@intFromEnum(i)); + } + + fn toAbi(i: IndirectFunctionTableIndex) u32 { + return @intFromEnum(i) + 1; + } +}; + +const DataSegmentGroup = struct { + first_segment: Wasm.DataSegmentId, + end_addr: u32, +}; + +pub fn clear(f: *Flush) void { + f.data_segments.clearRetainingCapacity(); + f.data_segment_groups.clearRetainingCapacity(); + f.binary_bytes.clearRetainingCapacity(); + f.indirect_function_table.clearRetainingCapacity(); + f.func_types.clearRetainingCapacity(); + f.memory_layout_finished = false; +} + +pub fn deinit(f: *Flush, gpa: Allocator) void { + f.data_segments.deinit(gpa); + f.data_segment_groups.deinit(gpa); + f.binary_bytes.deinit(gpa); + f.missing_exports.deinit(gpa); + f.function_imports.deinit(gpa); + f.global_imports.deinit(gpa); + f.data_imports.deinit(gpa); + f.indirect_function_table.deinit(gpa); + f.func_types.deinit(gpa); + f.* = undefined; +} + +pub fn finish(f: *Flush, wasm: *Wasm) !void { + const comp = wasm.base.comp; + const shared_memory = comp.config.shared_memory; + const diags = &comp.link_diags; + const gpa = comp.gpa; + const import_memory = comp.config.import_memory; + const export_memory = comp.config.export_memory; + const target = &comp.root_mod.resolved_target.result; + const is64 = switch (target.cpu.arch) { + .wasm32 => false, + .wasm64 => true, + else => unreachable, + }; + const is_obj = comp.config.output_mode == .Obj; + const allow_undefined = is_obj or wasm.import_symbols; + + const entry_name = if (wasm.entry_resolution.isNavOrUnresolved(wasm)) wasm.entry_name else .none; + + if (comp.zcu) |zcu| { + const ip: *const InternPool = &zcu.intern_pool; // No mutations allowed! + + // Detect any intrinsics that were called; they need to have dependencies on the symbols marked. + // Likewise detect `@tagName` calls so those functions can be included in the output and synthesized. 
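+        // For example, `@tagName` on an enum value lowers to a `.call_tag_name` MIR
+        // instruction; the loop below synthesizes a `__zig_tag_name_{d}` helper (keyed
+        // by the enum type's InternPool index) together with its name-table entries,
+        // while each `.call_intrinsic` instruction reuses the intrinsic's tag name as
+        // the linker symbol that compiler-rt must provide.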
+ for (wasm.mir_instructions.items(.tag), wasm.mir_instructions.items(.data)) |tag, *data| switch (tag) { + .call_intrinsic => { + const symbol_name = try wasm.internString(@tagName(data.intrinsic)); + const i: Wasm.FunctionImport.Index = @enumFromInt(wasm.object_function_imports.getIndex(symbol_name) orelse { + return diags.fail("missing compiler runtime intrinsic '{s}' (undefined linker symbol)", .{ + @tagName(data.intrinsic), + }); + }); + try wasm.markFunctionImport(symbol_name, i.value(wasm), i); + }, + .call_tag_name => { + assert(ip.indexToKey(data.ip_index) == .enum_type); + const gop = try wasm.zcu_funcs.getOrPut(gpa, data.ip_index); + if (!gop.found_existing) { + wasm.tag_name_table_ref_count += 1; + const int_tag_ty = Zcu.Type.fromInterned(data.ip_index).intTagType(zcu); + gop.value_ptr.* = .{ .tag_name = .{ + .symbol_name = try wasm.internStringFmt("__zig_tag_name_{d}", .{@intFromEnum(data.ip_index)}), + .type_index = try wasm.internFunctionType(.Unspecified, &.{int_tag_ty.ip_index}, .slice_const_u8_sentinel_0, target), + .table_index = @intCast(wasm.tag_name_offs.items.len), + } }; + try wasm.functions.put(gpa, .fromZcuFunc(wasm, @enumFromInt(gop.index)), {}); + const tag_names = ip.loadEnumType(data.ip_index).names; + for (tag_names.get(ip)) |tag_name| { + const slice = tag_name.toSlice(ip); + try wasm.tag_name_offs.append(gpa, @intCast(wasm.tag_name_bytes.items.len)); + try wasm.tag_name_bytes.appendSlice(gpa, slice[0 .. slice.len + 1]); + } + } + }, + else => continue, + }; + + { + var i = wasm.function_imports_len_prelink; + while (i < f.function_imports.entries.len) { + const symbol_name = f.function_imports.keys()[i]; + if (wasm.object_function_imports.getIndex(symbol_name)) |import_index_usize| { + const import_index: Wasm.FunctionImport.Index = @enumFromInt(import_index_usize); + try wasm.markFunctionImport(symbol_name, import_index.value(wasm), import_index); + f.function_imports.swapRemoveAt(i); + continue; + } + i += 1; + } + } + + { + var i = wasm.data_imports_len_prelink; + while (i < f.data_imports.entries.len) { + const symbol_name = f.data_imports.keys()[i]; + if (wasm.object_data_imports.getIndex(symbol_name)) |import_index_usize| { + const import_index: Wasm.ObjectDataImport.Index = @enumFromInt(import_index_usize); + try wasm.markDataImport(symbol_name, import_index.value(wasm), import_index); + f.data_imports.swapRemoveAt(i); + continue; + } + i += 1; + } + } + + if (wasm.error_name_table_ref_count > 0) { + // Ensure Zcu error name structures are populated. + const full_error_names = ip.global_error_set.getNamesFromMainThread(); + try wasm.error_name_offs.ensureTotalCapacity(gpa, full_error_names.len + 1); + if (wasm.error_name_offs.items.len == 0) { + // Dummy entry at index 0 to avoid a sub instruction at `@errorName` sites. + wasm.error_name_offs.appendAssumeCapacity(0); + } + const new_error_names = full_error_names[wasm.error_name_offs.items.len - 1 ..]; + for (new_error_names) |error_name| { + wasm.error_name_offs.appendAssumeCapacity(@intCast(wasm.error_name_bytes.items.len)); + const s: [:0]const u8 = error_name.toSlice(ip); + try wasm.error_name_bytes.appendSlice(gpa, s[0 .. 
s.len + 1]); + } + } + + for (wasm.nav_exports.keys(), wasm.nav_exports.values()) |*nav_export, export_index| { + if (ip.isFunctionType(ip.getNav(nav_export.nav_index).typeOf(ip))) { + log.debug("flush export '{s}' nav={d}", .{ nav_export.name.slice(wasm), nav_export.nav_index }); + const function_index = Wasm.FunctionIndex.fromIpNav(wasm, nav_export.nav_index).?; + const explicit = f.missing_exports.swapRemove(nav_export.name); + const is_hidden = !explicit and switch (export_index.ptr(zcu).opts.visibility) { + .hidden => true, + .default, .protected => false, + }; + if (is_hidden) { + try wasm.hidden_function_exports.put(gpa, nav_export.name, function_index); + } else { + try wasm.function_exports.put(gpa, nav_export.name, function_index); + } + _ = f.function_imports.swapRemove(nav_export.name); + + if (nav_export.name.toOptional() == entry_name) + wasm.entry_resolution = .fromIpNav(wasm, nav_export.nav_index); + } else { + // This is a data export because Zcu currently has no way to + // export wasm globals. + _ = f.missing_exports.swapRemove(nav_export.name); + _ = f.data_imports.swapRemove(nav_export.name); + if (!is_obj) { + diags.addError("unable to export data symbol '{s}'; not emitting a relocatable", .{ + nav_export.name.slice(wasm), + }); + } + } + } + + for (f.missing_exports.keys()) |exp_name| { + diags.addError("manually specified export name '{s}' undefined", .{exp_name.slice(wasm)}); + } + } + + if (entry_name.unwrap()) |name| { + if (wasm.entry_resolution == .unresolved) { + var err = try diags.addErrorWithNotes(1); + try err.addMsg("entry symbol '{s}' missing", .{name.slice(wasm)}); + err.addNote("'-fno-entry' suppresses this error", .{}); + } + } + + if (!allow_undefined) { + for (f.function_imports.keys(), f.function_imports.values()) |name, function_import_id| { + if (function_import_id.undefinedAllowed(wasm)) continue; + const src_loc = function_import_id.sourceLocation(wasm); + src_loc.addError(wasm, "undefined function: {s}", .{name.slice(wasm)}); + } + for (f.global_imports.keys(), f.global_imports.values()) |name, global_import_id| { + const src_loc = global_import_id.sourceLocation(wasm); + src_loc.addError(wasm, "undefined global: {s}", .{name.slice(wasm)}); + } + for (wasm.table_imports.keys(), wasm.table_imports.values()) |name, table_import_id| { + const src_loc = table_import_id.value(wasm).source_location; + src_loc.addError(wasm, "undefined table: {s}", .{name.slice(wasm)}); + } + for (f.data_imports.keys(), f.data_imports.values()) |name, data_import_id| { + const src_loc = data_import_id.sourceLocation(wasm); + src_loc.addError(wasm, "undefined data: {s}", .{name.slice(wasm)}); + } + } + + if (diags.hasErrors()) return error.LinkFailure; + + // Merge indirect function tables. + try f.indirect_function_table.ensureUnusedCapacity(gpa, wasm.zcu_indirect_function_set.entries.len + + wasm.object_indirect_function_import_set.entries.len + wasm.object_indirect_function_set.entries.len); + // This one goes first so the indexes can be stable for MIR lowering. 
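+    // Illustrative numbering: the first Zcu function pointer sits at table position 0,
+    // which `IndirectFunctionTableIndex.toAbi` reports as element 1, because the
+    // element section below is emitted starting at offset 1 and element 0 stays invalid.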
+ for (wasm.zcu_indirect_function_set.keys()) |nav_index| + f.indirect_function_table.putAssumeCapacity(.fromIpNav(wasm, nav_index), {}); + for (wasm.object_indirect_function_import_set.keys()) |symbol_name| + f.indirect_function_table.putAssumeCapacity(.fromSymbolName(wasm, symbol_name), {}); + for (wasm.object_indirect_function_set.keys()) |object_function_index| + f.indirect_function_table.putAssumeCapacity(.fromObjectFunction(wasm, object_function_index), {}); + + if (wasm.object_init_funcs.items.len > 0) { + // Zig has no constructors so these are only for object file inputs. + mem.sortUnstable(Wasm.InitFunc, wasm.object_init_funcs.items, {}, Wasm.InitFunc.lessThan); + try wasm.functions.put(gpa, .__wasm_call_ctors, {}); + } + + // Merge and order the data segments. Depends on garbage collection so that + // unused segments can be omitted. + try f.data_segments.ensureUnusedCapacity(gpa, wasm.data_segments.entries.len + + wasm.uavs_obj.entries.len + wasm.navs_obj.entries.len + + wasm.uavs_exe.entries.len + wasm.navs_exe.entries.len + 4); + if (is_obj) assert(wasm.uavs_exe.entries.len == 0); + if (is_obj) assert(wasm.navs_exe.entries.len == 0); + if (!is_obj) assert(wasm.uavs_obj.entries.len == 0); + if (!is_obj) assert(wasm.navs_obj.entries.len == 0); + for (0..wasm.uavs_obj.entries.len) |uavs_index| f.data_segments.putAssumeCapacityNoClobber(.pack(wasm, .{ + .uav_obj = @enumFromInt(uavs_index), + }), @as(u32, undefined)); + for (0..wasm.navs_obj.entries.len) |navs_index| f.data_segments.putAssumeCapacityNoClobber(.pack(wasm, .{ + .nav_obj = @enumFromInt(navs_index), + }), @as(u32, undefined)); + for (0..wasm.uavs_exe.entries.len) |uavs_index| f.data_segments.putAssumeCapacityNoClobber(.pack(wasm, .{ + .uav_exe = @enumFromInt(uavs_index), + }), @as(u32, undefined)); + for (0..wasm.navs_exe.entries.len) |navs_index| f.data_segments.putAssumeCapacityNoClobber(.pack(wasm, .{ + .nav_exe = @enumFromInt(navs_index), + }), @as(u32, undefined)); + if (wasm.error_name_table_ref_count > 0) { + f.data_segments.putAssumeCapacity(.__zig_error_names, @as(u32, undefined)); + f.data_segments.putAssumeCapacity(.__zig_error_name_table, @as(u32, undefined)); + } + if (wasm.tag_name_table_ref_count > 0) { + f.data_segments.putAssumeCapacity(.__zig_tag_names, @as(u32, undefined)); + f.data_segments.putAssumeCapacity(.__zig_tag_name_table, @as(u32, undefined)); + } + for (wasm.data_segments.keys()) |data_id| f.data_segments.putAssumeCapacity(data_id, @as(u32, undefined)); + + try wasm.functions.ensureUnusedCapacity(gpa, 3); + + // Passive segments are used to avoid memory being reinitialized on each + // thread's instantiation. These passive segments are initialized and + // dropped in __wasm_init_memory, which is registered as the start function + // We also initialize bss segments (using memory.fill) as part of this + // function. + if (wasm.any_passive_inits) { + try wasm.addFunction(.__wasm_init_memory, &.{}, &.{}); + } + + try wasm.tables.ensureUnusedCapacity(gpa, 1); + + if (f.indirect_function_table.entries.len > 0) { + wasm.tables.putAssumeCapacity(.__indirect_function_table, {}); + } + + // Sort order: + // 0. Segment category (tls, data, zero) + // 1. Segment name prefix + // 2. Segment alignment + // 3. Reference count, descending (optimize for LEB encoding) + // 4. Segment name suffix + // 5. 
Segment ID interpreted as an integer (for determinism) + // + // TLS segments are intended to be merged with each other, and segments + // with a common prefix name are intended to be merged with each other. + // Sorting ensures the segments intended to be merged will be adjacent. + // + // Each Zcu Nav and Cau has an independent data segment ID in this logic. + // For the purposes of sorting, they are implicitly all named ".data". + const Sort = struct { + wasm: *const Wasm, + segments: []const Wasm.DataSegmentId, + pub fn lessThan(ctx: @This(), lhs: usize, rhs: usize) bool { + const lhs_segment = ctx.segments[lhs]; + const rhs_segment = ctx.segments[rhs]; + const lhs_category = @intFromEnum(lhs_segment.category(ctx.wasm)); + const rhs_category = @intFromEnum(rhs_segment.category(ctx.wasm)); + switch (std.math.order(lhs_category, rhs_category)) { + .lt => return true, + .gt => return false, + .eq => {}, + } + const lhs_segment_name = lhs_segment.name(ctx.wasm); + const rhs_segment_name = rhs_segment.name(ctx.wasm); + const lhs_prefix, const lhs_suffix = splitSegmentName(lhs_segment_name); + const rhs_prefix, const rhs_suffix = splitSegmentName(rhs_segment_name); + switch (mem.order(u8, lhs_prefix, rhs_prefix)) { + .lt => return true, + .gt => return false, + .eq => {}, + } + const lhs_alignment = lhs_segment.alignment(ctx.wasm); + const rhs_alignment = rhs_segment.alignment(ctx.wasm); + switch (lhs_alignment.order(rhs_alignment)) { + .lt => return false, + .gt => return true, + .eq => {}, + } + switch (std.math.order(lhs_segment.refCount(ctx.wasm), rhs_segment.refCount(ctx.wasm))) { + .lt => return false, + .gt => return true, + .eq => {}, + } + switch (mem.order(u8, lhs_suffix, rhs_suffix)) { + .lt => return true, + .gt => return false, + .eq => {}, + } + return @intFromEnum(lhs_segment) < @intFromEnum(rhs_segment); + } + }; + f.data_segments.sortUnstable(@as(Sort, .{ + .wasm = wasm, + .segments = f.data_segments.keys(), + })); + + const page_size = std.wasm.page_size; // 64kb + const stack_alignment: Alignment = .@"16"; // wasm's stack alignment as specified by tool-convention + const heap_alignment: Alignment = .@"16"; // wasm's heap alignment as specified by tool-convention + const pointer_alignment: Alignment = .@"4"; + // Always place the stack at the start by default unless the user specified the global-base flag. + const place_stack_first, var memory_ptr: u64 = if (wasm.global_base) |base| .{ false, base } else .{ true, 0 }; + + var virtual_addrs: VirtualAddrs = .{ + .stack_pointer = undefined, + .heap_base = undefined, + .heap_end = undefined, + .tls_base = null, + .tls_align = .none, + .tls_size = null, + .init_memory_flag = null, + }; + + if (place_stack_first and !is_obj) { + memory_ptr = stack_alignment.forward(memory_ptr); + memory_ptr += wasm.base.stack_size; + virtual_addrs.stack_pointer = @intCast(memory_ptr); + } + + const segment_ids = f.data_segments.keys(); + const segment_vaddrs = f.data_segments.values(); + assert(f.data_segment_groups.items.len == 0); + const data_vaddr: u32 = @intCast(memory_ptr); + if (segment_ids.len > 0) { + var seen_tls: enum { before, during, after } = .before; + var category: Wasm.DataSegmentId.Category = undefined; + var first_segment: Wasm.DataSegmentId = segment_ids[0]; + for (segment_ids, segment_vaddrs, 0..) 
|segment_id, *segment_vaddr, i| { + const alignment = segment_id.alignment(wasm); + category = segment_id.category(wasm); + const start_addr = alignment.forward(memory_ptr); + + const want_new_segment = b: { + if (is_obj) break :b false; + switch (seen_tls) { + .before => switch (category) { + .tls => { + virtual_addrs.tls_base = if (shared_memory) 0 else @intCast(start_addr); + virtual_addrs.tls_align = alignment; + seen_tls = .during; + break :b f.data_segment_groups.items.len > 0; + }, + else => {}, + }, + .during => switch (category) { + .tls => { + virtual_addrs.tls_align = virtual_addrs.tls_align.maxStrict(alignment); + virtual_addrs.tls_size = @intCast(memory_ptr - virtual_addrs.tls_base.?); + break :b false; + }, + else => { + seen_tls = .after; + break :b true; + }, + }, + .after => {}, + } + break :b i >= 1 and !wantSegmentMerge(wasm, segment_ids[i - 1], segment_id, category); + }; + if (want_new_segment) { + log.debug("new segment group at 0x{x} {} {s} {}", .{ start_addr, segment_id, segment_id.name(wasm), category }); + try f.data_segment_groups.append(gpa, .{ + .end_addr = @intCast(memory_ptr), + .first_segment = first_segment, + }); + first_segment = segment_id; + } + + const size = segment_id.size(wasm); + segment_vaddr.* = @intCast(start_addr); + log.debug("0x{x} {d} {s}", .{ start_addr, @intFromEnum(segment_id), segment_id.name(wasm) }); + memory_ptr = start_addr + size; + } + if (category != .zero) try f.data_segment_groups.append(gpa, .{ + .first_segment = first_segment, + .end_addr = @intCast(memory_ptr), + }); + if (category == .tls and seen_tls == .during) { + virtual_addrs.tls_size = @intCast(memory_ptr - virtual_addrs.tls_base.?); + } + } + + if (shared_memory and wasm.any_passive_inits) { + memory_ptr = pointer_alignment.forward(memory_ptr); + virtual_addrs.init_memory_flag = @intCast(memory_ptr); + memory_ptr += 4; + } + + if (!place_stack_first and !is_obj) { + memory_ptr = stack_alignment.forward(memory_ptr); + memory_ptr += wasm.base.stack_size; + virtual_addrs.stack_pointer = @intCast(memory_ptr); + } + + memory_ptr = heap_alignment.forward(memory_ptr); + virtual_addrs.heap_base = @intCast(memory_ptr); + + if (wasm.initial_memory) |initial_memory| { + if (!mem.isAlignedGeneric(u64, initial_memory, page_size)) { + diags.addError("initial memory value {d} is not {d}-byte aligned", .{ initial_memory, page_size }); + } + if (memory_ptr > initial_memory) { + diags.addError("initial memory value {d} insufficient; minimum {d}", .{ initial_memory, memory_ptr }); + } + if (initial_memory > std.math.maxInt(u32)) { + diags.addError("initial memory value {d} exceeds 32-bit address space", .{initial_memory}); + } + if (diags.hasErrors()) return error.LinkFailure; + memory_ptr = initial_memory; + } else { + memory_ptr = mem.alignForward(u64, memory_ptr, std.wasm.page_size); + } + virtual_addrs.heap_end = @intCast(memory_ptr); + + // In case we do not import memory, but define it ourselves, set the + // minimum amount of pages on the memory section. 
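+    // `memory_ptr` is page-aligned here (either forwarded above or taken from the
+    // validated `initial_memory` value), so this division is exact; for example a
+    // layout ending at 0x30000 yields a minimum of 3 pages of 64 KiB each.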
+ wasm.memories.limits.min = @intCast(memory_ptr / page_size); + log.debug("total memory pages: {d}", .{wasm.memories.limits.min}); + + if (wasm.max_memory) |max_memory| { + if (!mem.isAlignedGeneric(u64, max_memory, page_size)) { + diags.addError("maximum memory value {d} is not {d}-byte aligned", .{ max_memory, page_size }); + } + if (memory_ptr > max_memory) { + diags.addError("maximum memory value {d} insufficient; minimum {d}", .{ max_memory, memory_ptr }); + } + if (max_memory > std.math.maxInt(u32)) { + diags.addError("maximum memory value {d} exceeds 32-bit address space", .{max_memory}); + } + if (diags.hasErrors()) return error.LinkFailure; + wasm.memories.limits.max = @intCast(max_memory / page_size); + wasm.memories.limits.flags.has_max = true; + if (shared_memory) wasm.memories.limits.flags.is_shared = true; + log.debug("maximum memory pages: {?d}", .{wasm.memories.limits.max}); + } + f.memory_layout_finished = true; + + // When we have TLS GOT entries and shared memory is enabled, we must + // perform runtime relocations or else we don't create the function. + if (shared_memory and virtual_addrs.tls_base != null) { + // This logic that checks `any_tls_relocs` is missing the part where it + // also notices threadlocal globals from Zcu code. + if (wasm.any_tls_relocs) try wasm.addFunction(.__wasm_apply_global_tls_relocs, &.{}, &.{}); + try wasm.addFunction(.__wasm_init_tls, &.{.i32}, &.{}); + try wasm.globals.ensureUnusedCapacity(gpa, 3); + wasm.globals.putAssumeCapacity(.__tls_base, {}); + wasm.globals.putAssumeCapacity(.__tls_size, {}); + wasm.globals.putAssumeCapacity(.__tls_align, {}); + } + + var section_index: u32 = 0; + // Index of the code section. Used to tell relocation table where the section lives. + var code_section_index: ?u32 = null; + // Index of the data section. Used to tell relocation table where the section lives. + var data_section_index: ?u32 = null; + + const binary_bytes = &f.binary_bytes; + assert(binary_bytes.items.len == 0); + + try binary_bytes.appendSlice(gpa, &std.wasm.magic ++ &std.wasm.version); + assert(binary_bytes.items.len == 8); + + const binary_writer = binary_bytes.writer(gpa); + + // Type section. 
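+    // Each entry below uses the standard functype encoding: the 0x60 tag, a
+    // LEB-prefixed vector of parameter valtypes, then a LEB-prefixed vector of
+    // result valtypes; e.g. `(i32, i32) -> i32` encodes as 60 02 7f 7f 01 7f.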
+ for (f.function_imports.values()) |id| { + try f.func_types.put(gpa, id.functionType(wasm), {}); + } + for (wasm.functions.keys()) |function| { + try f.func_types.put(gpa, function.typeIndex(wasm), {}); + } + if (f.func_types.entries.len != 0) { + const header_offset = try reserveVecSectionHeader(gpa, binary_bytes); + for (f.func_types.keys()) |func_type_index| { + const func_type = func_type_index.ptr(wasm); + try leb.writeUleb128(binary_writer, std.wasm.function_type); + const params = func_type.params.slice(wasm); + try leb.writeUleb128(binary_writer, @as(u32, @intCast(params.len))); + for (params) |param_ty| { + try leb.writeUleb128(binary_writer, @intFromEnum(param_ty)); + } + const returns = func_type.returns.slice(wasm); + try leb.writeUleb128(binary_writer, @as(u32, @intCast(returns.len))); + for (returns) |ret_ty| { + try leb.writeUleb128(binary_writer, @intFromEnum(ret_ty)); + } + } + replaceVecSectionHeader(binary_bytes, header_offset, .type, @intCast(f.func_types.entries.len)); + section_index += 1; + } + + if (!is_obj) { + // TODO: sort function_imports by ref count descending for optimal LEB encodings + // TODO: sort global_imports by ref count descending for optimal LEB encodings + // TODO: sort output functions by ref count descending for optimal LEB encodings + } + + // Import section + { + var total_imports: usize = 0; + const header_offset = try reserveVecSectionHeader(gpa, binary_bytes); + + for (f.function_imports.values()) |id| { + const module_name = id.moduleName(wasm).slice(wasm).?; + try leb.writeUleb128(binary_writer, @as(u32, @intCast(module_name.len))); + try binary_writer.writeAll(module_name); + + const name = id.importName(wasm).slice(wasm); + try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len))); + try binary_writer.writeAll(name); + + try binary_writer.writeByte(@intFromEnum(std.wasm.ExternalKind.function)); + const type_index: FuncTypeIndex = .fromTypeIndex(id.functionType(wasm), f); + try leb.writeUleb128(binary_writer, @intFromEnum(type_index)); + } + total_imports += f.function_imports.entries.len; + + for (wasm.table_imports.values()) |id| { + const table_import = id.value(wasm); + const module_name = table_import.module_name.slice(wasm); + try leb.writeUleb128(binary_writer, @as(u32, @intCast(module_name.len))); + try binary_writer.writeAll(module_name); + + const name = table_import.name.slice(wasm); + try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len))); + try binary_writer.writeAll(name); + + try binary_writer.writeByte(@intFromEnum(std.wasm.ExternalKind.table)); + try leb.writeUleb128(binary_writer, @intFromEnum(@as(std.wasm.RefType, table_import.flags.ref_type.to()))); + try emitLimits(gpa, binary_bytes, table_import.limits()); + } + total_imports += wasm.table_imports.entries.len; + + if (import_memory) { + const name = if (is_obj) wasm.preloaded_strings.__linear_memory else wasm.preloaded_strings.memory; + try emitMemoryImport(wasm, binary_bytes, name, &.{ + // TODO the import_memory option needs to specify from which module + .module_name = wasm.object_host_name.unwrap().?, + .limits_min = wasm.memories.limits.min, + .limits_max = wasm.memories.limits.max, + .limits_has_max = wasm.memories.limits.flags.has_max, + .limits_is_shared = wasm.memories.limits.flags.is_shared, + .source_location = .none, + }); + total_imports += 1; + } + + for (f.global_imports.values()) |id| { + const module_name = id.moduleName(wasm).slice(wasm).?; + try leb.writeUleb128(binary_writer, @as(u32, @intCast(module_name.len))); + try 
binary_writer.writeAll(module_name); + + const name = id.importName(wasm).slice(wasm); + try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len))); + try binary_writer.writeAll(name); + + try binary_writer.writeByte(@intFromEnum(std.wasm.ExternalKind.global)); + const global_type = id.globalType(wasm); + try leb.writeUleb128(binary_writer, @intFromEnum(@as(std.wasm.Valtype, global_type.valtype))); + try binary_writer.writeByte(@intFromBool(global_type.mutable)); + } + total_imports += f.global_imports.entries.len; + + if (total_imports > 0) { + replaceVecSectionHeader(binary_bytes, header_offset, .import, @intCast(total_imports)); + section_index += 1; + } else { + binary_bytes.shrinkRetainingCapacity(header_offset); + } + } + + // Function section + if (wasm.functions.count() != 0) { + const header_offset = try reserveVecSectionHeader(gpa, binary_bytes); + for (wasm.functions.keys()) |function| { + const index: FuncTypeIndex = .fromTypeIndex(function.typeIndex(wasm), f); + try leb.writeUleb128(binary_writer, @intFromEnum(index)); + } + + replaceVecSectionHeader(binary_bytes, header_offset, .function, @intCast(wasm.functions.count())); + section_index += 1; + } + + // Table section + if (wasm.tables.entries.len > 0) { + const header_offset = try reserveVecSectionHeader(gpa, binary_bytes); + + for (wasm.tables.keys()) |table| { + try leb.writeUleb128(binary_writer, @intFromEnum(@as(std.wasm.RefType, table.refType(wasm)))); + try emitLimits(gpa, binary_bytes, table.limits(wasm)); + } + + replaceVecSectionHeader(binary_bytes, header_offset, .table, @intCast(wasm.tables.entries.len)); + section_index += 1; + } + + // Memory section. wasm currently only supports 1 linear memory segment. + if (!import_memory) { + const header_offset = try reserveVecSectionHeader(gpa, binary_bytes); + try emitLimits(gpa, binary_bytes, wasm.memories.limits); + replaceVecSectionHeader(binary_bytes, header_offset, .memory, 1); + section_index += 1; + } + + // Global section. + const globals_len: u32 = @intCast(wasm.globals.entries.len); + if (globals_len > 0) { + const header_offset = try reserveVecSectionHeader(gpa, binary_bytes); + + for (wasm.globals.keys()) |global_resolution| { + switch (global_resolution.unpack(wasm)) { + .unresolved => unreachable, + .__heap_base => try appendGlobal(gpa, binary_bytes, 0, virtual_addrs.heap_base), + .__heap_end => try appendGlobal(gpa, binary_bytes, 0, virtual_addrs.heap_end), + .__stack_pointer => try appendGlobal(gpa, binary_bytes, 1, virtual_addrs.stack_pointer), + .__tls_align => try appendGlobal(gpa, binary_bytes, 0, @intCast(virtual_addrs.tls_align.toByteUnits().?)), + .__tls_base => try appendGlobal(gpa, binary_bytes, 1, virtual_addrs.tls_base.?), + .__tls_size => try appendGlobal(gpa, binary_bytes, 0, virtual_addrs.tls_size.?), + .object_global => |i| { + const global = i.ptr(wasm); + try binary_bytes.appendSlice(gpa, &.{ + @intFromEnum(@as(std.wasm.Valtype, global.flags.global_type.valtype.to())), + @intFromBool(global.flags.global_type.mutable), + }); + try emitExpr(wasm, binary_bytes, global.expr); + }, + .nav_exe => unreachable, // Zig source code currently cannot represent this. + .nav_obj => unreachable, // Zig source code currently cannot represent this. 
+ } + } + + replaceVecSectionHeader(binary_bytes, header_offset, .global, globals_len); + section_index += 1; + } + + // Export section + { + const header_offset = try reserveVecSectionHeader(gpa, binary_bytes); + var exports_len: usize = 0; + + for (wasm.function_exports.keys(), wasm.function_exports.values()) |exp_name, function_index| { + const name = exp_name.slice(wasm); + try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len))); + try binary_bytes.appendSlice(gpa, name); + try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.function)); + const func_index = Wasm.OutputFunctionIndex.fromFunctionIndex(wasm, function_index); + try leb.writeUleb128(binary_writer, @intFromEnum(func_index)); + } + exports_len += wasm.function_exports.entries.len; + + if (wasm.export_table and f.indirect_function_table.entries.len > 0) { + const name = "__indirect_function_table"; + const index: u32 = @intCast(wasm.tables.getIndex(.__indirect_function_table).?); + try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len))); + try binary_bytes.appendSlice(gpa, name); + try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.table)); + try leb.writeUleb128(binary_writer, index); + exports_len += 1; + } + + if (export_memory) { + const name = "memory"; + try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len))); + try binary_bytes.appendSlice(gpa, name); + try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.memory)); + try leb.writeUleb128(binary_writer, @as(u32, 0)); + exports_len += 1; + } + + for (wasm.global_exports.items) |exp| { + const name = exp.name.slice(wasm); + try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len))); + try binary_bytes.appendSlice(gpa, name); + try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.global)); + try leb.writeUleb128(binary_writer, @intFromEnum(exp.global_index)); + } + exports_len += wasm.global_exports.items.len; + + if (exports_len > 0) { + replaceVecSectionHeader(binary_bytes, header_offset, .@"export", @intCast(exports_len)); + section_index += 1; + } else { + binary_bytes.shrinkRetainingCapacity(header_offset); + } + } + + // start section + if (wasm.functions.getIndex(.__wasm_init_memory)) |func_index| { + try emitStartSection(gpa, binary_bytes, .fromFunctionIndex(wasm, @enumFromInt(func_index))); + } else if (Wasm.OutputFunctionIndex.fromResolution(wasm, wasm.entry_resolution)) |func_index| { + try emitStartSection(gpa, binary_bytes, func_index); + } + + // element section + if (f.indirect_function_table.entries.len > 0) { + const header_offset = try reserveVecSectionHeader(gpa, binary_bytes); + + // indirect function table elements + const table_index: u32 = @intCast(wasm.tables.getIndex(.__indirect_function_table).?); + // passive with implicit 0-index table or set table index manually + const flags: u32 = if (table_index == 0) 0x0 else 0x02; + try leb.writeUleb128(binary_writer, flags); + if (flags == 0x02) { + try leb.writeUleb128(binary_writer, table_index); + } + // We start at index 1, so unresolved function pointers are invalid + try emitInit(binary_writer, .{ .i32_const = 1 }); + if (flags == 0x02) { + try leb.writeUleb128(binary_writer, @as(u8, 0)); // represents funcref + } + try leb.writeUleb128(binary_writer, @as(u32, @intCast(f.indirect_function_table.entries.len))); + for (f.indirect_function_table.keys()) |func_index| { + try leb.writeUleb128(binary_writer, @intFromEnum(func_index)); + } + + replaceVecSectionHeader(binary_bytes, header_offset, .element, 1); + section_index 
+= 1; + } + + // When the shared-memory option is enabled, we *must* emit the 'data count' section. + if (f.data_segment_groups.items.len > 0) { + const header_offset = try reserveVecSectionHeader(gpa, binary_bytes); + replaceVecSectionHeader(binary_bytes, header_offset, .data_count, @intCast(f.data_segment_groups.items.len)); + } + + // Code section. + if (wasm.functions.count() != 0) { + const header_offset = try reserveVecSectionHeader(gpa, binary_bytes); + + for (wasm.functions.keys()) |resolution| switch (resolution.unpack(wasm)) { + .unresolved => unreachable, + .__wasm_apply_global_tls_relocs => @panic("TODO lower __wasm_apply_global_tls_relocs"), + .__wasm_call_ctors => { + const code_start = try reserveSize(gpa, binary_bytes); + defer replaceSize(binary_bytes, code_start); + try emitCallCtorsFunction(wasm, binary_bytes); + }, + .__wasm_init_memory => { + const code_start = try reserveSize(gpa, binary_bytes); + defer replaceSize(binary_bytes, code_start); + try emitInitMemoryFunction(wasm, binary_bytes, &virtual_addrs); + }, + .__wasm_init_tls => { + const code_start = try reserveSize(gpa, binary_bytes); + defer replaceSize(binary_bytes, code_start); + try emitInitTlsFunction(wasm, binary_bytes); + }, + .object_function => |i| { + const ptr = i.ptr(wasm); + const code = ptr.code.slice(wasm); + try leb.writeUleb128(binary_writer, code.len); + const code_start = binary_bytes.items.len; + try binary_bytes.appendSlice(gpa, code); + if (!is_obj) applyRelocs(binary_bytes.items[code_start..], ptr.offset, ptr.relocations(wasm), wasm); + }, + .zcu_func => |i| { + const code_start = try reserveSize(gpa, binary_bytes); + defer replaceSize(binary_bytes, code_start); + + log.debug("lowering function code for '{s}'", .{resolution.name(wasm).?}); + + const zcu = comp.zcu.?; + const ip = &zcu.intern_pool; + const ip_index = i.key(wasm).*; + switch (ip.indexToKey(ip_index)) { + .enum_type => { + try emitTagNameFunction(wasm, binary_bytes, f.data_segments.get(.__zig_tag_name_table).?, i.value(wasm).tag_name.table_index, ip_index); + }, + else => try i.value(wasm).function.lower(wasm, binary_bytes), + } + }, + }; + + replaceVecSectionHeader(binary_bytes, header_offset, .code, @intCast(wasm.functions.entries.len)); + code_section_index = section_index; + section_index += 1; + } + + if (!is_obj) { + for (wasm.uav_fixups.items) |uav_fixup| { + const ds_id: Wasm.DataSegmentId = .pack(wasm, .{ .uav_exe = uav_fixup.uavs_exe_index }); + const vaddr = f.data_segments.get(ds_id).? + uav_fixup.addend; + if (!is64) { + mem.writeInt(u32, wasm.string_bytes.items[uav_fixup.offset..][0..4], vaddr, .little); + } else { + mem.writeInt(u64, wasm.string_bytes.items[uav_fixup.offset..][0..8], vaddr, .little); + } + } + for (wasm.nav_fixups.items) |nav_fixup| { + const ds_id: Wasm.DataSegmentId = .pack(wasm, .{ .nav_exe = nav_fixup.navs_exe_index }); + const vaddr = f.data_segments.get(ds_id).? + nav_fixup.addend; + if (!is64) { + mem.writeInt(u32, wasm.string_bytes.items[nav_fixup.offset..][0..4], vaddr, .little); + } else { + mem.writeInt(u64, wasm.string_bytes.items[nav_fixup.offset..][0..8], vaddr, .little); + } + } + for (wasm.func_table_fixups.items) |fixup| { + const table_index: IndirectFunctionTableIndex = .fromZcuIndirectFunctionSetIndex(fixup.table_index); + if (!is64) { + mem.writeInt(u32, wasm.string_bytes.items[fixup.offset..][0..4], table_index.toAbi(), .little); + } else { + mem.writeInt(u64, wasm.string_bytes.items[fixup.offset..][0..8], table_index.toAbi(), .little); + } + } + } + + // Data section. 
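+    // Each group below becomes one wasm data segment: a flags byte, an `i32.const`
+    // offset init expression for active segments (passive segments are instead
+    // initialized at runtime by __wasm_init_memory), the group size, and then the
+    // member segments' bytes with zero padding inserted for alignment.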
+ if (f.data_segment_groups.items.len != 0) { + const header_offset = try reserveVecSectionHeader(gpa, binary_bytes); + + var group_index: u32 = 0; + var segment_offset: u32 = 0; + var group_start_addr: u32 = data_vaddr; + var group_end_addr = f.data_segment_groups.items[group_index].end_addr; + for (segment_ids, segment_vaddrs) |segment_id, segment_vaddr| { + if (segment_vaddr >= group_end_addr) { + try binary_bytes.appendNTimes(gpa, 0, group_end_addr - group_start_addr - segment_offset); + group_index += 1; + if (group_index >= f.data_segment_groups.items.len) { + // All remaining segments are zero. + break; + } + group_start_addr = group_end_addr; + group_end_addr = f.data_segment_groups.items[group_index].end_addr; + segment_offset = 0; + } + if (segment_offset == 0) { + const group_size = group_end_addr - group_start_addr; + log.debug("emit data section group, {d} bytes", .{group_size}); + const flags: Object.DataSegmentFlags = if (segment_id.isPassive(wasm)) .passive else .active; + try leb.writeUleb128(binary_writer, @intFromEnum(flags)); + // Passive segments are initialized at runtime. + if (flags != .passive) { + try emitInit(binary_writer, .{ .i32_const = @as(i32, @bitCast(group_start_addr)) }); + } + try leb.writeUleb128(binary_writer, group_size); + } + if (segment_id.isEmpty(wasm)) { + // It counted for virtual memory but it does not go into the binary. + continue; + } + + // Padding for alignment. + const needed_offset = segment_vaddr - group_start_addr; + try binary_bytes.appendNTimes(gpa, 0, needed_offset - segment_offset); + segment_offset = needed_offset; + + const code_start = binary_bytes.items.len; + append: { + const code = switch (segment_id.unpack(wasm)) { + .__heap_base => { + mem.writeInt(u32, try binary_bytes.addManyAsArray(gpa, 4), virtual_addrs.heap_base, .little); + break :append; + }, + .__heap_end => { + mem.writeInt(u32, try binary_bytes.addManyAsArray(gpa, 4), virtual_addrs.heap_end, .little); + break :append; + }, + .__zig_error_names => { + try binary_bytes.appendSlice(gpa, wasm.error_name_bytes.items); + break :append; + }, + .__zig_error_name_table => { + if (is_obj) @panic("TODO error name table reloc"); + const base = f.data_segments.get(.__zig_error_names).?; + if (!is64) { + try emitTagNameTable(gpa, binary_bytes, wasm.error_name_offs.items, wasm.error_name_bytes.items, base, u32); + } else { + try emitTagNameTable(gpa, binary_bytes, wasm.error_name_offs.items, wasm.error_name_bytes.items, base, u64); + } + break :append; + }, + .__zig_tag_names => { + try binary_bytes.appendSlice(gpa, wasm.tag_name_bytes.items); + break :append; + }, + .__zig_tag_name_table => { + if (is_obj) @panic("TODO tag name table reloc"); + const base = f.data_segments.get(.__zig_tag_names).?; + if (!is64) { + try emitTagNameTable(gpa, binary_bytes, wasm.tag_name_offs.items, wasm.tag_name_bytes.items, base, u32); + } else { + try emitTagNameTable(gpa, binary_bytes, wasm.tag_name_offs.items, wasm.tag_name_bytes.items, base, u64); + } + break :append; + }, + .object => |i| { + const ptr = i.ptr(wasm); + try binary_bytes.appendSlice(gpa, ptr.payload.slice(wasm)); + if (!is_obj) applyRelocs(binary_bytes.items[code_start..], ptr.offset, ptr.relocations(wasm), wasm); + break :append; + }, + inline .uav_exe, .uav_obj, .nav_exe, .nav_obj => |i| i.value(wasm).code, + }; + try binary_bytes.appendSlice(gpa, code.slice(wasm)); + } + segment_offset += @intCast(binary_bytes.items.len - code_start); + } + + replaceVecSectionHeader(binary_bytes, header_offset, .data, 
@intCast(f.data_segment_groups.items.len)); + data_section_index = section_index; + section_index += 1; + } + + if (is_obj) { + @panic("TODO emit link section for object file and emit modified relocations"); + } else if (comp.config.debug_format != .strip) { + try emitNameSection(wasm, f.data_segment_groups.items, binary_bytes); + } + + if (comp.config.debug_format != .strip) { + // The build id must be computed on the main sections only, + // so we have to do it now, before the debug sections. + switch (wasm.base.build_id) { + .none => {}, + .fast => { + var id: [16]u8 = undefined; + std.crypto.hash.sha3.TurboShake128(null).hash(binary_bytes.items, &id, .{}); + var uuid: [36]u8 = undefined; + _ = try std.fmt.bufPrint(&uuid, "{s}-{s}-{s}-{s}-{s}", .{ + std.fmt.fmtSliceHexLower(id[0..4]), + std.fmt.fmtSliceHexLower(id[4..6]), + std.fmt.fmtSliceHexLower(id[6..8]), + std.fmt.fmtSliceHexLower(id[8..10]), + std.fmt.fmtSliceHexLower(id[10..]), + }); + try emitBuildIdSection(gpa, binary_bytes, &uuid); + }, + .hexstring => |hs| { + var buffer: [32 * 2]u8 = undefined; + const str = std.fmt.bufPrint(&buffer, "{s}", .{ + std.fmt.fmtSliceHexLower(hs.toSlice()), + }) catch unreachable; + try emitBuildIdSection(gpa, binary_bytes, str); + }, + else => |mode| { + var err = try diags.addErrorWithNotes(0); + try err.addMsg("build-id '{s}' is not supported for WebAssembly", .{@tagName(mode)}); + }, + } + + var debug_bytes = std.ArrayList(u8).init(gpa); + defer debug_bytes.deinit(); + + try emitProducerSection(gpa, binary_bytes); + try emitFeaturesSection(gpa, binary_bytes, target); + } + + // Finally, write the entire binary into the file. + const file = wasm.base.file.?; + try file.pwriteAll(binary_bytes.items, 0); + try file.setEndPos(binary_bytes.items.len); +} + +const VirtualAddrs = struct { + stack_pointer: u32, + heap_base: u32, + heap_end: u32, + tls_base: ?u32, + tls_align: Alignment, + tls_size: ?u32, + init_memory_flag: ?u32, +}; + +fn emitNameSection( + wasm: *Wasm, + data_segment_groups: []const DataSegmentGroup, + binary_bytes: *std.ArrayListUnmanaged(u8), +) !void { + const f = &wasm.flush_buffer; + const comp = wasm.base.comp; + const gpa = comp.gpa; + + const header_offset = try reserveCustomSectionHeader(gpa, binary_bytes); + defer writeCustomSectionHeader(binary_bytes, header_offset); + + const name_name = "name"; + try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, name_name.len)); + try binary_bytes.appendSlice(gpa, name_name); + + { + const sub_offset = try reserveCustomSectionHeader(gpa, binary_bytes); + defer replaceHeader(binary_bytes, sub_offset, @intFromEnum(std.wasm.NameSubsection.function)); + + const total_functions: u32 = @intCast(f.function_imports.entries.len + wasm.functions.entries.len); + try leb.writeUleb128(binary_bytes.writer(gpa), total_functions); + + for (f.function_imports.keys(), 0..) |name_index, function_index| { + const name = name_index.slice(wasm); + try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(function_index))); + try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(name.len))); + try binary_bytes.appendSlice(gpa, name); + } + for (wasm.functions.keys(), f.function_imports.entries.len..) 
|resolution, function_index| { + const name = resolution.name(wasm).?; + try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(function_index))); + try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(name.len))); + try binary_bytes.appendSlice(gpa, name); + } + } + + { + const sub_offset = try reserveCustomSectionHeader(gpa, binary_bytes); + defer replaceHeader(binary_bytes, sub_offset, @intFromEnum(std.wasm.NameSubsection.global)); + + const total_globals: u32 = @intCast(f.global_imports.entries.len + wasm.globals.entries.len); + try leb.writeUleb128(binary_bytes.writer(gpa), total_globals); + + for (f.global_imports.keys(), 0..) |name_index, global_index| { + const name = name_index.slice(wasm); + try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(global_index))); + try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(name.len))); + try binary_bytes.appendSlice(gpa, name); + } + for (wasm.globals.keys(), f.global_imports.entries.len..) |resolution, global_index| { + const name = resolution.name(wasm).?; + try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(global_index))); + try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(name.len))); + try binary_bytes.appendSlice(gpa, name); + } + } + + { + const sub_offset = try reserveCustomSectionHeader(gpa, binary_bytes); + defer replaceHeader(binary_bytes, sub_offset, @intFromEnum(std.wasm.NameSubsection.data_segment)); + + const total_data_segments: u32 = @intCast(data_segment_groups.len); + try leb.writeUleb128(binary_bytes.writer(gpa), total_data_segments); + + for (data_segment_groups, 0..) |group, i| { + const name, _ = splitSegmentName(group.first_segment.name(wasm)); + try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(i))); + try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(name.len))); + try binary_bytes.appendSlice(gpa, name); + } + } +} + +fn emitFeaturesSection( + gpa: Allocator, + binary_bytes: *std.ArrayListUnmanaged(u8), + target: *const std.Target, +) Allocator.Error!void { + const feature_count = target.cpu.features.count(); + if (feature_count == 0) return; + + const header_offset = try reserveCustomSectionHeader(gpa, binary_bytes); + defer writeCustomSectionHeader(binary_bytes, header_offset); + + const writer = binary_bytes.writer(gpa); + const target_features = "target_features"; + try leb.writeUleb128(writer, @as(u32, @intCast(target_features.len))); + try writer.writeAll(target_features); + + try leb.writeUleb128(writer, @as(u32, @intCast(feature_count))); + + var safety_count = feature_count; + for (target.cpu.arch.allFeaturesList(), 0..) |*feature, i| { + if (!std.Target.wasm.featureSetHas(target.cpu.features, @enumFromInt(i))) continue; + safety_count -= 1; + + try leb.writeUleb128(writer, @as(u32, '+')); + // Depends on llvm_name for the hyphenated version that matches wasm tooling conventions. 
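+ // Each emitted entry is a single prefix byte followed by a
+ // length-prefixed feature name; '+' marks the feature as used by this
+ // module. For example, bulk-memory encodes as 0x2B 0x0B "bulk-memory".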
+ const name = feature.llvm_name.?; + try leb.writeUleb128(writer, @as(u32, @intCast(name.len))); + try writer.writeAll(name); + } + assert(safety_count == 0); +} + +fn emitBuildIdSection(gpa: Allocator, binary_bytes: *std.ArrayListUnmanaged(u8), build_id: []const u8) !void { + const header_offset = try reserveCustomSectionHeader(gpa, binary_bytes); + defer writeCustomSectionHeader(binary_bytes, header_offset); + + const writer = binary_bytes.writer(gpa); + const hdr_build_id = "build_id"; + try leb.writeUleb128(writer, @as(u32, @intCast(hdr_build_id.len))); + try writer.writeAll(hdr_build_id); + + try leb.writeUleb128(writer, @as(u32, 1)); + try leb.writeUleb128(writer, @as(u32, @intCast(build_id.len))); + try writer.writeAll(build_id); +} + +fn emitProducerSection(gpa: Allocator, binary_bytes: *std.ArrayListUnmanaged(u8)) !void { + const header_offset = try reserveCustomSectionHeader(gpa, binary_bytes); + defer writeCustomSectionHeader(binary_bytes, header_offset); + + const writer = binary_bytes.writer(gpa); + const producers = "producers"; + try leb.writeUleb128(writer, @as(u32, @intCast(producers.len))); + try writer.writeAll(producers); + + try leb.writeUleb128(writer, @as(u32, 2)); // 2 fields: Language + processed-by + + // language field + { + const language = "language"; + try leb.writeUleb128(writer, @as(u32, @intCast(language.len))); + try writer.writeAll(language); + + // field_value_count (TODO: Parse object files for producer sections to detect their language) + try leb.writeUleb128(writer, @as(u32, 1)); + + // versioned name + { + try leb.writeUleb128(writer, @as(u32, 3)); // len of "Zig" + try writer.writeAll("Zig"); + + try leb.writeUleb128(writer, @as(u32, @intCast(build_options.version.len))); + try writer.writeAll(build_options.version); + } + } + + // processed-by field + { + const processed_by = "processed-by"; + try leb.writeUleb128(writer, @as(u32, @intCast(processed_by.len))); + try writer.writeAll(processed_by); + + // field_value_count (TODO: Parse object files for producer sections to detect other used tools) + try leb.writeUleb128(writer, @as(u32, 1)); + + // versioned name + { + try leb.writeUleb128(writer, @as(u32, 3)); // len of "Zig" + try writer.writeAll("Zig"); + + try leb.writeUleb128(writer, @as(u32, @intCast(build_options.version.len))); + try writer.writeAll(build_options.version); + } + } +} + +fn splitSegmentName(name: []const u8) struct { []const u8, []const u8 } { + const start = @intFromBool(name.len >= 1 and name[0] == '.'); + const pivot = mem.indexOfScalarPos(u8, name, start, '.') orelse name.len; + return .{ name[0..pivot], name[pivot..] 
}; +} + +test splitSegmentName { + { + const a, const b = splitSegmentName(".data"); + try std.testing.expectEqualStrings(".data", a); + try std.testing.expectEqualStrings("", b); + } +} + +fn wantSegmentMerge( + wasm: *const Wasm, + a_id: Wasm.DataSegmentId, + b_id: Wasm.DataSegmentId, + b_category: Wasm.DataSegmentId.Category, +) bool { + const a_category = a_id.category(wasm); + if (a_category != b_category) return false; + if (a_category == .tls or b_category == .tls) return false; + if (a_id.isPassive(wasm) != b_id.isPassive(wasm)) return false; + if (b_category == .zero) return true; + const a_name = a_id.name(wasm); + const b_name = b_id.name(wasm); + const a_prefix, _ = splitSegmentName(a_name); + const b_prefix, _ = splitSegmentName(b_name); + return mem.eql(u8, a_prefix, b_prefix); +} + +/// section id + fixed leb contents size + fixed leb vector length +const section_header_reserve_size = 1 + 5 + 5; +const section_header_size = 5 + 1; + +fn reserveVecSectionHeader(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!u32 { + try bytes.appendNTimes(gpa, 0, section_header_reserve_size); + return @intCast(bytes.items.len - section_header_reserve_size); +} + +fn replaceVecSectionHeader( + bytes: *std.ArrayListUnmanaged(u8), + offset: u32, + section: std.wasm.Section, + n_items: u32, +) void { + const size: u32 = @intCast(bytes.items.len - offset - section_header_reserve_size + uleb128size(n_items)); + var buf: [section_header_reserve_size]u8 = undefined; + var fbw = std.io.fixedBufferStream(&buf); + const w = fbw.writer(); + w.writeByte(@intFromEnum(section)) catch unreachable; + leb.writeUleb128(w, size) catch unreachable; + leb.writeUleb128(w, n_items) catch unreachable; + bytes.replaceRangeAssumeCapacity(offset, section_header_reserve_size, fbw.getWritten()); +} + +fn reserveCustomSectionHeader(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!u32 { + try bytes.appendNTimes(gpa, 0, section_header_size); + return @intCast(bytes.items.len - section_header_size); +} + +fn writeCustomSectionHeader(bytes: *std.ArrayListUnmanaged(u8), offset: u32) void { + return replaceHeader(bytes, offset, 0); // 0 = 'custom' section +} + +fn replaceHeader(bytes: *std.ArrayListUnmanaged(u8), offset: u32, tag: u8) void { + const size: u32 = @intCast(bytes.items.len - offset - section_header_size); + var buf: [section_header_size]u8 = undefined; + var fbw = std.io.fixedBufferStream(&buf); + const w = fbw.writer(); + w.writeByte(tag) catch unreachable; + leb.writeUleb128(w, size) catch unreachable; + bytes.replaceRangeAssumeCapacity(offset, section_header_size, fbw.getWritten()); +} + +const max_size_encoding = 5; + +fn reserveSize(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!u32 { + try bytes.appendNTimes(gpa, 0, max_size_encoding); + return @intCast(bytes.items.len - max_size_encoding); +} + +fn replaceSize(bytes: *std.ArrayListUnmanaged(u8), offset: u32) void { + const size: u32 = @intCast(bytes.items.len - offset - max_size_encoding); + var buf: [max_size_encoding]u8 = undefined; + var fbw = std.io.fixedBufferStream(&buf); + leb.writeUleb128(fbw.writer(), size) catch unreachable; + bytes.replaceRangeAssumeCapacity(offset, max_size_encoding, fbw.getWritten()); +} + +fn emitLimits( + gpa: Allocator, + binary_bytes: *std.ArrayListUnmanaged(u8), + limits: std.wasm.Limits, +) Allocator.Error!void { + try binary_bytes.append(gpa, @bitCast(limits.flags)); + try leb.writeUleb128(binary_bytes.writer(gpa), limits.min); + if (limits.flags.has_max) try 
leb.writeUleb128(binary_bytes.writer(gpa), limits.max); +} + +fn emitMemoryImport( + wasm: *Wasm, + binary_bytes: *std.ArrayListUnmanaged(u8), + name_index: String, + memory_import: *const Wasm.MemoryImport, +) Allocator.Error!void { + const gpa = wasm.base.comp.gpa; + const module_name = memory_import.module_name.slice(wasm); + try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(module_name.len))); + try binary_bytes.appendSlice(gpa, module_name); + + const name = name_index.slice(wasm); + try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(name.len))); + try binary_bytes.appendSlice(gpa, name); + + try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.memory)); + try emitLimits(gpa, binary_bytes, memory_import.limits()); +} + +pub fn emitInit(writer: anytype, init_expr: std.wasm.InitExpression) !void { + switch (init_expr) { + .i32_const => |val| { + try writer.writeByte(@intFromEnum(std.wasm.Opcode.i32_const)); + try leb.writeIleb128(writer, val); + }, + .i64_const => |val| { + try writer.writeByte(@intFromEnum(std.wasm.Opcode.i64_const)); + try leb.writeIleb128(writer, val); + }, + .f32_const => |val| { + try writer.writeByte(@intFromEnum(std.wasm.Opcode.f32_const)); + try writer.writeInt(u32, @bitCast(val), .little); + }, + .f64_const => |val| { + try writer.writeByte(@intFromEnum(std.wasm.Opcode.f64_const)); + try writer.writeInt(u64, @bitCast(val), .little); + }, + .global_get => |val| { + try writer.writeByte(@intFromEnum(std.wasm.Opcode.global_get)); + try leb.writeUleb128(writer, val); + }, + } + try writer.writeByte(@intFromEnum(std.wasm.Opcode.end)); +} + +pub fn emitExpr(wasm: *const Wasm, binary_bytes: *std.ArrayListUnmanaged(u8), expr: Wasm.Expr) Allocator.Error!void { + const gpa = wasm.base.comp.gpa; + const slice = expr.slice(wasm); + try binary_bytes.appendSlice(gpa, slice[0 .. 
slice.len + 1]); // +1 to include end opcode +} + +fn emitSegmentInfo(wasm: *Wasm, binary_bytes: *std.ArrayList(u8)) !void { + const gpa = wasm.base.comp.gpa; + const writer = binary_bytes.writer(gpa); + try leb.writeUleb128(writer, @intFromEnum(Wasm.SubsectionType.segment_info)); + const segment_offset = binary_bytes.items.len; + + try leb.writeUleb128(writer, @as(u32, @intCast(wasm.segment_info.count()))); + for (wasm.segment_info.values()) |segment_info| { + log.debug("Emit segment: {s} align({d}) flags({b})", .{ + segment_info.name, + segment_info.alignment, + segment_info.flags, + }); + try leb.writeUleb128(writer, @as(u32, @intCast(segment_info.name.len))); + try writer.writeAll(segment_info.name); + try leb.writeUleb128(writer, segment_info.alignment.toLog2Units()); + try leb.writeUleb128(writer, segment_info.flags); + } + + var buf: [5]u8 = undefined; + leb.writeUnsignedFixed(5, &buf, @as(u32, @intCast(binary_bytes.items.len - segment_offset))); + try binary_bytes.insertSlice(segment_offset, &buf); +} + +fn uleb128size(x: u32) u32 { + var value = x; + var size: u32 = 0; + while (value != 0) : (size += 1) value >>= 7; + return size; +} + +fn emitTagNameTable( + gpa: Allocator, + code: *std.ArrayListUnmanaged(u8), + tag_name_offs: []const u32, + tag_name_bytes: []const u8, + base: u32, + comptime Int: type, +) error{OutOfMemory}!void { + const ptr_size_bytes = @divExact(@bitSizeOf(Int), 8); + try code.ensureUnusedCapacity(gpa, ptr_size_bytes * 2 * tag_name_offs.len); + for (tag_name_offs) |off| { + const name_len: u32 = @intCast(mem.indexOfScalar(u8, tag_name_bytes[off..], 0).?); + mem.writeInt(Int, code.addManyAsArrayAssumeCapacity(ptr_size_bytes), base + off, .little); + mem.writeInt(Int, code.addManyAsArrayAssumeCapacity(ptr_size_bytes), name_len, .little); + } +} + +fn applyRelocs(code: []u8, code_offset: u32, relocs: Wasm.ObjectRelocation.IterableSlice, wasm: *const Wasm) void { + for ( + relocs.slice.tags(wasm), + relocs.slice.pointees(wasm), + relocs.slice.offsets(wasm), + relocs.slice.addends(wasm), + ) |tag, pointee, offset, *addend| { + if (offset >= relocs.end) break; + const sliced_code = code[offset - code_offset ..]; + switch (tag) { + .function_index_i32 => reloc_u32_function(sliced_code, .fromObjectFunctionHandlingWeak(wasm, pointee.function)), + .function_index_leb => reloc_leb_function(sliced_code, .fromObjectFunctionHandlingWeak(wasm, pointee.function)), + .function_offset_i32 => @panic("TODO this value is not known yet"), + .function_offset_i64 => @panic("TODO this value is not known yet"), + .table_index_i32 => reloc_u32_table_index(sliced_code, .fromObjectFunctionHandlingWeak(wasm, pointee.function)), + .table_index_i64 => reloc_u64_table_index(sliced_code, .fromObjectFunctionHandlingWeak(wasm, pointee.function)), + .table_index_rel_sleb => @panic("TODO what does this reloc tag mean?"), + .table_index_rel_sleb64 => @panic("TODO what does this reloc tag mean?"), + .table_index_sleb => reloc_sleb_table_index(sliced_code, .fromObjectFunctionHandlingWeak(wasm, pointee.function)), + .table_index_sleb64 => reloc_sleb64_table_index(sliced_code, .fromObjectFunctionHandlingWeak(wasm, pointee.function)), + + .function_import_index_i32 => reloc_u32_function(sliced_code, .fromSymbolName(wasm, pointee.symbol_name)), + .function_import_index_leb => reloc_leb_function(sliced_code, .fromSymbolName(wasm, pointee.symbol_name)), + .function_import_offset_i32 => @panic("TODO this value is not known yet"), + .function_import_offset_i64 => @panic("TODO this value is not known yet"), 
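+ // The *_import_* variants resolve through the imported symbol's name,
+ // while the plain variants use the originating object's own index space
+ // (with weak-symbol handling); either way the result patches a
+ // fixed-width LEB or integer slot in the already-copied code.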
+ .table_import_index_i32 => reloc_u32_table_index(sliced_code, .fromSymbolName(wasm, pointee.symbol_name)), + .table_import_index_i64 => reloc_u64_table_index(sliced_code, .fromSymbolName(wasm, pointee.symbol_name)), + .table_import_index_rel_sleb => @panic("TODO what does this reloc tag mean?"), + .table_import_index_rel_sleb64 => @panic("TODO what does this reloc tag mean?"), + .table_import_index_sleb => reloc_sleb_table_index(sliced_code, .fromSymbolName(wasm, pointee.symbol_name)), + .table_import_index_sleb64 => reloc_sleb64_table_index(sliced_code, .fromSymbolName(wasm, pointee.symbol_name)), + + .global_index_i32 => reloc_u32_global(sliced_code, .fromObjectGlobalHandlingWeak(wasm, pointee.global)), + .global_index_leb => reloc_leb_global(sliced_code, .fromObjectGlobalHandlingWeak(wasm, pointee.global)), + + .global_import_index_i32 => reloc_u32_global(sliced_code, .fromSymbolName(wasm, pointee.symbol_name)), + .global_import_index_leb => reloc_leb_global(sliced_code, .fromSymbolName(wasm, pointee.symbol_name)), + + .memory_addr_i32 => reloc_u32_addr(sliced_code, .fromObjectData(wasm, pointee.data, addend.*)), + .memory_addr_i64 => reloc_u64_addr(sliced_code, .fromObjectData(wasm, pointee.data, addend.*)), + .memory_addr_leb => reloc_leb_addr(sliced_code, .fromObjectData(wasm, pointee.data, addend.*)), + .memory_addr_leb64 => reloc_leb64_addr(sliced_code, .fromObjectData(wasm, pointee.data, addend.*)), + .memory_addr_locrel_i32 => @panic("TODO implement relocation memory_addr_locrel_i32"), + .memory_addr_rel_sleb => @panic("TODO implement relocation memory_addr_rel_sleb"), + .memory_addr_rel_sleb64 => @panic("TODO implement relocation memory_addr_rel_sleb64"), + .memory_addr_sleb => reloc_sleb_addr(sliced_code, .fromObjectData(wasm, pointee.data, addend.*)), + .memory_addr_sleb64 => reloc_sleb64_addr(sliced_code, .fromObjectData(wasm, pointee.data, addend.*)), + .memory_addr_tls_sleb => reloc_sleb_addr(sliced_code, .fromObjectData(wasm, pointee.data, addend.*)), + .memory_addr_tls_sleb64 => reloc_sleb64_addr(sliced_code, .fromObjectData(wasm, pointee.data, addend.*)), + + .memory_addr_import_i32 => reloc_u32_addr(sliced_code, .fromSymbolName(wasm, pointee.symbol_name, addend.*)), + .memory_addr_import_i64 => reloc_u64_addr(sliced_code, .fromSymbolName(wasm, pointee.symbol_name, addend.*)), + .memory_addr_import_leb => reloc_leb_addr(sliced_code, .fromSymbolName(wasm, pointee.symbol_name, addend.*)), + .memory_addr_import_leb64 => reloc_leb64_addr(sliced_code, .fromSymbolName(wasm, pointee.symbol_name, addend.*)), + .memory_addr_import_locrel_i32 => @panic("TODO implement relocation memory_addr_import_locrel_i32"), + .memory_addr_import_rel_sleb => @panic("TODO implement relocation memory_addr_import_rel_sleb"), + .memory_addr_import_rel_sleb64 => @panic("TODO implement memory_addr_import_rel_sleb64"), + .memory_addr_import_sleb => reloc_sleb_addr(sliced_code, .fromSymbolName(wasm, pointee.symbol_name, addend.*)), + .memory_addr_import_sleb64 => reloc_sleb64_addr(sliced_code, .fromSymbolName(wasm, pointee.symbol_name, addend.*)), + .memory_addr_import_tls_sleb => @panic("TODO"), + .memory_addr_import_tls_sleb64 => @panic("TODO"), + + .section_offset_i32 => @panic("TODO this value is not known yet"), + + .table_number_leb => reloc_leb_table(sliced_code, .fromObjectTable(wasm, pointee.table)), + .table_import_number_leb => reloc_leb_table(sliced_code, .fromSymbolName(wasm, pointee.symbol_name)), + + .type_index_leb => reloc_leb_type(sliced_code, .fromTypeIndex(pointee.type_index, 
&wasm.flush_buffer)), + } + } +} + +fn reloc_u32_table_index(code: []u8, i: IndirectFunctionTableIndex) void { + mem.writeInt(u32, code[0..4], i.toAbi(), .little); +} + +fn reloc_u64_table_index(code: []u8, i: IndirectFunctionTableIndex) void { + mem.writeInt(u64, code[0..8], i.toAbi(), .little); +} + +fn reloc_sleb_table_index(code: []u8, i: IndirectFunctionTableIndex) void { + leb.writeSignedFixed(5, code[0..5], i.toAbi()); +} + +fn reloc_sleb64_table_index(code: []u8, i: IndirectFunctionTableIndex) void { + leb.writeSignedFixed(11, code[0..11], i.toAbi()); +} + +fn reloc_u32_function(code: []u8, function: Wasm.OutputFunctionIndex) void { + mem.writeInt(u32, code[0..4], @intFromEnum(function), .little); +} + +fn reloc_leb_function(code: []u8, function: Wasm.OutputFunctionIndex) void { + leb.writeUnsignedFixed(5, code[0..5], @intFromEnum(function)); +} + +fn reloc_u32_global(code: []u8, global: Wasm.GlobalIndex) void { + mem.writeInt(u32, code[0..4], @intFromEnum(global), .little); +} + +fn reloc_leb_global(code: []u8, global: Wasm.GlobalIndex) void { + leb.writeUnsignedFixed(5, code[0..5], @intFromEnum(global)); +} + +const RelocAddr = struct { + addr: u32, + + fn fromObjectData(wasm: *const Wasm, i: Wasm.ObjectData.Index, addend: i32) RelocAddr { + return fromDataLoc(&wasm.flush_buffer, .fromObjectDataIndex(wasm, i), addend); + } + + fn fromSymbolName(wasm: *const Wasm, name: String, addend: i32) RelocAddr { + const flush = &wasm.flush_buffer; + if (wasm.object_data_imports.getPtr(name)) |import| { + return fromDataLoc(flush, import.resolution.dataLoc(wasm), addend); + } else if (wasm.data_imports.get(name)) |id| { + return fromDataLoc(flush, .fromDataImportId(wasm, id), addend); + } else { + unreachable; + } + } + + fn fromDataLoc(flush: *const Flush, data_loc: Wasm.DataLoc, addend: i32) RelocAddr { + const base_addr: i64 = flush.data_segments.get(data_loc.segment).?; + return .{ .addr = @intCast(base_addr + data_loc.offset + addend) }; + } +}; + +fn reloc_u32_addr(code: []u8, ra: RelocAddr) void { + mem.writeInt(u32, code[0..4], ra.addr, .little); +} + +fn reloc_u64_addr(code: []u8, ra: RelocAddr) void { + mem.writeInt(u64, code[0..8], ra.addr, .little); +} + +fn reloc_leb_addr(code: []u8, ra: RelocAddr) void { + leb.writeUnsignedFixed(5, code[0..5], ra.addr); +} + +fn reloc_leb64_addr(code: []u8, ra: RelocAddr) void { + leb.writeUnsignedFixed(11, code[0..11], ra.addr); +} + +fn reloc_sleb_addr(code: []u8, ra: RelocAddr) void { + leb.writeSignedFixed(5, code[0..5], ra.addr); +} + +fn reloc_sleb64_addr(code: []u8, ra: RelocAddr) void { + leb.writeSignedFixed(11, code[0..11], ra.addr); +} + +fn reloc_leb_table(code: []u8, table: Wasm.TableIndex) void { + leb.writeUnsignedFixed(5, code[0..5], @intFromEnum(table)); +} + +fn reloc_leb_type(code: []u8, index: FuncTypeIndex) void { + leb.writeUnsignedFixed(5, code[0..5], @intFromEnum(index)); +} + +fn emitCallCtorsFunction(wasm: *const Wasm, binary_bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!void { + const gpa = wasm.base.comp.gpa; + + try binary_bytes.ensureUnusedCapacity(gpa, 5 + 1); + appendReservedUleb32(binary_bytes, 0); // no locals + + for (wasm.object_init_funcs.items) |init_func| { + const func = init_func.function_index.ptr(wasm); + if (!func.object_index.ptr(wasm).is_included) continue; + const ty = func.type_index.ptr(wasm); + const n_returns = ty.returns.slice(wasm).len; + + // Call function by its function index + try binary_bytes.ensureUnusedCapacity(gpa, 1 + 5 + n_returns + 1); + const call_index: 
Wasm.OutputFunctionIndex = .fromObjectFunction(wasm, init_func.function_index); + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call)); + appendReservedUleb32(binary_bytes, @intFromEnum(call_index)); + + // drop all returned values from the stack as __wasm_call_ctors has no return value + binary_bytes.appendNTimesAssumeCapacity(@intFromEnum(std.wasm.Opcode.drop), n_returns); + } + + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end)); // end function body +} + +fn emitInitMemoryFunction( + wasm: *const Wasm, + binary_bytes: *std.ArrayListUnmanaged(u8), + virtual_addrs: *const VirtualAddrs, +) Allocator.Error!void { + const comp = wasm.base.comp; + const gpa = comp.gpa; + const shared_memory = comp.config.shared_memory; + + // Passive segments are used to avoid memory being reinitialized on each + // thread's instantiation. These passive segments are initialized and + // dropped in __wasm_init_memory, which is registered as the start function + // We also initialize bss segments (using memory.fill) as part of this + // function. + assert(wasm.any_passive_inits); + + try binary_bytes.ensureUnusedCapacity(gpa, 5 + 1); + appendReservedUleb32(binary_bytes, 0); // no locals + + if (virtual_addrs.init_memory_flag) |flag_address| { + assert(shared_memory); + try binary_bytes.ensureUnusedCapacity(gpa, 2 * 3 + 6 * 3 + 1 + 6 * 3 + 1 + 5 * 4 + 1 + 1); + // destination blocks + // based on values we jump to corresponding label + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.block)); // $drop + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.BlockType.empty)); + + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.block)); // $wait + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.BlockType.empty)); + + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.block)); // $init + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.BlockType.empty)); + + // atomically check + appendReservedI32Const(binary_bytes, flag_address); + appendReservedI32Const(binary_bytes, 0); + appendReservedI32Const(binary_bytes, 1); + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix)); + appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.AtomicsOpcode.i32_atomic_rmw_cmpxchg)); + appendReservedUleb32(binary_bytes, 2); // alignment + appendReservedUleb32(binary_bytes, 0); // offset + + // based on the value from the atomic check, jump to the label. + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br_table)); + appendReservedUleb32(binary_bytes, 2); // length of the table (we have 3 blocks but because of the mandatory default the length is 2). + appendReservedUleb32(binary_bytes, 0); // $init + appendReservedUleb32(binary_bytes, 1); // $wait + appendReservedUleb32(binary_bytes, 2); // $drop + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end)); + } + + const segment_groups = wasm.flush_buffer.data_segment_groups.items; + var prev_end: u32 = 0; + for (segment_groups, 0..) |group, segment_index| { + defer prev_end = group.end_addr; + const segment = group.first_segment; + if (!segment.isPassive(wasm)) continue; + + const start_addr: u32 = @intCast(segment.alignment(wasm).forward(prev_end)); + const segment_size: u32 = group.end_addr - start_addr; + + try binary_bytes.ensureUnusedCapacity(gpa, 6 + 6 + 1 + 5 + 6 + 6 + 1 + 6 * 2 + 1 + 1); + + // For passive BSS segments we can simply issue a memory.fill(0). For + // non-BSS segments we do a memory.init. 
Both instructions take as + // their first argument the destination address. + appendReservedI32Const(binary_bytes, start_addr); + + if (shared_memory and segment.isTls(wasm)) { + // When we initialize the TLS segment we also set the `__tls_base` + // global. This allows the runtime to use this static copy of the + // TLS data for the first/main thread. + appendReservedI32Const(binary_bytes, start_addr); + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_set)); + appendReservedUleb32(binary_bytes, virtual_addrs.tls_base.?); + } + + appendReservedI32Const(binary_bytes, 0); + appendReservedI32Const(binary_bytes, segment_size); + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.misc_prefix)); + if (segment.isBss(wasm)) { + // fill bss segment with zeroes + appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.MiscOpcode.memory_fill)); + } else { + // initialize the segment + appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.MiscOpcode.memory_init)); + appendReservedUleb32(binary_bytes, @intCast(segment_index)); + } + binary_bytes.appendAssumeCapacity(0); // memory index immediate + } + + if (virtual_addrs.init_memory_flag) |flag_address| { + assert(shared_memory); + try binary_bytes.ensureUnusedCapacity(gpa, 6 + 6 + 1 + 3 * 5 + 6 + 1 + 5 + 1 + 3 * 5 + 1 + 1 + 5 + 1 + 6 * 2 + 1 + 5 + 1 + 3 * 5 + 1 + 1 + 1); + // we set the init memory flag to value '2' + appendReservedI32Const(binary_bytes, flag_address); + appendReservedI32Const(binary_bytes, 2); + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix)); + appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.AtomicsOpcode.i32_atomic_store)); + appendReservedUleb32(binary_bytes, @as(u32, 2)); // alignment + appendReservedUleb32(binary_bytes, @as(u32, 0)); // offset + + // notify any waiters for segment initialization completion + appendReservedI32Const(binary_bytes, flag_address); + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const)); + leb.writeIleb128(binary_bytes.fixedWriter(), @as(i32, -1)) catch unreachable; // number of waiters + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix)); + appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.AtomicsOpcode.memory_atomic_notify)); + appendReservedUleb32(binary_bytes, @as(u32, 2)); // alignment + appendReservedUleb32(binary_bytes, @as(u32, 0)); // offset + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.drop)); + + // branch and drop segments + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br)); + appendReservedUleb32(binary_bytes, @as(u32, 1)); + + // wait for thread to initialize memory segments + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end)); // end $wait + appendReservedI32Const(binary_bytes, flag_address); + appendReservedI32Const(binary_bytes, 1); // expected flag value + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_const)); + leb.writeIleb128(binary_bytes.fixedWriter(), @as(i64, -1)) catch unreachable; // timeout + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix)); + appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.AtomicsOpcode.memory_atomic_wait32)); + appendReservedUleb32(binary_bytes, @as(u32, 2)); // alignment + appendReservedUleb32(binary_bytes, @as(u32, 0)); // offset + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.drop)); + + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end)); // end $drop + } + + for 
(segment_groups, 0..) |group, segment_index| { + const segment = group.first_segment; + if (!segment.isPassive(wasm)) continue; + if (segment.isBss(wasm)) continue; + // The TLS region should not be dropped since its is needed + // during the initialization of each thread (__wasm_init_tls). + if (shared_memory and segment.isTls(wasm)) continue; + + try binary_bytes.ensureUnusedCapacity(gpa, 1 + 5 + 5 + 1); + + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.misc_prefix)); + appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.MiscOpcode.data_drop)); + appendReservedUleb32(binary_bytes, @intCast(segment_index)); + } + + // End of the function body + binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end)); +} + +fn emitInitTlsFunction(wasm: *const Wasm, bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!void { + const comp = wasm.base.comp; + const gpa = comp.gpa; + + assert(comp.config.shared_memory); + + try bytes.ensureUnusedCapacity(gpa, 5 * 10 + 8); + + appendReservedUleb32(bytes, 0); // no locals + + // If there's a TLS segment, initialize it during runtime using the bulk-memory feature + // TLS segment is always the first one due to how we sort the data segments. + const data_segments = wasm.flush_buffer.data_segments.keys(); + if (data_segments.len > 0 and data_segments[0].isTls(wasm)) { + const start_addr = wasm.flush_buffer.data_segments.values()[0]; + const end_addr = wasm.flush_buffer.data_segment_groups.items[0].end_addr; + const group_size = end_addr - start_addr; + const data_segment_index = 0; + + const param_local: u32 = 0; + + bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get)); + appendReservedUleb32(bytes, param_local); + + const tls_base_global_index: Wasm.GlobalIndex = @enumFromInt(wasm.globals.getIndex(.__tls_base).?); + bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_set)); + appendReservedUleb32(bytes, @intFromEnum(tls_base_global_index)); + + // load stack values for the bulk-memory operation + { + bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get)); + appendReservedUleb32(bytes, param_local); + + bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const)); + appendReservedUleb32(bytes, 0); //segment offset + + bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const)); + appendReservedUleb32(bytes, group_size); //segment offset + } + + // perform the bulk-memory operation to initialize the data segment + bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.misc_prefix)); + appendReservedUleb32(bytes, @intFromEnum(std.wasm.MiscOpcode.memory_init)); + // segment immediate + appendReservedUleb32(bytes, data_segment_index); + // memory index immediate (always 0) + appendReservedUleb32(bytes, 0); + } + + // If we have to perform any TLS relocations, call the corresponding function + // which performs all runtime TLS relocations. This is a synthetic function, + // generated by the linker. 
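+ // The index into `wasm.functions` is converted to an output function
+ // index, which accounts for imported functions coming first in the final
+ // function index space.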
+ if (wasm.functions.getIndex(.__wasm_apply_global_tls_relocs)) |function_index| { + const output_function_index: Wasm.OutputFunctionIndex = .fromFunctionIndex(wasm, @enumFromInt(function_index)); + bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call)); + appendReservedUleb32(bytes, @intFromEnum(output_function_index)); + } + + bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end)); +} + +fn emitStartSection(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8), i: Wasm.OutputFunctionIndex) !void { + const header_offset = try reserveVecSectionHeader(gpa, bytes); + replaceVecSectionHeader(bytes, header_offset, .start, @intFromEnum(i)); +} + +fn emitTagNameFunction( + wasm: *Wasm, + code: *std.ArrayListUnmanaged(u8), + table_base_addr: u32, + table_index: u32, + enum_type_ip: InternPool.Index, +) !void { + const comp = wasm.base.comp; + const gpa = comp.gpa; + const diags = &comp.link_diags; + const zcu = comp.zcu.?; + const ip = &zcu.intern_pool; + const enum_type = ip.loadEnumType(enum_type_ip); + const tag_values = enum_type.values.get(ip); + + try code.ensureUnusedCapacity(gpa, 7 * 5 + 6 + 1 * 6); + appendReservedUleb32(code, 0); // no locals + + const slice_abi_size = 8; + const encoded_alignment = @ctz(@as(u32, 4)); + if (tag_values.len == 0) { + // Then it's auto-numbered and therefore a direct table lookup. + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get)); + appendReservedUleb32(code, 0); + + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get)); + appendReservedUleb32(code, 1); + + appendReservedI32Const(code, slice_abi_size); + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_mul)); + + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_load)); + appendReservedUleb32(code, encoded_alignment); + appendReservedUleb32(code, table_base_addr + table_index * 8); + + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_store)); + appendReservedUleb32(code, encoded_alignment); + appendReservedUleb32(code, 0); + } else { + const int_info = Zcu.Type.intInfo(.fromInterned(enum_type.tag_ty), zcu); + const outer_block_type: std.wasm.BlockType = switch (int_info.bits) { + 0...32 => .i32, + 33...64 => .i64, + else => return diags.fail("wasm linker does not yet implement @tagName for sparse enums with more than 64 bit integer tag types", .{}), + }; + + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get)); + appendReservedUleb32(code, 0); + + // Outer block that computes table offset. + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.block)); + code.appendAssumeCapacity(@intFromEnum(outer_block_type)); + + for (tag_values, 0..) |tag_value, tag_index| { + // block for this if case + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.block)); + code.appendAssumeCapacity(@intFromEnum(std.wasm.BlockType.empty)); + + // Tag value whose name should be returned. 
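+ // The runtime tag value (local 1) is compared against this case's
+ // constant: on mismatch, br_if exits this case block; on match, the
+ // case's table offset is pushed and br exits the outer block with that
+ // offset as the result.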
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get)); + appendReservedUleb32(code, 1); + + const val: Zcu.Value = .fromInterned(tag_value); + switch (outer_block_type) { + .i32 => { + const x: u32 = switch (int_info.signedness) { + .signed => @bitCast(@as(i32, @intCast(val.toSignedInt(zcu)))), + .unsigned => @intCast(val.toUnsignedInt(zcu)), + }; + appendReservedI32Const(code, x); + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_ne)); + }, + .i64 => { + const x: u64 = switch (int_info.signedness) { + .signed => @bitCast(val.toSignedInt(zcu)), + .unsigned => val.toUnsignedInt(zcu), + }; + appendReservedI64Const(code, x); + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_ne)); + }, + else => unreachable, + } + + // if they're not equal, break out of current branch + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br_if)); + appendReservedUleb32(code, 0); + + // Put the table offset of the result on the stack. + appendReservedI32Const(code, @intCast(tag_index * slice_abi_size)); + + // break outside blocks + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br)); + appendReservedUleb32(code, 1); + + // end the block for this case + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end)); + } + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.@"unreachable")); + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end)); + + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_load)); + appendReservedUleb32(code, encoded_alignment); + appendReservedUleb32(code, table_base_addr + table_index * 8); + + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_store)); + appendReservedUleb32(code, encoded_alignment); + appendReservedUleb32(code, 0); + } + + // End of the function body + code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end)); +} + +/// Writes an unsigned 32-bit integer as a LEB128-encoded 'i32.const' value. +fn appendReservedI32Const(bytes: *std.ArrayListUnmanaged(u8), val: u32) void { + bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const)); + leb.writeIleb128(bytes.fixedWriter(), @as(i32, @bitCast(val))) catch unreachable; +} + +/// Writes an unsigned 64-bit integer as a LEB128-encoded 'i64.const' value. +fn appendReservedI64Const(bytes: *std.ArrayListUnmanaged(u8), val: u64) void { + bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_const)); + leb.writeIleb128(bytes.fixedWriter(), @as(i64, @bitCast(val))) catch unreachable; +} + +fn appendReservedUleb32(bytes: *std.ArrayListUnmanaged(u8), val: u32) void { + leb.writeUleb128(bytes.fixedWriter(), val) catch unreachable; +} + +fn appendGlobal(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8), mutable: u8, val: u32) Allocator.Error!void { + try bytes.ensureUnusedCapacity(gpa, 9); + bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Valtype.i32)); + bytes.appendAssumeCapacity(mutable); + bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const)); + appendReservedUleb32(bytes, val); + bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end)); +} diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig index fc11f918763e..4b9807fdf266 100644 --- a/src/link/Wasm/Object.zig +++ b/src/link/Wasm/Object.zig @@ -1,841 +1,1520 @@ -//! Object represents a wasm object file. When initializing a new -//! `Object`, it will parse the contents of a given file handler, and verify -//! the data on correctness. The result can then be used by the linker. 
const Object = @This(); const Wasm = @import("../Wasm.zig"); -const Atom = Wasm.Atom; const Alignment = Wasm.Alignment; -const Symbol = @import("Symbol.zig"); const std = @import("std"); const Allocator = std.mem.Allocator; -const leb = std.leb; -const meta = std.meta; const Path = std.Build.Cache.Path; - const log = std.log.scoped(.object); +const assert = std.debug.assert; /// Wasm spec version used for this `Object` -version: u32 = 0, +version: u32, /// For error reporting purposes only. /// Name (read path) of the object or archive file. path: Path, /// For error reporting purposes only. /// If this represents an object in an archive, it's the basename of the /// object, and path refers to the archive. -archive_member_name: ?[]const u8, -/// Parsed type section -func_types: []const std.wasm.Type = &.{}, -/// A list of all imports for this module -imports: []const Wasm.Import = &.{}, -/// Parsed function section -functions: []const std.wasm.Func = &.{}, -/// Parsed table section -tables: []const std.wasm.Table = &.{}, -/// Parsed memory section -memories: []const std.wasm.Memory = &.{}, -/// Parsed global section -globals: []const std.wasm.Global = &.{}, -/// Parsed export section -exports: []const Wasm.Export = &.{}, -/// Parsed element section -elements: []const std.wasm.Element = &.{}, +archive_member_name: Wasm.OptionalString, /// Represents the function ID that must be called on startup. /// This is `null` by default as runtimes may determine the startup /// function themselves. This is essentially legacy. -start: ?u32 = null, -/// A slice of features that tell the linker what features are mandatory, -/// used (or therefore missing) and must generate an error when another -/// object uses features that are not supported by the other. -features: []const Wasm.Feature = &.{}, -/// A table that maps the relocations we must perform where the key represents -/// the section that the list of relocations applies to. -relocations: std.AutoArrayHashMapUnmanaged(u32, []Wasm.Relocation) = .empty, -/// Table of symbols belonging to this Object file -symtable: []Symbol = &.{}, -/// Extra metadata about the linking section, such as alignment of segments and their name -segment_info: []const Wasm.NamedSegment = &.{}, -/// A sequence of function initializers that must be called on startup -init_funcs: []const Wasm.InitFunc = &.{}, -/// Comdat information -comdat_info: []const Wasm.Comdat = &.{}, -/// Represents non-synthetic sections that can essentially be mem-cpy'd into place -/// after performing relocations. -relocatable_data: std.AutoHashMapUnmanaged(RelocatableData.Tag, []RelocatableData) = .empty, -/// Amount of functions in the `import` sections. -imported_functions_count: u32 = 0, -/// Amount of globals in the `import` section. -imported_globals_count: u32 = 0, -/// Amount of tables in the `import` section. -imported_tables_count: u32 = 0, - -/// Represents a single item within a section (depending on its `type`) -pub const RelocatableData = struct { - /// The type of the relocatable data - type: Tag, - /// Pointer to the data of the segment, where its length is written to `size` - data: [*]u8, - /// The size in bytes of the data representing the segment within the section - size: u32, - /// The index within the section itself, or in case of a debug section, - /// the offset within the `string_table`. 
- index: u32, - /// The offset within the section where the data starts - offset: u32, - /// Represents the index of the section it belongs to - section_index: u32, - /// Whether the relocatable section is represented by a symbol or not. - /// Can only be `true` for custom sections. - represented: bool = false, - - const Tag = enum { data, code, custom }; - - /// Returns the alignment of the segment, by retrieving it from the segment - /// meta data of the given object file. - /// NOTE: Alignment is encoded as a power of 2, so we shift the symbol's - /// alignment to retrieve the natural alignment. - pub fn getAlignment(relocatable_data: RelocatableData, object: *const Object) Alignment { - if (relocatable_data.type != .data) return .@"1"; - return object.segment_info[relocatable_data.index].alignment; - } - - /// Returns the symbol kind that corresponds to the relocatable section - pub fn getSymbolKind(relocatable_data: RelocatableData) Symbol.Tag { - return switch (relocatable_data.type) { - .data => .data, - .code => .function, - .custom => .section, - }; - } - - /// Returns the index within a section, or in case of a custom section, - /// returns the section index within the object file. - pub fn getIndex(relocatable_data: RelocatableData) u32 { - if (relocatable_data.type == .custom) return relocatable_data.section_index; - return relocatable_data.index; - } +start_function: Wasm.OptionalObjectFunctionIndex, +/// A slice of features that tell the linker what features are mandatory, used +/// (or therefore missing) and must generate an error when another object uses +/// features that are not supported by the other. +features: Wasm.Feature.Set, +/// Points into `Wasm.object_functions` +functions: RelativeSlice, +/// Points into `Wasm.object_function_imports` +function_imports: RelativeSlice, +/// Points into `Wasm.object_global_imports` +global_imports: RelativeSlice, +/// Points into `Wasm.object_table_imports` +table_imports: RelativeSlice, +// Points into `Wasm.object_data_imports` +data_imports: RelativeSlice, +/// Points into Wasm object_custom_segments +custom_segments: RelativeSlice, +/// Points into Wasm object_init_funcs +init_funcs: RelativeSlice, +/// Points into Wasm object_comdats +comdats: RelativeSlice, +/// Guaranteed to be non-null when functions has nonzero length. +code_section_index: ?Wasm.ObjectSectionIndex, +/// Guaranteed to be non-null when globals has nonzero length. +global_section_index: ?Wasm.ObjectSectionIndex, +/// Guaranteed to be non-null when data segments has nonzero length. +data_section_index: ?Wasm.ObjectSectionIndex, +is_included: bool, + +pub const RelativeSlice = struct { + off: u32, + len: u32, }; -/// Initializes a new `Object` from a wasm object file. -/// This also parses and verifies the object file. -/// When a max size is given, will only parse up to the given size, -/// else will read until the end of the file. -pub fn create( - wasm: *Wasm, - file_contents: []const u8, - path: Path, - archive_member_name: ?[]const u8, -) !Object { - const gpa = wasm.base.comp.gpa; - var object: Object = .{ - .path = path, - .archive_member_name = archive_member_name, +pub const SegmentInfo = struct { + name: Wasm.String, + flags: Flags, + + /// Matches the ABI. + pub const Flags = packed struct(u32) { + /// Signals that the segment contains only null terminated strings allowing + /// the linker to perform merging. + strings: bool, + /// The segment contains thread-local data. 
This means that a unique copy + /// of this segment will be created for each thread. + tls: bool, + /// If the object file is included in the final link, the segment should be + /// retained in the final output regardless of whether it is used by the + /// program. + retain: bool, + alignment: Alignment, + + _: u23 = 0, }; +}; - var parser: Parser = .{ - .object = &object, - .wasm = wasm, - .reader = std.io.fixedBufferStream(file_contents), - }; - try parser.parseObject(gpa); +pub const FunctionImport = struct { + module_name: Wasm.String, + name: Wasm.String, + function_index: ScratchSpace.FuncTypeIndex, +}; - return object; -} +pub const GlobalImport = struct { + module_name: Wasm.String, + name: Wasm.String, + valtype: std.wasm.Valtype, + mutable: bool, +}; -/// Frees all memory of `Object` at once. The given `Allocator` must be -/// the same allocator that was used when `init` was called. -pub fn deinit(object: *Object, gpa: Allocator) void { - for (object.func_types) |func_ty| { - gpa.free(func_ty.params); - gpa.free(func_ty.returns); - } - gpa.free(object.func_types); - gpa.free(object.functions); - gpa.free(object.imports); - gpa.free(object.tables); - gpa.free(object.memories); - gpa.free(object.globals); - gpa.free(object.exports); - for (object.elements) |el| { - gpa.free(el.func_indexes); - } - gpa.free(object.elements); - gpa.free(object.features); - for (object.relocations.values()) |val| { - gpa.free(val); - } - object.relocations.deinit(gpa); - gpa.free(object.symtable); - gpa.free(object.comdat_info); - gpa.free(object.init_funcs); - for (object.segment_info) |info| { - gpa.free(info.name); - } - gpa.free(object.segment_info); - { - var it = object.relocatable_data.valueIterator(); - while (it.next()) |relocatable_data| { - for (relocatable_data.*) |rel_data| { - gpa.free(rel_data.data[0..rel_data.size]); - } - gpa.free(relocatable_data.*); - } - } - object.relocatable_data.deinit(gpa); - object.* = undefined; -} +pub const TableImport = struct { + module_name: Wasm.String, + name: Wasm.String, + limits_min: u32, + limits_max: u32, + limits_has_max: bool, + limits_is_shared: bool, + ref_type: std.wasm.RefType, +}; -/// Finds the import within the list of imports from a given kind and index of that kind. -/// Asserts the import exists -pub fn findImport(object: *const Object, sym: Symbol) Wasm.Import { - var i: u32 = 0; - return for (object.imports) |import| { - if (std.meta.activeTag(import.kind) == sym.tag.externalType()) { - if (i == sym.index) return import; - i += 1; - } - } else unreachable; // Only existing imports are allowed to be found -} +pub const DataSegmentFlags = enum(u32) { active, passive, active_memidx }; -/// Checks if the object file is an MVP version. -/// When that's the case, we check if there's an import table definition with its name -/// set to '__indirect_function_table". When that's also the case, -/// we initialize a new table symbol that corresponds to that import and return that symbol. -/// -/// When the object file is *NOT* MVP, we return `null`. 
-fn checkLegacyIndirectFunctionTable(object: *Object, wasm: *const Wasm) !?Symbol { - const diags = &wasm.base.comp.link_diags; +pub const SubsectionType = enum(u8) { + segment_info = 5, + init_funcs = 6, + comdat_info = 7, + symbol_table = 8, +}; - var table_count: usize = 0; - for (object.symtable) |sym| { - if (sym.tag == .table) table_count += 1; - } +/// Specified by https://github.com/WebAssembly/tool-conventions/blob/main/Linking.md +pub const RelocationType = enum(u8) { + function_index_leb = 0, + table_index_sleb = 1, + table_index_i32 = 2, + memory_addr_leb = 3, + memory_addr_sleb = 4, + memory_addr_i32 = 5, + type_index_leb = 6, + global_index_leb = 7, + function_offset_i32 = 8, + section_offset_i32 = 9, + event_index_leb = 10, + memory_addr_rel_sleb = 11, + table_index_rel_sleb = 12, + global_index_i32 = 13, + memory_addr_leb64 = 14, + memory_addr_sleb64 = 15, + memory_addr_i64 = 16, + memory_addr_rel_sleb64 = 17, + table_index_sleb64 = 18, + table_index_i64 = 19, + table_number_leb = 20, + memory_addr_tls_sleb = 21, + function_offset_i64 = 22, + memory_addr_locrel_i32 = 23, + table_index_rel_sleb64 = 24, + memory_addr_tls_sleb64 = 25, + function_index_i32 = 26, +}; - // For each import table, we also have a symbol so this is not a legacy object file - if (object.imported_tables_count == table_count) return null; +pub const Symbol = struct { + flags: Wasm.SymbolFlags, + name: Wasm.OptionalString, + pointee: Pointee, + + /// https://github.com/WebAssembly/tool-conventions/blob/df8d737539eb8a8f446ba5eab9dc670c40dfb81e/Linking.md#symbol-table-subsection + const Tag = enum(u8) { + function, + data, + global, + section, + event, + table, + }; - if (table_count != 0) { - return diags.failParse(object.path, "expected a table entry symbol for each of the {d} table(s), but instead got {d} symbols.", .{ - object.imported_tables_count, - table_count, - }); - } + const Pointee = union(enum) { + function: Wasm.ObjectFunctionIndex, + function_import: ScratchSpace.FuncImportIndex, + data: Wasm.ObjectData.Index, + data_import: void, + global: Wasm.ObjectGlobalIndex, + global_import: ScratchSpace.GlobalImportIndex, + section: Wasm.ObjectSectionIndex, + table: Wasm.ObjectTableIndex, + table_import: ScratchSpace.TableImportIndex, + }; +}; - // MVP object files cannot have any table definitions, only imports (for the indirect function table). - if (object.tables.len > 0) { - return diags.failParse(object.path, "unexpected table definition without representing table symbols.", .{}); - } +pub const ScratchSpace = struct { + func_types: std.ArrayListUnmanaged(Wasm.FunctionType.Index) = .empty, + func_type_indexes: std.ArrayListUnmanaged(FuncTypeIndex) = .empty, + func_imports: std.ArrayListUnmanaged(FunctionImport) = .empty, + global_imports: std.ArrayListUnmanaged(GlobalImport) = .empty, + table_imports: std.ArrayListUnmanaged(TableImport) = .empty, + symbol_table: std.ArrayListUnmanaged(Symbol) = .empty, + segment_info: std.ArrayListUnmanaged(SegmentInfo) = .empty, + exports: std.ArrayListUnmanaged(Export) = .empty, + + const Export = struct { + name: Wasm.String, + pointee: Pointee, + + const Pointee = union(std.wasm.ExternalKind) { + function: Wasm.ObjectFunctionIndex, + table: Wasm.ObjectTableIndex, + memory: Wasm.ObjectMemory.Index, + global: Wasm.ObjectGlobalIndex, + }; + }; - if (object.imported_tables_count != 1) { - return diags.failParse(object.path, "found more than one table import, but no representing table symbols", .{}); - } + /// Index into `func_imports`. 
+ const FuncImportIndex = enum(u32) { + _, - const table_import: Wasm.Import = for (object.imports) |imp| { - if (imp.kind == .table) { - break imp; + fn ptr(index: FuncImportIndex, ss: *const ScratchSpace) *FunctionImport { + return &ss.func_imports.items[@intFromEnum(index)]; } - } else unreachable; + }; - if (table_import.name != wasm.preloaded_strings.__indirect_function_table) { - return diags.failParse(object.path, "non-indirect function table import '{s}' is missing a corresponding symbol", .{ - wasm.stringSlice(table_import.name), - }); - } + /// Index into `global_imports`. + const GlobalImportIndex = enum(u32) { + _, - var table_symbol: Symbol = .{ - .flags = 0, - .name = table_import.name, - .tag = .table, - .index = 0, - .virtual_address = undefined, + fn ptr(index: GlobalImportIndex, ss: *const ScratchSpace) *GlobalImport { + return &ss.global_imports.items[@intFromEnum(index)]; + } }; - table_symbol.setFlag(.WASM_SYM_UNDEFINED); - table_symbol.setFlag(.WASM_SYM_NO_STRIP); - return table_symbol; -} -const Parser = struct { - reader: std.io.FixedBufferStream([]const u8), - /// Object file we're building - object: *Object, - /// Mutable so that the string table can be modified. - wasm: *Wasm, + /// Index into `table_imports`. + const TableImportIndex = enum(u32) { + _, + + fn ptr(index: TableImportIndex, ss: *const ScratchSpace) *TableImport { + return &ss.table_imports.items[@intFromEnum(index)]; + } + }; - fn parseObject(parser: *Parser, gpa: Allocator) anyerror!void { - const wasm = parser.wasm; + /// Index into `func_types`. + const FuncTypeIndex = enum(u32) { + _, - { - var magic_bytes: [4]u8 = undefined; - try parser.reader.reader().readNoEof(&magic_bytes); - if (!std.mem.eql(u8, &magic_bytes, &std.wasm.magic)) return error.BadObjectMagic; + fn ptr(index: FuncTypeIndex, ss: *const ScratchSpace) *Wasm.FunctionType.Index { + return &ss.func_types.items[@intFromEnum(index)]; } + }; - const version = try parser.reader.reader().readInt(u32, .little); - parser.object.version = version; - - var saw_linking_section = false; - - var section_index: u32 = 0; - while (parser.reader.reader().readByte()) |byte| : (section_index += 1) { - const len = try readLeb(u32, parser.reader.reader()); - var limited_reader = std.io.limitedReader(parser.reader.reader(), len); - const reader = limited_reader.reader(); - switch (@as(std.wasm.Section, @enumFromInt(byte))) { - .custom => { - const name_len = try readLeb(u32, reader); - const name = try gpa.alloc(u8, name_len); - defer gpa.free(name); - try reader.readNoEof(name); - - if (std.mem.eql(u8, name, "linking")) { - saw_linking_section = true; - try parser.parseMetadata(gpa, @as(usize, @intCast(reader.context.bytes_left))); - } else if (std.mem.startsWith(u8, name, "reloc")) { - try parser.parseRelocations(gpa); - } else if (std.mem.eql(u8, name, "target_features")) { - try parser.parseFeatures(gpa); - } else if (std.mem.startsWith(u8, name, ".debug")) { - const gop = try parser.object.relocatable_data.getOrPut(gpa, .custom); - var relocatable_data: std.ArrayListUnmanaged(RelocatableData) = .empty; - defer relocatable_data.deinit(gpa); - if (!gop.found_existing) { - gop.value_ptr.* = &.{}; - } else { - relocatable_data = std.ArrayListUnmanaged(RelocatableData).fromOwnedSlice(gop.value_ptr.*); - } - const debug_size = @as(u32, @intCast(reader.context.bytes_left)); - const debug_content = try gpa.alloc(u8, debug_size); - errdefer gpa.free(debug_content); - try reader.readNoEof(debug_content); - - try relocatable_data.append(gpa, .{ - .type = 
.custom, - .data = debug_content.ptr, - .size = debug_size, - .index = @intFromEnum(try wasm.internString(name)), - .offset = 0, // debug sections only contain 1 entry, so no need to calculate offset - .section_index = section_index, - }); - gop.value_ptr.* = try relocatable_data.toOwnedSlice(gpa); - } else { - try reader.skipBytes(reader.context.bytes_left, .{}); - } - }, - .type => { - for (try readVec(&parser.object.func_types, reader, gpa)) |*type_val| { - if ((try reader.readByte()) != std.wasm.function_type) return error.ExpectedFuncType; + pub fn deinit(ss: *ScratchSpace, gpa: Allocator) void { + ss.exports.deinit(gpa); + ss.func_types.deinit(gpa); + ss.func_type_indexes.deinit(gpa); + ss.func_imports.deinit(gpa); + ss.global_imports.deinit(gpa); + ss.table_imports.deinit(gpa); + ss.symbol_table.deinit(gpa); + ss.segment_info.deinit(gpa); + ss.* = undefined; + } - for (try readVec(&type_val.params, reader, gpa)) |*param| { - param.* = try readEnum(std.wasm.Valtype, reader); - } + fn clear(ss: *ScratchSpace) void { + ss.exports.clearRetainingCapacity(); + ss.func_types.clearRetainingCapacity(); + ss.func_type_indexes.clearRetainingCapacity(); + ss.func_imports.clearRetainingCapacity(); + ss.global_imports.clearRetainingCapacity(); + ss.table_imports.clearRetainingCapacity(); + ss.symbol_table.clearRetainingCapacity(); + ss.segment_info.clearRetainingCapacity(); + } +}; - for (try readVec(&type_val.returns, reader, gpa)) |*result| { - result.* = try readEnum(std.wasm.Valtype, reader); +pub fn parse( + wasm: *Wasm, + bytes: []const u8, + path: Path, + archive_member_name: ?[]const u8, + host_name: Wasm.OptionalString, + ss: *ScratchSpace, + must_link: bool, + gc_sections: bool, +) anyerror!Object { + const comp = wasm.base.comp; + const gpa = comp.gpa; + const diags = &comp.link_diags; + + var pos: usize = 0; + + if (!std.mem.eql(u8, bytes[0..std.wasm.magic.len], &std.wasm.magic)) return error.BadObjectMagic; + pos += std.wasm.magic.len; + + const version = std.mem.readInt(u32, bytes[pos..][0..4], .little); + pos += 4; + + const data_segment_start: u32 = @intCast(wasm.object_data_segments.items.len); + const custom_segment_start: u32 = @intCast(wasm.object_custom_segments.entries.len); + const functions_start: u32 = @intCast(wasm.object_functions.items.len); + const tables_start: u32 = @intCast(wasm.object_tables.items.len); + const memories_start: u32 = @intCast(wasm.object_memories.items.len); + const globals_start: u32 = @intCast(wasm.object_globals.items.len); + const init_funcs_start: u32 = @intCast(wasm.object_init_funcs.items.len); + const comdats_start: u32 = @intCast(wasm.object_comdats.items.len); + const function_imports_start: u32 = @intCast(wasm.object_function_imports.entries.len); + const global_imports_start: u32 = @intCast(wasm.object_global_imports.entries.len); + const table_imports_start: u32 = @intCast(wasm.object_table_imports.entries.len); + const data_imports_start: u32 = @intCast(wasm.object_data_imports.entries.len); + const local_section_index_base = wasm.object_total_sections; + const object_index: Wasm.ObjectIndex = @enumFromInt(wasm.objects.items.len); + const source_location: Wasm.SourceLocation = .fromObject(object_index, wasm); + + ss.clear(); + + var start_function: Wasm.OptionalObjectFunctionIndex = .none; + var opt_features: ?Wasm.Feature.Set = null; + var saw_linking_section = false; + var has_tls = false; + var table_import_symbol_count: usize = 0; + var code_section_index: ?Wasm.ObjectSectionIndex = null; + var global_section_index: 
?Wasm.ObjectSectionIndex = null; + var data_section_index: ?Wasm.ObjectSectionIndex = null; + while (pos < bytes.len) : (wasm.object_total_sections += 1) { + const section_index: Wasm.ObjectSectionIndex = @enumFromInt(wasm.object_total_sections); + + const section_tag: std.wasm.Section = @enumFromInt(bytes[pos]); + pos += 1; + + const len, pos = readLeb(u32, bytes, pos); + const section_end = pos + len; + switch (section_tag) { + .custom => { + const section_name, pos = readBytes(bytes, pos); + if (std.mem.eql(u8, section_name, "linking")) { + saw_linking_section = true; + const section_version, pos = readLeb(u32, bytes, pos); + log.debug("link meta data version: {d}", .{section_version}); + if (section_version != 2) return error.UnsupportedVersion; + while (pos < section_end) { + const sub_type, pos = readLeb(u8, bytes, pos); + log.debug("found subsection: {s}", .{@tagName(@as(SubsectionType, @enumFromInt(sub_type)))}); + const payload_len, pos = readLeb(u32, bytes, pos); + if (payload_len == 0) break; + + const count, pos = readLeb(u32, bytes, pos); + + switch (@as(SubsectionType, @enumFromInt(sub_type))) { + .segment_info => { + for (try ss.segment_info.addManyAsSlice(gpa, count)) |*segment| { + const name, pos = readBytes(bytes, pos); + const alignment, pos = readLeb(u32, bytes, pos); + const flags_u32, pos = readLeb(u32, bytes, pos); + const flags: SegmentInfo.Flags = @bitCast(flags_u32); + const tls = flags.tls or + // Supports legacy object files that specified + // being TLS by the name instead of the TLS flag. + std.mem.startsWith(u8, name, ".tdata") or + std.mem.startsWith(u8, name, ".tbss"); + has_tls = has_tls or tls; + segment.* = .{ + .name = try wasm.internString(name), + .flags = .{ + .strings = flags.strings, + .tls = tls, + .alignment = @enumFromInt(alignment), + .retain = flags.retain, + }, + }; + } + }, + .init_funcs => { + for (try wasm.object_init_funcs.addManyAsSlice(gpa, count)) |*func| { + const priority, pos = readLeb(u32, bytes, pos); + const symbol_index, pos = readLeb(u32, bytes, pos); + if (symbol_index > ss.symbol_table.items.len) + return diags.failParse(path, "init_funcs before symbol table", .{}); + const sym = &ss.symbol_table.items[symbol_index]; + if (sym.pointee != .function) { + return diags.failParse(path, "init_func symbol '{s}' not a function", .{ + sym.name.slice(wasm).?, + }); + } else if (sym.flags.undefined) { + return diags.failParse(path, "init_func symbol '{s}' is an import", .{ + sym.name.slice(wasm).?, + }); + } + func.* = .{ + .priority = priority, + .function_index = sym.pointee.function, + }; + } + }, + .comdat_info => { + for (try wasm.object_comdats.addManyAsSlice(gpa, count)) |*comdat| { + const name, pos = readBytes(bytes, pos); + const flags, pos = readLeb(u32, bytes, pos); + if (flags != 0) return error.UnexpectedComdatFlags; + const symbol_count, pos = readLeb(u32, bytes, pos); + const start_off: u32 = @intCast(wasm.object_comdat_symbols.len); + try wasm.object_comdat_symbols.ensureUnusedCapacity(gpa, symbol_count); + for (0..symbol_count) |_| { + const kind, pos = readEnum(Wasm.Comdat.Symbol.Type, bytes, pos); + const index, pos = readLeb(u32, bytes, pos); + if (true) @panic("TODO rebase index depending on kind"); + wasm.object_comdat_symbols.appendAssumeCapacity(.{ + .kind = kind, + .index = index, + }); + } + comdat.* = .{ + .name = try wasm.internString(name), + .flags = flags, + .symbols = .{ + .off = start_off, + .len = @intCast(wasm.object_comdat_symbols.len - start_off), + }, + }; + } + }, + .symbol_table => { + for (try 
ss.symbol_table.addManyAsSlice(gpa, count)) |*symbol| { + const tag, pos = readEnum(Symbol.Tag, bytes, pos); + const flags, pos = readLeb(u32, bytes, pos); + symbol.* = .{ + .flags = @bitCast(flags), + .name = .none, + .pointee = undefined, + }; + symbol.flags.initZigSpecific(must_link, gc_sections); + + switch (tag) { + .data => { + const name, pos = readBytes(bytes, pos); + const interned_name = try wasm.internString(name); + symbol.name = interned_name.toOptional(); + if (symbol.flags.undefined) { + symbol.pointee = .data_import; + } else { + const segment_index, pos = readLeb(u32, bytes, pos); + const segment_offset, pos = readLeb(u32, bytes, pos); + const size, pos = readLeb(u32, bytes, pos); + try wasm.object_datas.append(gpa, .{ + .segment = @enumFromInt(data_segment_start + segment_index), + .offset = segment_offset, + .size = size, + .name = interned_name, + .flags = symbol.flags, + }); + symbol.pointee = .{ + .data = @enumFromInt(wasm.object_datas.items.len - 1), + }; + } + }, + .section => { + const local_section, pos = readLeb(u32, bytes, pos); + const section: Wasm.ObjectSectionIndex = @enumFromInt(local_section_index_base + local_section); + symbol.pointee = .{ .section = section }; + }, + + .function => { + const local_index, pos = readLeb(u32, bytes, pos); + if (symbol.flags.undefined) { + const function_import: ScratchSpace.FuncImportIndex = @enumFromInt(local_index); + symbol.pointee = .{ .function_import = function_import }; + if (symbol.flags.explicit_name) { + const name, pos = readBytes(bytes, pos); + symbol.name = (try wasm.internString(name)).toOptional(); + } else { + symbol.name = function_import.ptr(ss).name.toOptional(); + } + } else { + symbol.pointee = .{ .function = @enumFromInt(functions_start + (local_index - ss.func_imports.items.len)) }; + const name, pos = readBytes(bytes, pos); + symbol.name = (try wasm.internString(name)).toOptional(); + } + }, + .global => { + const local_index, pos = readLeb(u32, bytes, pos); + if (symbol.flags.undefined) { + const global_import: ScratchSpace.GlobalImportIndex = @enumFromInt(local_index); + symbol.pointee = .{ .global_import = global_import }; + if (symbol.flags.explicit_name) { + const name, pos = readBytes(bytes, pos); + symbol.name = (try wasm.internString(name)).toOptional(); + } else { + symbol.name = global_import.ptr(ss).name.toOptional(); + } + } else { + symbol.pointee = .{ .global = @enumFromInt(globals_start + (local_index - ss.global_imports.items.len)) }; + const name, pos = readBytes(bytes, pos); + symbol.name = (try wasm.internString(name)).toOptional(); + } + }, + .table => { + const local_index, pos = readLeb(u32, bytes, pos); + if (symbol.flags.undefined) { + table_import_symbol_count += 1; + const table_import: ScratchSpace.TableImportIndex = @enumFromInt(local_index); + symbol.pointee = .{ .table_import = table_import }; + if (symbol.flags.explicit_name) { + const name, pos = readBytes(bytes, pos); + symbol.name = (try wasm.internString(name)).toOptional(); + } else { + symbol.name = table_import.ptr(ss).name.toOptional(); + } + } else { + symbol.pointee = .{ .table = @enumFromInt(tables_start + (local_index - ss.table_imports.items.len)) }; + const name, pos = readBytes(bytes, pos); + symbol.name = (try wasm.internString(name)).toOptional(); + } + }, + else => { + log.debug("unrecognized symbol type tag: {x}", .{@intFromEnum(tag)}); + return error.UnrecognizedSymbolType; + }, + } + } + }, } } - try assertEnd(reader); - }, - .import => { - for (try readVec(&parser.object.imports, reader, gpa)) 
|*import| { - const module_len = try readLeb(u32, reader); - const module_name = try gpa.alloc(u8, module_len); - defer gpa.free(module_name); - try reader.readNoEof(module_name); - - const name_len = try readLeb(u32, reader); - const name = try gpa.alloc(u8, name_len); - defer gpa.free(name); - try reader.readNoEof(name); - - const kind = try readEnum(std.wasm.ExternalKind, reader); - const kind_value: std.wasm.Import.Kind = switch (kind) { - .function => val: { - parser.object.imported_functions_count += 1; - break :val .{ .function = try readLeb(u32, reader) }; + } else if (std.mem.startsWith(u8, section_name, "reloc.")) { + // 'The "reloc." custom sections must come after the "linking" custom section' + if (!saw_linking_section) return error.RelocBeforeLinkingSection; + + // "Relocation sections start with an identifier specifying + // which section they apply to, and must be sequenced in + // the module after that section." + // "Relocation sections can only target code, data and custom sections." + const local_section, pos = readLeb(u32, bytes, pos); + const count, pos = readLeb(u32, bytes, pos); + const section: Wasm.ObjectSectionIndex = @enumFromInt(local_section_index_base + local_section); + + log.debug("found {d} relocations for section={d}", .{ count, section }); + + var prev_offset: u32 = 0; + try wasm.object_relocations.ensureUnusedCapacity(gpa, count); + for (0..count) |_| { + const tag: RelocationType = @enumFromInt(bytes[pos]); + pos += 1; + const offset, pos = readLeb(u32, bytes, pos); + const index, pos = readLeb(u32, bytes, pos); + + if (offset < prev_offset) + return diags.failParse(path, "relocation entries not sorted by offset", .{}); + prev_offset = offset; + + const sym = &ss.symbol_table.items[index]; + + switch (tag) { + .memory_addr_leb, + .memory_addr_sleb, + .memory_addr_i32, + .memory_addr_rel_sleb, + .memory_addr_leb64, + .memory_addr_sleb64, + .memory_addr_i64, + .memory_addr_rel_sleb64, + .memory_addr_tls_sleb, + .memory_addr_locrel_i32, + .memory_addr_tls_sleb64, + => { + const addend: i32, pos = readLeb(i32, bytes, pos); + wasm.object_relocations.appendAssumeCapacity(switch (sym.pointee) { + .data => |data| .{ + .tag = .fromType(tag), + .offset = offset, + .pointee = .{ .data = data }, + .addend = addend, + }, + .data_import => .{ + .tag = .fromTypeImport(tag), + .offset = offset, + .pointee = .{ .symbol_name = sym.name.unwrap().? }, + .addend = addend, + }, + else => unreachable, + }); + }, + .function_offset_i32, .function_offset_i64 => { + const addend: i32, pos = readLeb(i32, bytes, pos); + wasm.object_relocations.appendAssumeCapacity(switch (sym.pointee) { + .function => .{ + .tag = .fromType(tag), + .offset = offset, + .pointee = .{ .function = sym.pointee.function }, + .addend = addend, + }, + .function_import => .{ + .tag = .fromTypeImport(tag), + .offset = offset, + .pointee = .{ .symbol_name = sym.name.unwrap().? 
}, + .addend = addend, + }, + else => unreachable, + }); }, - .memory => .{ .memory = try readLimits(reader) }, - .global => val: { - parser.object.imported_globals_count += 1; - break :val .{ .global = .{ - .valtype = try readEnum(std.wasm.Valtype, reader), - .mutable = (try reader.readByte()) == 0x01, - } }; + .section_offset_i32 => { + const addend: i32, pos = readLeb(i32, bytes, pos); + wasm.object_relocations.appendAssumeCapacity(.{ + .tag = .section_offset_i32, + .offset = offset, + .pointee = .{ .section = sym.pointee.section }, + .addend = addend, + }); }, - .table => val: { - parser.object.imported_tables_count += 1; - break :val .{ .table = .{ - .reftype = try readEnum(std.wasm.RefType, reader), - .limits = try readLimits(reader), - } }; + .type_index_leb => { + wasm.object_relocations.appendAssumeCapacity(.{ + .tag = .type_index_leb, + .offset = offset, + .pointee = .{ .type_index = ss.func_types.items[index] }, + .addend = undefined, + }); + }, + .function_index_leb, + .function_index_i32, + .table_index_sleb, + .table_index_i32, + .table_index_sleb64, + .table_index_i64, + .table_index_rel_sleb, + .table_index_rel_sleb64, + => { + wasm.object_relocations.appendAssumeCapacity(switch (sym.pointee) { + .function => .{ + .tag = .fromType(tag), + .offset = offset, + .pointee = .{ .function = sym.pointee.function }, + .addend = undefined, + }, + .function_import => .{ + .tag = .fromTypeImport(tag), + .offset = offset, + .pointee = .{ .symbol_name = sym.name.unwrap().? }, + .addend = undefined, + }, + else => unreachable, + }); + }, + .global_index_leb, .global_index_i32 => { + wasm.object_relocations.appendAssumeCapacity(switch (sym.pointee) { + .global => .{ + .tag = .fromType(tag), + .offset = offset, + .pointee = .{ .global = sym.pointee.global }, + .addend = undefined, + }, + .global_import => .{ + .tag = .fromTypeImport(tag), + .offset = offset, + .pointee = .{ .symbol_name = sym.name.unwrap().? }, + .addend = undefined, + }, + else => unreachable, + }); }, - }; - import.* = .{ - .module_name = try wasm.internString(module_name), - .name = try wasm.internString(name), - .kind = kind_value, - }; - } - try assertEnd(reader); - }, - .function => { - for (try readVec(&parser.object.functions, reader, gpa)) |*func| { - func.* = .{ .type_index = try readLeb(u32, reader) }; - } - try assertEnd(reader); - }, - .table => { - for (try readVec(&parser.object.tables, reader, gpa)) |*table| { - table.* = .{ - .reftype = try readEnum(std.wasm.RefType, reader), - .limits = try readLimits(reader), - }; + .table_number_leb => { + wasm.object_relocations.appendAssumeCapacity(switch (sym.pointee) { + .table => .{ + .tag = .fromType(tag), + .offset = offset, + .pointee = .{ .table = sym.pointee.table }, + .addend = undefined, + }, + .table_import => .{ + .tag = .fromTypeImport(tag), + .offset = offset, + .pointee = .{ .symbol_name = sym.name.unwrap().? 
}, + .addend = undefined, + }, + else => unreachable, + }); + }, + .event_index_leb => return diags.failParse(path, "unsupported relocation: R_WASM_EVENT_INDEX_LEB", .{}), + } } - try assertEnd(reader); - }, - .memory => { - for (try readVec(&parser.object.memories, reader, gpa)) |*memory| { - memory.* = .{ .limits = try readLimits(reader) }; + + try wasm.object_relocations_table.putNoClobber(gpa, section, .{ + .off = @intCast(wasm.object_relocations.len - count), + .len = count, + }); + } else if (std.mem.eql(u8, section_name, "target_features")) { + opt_features, pos = try parseFeatures(wasm, bytes, pos, path); + } else if (std.mem.startsWith(u8, section_name, ".debug")) { + const debug_content = bytes[pos..section_end]; + pos = section_end; + + const data_off: u32 = @intCast(wasm.string_bytes.items.len); + try wasm.string_bytes.appendSlice(gpa, debug_content); + + try wasm.object_custom_segments.put(gpa, section_index, .{ + .payload = .{ + .off = @enumFromInt(data_off), + .len = @intCast(debug_content.len), + }, + .flags = .{}, + .section_name = try wasm.internString(section_name), + }); + } else { + pos = section_end; + } + }, + .type => { + const func_types_len, pos = readLeb(u32, bytes, pos); + for (try ss.func_types.addManyAsSlice(gpa, func_types_len)) |*func_type| { + if (bytes[pos] != std.wasm.function_type) return error.ExpectedFuncType; + pos += 1; + + const params, pos = readBytes(bytes, pos); + const returns, pos = readBytes(bytes, pos); + func_type.* = try wasm.addFuncType(.{ + .params = .fromString(try wasm.internString(params)), + .returns = .fromString(try wasm.internString(returns)), + }); + } + }, + .import => { + const imports_len, pos = readLeb(u32, bytes, pos); + for (0..imports_len) |_| { + const module_name, pos = readBytes(bytes, pos); + const name, pos = readBytes(bytes, pos); + const kind, pos = readEnum(std.wasm.ExternalKind, bytes, pos); + const interned_module_name = try wasm.internString(module_name); + const interned_name = try wasm.internString(name); + switch (kind) { + .function => { + const function, pos = readLeb(u32, bytes, pos); + try ss.func_imports.append(gpa, .{ + .module_name = interned_module_name, + .name = interned_name, + .function_index = @enumFromInt(function), + }); + }, + .memory => { + const limits, pos = readLimits(bytes, pos); + const gop = try wasm.object_memory_imports.getOrPut(gpa, interned_name); + if (gop.found_existing) { + if (gop.value_ptr.module_name != interned_module_name) { + var err = try diags.addErrorWithNotes(2); + try err.addMsg("memory '{s}' mismatching module names", .{name}); + gop.value_ptr.source_location.addNote(&err, "module '{s}' here", .{ + gop.value_ptr.module_name.slice(wasm), + }); + source_location.addNote(&err, "module '{s}' here", .{module_name}); + } + // TODO error for mismatching flags + gop.value_ptr.limits_min = @min(gop.value_ptr.limits_min, limits.min); + gop.value_ptr.limits_max = @max(gop.value_ptr.limits_max, limits.max); + } else { + gop.value_ptr.* = .{ + .module_name = interned_module_name, + .limits_min = limits.min, + .limits_max = limits.max, + .limits_has_max = limits.flags.has_max, + .limits_is_shared = limits.flags.is_shared, + .source_location = source_location, + }; + } + }, + .global => { + const valtype, pos = readEnum(std.wasm.Valtype, bytes, pos); + const mutable = bytes[pos] == 0x01; + pos += 1; + try ss.global_imports.append(gpa, .{ + .name = interned_name, + .valtype = valtype, + .mutable = mutable, + .module_name = interned_module_name, + }); + }, + .table => { + const 
ref_type, pos = readEnum(std.wasm.RefType, bytes, pos); + const limits, pos = readLimits(bytes, pos); + try ss.table_imports.append(gpa, .{ + .name = interned_name, + .module_name = interned_module_name, + .limits_min = limits.min, + .limits_max = limits.max, + .limits_has_max = limits.flags.has_max, + .limits_is_shared = limits.flags.is_shared, + .ref_type = ref_type, + }); + }, } - try assertEnd(reader); - }, - .global => { - for (try readVec(&parser.object.globals, reader, gpa)) |*global| { - global.* = .{ + } + }, + .function => { + const functions_len, pos = readLeb(u32, bytes, pos); + for (try ss.func_type_indexes.addManyAsSlice(gpa, functions_len)) |*func_type_index| { + const i, pos = readLeb(u32, bytes, pos); + func_type_index.* = @enumFromInt(i); + } + }, + .table => { + const tables_len, pos = readLeb(u32, bytes, pos); + for (try wasm.object_tables.addManyAsSlice(gpa, tables_len)) |*table| { + const ref_type, pos = readEnum(std.wasm.RefType, bytes, pos); + const limits, pos = readLimits(bytes, pos); + table.* = .{ + .name = .none, + .module_name = .none, + .flags = .{ + .ref_type = .from(ref_type), + .limits_has_max = limits.flags.has_max, + .limits_is_shared = limits.flags.is_shared, + }, + .limits_min = limits.min, + .limits_max = limits.max, + }; + } + }, + .memory => { + const memories_len, pos = readLeb(u32, bytes, pos); + for (try wasm.object_memories.addManyAsSlice(gpa, memories_len)) |*memory| { + const limits, pos = readLimits(bytes, pos); + memory.* = .{ + .name = .none, + .flags = .{ + .limits_has_max = limits.flags.has_max, + .limits_is_shared = limits.flags.is_shared, + }, + .limits_min = limits.min, + .limits_max = limits.max, + }; + } + }, + .global => { + if (global_section_index != null) + return diags.failParse(path, "object has more than one global section", .{}); + global_section_index = section_index; + + const section_start = pos; + const globals_len, pos = readLeb(u32, bytes, pos); + for (try wasm.object_globals.addManyAsSlice(gpa, globals_len)) |*global| { + const valtype, pos = readEnum(std.wasm.Valtype, bytes, pos); + const mutable = bytes[pos] == 0x01; + pos += 1; + const init_start = pos; + const expr, pos = try readInit(wasm, bytes, pos); + global.* = .{ + .name = .none, + .flags = .{ .global_type = .{ - .valtype = try readEnum(std.wasm.Valtype, reader), - .mutable = (try reader.readByte()) == 0x01, + .valtype = .from(valtype), + .mutable = mutable, }, - .init = try readInit(reader), - }; - } - try assertEnd(reader); - }, - .@"export" => { - for (try readVec(&parser.object.exports, reader, gpa)) |*exp| { - const name_len = try readLeb(u32, reader); - const name = try gpa.alloc(u8, name_len); - defer gpa.free(name); - try reader.readNoEof(name); - exp.* = .{ - .name = try wasm.internString(name), - .kind = try readEnum(std.wasm.ExternalKind, reader), - .index = try readLeb(u32, reader), - }; + }, + .expr = expr, + .object_index = object_index, + .offset = @intCast(init_start - section_start), + .size = @intCast(pos - init_start), + }; + } + }, + .@"export" => { + const exports_len, pos = readLeb(u32, bytes, pos); + // Read into scratch space, and then later add this data as if + // it were extra symbol table entries, but allow merging with + // existing symbol table data if the name matches. 
+ for (try ss.exports.addManyAsSlice(gpa, exports_len)) |*exp| { + const name, pos = readBytes(bytes, pos); + const kind: std.wasm.ExternalKind = @enumFromInt(bytes[pos]); + pos += 1; + const index, pos = readLeb(u32, bytes, pos); + exp.* = .{ + .name = try wasm.internString(name), + .pointee = switch (kind) { + .function => .{ .function = @enumFromInt(functions_start + (index - ss.func_imports.items.len)) }, + .table => .{ .table = @enumFromInt(tables_start + (index - ss.table_imports.items.len)) }, + .memory => .{ .memory = @enumFromInt(memories_start + index) }, + .global => .{ .global = @enumFromInt(globals_start + (index - ss.global_imports.items.len)) }, + }, + }; + } + }, + .start => { + const index, pos = readLeb(u32, bytes, pos); + start_function = @enumFromInt(functions_start + index); + }, + .element => { + log.warn("unimplemented: element section in {} {?s}", .{ path, archive_member_name }); + pos = section_end; + }, + .code => { + if (code_section_index != null) + return diags.failParse(path, "object has more than one code section", .{}); + code_section_index = section_index; + + const start = pos; + const count, pos = readLeb(u32, bytes, pos); + for (try wasm.object_functions.addManyAsSlice(gpa, count)) |*elem| { + const code_len, pos = readLeb(u32, bytes, pos); + const offset: u32 = @intCast(pos - start); + const payload = try wasm.addRelocatableDataPayload(bytes[pos..][0..code_len]); + pos += code_len; + elem.* = .{ + .flags = .{}, // populated from symbol table + .name = .none, // populated from symbol table + .type_index = undefined, // populated from func_types + .code = payload, + .offset = offset, + .object_index = object_index, + }; + } + }, + .data => { + if (data_section_index != null) + return diags.failParse(path, "object has more than one data section", .{}); + data_section_index = section_index; + + const section_start = pos; + const count, pos = readLeb(u32, bytes, pos); + for (try wasm.object_data_segments.addManyAsSlice(gpa, count)) |*elem| { + const flags, pos = readEnum(DataSegmentFlags, bytes, pos); + if (flags == .active_memidx) { + const memidx, pos = readLeb(u32, bytes, pos); + if (memidx != 0) return diags.failParse(path, "data section uses mem index {d}", .{memidx}); } - try assertEnd(reader); - }, - .start => { - parser.object.start = try readLeb(u32, reader); - try assertEnd(reader); - }, - .element => { - for (try readVec(&parser.object.elements, reader, gpa)) |*elem| { - elem.table_index = try readLeb(u32, reader); - elem.offset = try readInit(reader); + //const expr, pos = if (flags != .passive) try readInit(wasm, bytes, pos) else .{ .none, pos }; + if (flags != .passive) pos = try skipInit(bytes, pos); + const data_len, pos = readLeb(u32, bytes, pos); + const segment_start = pos; + const payload = try wasm.addRelocatableDataPayload(bytes[pos..][0..data_len]); + pos += data_len; + elem.* = .{ + .payload = payload, + .name = .none, // Populated from segment_info + .flags = .{ + .is_passive = flags == .passive, + }, // Remainder populated from segment_info + .offset = @intCast(segment_start - section_start), + .object_index = object_index, + }; + } + }, + else => pos = section_end, + } + if (pos != section_end) return error.MalformedSection; + } + if (!saw_linking_section) return error.MissingLinkingSection; - for (try readVec(&elem.func_indexes, reader, gpa)) |*idx| { - idx.* = try readLeb(u32, reader); - } - } - try assertEnd(reader); + const target_features = comp.root_mod.resolved_target.result.cpu.features; + + if (has_tls) { + if 
(!std.Target.wasm.featureSetHas(target_features, .atomics)) + return diags.failParse(path, "object has TLS segment but target CPU feature atomics is disabled", .{}); + if (!std.Target.wasm.featureSetHas(target_features, .bulk_memory)) + return diags.failParse(path, "object has TLS segment but target CPU feature bulk_memory is disabled", .{}); + } + + const features = opt_features orelse return error.MissingFeatures; + for (features.slice(wasm)) |feat| { + log.debug("feature: {s}{s}", .{ @tagName(feat.prefix), @tagName(feat.tag) }); + switch (feat.prefix) { + .invalid => unreachable, + .@"-" => switch (feat.tag) { + .@"shared-mem" => if (comp.config.shared_memory) { + return diags.failParse(path, "object forbids shared-mem but compilation enables it", .{}); }, - .code => { - const start = reader.context.bytes_left; - var index: u32 = 0; - const count = try readLeb(u32, reader); - const imported_function_count = parser.object.imported_functions_count; - var relocatable_data = try std.ArrayList(RelocatableData).initCapacity(gpa, count); - defer relocatable_data.deinit(); - while (index < count) : (index += 1) { - const code_len = try readLeb(u32, reader); - const offset = @as(u32, @intCast(start - reader.context.bytes_left)); - const data = try gpa.alloc(u8, code_len); - errdefer gpa.free(data); - try reader.readNoEof(data); - relocatable_data.appendAssumeCapacity(.{ - .type = .code, - .data = data.ptr, - .size = code_len, - .index = imported_function_count + index, - .offset = offset, - .section_index = section_index, - }); + else => { + const f = feat.tag.toCpuFeature().?; + if (std.Target.wasm.featureSetHas(target_features, f)) { + return diags.failParse( + path, + "object forbids {s} but specified target features include {s}", + .{ @tagName(feat.tag), @tagName(f) }, + ); } - try parser.object.relocatable_data.put(gpa, .code, try relocatable_data.toOwnedSlice()); }, - .data => { - const start = reader.context.bytes_left; - var index: u32 = 0; - const count = try readLeb(u32, reader); - var relocatable_data = try std.ArrayList(RelocatableData).initCapacity(gpa, count); - defer relocatable_data.deinit(); - while (index < count) : (index += 1) { - const flags = try readLeb(u32, reader); - const data_offset = try readInit(reader); - _ = flags; // TODO: Do we need to check flags to detect passive/active memory? 
- _ = data_offset; - const data_len = try readLeb(u32, reader); - const offset = @as(u32, @intCast(start - reader.context.bytes_left)); - const data = try gpa.alloc(u8, data_len); - errdefer gpa.free(data); - try reader.readNoEof(data); - relocatable_data.appendAssumeCapacity(.{ - .type = .data, - .data = data.ptr, - .size = data_len, - .index = index, - .offset = offset, - .section_index = section_index, - }); + }, + .@"+", .@"=" => switch (feat.tag) { + .@"shared-mem" => if (!comp.config.shared_memory) { + return diags.failParse(path, "object requires shared-mem but compilation disables it", .{}); + }, + else => { + const f = feat.tag.toCpuFeature().?; + if (!std.Target.wasm.featureSetHas(target_features, f)) { + return diags.failParse( + path, + "object requires {s} but specified target features exclude {s}", + .{ @tagName(feat.tag), @tagName(f) }, + ); } - try parser.object.relocatable_data.put(gpa, .data, try relocatable_data.toOwnedSlice()); }, - else => try parser.reader.reader().skipBytes(len, .{}), - } - } else |err| switch (err) { - error.EndOfStream => {}, // finished parsing the file - else => |e| return e, - } - if (!saw_linking_section) return error.MissingLinkingSection; - } - - /// Based on the "features" custom section, parses it into a list of - /// features that tell the linker what features were enabled and may be mandatory - /// to be able to link. - /// Logs an info message when an undefined feature is detected. - fn parseFeatures(parser: *Parser, gpa: Allocator) !void { - const diags = &parser.wasm.base.comp.link_diags; - const reader = parser.reader.reader(); - for (try readVec(&parser.object.features, reader, gpa)) |*feature| { - const prefix = try readEnum(Wasm.Feature.Prefix, reader); - const name_len = try leb.readUleb128(u32, reader); - const name = try gpa.alloc(u8, name_len); - defer gpa.free(name); - try reader.readNoEof(name); - - const tag = Wasm.known_features.get(name) orelse { - return diags.failParse(parser.object.path, "object file contains unknown feature: {s}", .{name}); - }; - feature.* = .{ - .prefix = prefix, - .tag = tag, - }; - } - } - - /// Parses a "reloc" custom section into a list of relocations. - /// The relocations are mapped into `Object` where the key is the section - /// they apply to. - fn parseRelocations(parser: *Parser, gpa: Allocator) !void { - const reader = parser.reader.reader(); - const section = try leb.readUleb128(u32, reader); - const count = try leb.readUleb128(u32, reader); - const relocations = try gpa.alloc(Wasm.Relocation, count); - errdefer gpa.free(relocations); - - log.debug("Found {d} relocations for section ({d})", .{ - count, - section, - }); - - for (relocations) |*relocation| { - const rel_type = try reader.readByte(); - const rel_type_enum = std.meta.intToEnum(Wasm.Relocation.RelocationType, rel_type) catch return error.MalformedSection; - relocation.* = .{ - .relocation_type = rel_type_enum, - .offset = try leb.readUleb128(u32, reader), - .index = try leb.readUleb128(u32, reader), - .addend = if (rel_type_enum.addendIsPresent()) try leb.readIleb128(i32, reader) else 0, - }; - log.debug("Found relocation: type({s}) offset({d}) index({d}) addend({?d})", .{ - @tagName(relocation.relocation_type), - relocation.offset, - relocation.index, - relocation.addend, - }); + }, } - - try parser.object.relocations.putNoClobber(gpa, section, relocations); } - /// Parses the "linking" custom section. Versions that are not - /// supported will be an error. 
`payload_size` is required to be able - /// to calculate the subsections we need to parse, as that data is not - /// available within the section itparser. - fn parseMetadata(parser: *Parser, gpa: Allocator, payload_size: usize) !void { - var limited = std.io.limitedReader(parser.reader.reader(), payload_size); - const limited_reader = limited.reader(); - - const version = try leb.readUleb128(u32, limited_reader); - log.debug("Link meta data version: {d}", .{version}); - if (version != 2) return error.UnsupportedVersion; - - while (limited.bytes_left > 0) { - try parser.parseSubsection(gpa, limited_reader); - } + // Apply function type information. + for (ss.func_type_indexes.items, wasm.object_functions.items[functions_start..]) |func_type, *func| { + func.type_index = func_type.ptr(ss).*; } - /// Parses a `spec.Subsection`. - /// The `reader` param for this is to provide a `LimitedReader`, which allows - /// us to only read until a max length. - /// - /// `parser` is used to provide access to other sections that may be needed, - /// such as access to the `import` section to find the name of a symbol. - fn parseSubsection(parser: *Parser, gpa: Allocator, reader: anytype) !void { - const wasm = parser.wasm; - const sub_type = try leb.readUleb128(u8, reader); - log.debug("Found subsection: {s}", .{@tagName(@as(Wasm.SubsectionType, @enumFromInt(sub_type)))}); - const payload_len = try leb.readUleb128(u32, reader); - if (payload_len == 0) return; - - var limited = std.io.limitedReader(reader, payload_len); - const limited_reader = limited.reader(); - - // every subsection contains a 'count' field - const count = try leb.readUleb128(u32, limited_reader); - - switch (@as(Wasm.SubsectionType, @enumFromInt(sub_type))) { - .WASM_SEGMENT_INFO => { - const segments = try gpa.alloc(Wasm.NamedSegment, count); - errdefer gpa.free(segments); - for (segments) |*segment| { - const name_len = try leb.readUleb128(u32, reader); - const name = try gpa.alloc(u8, name_len); - errdefer gpa.free(name); - try reader.readNoEof(name); - segment.* = .{ - .name = name, - .alignment = @enumFromInt(try leb.readUleb128(u32, reader)), - .flags = try leb.readUleb128(u32, reader), - }; - log.debug("Found segment: {s} align({d}) flags({b})", .{ - segment.name, - segment.alignment, - segment.flags, + // Apply symbol table information. + for (ss.symbol_table.items) |symbol| switch (symbol.pointee) { + .function_import => |index| { + const ptr = index.ptr(ss); + const name = symbol.name.unwrap() orelse ptr.name; + if (symbol.flags.binding == .local) { + diags.addParseError(path, "local symbol '{s}' references import", .{name.slice(wasm)}); + continue; + } + const gop = try wasm.object_function_imports.getOrPut(gpa, name); + const fn_ty_index = ptr.function_index.ptr(ss).*; + if (gop.found_existing) { + if (gop.value_ptr.type != fn_ty_index) { + var err = try diags.addErrorWithNotes(2); + try err.addMsg("symbol '{s}' mismatching function signatures", .{name.slice(wasm)}); + gop.value_ptr.source_location.addNote(&err, "imported as {} here", .{ + gop.value_ptr.type.fmt(wasm), }); - - // support legacy object files that specified being TLS by the name instead of the TLS flag. - if (!segment.isTLS() and (std.mem.startsWith(u8, segment.name, ".tdata") or std.mem.startsWith(u8, segment.name, ".tbss"))) { - // set the flag so we can simply check for the flag in the rest of the linker. 
- segment.flags |= @intFromEnum(Wasm.NamedSegment.Flags.WASM_SEG_FLAG_TLS); + source_location.addNote(&err, "imported as {} here", .{fn_ty_index.fmt(wasm)}); + continue; + } + if (gop.value_ptr.module_name != ptr.module_name.toOptional()) { + var err = try diags.addErrorWithNotes(2); + try err.addMsg("symbol '{s}' mismatching module names", .{name.slice(wasm)}); + if (gop.value_ptr.module_name.slice(wasm)) |module_name| { + gop.value_ptr.source_location.addNote(&err, "module '{s}' here", .{module_name}); + } else { + gop.value_ptr.source_location.addNote(&err, "no module here", .{}); } + source_location.addNote(&err, "module '{s}' here", .{ptr.module_name.slice(wasm)}); + continue; } - parser.object.segment_info = segments; - }, - .WASM_INIT_FUNCS => { - const funcs = try gpa.alloc(Wasm.InitFunc, count); - errdefer gpa.free(funcs); - for (funcs) |*func| { - func.* = .{ - .priority = try leb.readUleb128(u32, reader), - .symbol_index = try leb.readUleb128(u32, reader), - }; - log.debug("Found function - prio: {d}, index: {d}", .{ func.priority, func.symbol_index }); + if (gop.value_ptr.name != ptr.name) { + var err = try diags.addErrorWithNotes(2); + try err.addMsg("symbol '{s}' mismatching import names", .{name.slice(wasm)}); + gop.value_ptr.source_location.addNote(&err, "imported as '{s}' here", .{gop.value_ptr.name.slice(wasm)}); + source_location.addNote(&err, "imported as '{s}' here", .{ptr.name.slice(wasm)}); + continue; } - parser.object.init_funcs = funcs; - }, - .WASM_COMDAT_INFO => { - const comdats = try gpa.alloc(Wasm.Comdat, count); - errdefer gpa.free(comdats); - for (comdats) |*comdat| { - const name_len = try leb.readUleb128(u32, reader); - const name = try gpa.alloc(u8, name_len); - errdefer gpa.free(name); - try reader.readNoEof(name); - - const flags = try leb.readUleb128(u32, reader); - if (flags != 0) { - return error.UnexpectedValue; - } - - const symbol_count = try leb.readUleb128(u32, reader); - const symbols = try gpa.alloc(Wasm.ComdatSym, symbol_count); - errdefer gpa.free(symbols); - for (symbols) |*symbol| { - symbol.* = .{ - .kind = @as(Wasm.ComdatSym.Type, @enumFromInt(try leb.readUleb128(u8, reader))), - .index = try leb.readUleb128(u32, reader), - }; + } else { + gop.value_ptr.* = .{ + .flags = symbol.flags, + .module_name = ptr.module_name.toOptional(), + .name = ptr.name, + .source_location = source_location, + .resolution = .unresolved, + .type = fn_ty_index, + }; + } + }, + .global_import => |index| { + const ptr = index.ptr(ss); + const name = symbol.name.unwrap() orelse ptr.name; + if (symbol.flags.binding == .local) { + diags.addParseError(path, "local symbol '{s}' references import", .{name.slice(wasm)}); + continue; + } + const gop = try wasm.object_global_imports.getOrPut(gpa, name); + if (gop.found_existing) { + const existing_ty = gop.value_ptr.type(); + if (ptr.valtype != existing_ty.valtype) { + var err = try diags.addErrorWithNotes(2); + try err.addMsg("symbol '{s}' mismatching global types", .{name.slice(wasm)}); + gop.value_ptr.source_location.addNote(&err, "type {s} here", .{@tagName(existing_ty.valtype)}); + source_location.addNote(&err, "type {s} here", .{@tagName(ptr.valtype)}); + continue; + } + if (ptr.mutable != existing_ty.mutable) { + var err = try diags.addErrorWithNotes(2); + try err.addMsg("symbol '{s}' mismatching global mutability", .{name.slice(wasm)}); + gop.value_ptr.source_location.addNote(&err, "{s} here", .{ + if (existing_ty.mutable) "mutable" else "not mutable", + }); + source_location.addNote(&err, "{s} here", .{ + if 
(ptr.mutable) "mutable" else "not mutable", + }); + continue; + } + if (gop.value_ptr.module_name != ptr.module_name.toOptional()) { + var err = try diags.addErrorWithNotes(2); + try err.addMsg("symbol '{s}' mismatching module names", .{name.slice(wasm)}); + if (gop.value_ptr.module_name.slice(wasm)) |module_name| { + gop.value_ptr.source_location.addNote(&err, "module '{s}' here", .{module_name}); + } else { + gop.value_ptr.source_location.addNote(&err, "no module here", .{}); } - - comdat.* = .{ - .name = name, - .flags = flags, - .symbols = symbols, - }; + source_location.addNote(&err, "module '{s}' here", .{ptr.module_name.slice(wasm)}); + continue; } - - parser.object.comdat_info = comdats; - }, - .WASM_SYMBOL_TABLE => { - var symbols = try std.ArrayList(Symbol).initCapacity(gpa, count); - - var i: usize = 0; - while (i < count) : (i += 1) { - const symbol = symbols.addOneAssumeCapacity(); - symbol.* = try parser.parseSymbol(gpa, reader); - log.debug("Found symbol: type({s}) name({s}) flags(0b{b:0>8})", .{ - @tagName(symbol.tag), - wasm.stringSlice(symbol.name), - symbol.flags, + if (gop.value_ptr.name != ptr.name) { + var err = try diags.addErrorWithNotes(2); + try err.addMsg("symbol '{s}' mismatching import names", .{name.slice(wasm)}); + gop.value_ptr.source_location.addNote(&err, "imported as '{s}' here", .{gop.value_ptr.name.slice(wasm)}); + source_location.addNote(&err, "imported as '{s}' here", .{ptr.name.slice(wasm)}); + continue; + } + } else { + gop.value_ptr.* = .{ + .flags = symbol.flags, + .module_name = ptr.module_name.toOptional(), + .name = ptr.name, + .source_location = source_location, + .resolution = .unresolved, + }; + gop.value_ptr.flags.global_type = .{ + .valtype = .from(ptr.valtype), + .mutable = ptr.mutable, + }; + } + }, + .table_import => |index| { + const ptr = index.ptr(ss); + const name = symbol.name.unwrap() orelse ptr.name; + if (symbol.flags.binding == .local) { + diags.addParseError(path, "local symbol '{s}' references import", .{name.slice(wasm)}); + continue; + } + const gop = try wasm.object_table_imports.getOrPut(gpa, name); + if (gop.found_existing) { + const existing_reftype = gop.value_ptr.flags.ref_type.to(); + if (ptr.ref_type != existing_reftype) { + var err = try diags.addErrorWithNotes(2); + try err.addMsg("symbol '{s}' mismatching table reftypes", .{name.slice(wasm)}); + gop.value_ptr.source_location.addNote(&err, "{s} here", .{@tagName(existing_reftype)}); + source_location.addNote(&err, "{s} here", .{@tagName(ptr.ref_type)}); + continue; + } + if (gop.value_ptr.module_name != ptr.module_name) { + var err = try diags.addErrorWithNotes(2); + try err.addMsg("symbol '{s}' mismatching module names", .{name.slice(wasm)}); + gop.value_ptr.source_location.addNote(&err, "module '{s}' here", .{ + gop.value_ptr.module_name.slice(wasm), }); + source_location.addNote(&err, "module '{s}' here", .{ptr.module_name.slice(wasm)}); + continue; } - - // we found all symbols, check for indirect function table - // in case of an MVP object file - if (try parser.object.checkLegacyIndirectFunctionTable(parser.wasm)) |symbol| { - try symbols.append(symbol); - log.debug("Found legacy indirect function table. 
Created symbol", .{}); + if (gop.value_ptr.name != ptr.name) { + var err = try diags.addErrorWithNotes(2); + try err.addMsg("symbol '{s}' mismatching import names", .{name.slice(wasm)}); + gop.value_ptr.source_location.addNote(&err, "imported as '{s}' here", .{gop.value_ptr.name.slice(wasm)}); + source_location.addNote(&err, "imported as '{s}' here", .{ptr.name.slice(wasm)}); + continue; } - - // Not all debug sections may be represented by a symbol, for those sections - // we manually create a symbol. - if (parser.object.relocatable_data.get(.custom)) |custom_sections| { - for (custom_sections) |*data| { - if (!data.represented) { - const name = wasm.castToString(data.index); - try symbols.append(.{ - .name = name, - .flags = @intFromEnum(Symbol.Flag.WASM_SYM_BINDING_LOCAL), - .tag = .section, - .virtual_address = 0, - .index = data.section_index, - }); - data.represented = true; - log.debug("Created synthetic custom section symbol for '{s}'", .{ - wasm.stringSlice(name), - }); - } - } + if (symbol.flags.binding == .strong) gop.value_ptr.flags.binding = .strong; + if (!symbol.flags.visibility_hidden) gop.value_ptr.flags.visibility_hidden = false; + if (symbol.flags.no_strip) gop.value_ptr.flags.no_strip = true; + } else { + gop.value_ptr.* = .{ + .flags = symbol.flags, + .module_name = ptr.module_name, + .name = ptr.name, + .source_location = source_location, + .resolution = .unresolved, + .limits_min = ptr.limits_min, + .limits_max = ptr.limits_max, + }; + gop.value_ptr.flags.limits_has_max = ptr.limits_has_max; + gop.value_ptr.flags.limits_is_shared = ptr.limits_is_shared; + gop.value_ptr.flags.ref_type = .from(ptr.ref_type); + } + }, + .data_import => { + const name = symbol.name.unwrap().?; + if (symbol.flags.binding == .local) { + diags.addParseError(path, "local symbol '{s}' references import", .{name.slice(wasm)}); + continue; + } + const gop = try wasm.object_data_imports.getOrPut(gpa, name); + if (!gop.found_existing) gop.value_ptr.* = .{ + .flags = symbol.flags, + .source_location = source_location, + .resolution = .unresolved, + }; + }, + .function => |index| { + assert(!symbol.flags.undefined); + const ptr = index.ptr(wasm); + ptr.name = symbol.name; + ptr.flags = symbol.flags; + if (symbol.flags.binding == .local) continue; // No participation in symbol resolution. + const name = symbol.name.unwrap().?; + const gop = try wasm.object_function_imports.getOrPut(gpa, name); + if (gop.found_existing) { + if (gop.value_ptr.type != ptr.type_index) { + var err = try diags.addErrorWithNotes(2); + try err.addMsg("function signature mismatch: {s}", .{name.slice(wasm)}); + gop.value_ptr.source_location.addNote(&err, "exported as {} here", .{ + ptr.type_index.fmt(wasm), + }); + const word = if (gop.value_ptr.resolution == .unresolved) "imported" else "exported"; + source_location.addNote(&err, "{s} as {} here", .{ word, gop.value_ptr.type.fmt(wasm) }); + continue; } + if (gop.value_ptr.resolution == .unresolved or gop.value_ptr.flags.binding == .weak) { + // Intentional: if they're both weak, take the last one. + gop.value_ptr.source_location = source_location; + gop.value_ptr.module_name = host_name; + gop.value_ptr.resolution = .fromObjectFunction(wasm, index); + gop.value_ptr.flags = symbol.flags; + continue; + } + if (ptr.flags.binding == .weak) { + // Keep the existing one. 
+ continue; + } + var err = try diags.addErrorWithNotes(2); + try err.addMsg("symbol collision: {s}", .{name.slice(wasm)}); + gop.value_ptr.source_location.addNote(&err, "exported as {} here", .{ptr.type_index.fmt(wasm)}); + source_location.addNote(&err, "exported as {} here", .{gop.value_ptr.type.fmt(wasm)}); + continue; + } else { + gop.value_ptr.* = .{ + .flags = symbol.flags, + .module_name = host_name, + .name = name, + .source_location = source_location, + .resolution = .fromObjectFunction(wasm, index), + .type = ptr.type_index, + }; + } + }, + .global => |index| { + assert(!symbol.flags.undefined); + const ptr = index.ptr(wasm); + ptr.name = symbol.name; + ptr.flags = symbol.flags; + if (symbol.flags.binding == .local) continue; // No participation in symbol resolution. + const name = symbol.name.unwrap().?; + const new_ty = ptr.type(); + const gop = try wasm.object_global_imports.getOrPut(gpa, name); + if (gop.found_existing) { + const existing_ty = gop.value_ptr.type(); + if (new_ty.valtype != existing_ty.valtype) { + var err = try diags.addErrorWithNotes(2); + try err.addMsg("symbol '{s}' mismatching global types", .{name.slice(wasm)}); + gop.value_ptr.source_location.addNote(&err, "type {s} here", .{@tagName(existing_ty.valtype)}); + source_location.addNote(&err, "type {s} here", .{@tagName(new_ty.valtype)}); + continue; + } + if (new_ty.mutable != existing_ty.mutable) { + var err = try diags.addErrorWithNotes(2); + try err.addMsg("symbol '{s}' mismatching global mutability", .{name.slice(wasm)}); + gop.value_ptr.source_location.addNote(&err, "{s} here", .{ + if (existing_ty.mutable) "mutable" else "not mutable", + }); + source_location.addNote(&err, "{s} here", .{ + if (new_ty.mutable) "mutable" else "not mutable", + }); + continue; + } + if (gop.value_ptr.resolution == .unresolved or gop.value_ptr.flags.binding == .weak) { + // Intentional: if they're both weak, take the last one. + gop.value_ptr.source_location = source_location; + gop.value_ptr.module_name = host_name; + gop.value_ptr.resolution = .fromObjectGlobal(wasm, index); + gop.value_ptr.flags = symbol.flags; + continue; + } + if (ptr.flags.binding == .weak) { + // Keep the existing one. + continue; + } + var err = try diags.addErrorWithNotes(2); + try err.addMsg("symbol collision: {s}", .{name.slice(wasm)}); + gop.value_ptr.source_location.addNote(&err, "exported as {s} here", .{@tagName(existing_ty.valtype)}); + source_location.addNote(&err, "exported as {s} here", .{@tagName(new_ty.valtype)}); + continue; + } else { + gop.value_ptr.* = .{ + .flags = symbol.flags, + .module_name = .none, + .name = name, + .source_location = source_location, + .resolution = .fromObjectGlobal(wasm, index), + }; + gop.value_ptr.flags.global_type = .{ + .valtype = .from(new_ty.valtype), + .mutable = new_ty.mutable, + }; + } + }, + .table => |i| { + assert(!symbol.flags.undefined); + const ptr = i.ptr(wasm); + ptr.name = symbol.name; + ptr.flags = symbol.flags; + }, + .data => |index| { + assert(!symbol.flags.undefined); + const ptr = index.ptr(wasm); + const name = ptr.name; + assert(name.toOptional() == symbol.name); + ptr.flags = symbol.flags; + if (symbol.flags.binding == .local) continue; // No participation in symbol resolution. + const gop = try wasm.object_data_imports.getOrPut(gpa, name); + if (gop.found_existing) { + if (gop.value_ptr.resolution == .unresolved or gop.value_ptr.flags.binding == .weak) { + // Intentional: if they're both weak, take the last one. 
+ gop.value_ptr.source_location = source_location; + gop.value_ptr.resolution = .fromObjectDataIndex(wasm, index); + gop.value_ptr.flags = symbol.flags; + continue; + } + if (ptr.flags.binding == .weak) { + // Keep the existing one. + continue; + } + var err = try diags.addErrorWithNotes(2); + try err.addMsg("symbol collision: {s}", .{name.slice(wasm)}); + gop.value_ptr.source_location.addNote(&err, "exported here", .{}); + source_location.addNote(&err, "exported here", .{}); + continue; + } else { + gop.value_ptr.* = .{ + .flags = symbol.flags, + .source_location = source_location, + .resolution = .fromObjectDataIndex(wasm, index), + }; + } + }, + .section => |i| { + // Name is provided by the section directly; symbol table does not have it. + //const ptr = i.ptr(wasm); + //ptr.flags = symbol.flags; + _ = i; + if (symbol.flags.undefined and symbol.flags.binding == .local) { + const name = symbol.name.slice(wasm).?; + diags.addParseError(path, "local symbol '{s}' references import", .{name}); + } + }, + }; - parser.object.symtable = try symbols.toOwnedSlice(); + // Apply export section info. This is done after the symbol table above so + // that the symbol table can take precedence, overriding the export name. + for (ss.exports.items) |*exp| { + switch (exp.pointee) { + inline .function, .table, .memory, .global => |index| { + const ptr = index.ptr(wasm); + ptr.name = exp.name.toOptional(); + ptr.flags.exported = true; }, } } - /// Parses the symbol information based on its kind, - /// requires access to `Object` to find the name of a symbol when it's - /// an import and flag `WASM_SYM_EXPLICIT_NAME` is not set. - fn parseSymbol(parser: *Parser, gpa: Allocator, reader: anytype) !Symbol { - const wasm = parser.wasm; - const tag: Symbol.Tag = @enumFromInt(try leb.readUleb128(u8, reader)); - const flags = try leb.readUleb128(u32, reader); - var symbol: Symbol = .{ - .flags = flags, - .tag = tag, - .name = undefined, - .index = undefined, - .virtual_address = undefined, + // Apply segment_info. 
+ const data_segments = wasm.object_data_segments.items[data_segment_start..]; + if (data_segments.len != ss.segment_info.items.len) { + return diags.failParse(path, "expected {d} segment_info entries; found {d}", .{ + data_segments.len, ss.segment_info.items.len, + }); + } + for (data_segments, ss.segment_info.items) |*data, info| { + data.name = info.name.toOptional(); + data.flags = .{ + .is_passive = data.flags.is_passive, + .strings = info.flags.strings, + .tls = info.flags.tls, + .retain = info.flags.retain, + .alignment = info.flags.alignment, }; + } - switch (tag) { - .data => { - const name_len = try leb.readUleb128(u32, reader); - const name = try gpa.alloc(u8, name_len); - defer gpa.free(name); - try reader.readNoEof(name); - symbol.name = try wasm.internString(name); - - // Data symbols only have the following fields if the symbol is defined - if (symbol.isDefined()) { - symbol.index = try leb.readUleb128(u32, reader); - // @TODO: We should verify those values - _ = try leb.readUleb128(u32, reader); - _ = try leb.readUleb128(u32, reader); - } - }, - .section => { - symbol.index = try leb.readUleb128(u32, reader); - const section_data = parser.object.relocatable_data.get(.custom).?; - for (section_data) |*data| { - if (data.section_index == symbol.index) { - symbol.name = wasm.castToString(data.index); - data.represented = true; - break; - } - } - }, - else => { - symbol.index = try leb.readUleb128(u32, reader); - const is_undefined = symbol.isUndefined(); - const explicit_name = symbol.hasFlag(.WASM_SYM_EXPLICIT_NAME); - symbol.name = if (!is_undefined or (is_undefined and explicit_name)) name: { - const name_len = try leb.readUleb128(u32, reader); - const name = try gpa.alloc(u8, name_len); - defer gpa.free(name); - try reader.readNoEof(name); - break :name try wasm.internString(name); - } else parser.object.findImport(symbol).name; - }, + // Check for indirect function table in case of an MVP object file. + legacy_indirect_function_table: { + // If there is a symbol for each import table, this is not a legacy object file. + if (ss.table_imports.items.len == table_import_symbol_count) break :legacy_indirect_function_table; + if (table_import_symbol_count != 0) { + return diags.failParse(path, "expected a table entry symbol for each of the {d} table(s), but instead got {d} symbols.", .{ + ss.table_imports.items.len, table_import_symbol_count, + }); } - return symbol; + // MVP object files cannot have any table definitions, only imports + // (for the indirect function table). 
+ const tables = wasm.object_tables.items[tables_start..]; + if (tables.len > 0) { + return diags.failParse(path, "table definition without representing table symbols", .{}); + } + if (ss.table_imports.items.len != 1) { + return diags.failParse(path, "found more than one table import, but no representing table symbols", .{}); + } + const table_import_name = ss.table_imports.items[0].name; + if (table_import_name != wasm.preloaded_strings.__indirect_function_table) { + return diags.failParse(path, "non-indirect function table import '{s}' is missing a corresponding symbol", .{ + table_import_name.slice(wasm), + }); + } + const ptr = wasm.object_table_imports.getPtr(table_import_name).?; + ptr.flags = .{ + .undefined = true, + .no_strip = true, + }; + } + + for (wasm.object_init_funcs.items[init_funcs_start..]) |init_func| { + const func = init_func.function_index.ptr(wasm); + const params = func.type_index.ptr(wasm).params.slice(wasm); + if (params.len != 0) diags.addError("constructor function '{s}' has non-empty parameter list", .{ + func.name.slice(wasm).?, + }); } -}; -/// First reads the count from the reader and then allocate -/// a slice of ptr child's element type. -fn readVec(ptr: anytype, reader: anytype, gpa: Allocator) ![]ElementType(@TypeOf(ptr)) { - const len = try readLeb(u32, reader); - const slice = try gpa.alloc(ElementType(@TypeOf(ptr)), len); - ptr.* = slice; - return slice; + const functions_len: u32 = @intCast(wasm.object_functions.items.len - functions_start); + if (functions_len > 0 and code_section_index == null) + return diags.failParse(path, "code section missing ({d} functions)", .{functions_len}); + + return .{ + .version = version, + .path = path, + .archive_member_name = try wasm.internOptionalString(archive_member_name), + .start_function = start_function, + .features = features, + .functions = .{ + .off = functions_start, + .len = functions_len, + }, + .function_imports = .{ + .off = function_imports_start, + .len = @intCast(wasm.object_function_imports.entries.len - function_imports_start), + }, + .global_imports = .{ + .off = global_imports_start, + .len = @intCast(wasm.object_global_imports.entries.len - global_imports_start), + }, + .table_imports = .{ + .off = table_imports_start, + .len = @intCast(wasm.object_table_imports.entries.len - table_imports_start), + }, + .data_imports = .{ + .off = data_imports_start, + .len = @intCast(wasm.object_data_imports.entries.len - data_imports_start), + }, + .init_funcs = .{ + .off = init_funcs_start, + .len = @intCast(wasm.object_init_funcs.items.len - init_funcs_start), + }, + .comdats = .{ + .off = comdats_start, + .len = @intCast(wasm.object_comdats.items.len - comdats_start), + }, + .custom_segments = .{ + .off = custom_segment_start, + .len = @intCast(wasm.object_custom_segments.entries.len - custom_segment_start), + }, + .code_section_index = code_section_index, + .global_section_index = global_section_index, + .data_section_index = data_section_index, + .is_included = must_link, + }; } -fn ElementType(comptime ptr: type) type { - return meta.Elem(meta.Child(ptr)); +/// Based on the "features" custom section, parses it into a list of +/// features that tell the linker what features were enabled and may be mandatory +/// to be able to link. 
+fn parseFeatures( + wasm: *Wasm, + bytes: []const u8, + start_pos: usize, + path: Path, +) error{ OutOfMemory, LinkFailure }!struct { Wasm.Feature.Set, usize } { + const gpa = wasm.base.comp.gpa; + const diags = &wasm.base.comp.link_diags; + const features_len, var pos = readLeb(u32, bytes, start_pos); + // This temporary allocation could be avoided by using the string_bytes buffer as a scratch space. + const feature_buffer = try gpa.alloc(Wasm.Feature, features_len); + defer gpa.free(feature_buffer); + for (feature_buffer) |*feature| { + const prefix: Wasm.Feature.Prefix = switch (bytes[pos]) { + '-' => .@"-", + '+' => .@"+", + '=' => .@"=", + else => |b| return diags.failParse(path, "invalid feature prefix: 0x{x}", .{b}), + }; + pos += 1; + const name, pos = readBytes(bytes, pos); + const tag = std.meta.stringToEnum(Wasm.Feature.Tag, name) orelse { + return diags.failParse(path, "unrecognized wasm feature in object: {s}", .{name}); + }; + feature.* = .{ + .prefix = prefix, + .tag = tag, + }; + } + std.mem.sortUnstable(Wasm.Feature, feature_buffer, {}, Wasm.Feature.lessThan); + + return .{ + .fromString(try wasm.internString(@ptrCast(feature_buffer))), + pos, + }; } -/// Uses either `readIleb128` or `readUleb128` depending on the -/// signedness of the given type `T`. -/// Asserts `T` is an integer. -fn readLeb(comptime T: type, reader: anytype) !T { - return switch (@typeInfo(T).int.signedness) { - .signed => try leb.readIleb128(T, reader), - .unsigned => try leb.readUleb128(T, reader), +fn readLeb(comptime T: type, bytes: []const u8, pos: usize) struct { T, usize } { + var fbr = std.io.fixedBufferStream(bytes[pos..]); + return .{ + switch (@typeInfo(T).int.signedness) { + .signed => std.leb.readIleb128(T, fbr.reader()) catch unreachable, + .unsigned => std.leb.readUleb128(T, fbr.reader()) catch unreachable, + }, + pos + fbr.pos, }; } -/// Reads an enum type from the given reader. -/// Asserts `T` is an enum -fn readEnum(comptime T: type, reader: anytype) !T { - switch (@typeInfo(T)) { - .@"enum" => |enum_type| return @as(T, @enumFromInt(try readLeb(enum_type.tag_type, reader))), - else => @compileError("T must be an enum. 
Instead was given type " ++ @typeName(T)), - } +fn readBytes(bytes: []const u8, start_pos: usize) struct { []const u8, usize } { + const len, const pos = readLeb(u32, bytes, start_pos); + return .{ + bytes[pos..][0..len], + pos + len, + }; } -fn readLimits(reader: anytype) !std.wasm.Limits { - const flags = try reader.readByte(); - const min = try readLeb(u32, reader); - var limits: std.wasm.Limits = .{ +fn readEnum(comptime T: type, bytes: []const u8, pos: usize) struct { T, usize } { + const Tag = @typeInfo(T).@"enum".tag_type; + const int, const new_pos = readLeb(Tag, bytes, pos); + return .{ @enumFromInt(int), new_pos }; +} + +fn readLimits(bytes: []const u8, start_pos: usize) struct { std.wasm.Limits, usize } { + const flags: std.wasm.Limits.Flags = @bitCast(bytes[start_pos]); + const min, const max_pos = readLeb(u32, bytes, start_pos + 1); + const max, const end_pos = if (flags.has_max) readLeb(u32, bytes, max_pos) else .{ 0, max_pos }; + return .{ .{ .flags = flags, .min = min, - .max = undefined, - }; - if (limits.hasFlag(.WASM_LIMITS_FLAG_HAS_MAX)) { - limits.max = try readLeb(u32, reader); - } - return limits; + .max = max, + }, end_pos }; } -fn readInit(reader: anytype) !std.wasm.InitExpression { - const opcode = try reader.readByte(); - const init_expr: std.wasm.InitExpression = switch (@as(std.wasm.Opcode, @enumFromInt(opcode))) { - .i32_const => .{ .i32_const = try readLeb(i32, reader) }, - .global_get => .{ .global_get = try readLeb(u32, reader) }, - else => @panic("TODO: initexpression for other opcodes"), - }; +fn readInit(wasm: *Wasm, bytes: []const u8, pos: usize) !struct { Wasm.Expr, usize } { + const end_pos = try skipInit(bytes, pos); // one after the end opcode + return .{ try wasm.addExpr(bytes[pos..end_pos]), end_pos }; +} - if ((try readEnum(std.wasm.Opcode, reader)) != .end) return error.MissingEndForExpression; - return init_expr; +pub fn exprEndPos(bytes: []const u8, pos: usize) error{InvalidInitOpcode}!usize { + const opcode = bytes[pos]; + return switch (@as(std.wasm.Opcode, @enumFromInt(opcode))) { + .i32_const => readLeb(i32, bytes, pos + 1)[1], + .i64_const => readLeb(i64, bytes, pos + 1)[1], + .f32_const => pos + 5, + .f64_const => pos + 9, + .global_get => readLeb(u32, bytes, pos + 1)[1], + else => return error.InvalidInitOpcode, + }; } -fn assertEnd(reader: anytype) !void { - var buf: [1]u8 = undefined; - const len = try reader.read(&buf); - if (len != 0) return error.MalformedSection; - if (reader.context.bytes_left != 0) return error.MalformedSection; +fn skipInit(bytes: []const u8, pos: usize) !usize { + const end_pos = try exprEndPos(bytes, pos); + const op, const final_pos = readEnum(std.wasm.Opcode, bytes, end_pos); + if (op != .end) return error.InitExprMissingEnd; + return final_pos; } diff --git a/src/link/Wasm/Symbol.zig b/src/link/Wasm/Symbol.zig deleted file mode 100644 index b60b73c46fd1..000000000000 --- a/src/link/Wasm/Symbol.zig +++ /dev/null @@ -1,210 +0,0 @@ -//! Represents a WebAssembly symbol. Containing all of its properties, -//! as well as providing helper methods to determine its functionality -//! and how it will/must be linked. -//! The name of the symbol can be found by providing the offset, found -//! on the `name` field, to a string table in the wasm binary or object file. - -/// Bitfield containings flags for a symbol -/// Can contain any of the flags defined in `Flag` -flags: u32, -/// Symbol name, when the symbol is undefined the name will be taken from the import. -/// Note: This is an index into the wasm string table. 
-name: wasm.String, -/// Index into the list of objects based on set `tag` -/// NOTE: This will be set to `undefined` when `tag` is `data` -/// and the symbol is undefined. -index: u32, -/// Represents the kind of the symbol, such as a function or global. -tag: Tag, -/// Contains the virtual address of the symbol, relative to the start of its section. -/// This differs from the offset of an `Atom` which is relative to the start of a segment. -virtual_address: u32, - -/// Represents a symbol index where `null` represents an invalid index. -pub const Index = enum(u32) { - null, - _, -}; - -pub const Tag = enum { - function, - data, - global, - section, - event, - table, - /// synthetic kind used by the wasm linker during incremental compilation - /// to notate a symbol has been freed, but still lives in the symbol list. - dead, - undefined, - - /// From a given symbol tag, returns the `ExternalType` - /// Asserts the given tag can be represented as an external type. - pub fn externalType(tag: Tag) std.wasm.ExternalKind { - return switch (tag) { - .function => .function, - .global => .global, - .data => unreachable, // Data symbols will generate a global - .section => unreachable, // Not an external type - .event => unreachable, // Not an external type - .dead => unreachable, // Dead symbols should not be referenced - .undefined => unreachable, - .table => .table, - }; - } -}; - -pub const Flag = enum(u32) { - /// Indicates a weak symbol. - /// When linking multiple modules defining the same symbol, all weak definitions are discarded - /// in favourite of the strong definition. When no strong definition exists, all weak but one definition is discarded. - /// If multiple definitions remain, we get an error: symbol collision. - WASM_SYM_BINDING_WEAK = 0x1, - /// Indicates a local, non-exported, non-module-linked symbol. - /// The names of local symbols are not required to be unique, unlike non-local symbols. - WASM_SYM_BINDING_LOCAL = 0x2, - /// Represents the binding of a symbol, indicating if it's local or not, and weak or not. - WASM_SYM_BINDING_MASK = 0x3, - /// Indicates a hidden symbol. Hidden symbols will not be exported to the link result, but may - /// link to other modules. - WASM_SYM_VISIBILITY_HIDDEN = 0x4, - /// Indicates an undefined symbol. For non-data symbols, this must match whether the symbol is - /// an import or is defined. For data symbols however, determines whether a segment is specified. - WASM_SYM_UNDEFINED = 0x10, - /// Indicates a symbol of which its intention is to be exported from the wasm module to the host environment. - /// This differs from the visibility flag as this flag affects the static linker. - WASM_SYM_EXPORTED = 0x20, - /// Indicates the symbol uses an explicit symbol name, rather than reusing the name from a wasm import. - /// Allows remapping imports from foreign WASM modules into local symbols with a different name. - WASM_SYM_EXPLICIT_NAME = 0x40, - /// Indicates the symbol is to be included in the linker output, regardless of whether it is used or has any references to it. - WASM_SYM_NO_STRIP = 0x80, - /// Indicates a symbol is TLS - WASM_SYM_TLS = 0x100, - /// Zig specific flag. Uses the most significant bit of the flag to annotate whether a symbol is - /// alive or not. Dead symbols are allowed to be garbage collected. 
- alive = 0x80000000, -}; - -/// Verifies if the given symbol should be imported from the -/// host environment or not -pub fn requiresImport(symbol: Symbol) bool { - if (symbol.tag == .data) return false; - if (!symbol.isUndefined()) return false; - if (symbol.isWeak()) return false; - // if (symbol.isDefined() and symbol.isWeak()) return true; //TODO: Only when building shared lib - - return true; -} - -/// Marks a symbol as 'alive', ensuring the garbage collector will not collect the trash. -pub fn mark(symbol: *Symbol) void { - symbol.flags |= @intFromEnum(Flag.alive); -} - -pub fn unmark(symbol: *Symbol) void { - symbol.flags &= ~@intFromEnum(Flag.alive); -} - -pub fn isAlive(symbol: Symbol) bool { - return symbol.flags & @intFromEnum(Flag.alive) != 0; -} - -pub fn isDead(symbol: Symbol) bool { - return symbol.flags & @intFromEnum(Flag.alive) == 0; -} - -pub fn isTLS(symbol: Symbol) bool { - return symbol.flags & @intFromEnum(Flag.WASM_SYM_TLS) != 0; -} - -pub fn hasFlag(symbol: Symbol, flag: Flag) bool { - return symbol.flags & @intFromEnum(flag) != 0; -} - -pub fn setFlag(symbol: *Symbol, flag: Flag) void { - symbol.flags |= @intFromEnum(flag); -} - -pub fn isUndefined(symbol: Symbol) bool { - return symbol.flags & @intFromEnum(Flag.WASM_SYM_UNDEFINED) != 0; -} - -pub fn setUndefined(symbol: *Symbol, is_undefined: bool) void { - if (is_undefined) { - symbol.setFlag(.WASM_SYM_UNDEFINED); - } else { - symbol.flags &= ~@intFromEnum(Flag.WASM_SYM_UNDEFINED); - } -} - -pub fn setGlobal(symbol: *Symbol, is_global: bool) void { - if (is_global) { - symbol.flags &= ~@intFromEnum(Flag.WASM_SYM_BINDING_LOCAL); - } else { - symbol.setFlag(.WASM_SYM_BINDING_LOCAL); - } -} - -pub fn isDefined(symbol: Symbol) bool { - return !symbol.isUndefined(); -} - -pub fn isVisible(symbol: Symbol) bool { - return symbol.flags & @intFromEnum(Flag.WASM_SYM_VISIBILITY_HIDDEN) == 0; -} - -pub fn isLocal(symbol: Symbol) bool { - return symbol.flags & @intFromEnum(Flag.WASM_SYM_BINDING_LOCAL) != 0; -} - -pub fn isGlobal(symbol: Symbol) bool { - return symbol.flags & @intFromEnum(Flag.WASM_SYM_BINDING_LOCAL) == 0; -} - -pub fn isHidden(symbol: Symbol) bool { - return symbol.flags & @intFromEnum(Flag.WASM_SYM_VISIBILITY_HIDDEN) != 0; -} - -pub fn isNoStrip(symbol: Symbol) bool { - return symbol.flags & @intFromEnum(Flag.WASM_SYM_NO_STRIP) != 0; -} - -pub fn isExported(symbol: Symbol, is_dynamic: bool) bool { - if (symbol.isUndefined() or symbol.isLocal()) return false; - if (is_dynamic and symbol.isVisible()) return true; - return symbol.hasFlag(.WASM_SYM_EXPORTED); -} - -pub fn isWeak(symbol: Symbol) bool { - return symbol.flags & @intFromEnum(Flag.WASM_SYM_BINDING_WEAK) != 0; -} - -/// Formats the symbol into human-readable text -pub fn format(symbol: Symbol, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { - _ = fmt; - _ = options; - - const kind_fmt: u8 = switch (symbol.tag) { - .function => 'F', - .data => 'D', - .global => 'G', - .section => 'S', - .event => 'E', - .table => 'T', - .dead => '-', - .undefined => unreachable, - }; - const visible: []const u8 = if (symbol.isVisible()) "yes" else "no"; - const binding: []const u8 = if (symbol.isLocal()) "local" else "global"; - const undef: []const u8 = if (symbol.isUndefined()) "undefined" else ""; - - try writer.print( - "{c} binding={s} visible={s} id={d} name_offset={d} {s}", - .{ kind_fmt, binding, visible, symbol.index, symbol.name, undef }, - ); -} - -const std = @import("std"); -const Symbol = @This(); -const wasm = 
@import("../Wasm.zig"); diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig deleted file mode 100644 index f2bce777ed70..000000000000 --- a/src/link/Wasm/ZigObject.zig +++ /dev/null @@ -1,1229 +0,0 @@ -//! ZigObject encapsulates the state of the incrementally compiled Zig module. -//! It stores the associated input local and global symbols, allocated atoms, -//! and any relocations that may have been emitted. - -/// For error reporting purposes only. -path: Path, -/// Map of all `Nav` that are currently alive. -/// Each index maps to the corresponding `NavInfo`. -navs: std.AutoHashMapUnmanaged(InternPool.Nav.Index, NavInfo) = .empty, -/// List of function type signatures for this Zig module. -func_types: std.ArrayListUnmanaged(std.wasm.Type) = .empty, -/// List of `std.wasm.Func`. Each entry contains the function signature, -/// rather than the actual body. -functions: std.ArrayListUnmanaged(std.wasm.Func) = .empty, -/// List of indexes pointing to an entry within the `functions` list which has been removed. -functions_free_list: std.ArrayListUnmanaged(u32) = .empty, -/// Map of symbol locations, represented by its `Wasm.Import`. -imports: std.AutoHashMapUnmanaged(Symbol.Index, Wasm.Import) = .empty, -/// List of WebAssembly globals. -globals: std.ArrayListUnmanaged(std.wasm.Global) = .empty, -/// Mapping between an `Atom` and its type index representing the Wasm -/// type of the function signature. -atom_types: std.AutoHashMapUnmanaged(Atom.Index, u32) = .empty, -/// List of all symbols generated by Zig code. -symbols: std.ArrayListUnmanaged(Symbol) = .empty, -/// Map from symbol name to their index into the `symbols` list. -global_syms: std.AutoHashMapUnmanaged(Wasm.String, Symbol.Index) = .empty, -/// List of symbol indexes which are free to be used. -symbols_free_list: std.ArrayListUnmanaged(Symbol.Index) = .empty, -/// Extra metadata about the linking section, such as alignment of segments and their name. -segment_info: std.ArrayListUnmanaged(Wasm.NamedSegment) = .empty, -/// List of indexes which contain a free slot in the `segment_info` list. -segment_free_list: std.ArrayListUnmanaged(u32) = .empty, -/// Map for storing anonymous declarations. Each anonymous decl maps to its Atom's index. -uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Atom.Index) = .empty, -/// List of atom indexes of functions that are generated by the backend. -synthetic_functions: std.ArrayListUnmanaged(Atom.Index) = .empty, -/// Represents the symbol index of the error name table -/// When this is `null`, no code references an error using runtime `@errorName`. -/// During initializion, a symbol with corresponding atom will be created that is -/// used to perform relocations to the pointer of this table. -/// The actual table is populated during `flush`. -error_table_symbol: Symbol.Index = .null, -/// Atom index of the table of symbol names. This is stored so we can clean up the atom. -error_names_atom: Atom.Index = .null, -/// Amount of functions in the `import` sections. -imported_functions_count: u32 = 0, -/// Amount of globals in the `import` section. -imported_globals_count: u32 = 0, -/// Symbol index representing the stack pointer. This will be set upon initializion -/// of a new `ZigObject`. Codegen will make calls into this to create relocations for -/// this symbol each time the stack pointer is moved. -stack_pointer_sym: Symbol.Index, -/// Debug information for the Zig module. -dwarf: ?Dwarf = null, -// Debug section atoms. 
These are only set when the current compilation -// unit contains Zig code. The lifetime of these atoms are extended -// until the end of the compiler's lifetime. Meaning they're not freed -// during `flush()` in incremental-mode. -debug_info_atom: ?Atom.Index = null, -debug_line_atom: ?Atom.Index = null, -debug_loc_atom: ?Atom.Index = null, -debug_ranges_atom: ?Atom.Index = null, -debug_abbrev_atom: ?Atom.Index = null, -debug_str_atom: ?Atom.Index = null, -debug_pubnames_atom: ?Atom.Index = null, -debug_pubtypes_atom: ?Atom.Index = null, -/// The index of the segment representing the custom '.debug_info' section. -debug_info_index: ?u32 = null, -/// The index of the segment representing the custom '.debug_line' section. -debug_line_index: ?u32 = null, -/// The index of the segment representing the custom '.debug_loc' section. -debug_loc_index: ?u32 = null, -/// The index of the segment representing the custom '.debug_ranges' section. -debug_ranges_index: ?u32 = null, -/// The index of the segment representing the custom '.debug_pubnames' section. -debug_pubnames_index: ?u32 = null, -/// The index of the segment representing the custom '.debug_pubtypes' section. -debug_pubtypes_index: ?u32 = null, -/// The index of the segment representing the custom '.debug_pubtypes' section. -debug_str_index: ?u32 = null, -/// The index of the segment representing the custom '.debug_pubtypes' section. -debug_abbrev_index: ?u32 = null, - -const NavInfo = struct { - atom: Atom.Index = .null, - exports: std.ArrayListUnmanaged(Symbol.Index) = .empty, - - fn @"export"(ni: NavInfo, zo: *const ZigObject, name: Wasm.String) ?Symbol.Index { - for (ni.exports.items) |sym_index| { - if (zo.symbol(sym_index).name == name) return sym_index; - } - return null; - } - - fn appendExport(ni: *NavInfo, gpa: std.mem.Allocator, sym_index: Symbol.Index) !void { - return ni.exports.append(gpa, sym_index); - } - - fn deleteExport(ni: *NavInfo, sym_index: Symbol.Index) void { - for (ni.exports.items, 0..) |idx, index| { - if (idx == sym_index) { - _ = ni.exports.swapRemove(index); - return; - } - } - unreachable; // invalid sym_index - } -}; - -/// Initializes the `ZigObject` with initial symbols. -pub fn init(zig_object: *ZigObject, wasm: *Wasm) !void { - // Initialize an undefined global with the name __stack_pointer. Codegen will use - // this to generate relocations when moving the stack pointer. This symbol will be - // resolved automatically by the final linking stage. - try zig_object.createStackPointer(wasm); - - // TODO: Initialize debug information when we reimplement Dwarf support. -} - -fn createStackPointer(zig_object: *ZigObject, wasm: *Wasm) !void { - const gpa = wasm.base.comp.gpa; - const sym_index = try zig_object.getGlobalSymbol(gpa, wasm.preloaded_strings.__stack_pointer); - const sym = zig_object.symbol(sym_index); - sym.index = zig_object.imported_globals_count; - sym.tag = .global; - const is_wasm32 = wasm.base.comp.root_mod.resolved_target.result.cpu.arch == .wasm32; - try zig_object.imports.putNoClobber(gpa, sym_index, .{ - .name = sym.name, - .module_name = wasm.host_name, - .kind = .{ .global = .{ .valtype = if (is_wasm32) .i32 else .i64, .mutable = true } }, - }); - zig_object.imported_globals_count += 1; - zig_object.stack_pointer_sym = sym_index; -} - -pub fn symbol(zig_object: *const ZigObject, index: Symbol.Index) *Symbol { - return &zig_object.symbols.items[@intFromEnum(index)]; -} - -/// Frees and invalidates all memory of the incrementally compiled Zig module. 
-/// It is illegal behavior to access the `ZigObject` after calling `deinit`. -pub fn deinit(zig_object: *ZigObject, wasm: *Wasm) void { - const gpa = wasm.base.comp.gpa; - for (zig_object.segment_info.items) |segment_info| { - gpa.free(segment_info.name); - } - - { - var it = zig_object.navs.valueIterator(); - while (it.next()) |nav_info| { - const atom = wasm.getAtomPtr(nav_info.atom); - for (atom.locals.items) |local_index| { - const local_atom = wasm.getAtomPtr(local_index); - local_atom.deinit(gpa); - } - atom.deinit(gpa); - nav_info.exports.deinit(gpa); - } - } - { - for (zig_object.uavs.values()) |atom_index| { - const atom = wasm.getAtomPtr(atom_index); - for (atom.locals.items) |local_index| { - const local_atom = wasm.getAtomPtr(local_index); - local_atom.deinit(gpa); - } - atom.deinit(gpa); - } - } - if (zig_object.global_syms.get(wasm.preloaded_strings.__zig_errors_len)) |sym_index| { - const atom_index = wasm.symbol_atom.get(.{ .file = .zig_object, .index = sym_index }).?; - wasm.getAtomPtr(atom_index).deinit(gpa); - } - if (wasm.symbol_atom.get(.{ .file = .zig_object, .index = zig_object.error_table_symbol })) |atom_index| { - const atom = wasm.getAtomPtr(atom_index); - atom.deinit(gpa); - } - for (zig_object.synthetic_functions.items) |atom_index| { - const atom = wasm.getAtomPtr(atom_index); - atom.deinit(gpa); - } - zig_object.synthetic_functions.deinit(gpa); - for (zig_object.func_types.items) |*ty| { - ty.deinit(gpa); - } - if (zig_object.error_names_atom != .null) { - const atom = wasm.getAtomPtr(zig_object.error_names_atom); - atom.deinit(gpa); - } - zig_object.global_syms.deinit(gpa); - zig_object.func_types.deinit(gpa); - zig_object.atom_types.deinit(gpa); - zig_object.functions.deinit(gpa); - zig_object.imports.deinit(gpa); - zig_object.navs.deinit(gpa); - zig_object.uavs.deinit(gpa); - zig_object.symbols.deinit(gpa); - zig_object.symbols_free_list.deinit(gpa); - zig_object.segment_info.deinit(gpa); - zig_object.segment_free_list.deinit(gpa); - - if (zig_object.dwarf) |*dwarf| { - dwarf.deinit(); - } - gpa.free(zig_object.path.sub_path); - zig_object.* = undefined; -} - -/// Allocates a new symbol and returns its index. -/// Will re-use slots when a symbol was freed at an earlier stage. -pub fn allocateSymbol(zig_object: *ZigObject, gpa: std.mem.Allocator) !Symbol.Index { - try zig_object.symbols.ensureUnusedCapacity(gpa, 1); - const sym: Symbol = .{ - .name = undefined, // will be set after updateDecl as well as during atom creation for decls - .flags = @intFromEnum(Symbol.Flag.WASM_SYM_BINDING_LOCAL), - .tag = .undefined, // will be set after updateDecl - .index = std.math.maxInt(u32), // will be set during atom parsing - .virtual_address = std.math.maxInt(u32), // will be set during atom allocation - }; - if (zig_object.symbols_free_list.popOrNull()) |index| { - zig_object.symbols.items[@intFromEnum(index)] = sym; - return index; - } - const index: Symbol.Index = @enumFromInt(zig_object.symbols.items.len); - zig_object.symbols.appendAssumeCapacity(sym); - return index; -} - -// Generate code for the `Nav`, storing it in memory to be later written to -// the file on flush(). 
-pub fn updateNav( - zig_object: *ZigObject, - wasm: *Wasm, - pt: Zcu.PerThread, - nav_index: InternPool.Nav.Index, -) !void { - const zcu = pt.zcu; - const ip = &zcu.intern_pool; - const nav = ip.getNav(nav_index); - - const nav_val = zcu.navValue(nav_index); - const is_extern, const lib_name, const nav_init = switch (ip.indexToKey(nav_val.toIntern())) { - .variable => |variable| .{ false, .none, Value.fromInterned(variable.init) }, - .func => return, - .@"extern" => |@"extern"| if (ip.isFunctionType(nav.typeOf(ip))) - return - else - .{ true, @"extern".lib_name, nav_val }, - else => .{ false, .none, nav_val }, - }; - - if (nav_init.typeOf(zcu).hasRuntimeBits(zcu)) { - const gpa = wasm.base.comp.gpa; - const atom_index = try zig_object.getOrCreateAtomForNav(wasm, pt, nav_index); - const atom = wasm.getAtomPtr(atom_index); - atom.clear(); - - if (is_extern) - return zig_object.addOrUpdateImport(wasm, nav.name.toSlice(ip), atom.sym_index, lib_name.toSlice(ip), null); - - var code_writer = std.ArrayList(u8).init(gpa); - defer code_writer.deinit(); - - const res = try codegen.generateSymbol( - &wasm.base, - pt, - zcu.navSrcLoc(nav_index), - nav_init, - &code_writer, - .{ .atom_index = @intFromEnum(atom.sym_index) }, - ); - - const code = switch (res) { - .ok => code_writer.items, - .fail => |em| { - try zcu.failed_codegen.put(zcu.gpa, nav_index, em); - return; - }, - }; - - try zig_object.finishUpdateNav(wasm, pt, nav_index, code); - } -} - -pub fn updateFunc( - zig_object: *ZigObject, - wasm: *Wasm, - pt: Zcu.PerThread, - func_index: InternPool.Index, - air: Air, - liveness: Liveness, -) !void { - const zcu = pt.zcu; - const gpa = zcu.gpa; - const func = pt.zcu.funcInfo(func_index); - const atom_index = try zig_object.getOrCreateAtomForNav(wasm, pt, func.owner_nav); - const atom = wasm.getAtomPtr(atom_index); - atom.clear(); - - var code_writer = std.ArrayList(u8).init(gpa); - defer code_writer.deinit(); - const result = try codegen.generateFunction( - &wasm.base, - pt, - zcu.navSrcLoc(func.owner_nav), - func_index, - air, - liveness, - &code_writer, - .none, - ); - - const code = switch (result) { - .ok => code_writer.items, - .fail => |em| { - try pt.zcu.failed_codegen.put(gpa, func.owner_nav, em); - return; - }, - }; - - return zig_object.finishUpdateNav(wasm, pt, func.owner_nav, code); -} - -fn finishUpdateNav( - zig_object: *ZigObject, - wasm: *Wasm, - pt: Zcu.PerThread, - nav_index: InternPool.Nav.Index, - code: []const u8, -) !void { - const zcu = pt.zcu; - const ip = &zcu.intern_pool; - const gpa = zcu.gpa; - const nav = ip.getNav(nav_index); - const nav_val = zcu.navValue(nav_index); - const nav_info = zig_object.navs.get(nav_index).?; - const atom_index = nav_info.atom; - const atom = wasm.getAtomPtr(atom_index); - const sym = zig_object.symbol(atom.sym_index); - sym.name = try wasm.internString(nav.fqn.toSlice(ip)); - try atom.code.appendSlice(gpa, code); - atom.size = @intCast(code.len); - - if (ip.isFunctionType(nav.typeOf(ip))) { - sym.index = try zig_object.appendFunction(gpa, .{ .type_index = zig_object.atom_types.get(atom_index).? 
}); - sym.tag = .function; - } else { - const is_const, const nav_init = switch (ip.indexToKey(nav_val.toIntern())) { - .variable => |variable| .{ false, variable.init }, - .@"extern" => |@"extern"| .{ @"extern".is_const, .none }, - else => .{ true, nav_val.toIntern() }, - }; - const segment_name = name: { - if (is_const) break :name ".rodata."; - - if (nav_init != .none and Value.fromInterned(nav_init).isUndefDeep(zcu)) { - break :name switch (zcu.navFileScope(nav_index).mod.optimize_mode) { - .Debug, .ReleaseSafe => ".data.", - .ReleaseFast, .ReleaseSmall => ".bss.", - }; - } - // when the decl is all zeroes, we store the atom in the bss segment, - // in all other cases it will be in the data segment. - for (atom.code.items) |byte| { - if (byte != 0) break :name ".data."; - } - break :name ".bss."; - }; - if ((wasm.base.isObject() or wasm.base.comp.config.import_memory) and - std.mem.startsWith(u8, segment_name, ".bss")) - { - @memset(atom.code.items, 0); - } - // Will be freed upon freeing of decl or after cleanup of Wasm binary. - const full_segment_name = try std.mem.concat(gpa, u8, &.{ - segment_name, - nav.fqn.toSlice(ip), - }); - errdefer gpa.free(full_segment_name); - sym.tag = .data; - sym.index = try zig_object.createDataSegment(gpa, full_segment_name, pt.navAlignment(nav_index)); - } - if (code.len == 0) return; - atom.alignment = pt.navAlignment(nav_index); -} - -/// Creates and initializes a new segment in the 'Data' section. -/// Reuses free slots in the list of segments and returns the index. -fn createDataSegment( - zig_object: *ZigObject, - gpa: std.mem.Allocator, - name: []const u8, - alignment: InternPool.Alignment, -) !u32 { - const segment_index: u32 = if (zig_object.segment_free_list.popOrNull()) |index| - index - else index: { - const idx: u32 = @intCast(zig_object.segment_info.items.len); - _ = try zig_object.segment_info.addOne(gpa); - break :index idx; - }; - zig_object.segment_info.items[segment_index] = .{ - .alignment = alignment, - .flags = 0, - .name = name, - }; - return segment_index; -} - -/// For a given `InternPool.Nav.Index` returns its corresponding `Atom.Index`. -/// When the index was not found, a new `Atom` will be created, and its index will be returned. -/// The newly created Atom is empty with default fields as specified by `Atom.empty`. 
-pub fn getOrCreateAtomForNav( - zig_object: *ZigObject, - wasm: *Wasm, - pt: Zcu.PerThread, - nav_index: InternPool.Nav.Index, -) !Atom.Index { - const ip = &pt.zcu.intern_pool; - const gpa = pt.zcu.gpa; - const gop = try zig_object.navs.getOrPut(gpa, nav_index); - if (!gop.found_existing) { - const sym_index = try zig_object.allocateSymbol(gpa); - gop.value_ptr.* = .{ .atom = try wasm.createAtom(sym_index, .zig_object) }; - const nav = ip.getNav(nav_index); - const sym = zig_object.symbol(sym_index); - sym.name = try wasm.internString(nav.fqn.toSlice(ip)); - } - return gop.value_ptr.atom; -} - -pub fn lowerUav( - zig_object: *ZigObject, - wasm: *Wasm, - pt: Zcu.PerThread, - uav: InternPool.Index, - explicit_alignment: InternPool.Alignment, - src_loc: Zcu.LazySrcLoc, -) !codegen.GenResult { - const gpa = wasm.base.comp.gpa; - const gop = try zig_object.uavs.getOrPut(gpa, uav); - if (!gop.found_existing) { - var name_buf: [32]u8 = undefined; - const name = std.fmt.bufPrint(&name_buf, "__anon_{d}", .{ - @intFromEnum(uav), - }) catch unreachable; - - switch (try zig_object.lowerConst(wasm, pt, name, Value.fromInterned(uav), src_loc)) { - .ok => |atom_index| zig_object.uavs.values()[gop.index] = atom_index, - .fail => |em| return .{ .fail = em }, - } - } - - const atom = wasm.getAtomPtr(zig_object.uavs.values()[gop.index]); - atom.alignment = switch (atom.alignment) { - .none => explicit_alignment, - else => switch (explicit_alignment) { - .none => atom.alignment, - else => atom.alignment.maxStrict(explicit_alignment), - }, - }; - return .{ .mcv = .{ .load_symbol = @intFromEnum(atom.sym_index) } }; -} - -const LowerConstResult = union(enum) { - ok: Atom.Index, - fail: *Zcu.ErrorMsg, -}; - -fn lowerConst( - zig_object: *ZigObject, - wasm: *Wasm, - pt: Zcu.PerThread, - name: []const u8, - val: Value, - src_loc: Zcu.LazySrcLoc, -) !LowerConstResult { - const gpa = wasm.base.comp.gpa; - const zcu = wasm.base.comp.zcu.?; - - const ty = val.typeOf(zcu); - - // Create and initialize a new local symbol and atom - const sym_index = try zig_object.allocateSymbol(gpa); - const atom_index = try wasm.createAtom(sym_index, .zig_object); - var value_bytes = std.ArrayList(u8).init(gpa); - defer value_bytes.deinit(); - - const code = code: { - const atom = wasm.getAtomPtr(atom_index); - atom.alignment = ty.abiAlignment(zcu); - const segment_name = try std.mem.concat(gpa, u8, &.{ ".rodata.", name }); - errdefer gpa.free(segment_name); - zig_object.symbol(sym_index).* = .{ - .name = try wasm.internString(name), - .flags = @intFromEnum(Symbol.Flag.WASM_SYM_BINDING_LOCAL), - .tag = .data, - .index = try zig_object.createDataSegment( - gpa, - segment_name, - ty.abiAlignment(zcu), - ), - .virtual_address = undefined, - }; - - const result = try codegen.generateSymbol( - &wasm.base, - pt, - src_loc, - val, - &value_bytes, - .{ .atom_index = @intFromEnum(atom.sym_index) }, - ); - break :code switch (result) { - .ok => value_bytes.items, - .fail => |em| { - return .{ .fail = em }; - }, - }; - }; - - const atom = wasm.getAtomPtr(atom_index); - atom.size = @intCast(code.len); - try atom.code.appendSlice(gpa, code); - return .{ .ok = atom_index }; -} - -/// Returns the symbol index of the error name table. -/// -/// When the symbol does not yet exist, it will create a new one instead. 
-pub fn getErrorTableSymbol(zig_object: *ZigObject, wasm: *Wasm, pt: Zcu.PerThread) !Symbol.Index { - if (zig_object.error_table_symbol != .null) { - return zig_object.error_table_symbol; - } - - // no error was referenced yet, so create a new symbol and atom for it - // and then return said symbol's index. The final table will be populated - // during `flush` when we know all possible error names. - const gpa = wasm.base.comp.gpa; - const sym_index = try zig_object.allocateSymbol(gpa); - const atom_index = try wasm.createAtom(sym_index, .zig_object); - const atom = wasm.getAtomPtr(atom_index); - const slice_ty = Type.slice_const_u8_sentinel_0; - atom.alignment = slice_ty.abiAlignment(pt.zcu); - - const segment_name = try gpa.dupe(u8, ".rodata.__zig_err_name_table"); - const sym = zig_object.symbol(sym_index); - sym.* = .{ - .name = wasm.preloaded_strings.__zig_err_name_table, - .tag = .data, - .flags = @intFromEnum(Symbol.Flag.WASM_SYM_BINDING_LOCAL), - .index = try zig_object.createDataSegment(gpa, segment_name, atom.alignment), - .virtual_address = undefined, - }; - - log.debug("Error name table was created with symbol index: ({d})", .{@intFromEnum(sym_index)}); - zig_object.error_table_symbol = sym_index; - return sym_index; -} - -/// Populates the error name table, when `error_table_symbol` is not null. -/// -/// This creates a table that consists of pointers and length to each error name. -/// The table is what is being pointed to within the runtime bodies that are generated. -fn populateErrorNameTable(zig_object: *ZigObject, wasm: *Wasm, tid: Zcu.PerThread.Id) !void { - if (zig_object.error_table_symbol == .null) return; - const gpa = wasm.base.comp.gpa; - const atom_index = wasm.symbol_atom.get(.{ .file = .zig_object, .index = zig_object.error_table_symbol }).?; - - // Rather than creating a symbol for each individual error name, - // we create a symbol for the entire region of error names. We then calculate - // the pointers into the list using addends which are appended to the relocation. 
- const names_sym_index = try zig_object.allocateSymbol(gpa); - const names_atom_index = try wasm.createAtom(names_sym_index, .zig_object); - const names_atom = wasm.getAtomPtr(names_atom_index); - names_atom.alignment = .@"1"; - const segment_name = try gpa.dupe(u8, ".rodata.__zig_err_names"); - const names_symbol = zig_object.symbol(names_sym_index); - names_symbol.* = .{ - .name = wasm.preloaded_strings.__zig_err_names, - .tag = .data, - .flags = @intFromEnum(Symbol.Flag.WASM_SYM_BINDING_LOCAL), - .index = try zig_object.createDataSegment(gpa, segment_name, names_atom.alignment), - .virtual_address = undefined, - }; - - log.debug("Populating error names", .{}); - - // Addend for each relocation to the table - var addend: u32 = 0; - const pt: Zcu.PerThread = .activate(wasm.base.comp.zcu.?, tid); - defer pt.deactivate(); - const slice_ty = Type.slice_const_u8_sentinel_0; - const atom = wasm.getAtomPtr(atom_index); - { - // TODO: remove this unreachable entry - try atom.code.appendNTimes(gpa, 0, 4); - try atom.code.writer(gpa).writeInt(u32, 0, .little); - atom.size += @intCast(slice_ty.abiSize(pt.zcu)); - addend += 1; - - try names_atom.code.append(gpa, 0); - } - const ip = &pt.zcu.intern_pool; - for (ip.global_error_set.getNamesFromMainThread()) |error_name| { - const error_name_slice = error_name.toSlice(ip); - const len: u32 = @intCast(error_name_slice.len + 1); // names are 0-terminated - - const offset = @as(u32, @intCast(atom.code.items.len)); - // first we create the data for the slice of the name - try atom.code.appendNTimes(gpa, 0, 4); // ptr to name, will be relocated - try atom.code.writer(gpa).writeInt(u32, len - 1, .little); - // create relocation to the error name - try atom.relocs.append(gpa, .{ - .index = @intFromEnum(names_atom.sym_index), - .relocation_type = .R_WASM_MEMORY_ADDR_I32, - .offset = offset, - .addend = @intCast(addend), - }); - atom.size += @intCast(slice_ty.abiSize(pt.zcu)); - addend += len; - - // as we updated the error name table, we now store the actual name within the names atom - try names_atom.code.ensureUnusedCapacity(gpa, len); - names_atom.code.appendSliceAssumeCapacity(error_name_slice[0..len]); - - log.debug("Populated error name: '{}'", .{error_name.fmt(ip)}); - } - names_atom.size = addend; - zig_object.error_names_atom = names_atom_index; -} - -/// Either creates a new import, or updates one if existing. -/// When `type_index` is non-null, we assume an external function. -/// In all other cases, a data-symbol will be created instead. -pub fn addOrUpdateImport( - zig_object: *ZigObject, - wasm: *Wasm, - /// Name of the import - name: []const u8, - /// Symbol index that is external - symbol_index: Symbol.Index, - /// Optional library name (i.e. `extern "c" fn foo() void` - lib_name: ?[:0]const u8, - /// The index of the type that represents the function signature - /// when the extern is a function. When this is null, a data-symbol - /// is asserted instead. - type_index: ?u32, -) !void { - const gpa = wasm.base.comp.gpa; - std.debug.assert(symbol_index != .null); - // For the import name, we use the decl's name, rather than the fully qualified name - // Also mangle the name when the lib name is set and not equal to "C" so imports with the same - // name but different module can be resolved correctly. - const mangle_name = if (lib_name) |n| !std.mem.eql(u8, n, "c") else false; - const full_name = if (mangle_name) - try std.fmt.allocPrint(gpa, "{s}|{s}", .{ name, lib_name.? 
}) - else - name; - defer if (mangle_name) gpa.free(full_name); - - const decl_name_index = try wasm.internString(full_name); - const sym: *Symbol = &zig_object.symbols.items[@intFromEnum(symbol_index)]; - sym.setUndefined(true); - sym.setGlobal(true); - sym.name = decl_name_index; - if (mangle_name) { - // we specified a specific name for the symbol that does not match the import name - sym.setFlag(.WASM_SYM_EXPLICIT_NAME); - } - - if (type_index) |ty_index| { - const gop = try zig_object.imports.getOrPut(gpa, symbol_index); - const module_name = if (lib_name) |n| try wasm.internString(n) else wasm.host_name; - if (!gop.found_existing) zig_object.imported_functions_count += 1; - gop.value_ptr.* = .{ - .module_name = module_name, - .name = try wasm.internString(name), - .kind = .{ .function = ty_index }, - }; - sym.tag = .function; - } else { - sym.tag = .data; - } -} - -/// Returns the symbol index from a symbol of which its flag is set global, -/// such as an exported or imported symbol. -/// If the symbol does not yet exist, creates a new one symbol instead -/// and then returns the index to it. -pub fn getGlobalSymbol(zig_object: *ZigObject, gpa: std.mem.Allocator, name_index: Wasm.String) !Symbol.Index { - const gop = try zig_object.global_syms.getOrPut(gpa, name_index); - if (gop.found_existing) { - return gop.value_ptr.*; - } - - var sym: Symbol = .{ - .name = name_index, - .flags = 0, - .index = undefined, // index to type will be set after merging symbols - .tag = .function, - .virtual_address = std.math.maxInt(u32), - }; - sym.setGlobal(true); - sym.setUndefined(true); - - const sym_index = if (zig_object.symbols_free_list.popOrNull()) |index| index else blk: { - const index: Symbol.Index = @enumFromInt(zig_object.symbols.items.len); - try zig_object.symbols.ensureUnusedCapacity(gpa, 1); - zig_object.symbols.items.len += 1; - break :blk index; - }; - zig_object.symbol(sym_index).* = sym; - gop.value_ptr.* = sym_index; - return sym_index; -} - -/// For a given decl, find the given symbol index's atom, and create a relocation for the type. 
-/// Returns the given pointer address -pub fn getNavVAddr( - zig_object: *ZigObject, - wasm: *Wasm, - pt: Zcu.PerThread, - nav_index: InternPool.Nav.Index, - reloc_info: link.File.RelocInfo, -) !u64 { - const zcu = pt.zcu; - const ip = &zcu.intern_pool; - const gpa = zcu.gpa; - const nav = ip.getNav(nav_index); - const target = &zcu.navFileScope(nav_index).mod.resolved_target.result; - - const target_atom_index = try zig_object.getOrCreateAtomForNav(wasm, pt, nav_index); - const target_atom = wasm.getAtom(target_atom_index); - const target_symbol_index = @intFromEnum(target_atom.sym_index); - if (nav.getExtern(ip)) |@"extern"| { - try zig_object.addOrUpdateImport( - wasm, - nav.name.toSlice(ip), - target_atom.sym_index, - @"extern".lib_name.toSlice(ip), - null, - ); - } - - std.debug.assert(reloc_info.parent.atom_index != 0); - const atom_index = wasm.symbol_atom.get(.{ - .file = .zig_object, - .index = @enumFromInt(reloc_info.parent.atom_index), - }).?; - const atom = wasm.getAtomPtr(atom_index); - const is_wasm32 = target.cpu.arch == .wasm32; - if (ip.isFunctionType(ip.getNav(nav_index).typeOf(ip))) { - std.debug.assert(reloc_info.addend == 0); // addend not allowed for function relocations - try atom.relocs.append(gpa, .{ - .index = target_symbol_index, - .offset = @intCast(reloc_info.offset), - .relocation_type = if (is_wasm32) .R_WASM_TABLE_INDEX_I32 else .R_WASM_TABLE_INDEX_I64, - }); - } else { - try atom.relocs.append(gpa, .{ - .index = target_symbol_index, - .offset = @intCast(reloc_info.offset), - .relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_I32 else .R_WASM_MEMORY_ADDR_I64, - .addend = @intCast(reloc_info.addend), - }); - } - - // we do not know the final address at this point, - // as atom allocation will determine the address and relocations - // will calculate and rewrite this. Therefore, we simply return the symbol index - // that was targeted. - return target_symbol_index; -} - -pub fn getUavVAddr( - zig_object: *ZigObject, - wasm: *Wasm, - uav: InternPool.Index, - reloc_info: link.File.RelocInfo, -) !u64 { - const gpa = wasm.base.comp.gpa; - const target = wasm.base.comp.root_mod.resolved_target.result; - const atom_index = zig_object.uavs.get(uav).?; - const target_symbol_index = @intFromEnum(wasm.getAtom(atom_index).sym_index); - - const parent_atom_index = wasm.symbol_atom.get(.{ - .file = .zig_object, - .index = @enumFromInt(reloc_info.parent.atom_index), - }).?; - const parent_atom = wasm.getAtomPtr(parent_atom_index); - const is_wasm32 = target.cpu.arch == .wasm32; - const zcu = wasm.base.comp.zcu.?; - const ty = Type.fromInterned(zcu.intern_pool.typeOf(uav)); - if (ty.zigTypeTag(zcu) == .@"fn") { - std.debug.assert(reloc_info.addend == 0); // addend not allowed for function relocations - try parent_atom.relocs.append(gpa, .{ - .index = target_symbol_index, - .offset = @intCast(reloc_info.offset), - .relocation_type = if (is_wasm32) .R_WASM_TABLE_INDEX_I32 else .R_WASM_TABLE_INDEX_I64, - }); - } else { - try parent_atom.relocs.append(gpa, .{ - .index = target_symbol_index, - .offset = @intCast(reloc_info.offset), - .relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_I32 else .R_WASM_MEMORY_ADDR_I64, - .addend = @intCast(reloc_info.addend), - }); - } - - // we do not know the final address at this point, - // as atom allocation will determine the address and relocations - // will calculate and rewrite this. Therefore, we simply return the symbol index - // that was targeted. 
- return target_symbol_index; -} - -pub fn deleteExport( - zig_object: *ZigObject, - wasm: *Wasm, - exported: Zcu.Exported, - name: InternPool.NullTerminatedString, -) void { - const zcu = wasm.base.comp.zcu.?; - const nav_index = switch (exported) { - .nav => |nav_index| nav_index, - .uav => @panic("TODO: implement Wasm linker code for exporting a constant value"), - }; - const nav_info = zig_object.navs.getPtr(nav_index) orelse return; - const name_interned = wasm.getExistingString(name.toSlice(&zcu.intern_pool)).?; - if (nav_info.@"export"(zig_object, name_interned)) |sym_index| { - const sym = zig_object.symbol(sym_index); - nav_info.deleteExport(sym_index); - std.debug.assert(zig_object.global_syms.remove(sym.name)); - std.debug.assert(wasm.symbol_atom.remove(.{ .file = .zig_object, .index = sym_index })); - zig_object.symbols_free_list.append(wasm.base.comp.gpa, sym_index) catch {}; - sym.tag = .dead; - } -} - -pub fn updateExports( - zig_object: *ZigObject, - wasm: *Wasm, - pt: Zcu.PerThread, - exported: Zcu.Exported, - export_indices: []const u32, -) !void { - const zcu = pt.zcu; - const ip = &zcu.intern_pool; - const nav_index = switch (exported) { - .nav => |nav| nav, - .uav => |uav| { - _ = uav; - @panic("TODO: implement Wasm linker code for exporting a constant value"); - }, - }; - const nav = ip.getNav(nav_index); - const atom_index = try zig_object.getOrCreateAtomForNav(wasm, pt, nav_index); - const nav_info = zig_object.navs.getPtr(nav_index).?; - const atom = wasm.getAtom(atom_index); - const atom_sym = wasm.symbolLocSymbol(atom.symbolLoc()).*; - const gpa = zcu.gpa; - log.debug("Updating exports for decl '{}'", .{nav.name.fmt(ip)}); - - for (export_indices) |export_idx| { - const exp = zcu.all_exports.items[export_idx]; - if (exp.opts.section.toSlice(ip)) |section| { - try zcu.failed_exports.putNoClobber(gpa, export_idx, try Zcu.ErrorMsg.create( - gpa, - zcu.navSrcLoc(nav_index), - "Unimplemented: ExportOptions.section '{s}'", - .{section}, - )); - continue; - } - - const export_name = try wasm.internString(exp.opts.name.toSlice(ip)); - const sym_index = if (nav_info.@"export"(zig_object, export_name)) |idx| idx else index: { - const sym_index = try zig_object.allocateSymbol(gpa); - try nav_info.appendExport(gpa, sym_index); - break :index sym_index; - }; - - const sym = zig_object.symbol(sym_index); - sym.setGlobal(true); - sym.setUndefined(false); - sym.index = atom_sym.index; - sym.tag = atom_sym.tag; - sym.name = export_name; - - switch (exp.opts.linkage) { - .internal => { - sym.setFlag(.WASM_SYM_VISIBILITY_HIDDEN); - }, - .weak => { - sym.setFlag(.WASM_SYM_BINDING_WEAK); - }, - .strong => {}, // symbols are strong by default - .link_once => { - try zcu.failed_exports.putNoClobber(gpa, export_idx, try Zcu.ErrorMsg.create( - gpa, - zcu.navSrcLoc(nav_index), - "Unimplemented: LinkOnce", - .{}, - )); - continue; - }, - } - if (exp.opts.visibility == .hidden) { - sym.setFlag(.WASM_SYM_VISIBILITY_HIDDEN); - } - log.debug(" with name '{s}' - {}", .{ wasm.stringSlice(export_name), sym }); - try zig_object.global_syms.put(gpa, export_name, sym_index); - try wasm.symbol_atom.put(gpa, .{ .file = .zig_object, .index = sym_index }, atom_index); - } -} - -pub fn freeNav(zig_object: *ZigObject, wasm: *Wasm, nav_index: InternPool.Nav.Index) void { - const gpa = wasm.base.comp.gpa; - const zcu = wasm.base.comp.zcu.?; - const ip = &zcu.intern_pool; - const nav_info = zig_object.navs.getPtr(nav_index).?; - const atom_index = nav_info.atom; - const atom = wasm.getAtomPtr(atom_index); - 
zig_object.symbols_free_list.append(gpa, atom.sym_index) catch {}; - for (nav_info.exports.items) |exp_sym_index| { - const exp_sym = zig_object.symbol(exp_sym_index); - exp_sym.tag = .dead; - zig_object.symbols_free_list.append(exp_sym_index) catch {}; - } - nav_info.exports.deinit(gpa); - std.debug.assert(zig_object.navs.remove(nav_index)); - const sym = &zig_object.symbols.items[atom.sym_index]; - for (atom.locals.items) |local_atom_index| { - const local_atom = wasm.getAtom(local_atom_index); - const local_symbol = &zig_object.symbols.items[local_atom.sym_index]; - std.debug.assert(local_symbol.tag == .data); - zig_object.symbols_free_list.append(gpa, local_atom.sym_index) catch {}; - std.debug.assert(wasm.symbol_atom.remove(local_atom.symbolLoc())); - local_symbol.tag = .dead; // also for any local symbol - const segment = &zig_object.segment_info.items[local_atom.sym_index]; - gpa.free(segment.name); - segment.name = &.{}; // Ensure no accidental double free - } - - const nav = ip.getNav(nav_index); - if (nav.getExtern(ip) != null) { - std.debug.assert(zig_object.imports.remove(atom.sym_index)); - } - std.debug.assert(wasm.symbol_atom.remove(atom.symbolLoc())); - - // if (wasm.dwarf) |*dwarf| { - // dwarf.freeDecl(decl_index); - // } - - atom.prev = null; - sym.tag = .dead; - if (sym.isGlobal()) { - std.debug.assert(zig_object.global_syms.remove(atom.sym_index)); - } - if (ip.isFunctionType(nav.typeOf(ip))) { - zig_object.functions_free_list.append(gpa, sym.index) catch {}; - std.debug.assert(zig_object.atom_types.remove(atom_index)); - } else { - zig_object.segment_free_list.append(gpa, sym.index) catch {}; - const segment = &zig_object.segment_info.items[sym.index]; - gpa.free(segment.name); - segment.name = &.{}; // Prevent accidental double free - } -} - -fn getTypeIndex(zig_object: *const ZigObject, func_type: std.wasm.Type) ?u32 { - var index: u32 = 0; - while (index < zig_object.func_types.items.len) : (index += 1) { - if (zig_object.func_types.items[index].eql(func_type)) return index; - } - return null; -} - -/// Searches for a matching function signature. When no matching signature is found, -/// a new entry will be made. The value returned is the index of the type within `wasm.func_types`. -pub fn putOrGetFuncType(zig_object: *ZigObject, gpa: std.mem.Allocator, func_type: std.wasm.Type) !u32 { - if (zig_object.getTypeIndex(func_type)) |index| { - return index; - } - - // functype does not exist. - const index: u32 = @intCast(zig_object.func_types.items.len); - const params = try gpa.dupe(std.wasm.Valtype, func_type.params); - errdefer gpa.free(params); - const returns = try gpa.dupe(std.wasm.Valtype, func_type.returns); - errdefer gpa.free(returns); - try zig_object.func_types.append(gpa, .{ - .params = params, - .returns = returns, - }); - return index; -} - -/// Generates an atom containing the global error set' size. -/// This will only be generated if the symbol exists. -fn setupErrorsLen(zig_object: *ZigObject, wasm: *Wasm) !void { - const gpa = wasm.base.comp.gpa; - const sym_index = zig_object.global_syms.get(wasm.preloaded_strings.__zig_errors_len) orelse return; - - const errors_len = 1 + wasm.base.comp.zcu.?.intern_pool.global_error_set.getNamesFromMainThread().len; - // overwrite existing atom if it already exists (maybe the error set has increased) - // if not, allocate a new atom. 
- const atom_index = if (wasm.symbol_atom.get(.{ .file = .zig_object, .index = sym_index })) |index| blk: { - const atom = wasm.getAtomPtr(index); - atom.prev = .null; - atom.deinit(gpa); - break :blk index; - } else idx: { - // We found a call to __zig_errors_len so make the symbol a local symbol - // and define it, so the final binary or resulting object file will not attempt - // to resolve it. - const sym = zig_object.symbol(sym_index); - sym.setGlobal(false); - sym.setUndefined(false); - sym.tag = .data; - const segment_name = try gpa.dupe(u8, ".rodata.__zig_errors_len"); - sym.index = try zig_object.createDataSegment(gpa, segment_name, .@"2"); - break :idx try wasm.createAtom(sym_index, .zig_object); - }; - - const atom = wasm.getAtomPtr(atom_index); - atom.code.clearRetainingCapacity(); - atom.sym_index = sym_index; - atom.size = 2; - atom.alignment = .@"2"; - try atom.code.writer(gpa).writeInt(u16, @intCast(errors_len), .little); -} - -/// Initializes symbols and atoms for the debug sections -/// Initialization is only done when compiling Zig code. -/// When Zig is invoked as a linker instead, the atoms -/// and symbols come from the object files instead. -pub fn initDebugSections(zig_object: *ZigObject) !void { - if (zig_object.dwarf == null) return; // not compiling Zig code, so no need to pre-initialize debug sections - std.debug.assert(zig_object.debug_info_index == null); - // this will create an Atom and set the index for us. - zig_object.debug_info_atom = try zig_object.createDebugSectionForIndex(&zig_object.debug_info_index, ".debug_info"); - zig_object.debug_line_atom = try zig_object.createDebugSectionForIndex(&zig_object.debug_line_index, ".debug_line"); - zig_object.debug_loc_atom = try zig_object.createDebugSectionForIndex(&zig_object.debug_loc_index, ".debug_loc"); - zig_object.debug_abbrev_atom = try zig_object.createDebugSectionForIndex(&zig_object.debug_abbrev_index, ".debug_abbrev"); - zig_object.debug_ranges_atom = try zig_object.createDebugSectionForIndex(&zig_object.debug_ranges_index, ".debug_ranges"); - zig_object.debug_str_atom = try zig_object.createDebugSectionForIndex(&zig_object.debug_str_index, ".debug_str"); - zig_object.debug_pubnames_atom = try zig_object.createDebugSectionForIndex(&zig_object.debug_pubnames_index, ".debug_pubnames"); - zig_object.debug_pubtypes_atom = try zig_object.createDebugSectionForIndex(&zig_object.debug_pubtypes_index, ".debug_pubtypes"); -} - -/// From a given index variable, creates a new debug section. -/// This initializes the index, appends a new segment, -/// and finally, creates a managed `Atom`. 
-pub fn createDebugSectionForIndex(zig_object: *ZigObject, wasm: *Wasm, index: *?u32, name: []const u8) !Atom.Index { - const gpa = wasm.base.comp.gpa; - const new_index: u32 = @intCast(zig_object.segments.items.len); - index.* = new_index; - try zig_object.appendDummySegment(); - - const sym_index = try zig_object.allocateSymbol(gpa); - const atom_index = try wasm.createAtom(sym_index, .zig_object); - const atom = wasm.getAtomPtr(atom_index); - zig_object.symbols.items[sym_index] = .{ - .tag = .section, - .name = try wasm.internString(name), - .index = 0, - .flags = @intFromEnum(Symbol.Flag.WASM_SYM_BINDING_LOCAL), - }; - - atom.alignment = .@"1"; // debug sections are always 1-byte-aligned - return atom_index; -} - -pub fn updateLineNumber(zig_object: *ZigObject, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) !void { - if (zig_object.dwarf) |*dw| { - try dw.updateLineNumber(pt.zcu, ti_id); - } -} - -/// Allocates debug atoms into their respective debug sections -/// to merge them with maybe-existing debug atoms from object files. -fn allocateDebugAtoms(zig_object: *ZigObject) !void { - if (zig_object.dwarf == null) return; - - const allocAtom = struct { - fn f(ctx: *ZigObject, maybe_index: *?u32, atom_index: Atom.Index) !void { - const index = maybe_index.* orelse idx: { - const index = @as(u32, @intCast(ctx.segments.items.len)); - try ctx.appendDummySegment(); - maybe_index.* = index; - break :idx index; - }; - const atom = ctx.getAtomPtr(atom_index); - atom.size = @as(u32, @intCast(atom.code.items.len)); - ctx.symbols.items[atom.sym_index].index = index; - try ctx.appendAtomAtIndex(index, atom_index); - } - }.f; - - try allocAtom(zig_object, &zig_object.debug_info_index, zig_object.debug_info_atom.?); - try allocAtom(zig_object, &zig_object.debug_line_index, zig_object.debug_line_atom.?); - try allocAtom(zig_object, &zig_object.debug_loc_index, zig_object.debug_loc_atom.?); - try allocAtom(zig_object, &zig_object.debug_str_index, zig_object.debug_str_atom.?); - try allocAtom(zig_object, &zig_object.debug_ranges_index, zig_object.debug_ranges_atom.?); - try allocAtom(zig_object, &zig_object.debug_abbrev_index, zig_object.debug_abbrev_atom.?); - try allocAtom(zig_object, &zig_object.debug_pubnames_index, zig_object.debug_pubnames_atom.?); - try allocAtom(zig_object, &zig_object.debug_pubtypes_index, zig_object.debug_pubtypes_atom.?); -} - -/// For the given `decl_index`, stores the corresponding type representing the function signature. -/// Asserts declaration has an associated `Atom`. -/// Returns the index into the list of types. -pub fn storeDeclType(zig_object: *ZigObject, gpa: std.mem.Allocator, nav_index: InternPool.Nav.Index, func_type: std.wasm.Type) !u32 { - const nav_info = zig_object.navs.get(nav_index).?; - const index = try zig_object.putOrGetFuncType(gpa, func_type); - try zig_object.atom_types.put(gpa, nav_info.atom, index); - return index; -} - -/// The symbols in ZigObject are already represented by an atom as we need to store its data. -/// So rather than creating a new Atom and returning its index, we use this opportunity to scan -/// its relocations and create any GOT symbols or function table indexes it may require. 
-pub fn parseSymbolIntoAtom(zig_object: *ZigObject, wasm: *Wasm, index: Symbol.Index) !Atom.Index { - const gpa = wasm.base.comp.gpa; - const loc: Wasm.SymbolLoc = .{ .file = .zig_object, .index = index }; - const atom_index = wasm.symbol_atom.get(loc).?; - const final_index = try wasm.getMatchingSegment(.zig_object, index); - try wasm.appendAtomAtIndex(final_index, atom_index); - const atom = wasm.getAtom(atom_index); - for (atom.relocs.items) |reloc| { - const reloc_index: Symbol.Index = @enumFromInt(reloc.index); - switch (reloc.relocation_type) { - .R_WASM_TABLE_INDEX_I32, - .R_WASM_TABLE_INDEX_I64, - .R_WASM_TABLE_INDEX_SLEB, - .R_WASM_TABLE_INDEX_SLEB64, - => { - try wasm.function_table.put(gpa, .{ - .file = .zig_object, - .index = reloc_index, - }, 0); - }, - .R_WASM_GLOBAL_INDEX_I32, - .R_WASM_GLOBAL_INDEX_LEB, - => { - const sym = zig_object.symbol(reloc_index); - if (sym.tag != .global) { - try wasm.got_symbols.append(gpa, .{ - .file = .zig_object, - .index = reloc_index, - }); - } - }, - else => {}, - } - } - return atom_index; -} - -/// Creates a new Wasm function with a given symbol name and body. -/// Returns the symbol index of the new function. -pub fn createFunction( - zig_object: *ZigObject, - wasm: *Wasm, - symbol_name: []const u8, - func_ty: std.wasm.Type, - function_body: *std.ArrayList(u8), - relocations: *std.ArrayList(Wasm.Relocation), -) !Symbol.Index { - const gpa = wasm.base.comp.gpa; - const sym_index = try zig_object.allocateSymbol(gpa); - const sym = zig_object.symbol(sym_index); - sym.tag = .function; - sym.name = try wasm.internString(symbol_name); - const type_index = try zig_object.putOrGetFuncType(gpa, func_ty); - sym.index = try zig_object.appendFunction(gpa, .{ .type_index = type_index }); - - const atom_index = try wasm.createAtom(sym_index, .zig_object); - const atom = wasm.getAtomPtr(atom_index); - atom.size = @intCast(function_body.items.len); - atom.code = function_body.moveToUnmanaged(); - atom.relocs = relocations.moveToUnmanaged(); - - try zig_object.synthetic_functions.append(gpa, atom_index); - return sym_index; -} - -/// Appends a new `std.wasm.Func` to the list of functions and returns its index. 
-fn appendFunction(zig_object: *ZigObject, gpa: std.mem.Allocator, func: std.wasm.Func) !u32 { - const index: u32 = if (zig_object.functions_free_list.popOrNull()) |idx| - idx - else idx: { - const len: u32 = @intCast(zig_object.functions.items.len); - _ = try zig_object.functions.addOne(gpa); - break :idx len; - }; - zig_object.functions.items[index] = func; - - return index; -} - -pub fn flushModule(zig_object: *ZigObject, wasm: *Wasm, tid: Zcu.PerThread.Id) !void { - try zig_object.populateErrorNameTable(wasm, tid); - try zig_object.setupErrorsLen(wasm); -} - -const build_options = @import("build_options"); -const builtin = @import("builtin"); -const codegen = @import("../../codegen.zig"); -const link = @import("../../link.zig"); -const log = std.log.scoped(.zig_object); -const std = @import("std"); -const Path = std.Build.Cache.Path; - -const Air = @import("../../Air.zig"); -const Atom = Wasm.Atom; -const Dwarf = @import("../Dwarf.zig"); -const InternPool = @import("../../InternPool.zig"); -const Liveness = @import("../../Liveness.zig"); -const Zcu = @import("../../Zcu.zig"); -const Symbol = @import("Symbol.zig"); -const Type = @import("../../Type.zig"); -const Value = @import("../../Value.zig"); -const Wasm = @import("../Wasm.zig"); -const AnalUnit = InternPool.AnalUnit; -const ZigObject = @This(); diff --git a/src/main.zig b/src/main.zig index 7bb51bbd8ea9..b17a753b2bdc 100644 --- a/src/main.zig +++ b/src/main.zig @@ -75,6 +75,10 @@ pub fn fatal(comptime format: []const u8, args: anytype) noreturn { process.exit(1); } +/// Shaming all the locations that inappropriately use an O(N) search algorithm. +/// Please delete this and fix the compilation errors! +pub const @"bad O(N)" = void; + const normal_usage = \\Usage: zig [command] [options] \\ diff --git a/src/register_manager.zig b/src/register_manager.zig index c24cda6cf591..48b12a59d2f1 100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -14,19 +14,14 @@ const link = @import("link.zig"); const log = std.log.scoped(.register_manager); -pub const AllocateRegistersError = error{ - /// No registers are available anymore +pub const AllocationError = error{ OutOfRegisters, - /// Can happen when spilling an instruction in codegen runs out of - /// memory, so we propagate that error OutOfMemory, - /// Can happen when spilling an instruction in codegen triggers integer - /// overflow, so we propagate that error + /// Compiler was asked to operate on a number larger than supported. Overflow, - /// Can happen when spilling an instruction triggers a codegen - /// error, so we propagate that error + /// Indicates the error is already stored in `failed_codegen` on the Zcu. 
CodegenFail, -} || link.File.UpdateDebugInfoError; +}; pub fn RegisterManager( comptime Function: type, @@ -281,7 +276,7 @@ pub fn RegisterManager( comptime count: comptime_int, insts: [count]?Air.Inst.Index, register_class: RegisterBitSet, - ) AllocateRegistersError![count]Register { + ) AllocationError![count]Register { comptime assert(count > 0 and count <= tracked_registers.len); var locked_registers = self.locked_registers; @@ -338,7 +333,7 @@ pub fn RegisterManager( self: *Self, inst: ?Air.Inst.Index, register_class: RegisterBitSet, - ) AllocateRegistersError!Register { + ) AllocationError!Register { return (try self.allocRegs(1, .{inst}, register_class))[0]; } @@ -349,7 +344,7 @@ pub fn RegisterManager( self: *Self, tracked_index: TrackedIndex, inst: ?Air.Inst.Index, - ) AllocateRegistersError!void { + ) AllocationError!void { log.debug("getReg {} for inst {?}", .{ regAtTrackedIndex(tracked_index), inst }); if (!self.isRegIndexFree(tracked_index)) { // Move the instruction that was previously there to a @@ -362,7 +357,7 @@ pub fn RegisterManager( } self.getRegIndexAssumeFree(tracked_index, inst); } - pub fn getReg(self: *Self, reg: Register, inst: ?Air.Inst.Index) AllocateRegistersError!void { + pub fn getReg(self: *Self, reg: Register, inst: ?Air.Inst.Index) AllocationError!void { log.debug("getting reg: {}", .{reg}); return self.getRegIndex(indexOfRegIntoTracked(reg) orelse return, inst); } @@ -370,7 +365,7 @@ pub fn RegisterManager( self: *Self, comptime reg: Register, inst: ?Air.Inst.Index, - ) AllocateRegistersError!void { + ) AllocationError!void { return self.getRegIndex((comptime indexOfRegIntoTracked(reg)) orelse return, inst); } diff --git a/test/behavior.zig b/test/behavior.zig index f5ad19a9215e..e0e07b24cdc5 100644 --- a/test/behavior.zig +++ b/test/behavior.zig @@ -31,8 +31,6 @@ test { _ = @import("behavior/error.zig"); _ = @import("behavior/eval.zig"); _ = @import("behavior/export_builtin.zig"); - _ = @import("behavior/export_self_referential_type_info.zig"); - _ = @import("behavior/extern.zig"); _ = @import("behavior/field_parent_ptr.zig"); _ = @import("behavior/floatop.zig"); _ = @import("behavior/fn.zig"); @@ -45,7 +43,6 @@ test { _ = @import("behavior/hasfield.zig"); _ = @import("behavior/if.zig"); _ = @import("behavior/import.zig"); - _ = @import("behavior/import_c_keywords.zig"); _ = @import("behavior/incomplete_struct_param_tld.zig"); _ = @import("behavior/inline_switch.zig"); _ = @import("behavior/int128.zig"); @@ -127,6 +124,16 @@ test { { _ = @import("behavior/export_keyword.zig"); } + + if (!builtin.cpu.arch.isWasm()) { + // Due to lack of import/export of global support + // (https://github.com/ziglang/zig/issues/4866), these tests correctly + // cause linker errors, since a data symbol cannot be exported when + // building an executable. 
+ _ = @import("behavior/export_self_referential_type_info.zig"); + _ = @import("behavior/extern.zig"); + _ = @import("behavior/import_c_keywords.zig"); + } } // This bug only repros in the root file diff --git a/test/behavior/export_builtin.zig b/test/behavior/export_builtin.zig index 6ca10376a127..bd53a1df9314 100644 --- a/test/behavior/export_builtin.zig +++ b/test/behavior/export_builtin.zig @@ -6,6 +6,11 @@ test "exporting enum value" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + if (builtin.cpu.arch.isWasm()) { + // https://github.com/ziglang/zig/issues/4866 + return error.SkipZigTest; + } + const S = struct { const E = enum(c_int) { one, two }; const e: E = .two; @@ -33,6 +38,11 @@ test "exporting using namespace access" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + if (builtin.cpu.arch.isWasm()) { + // https://github.com/ziglang/zig/issues/4866 + return error.SkipZigTest; + } + const S = struct { const Inner = struct { const x: u32 = 5; @@ -46,7 +56,6 @@ test "exporting using namespace access" { } test "exporting comptime-known value" { - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and (builtin.target.ofmt != .elf and @@ -56,6 +65,11 @@ test "exporting comptime-known value" { if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.cpu.arch.isWasm()) { + // https://github.com/ziglang/zig/issues/4866 + return error.SkipZigTest; + } + const x: u32 = 10; @export(&x, .{ .name = "exporting_comptime_known_value_foo" }); const S = struct { diff --git a/test/incremental/add_decl b/test/incremental/add_decl index 6b3a0dad8497..e3444220f86b 100644 --- a/test/incremental/add_decl +++ b/test/incremental/add_decl @@ -1,6 +1,7 @@ #target=x86_64-linux-selfhosted #target=x86_64-linux-cbe #target=x86_64-windows-cbe +#target=wasm32-wasi-selfhosted #update=initial version #file=main.zig const std = @import("std"); diff --git a/test/incremental/add_decl_namespaced b/test/incremental/add_decl_namespaced index 48ed5cfd2ed9..9212a0da5865 100644 --- a/test/incremental/add_decl_namespaced +++ b/test/incremental/add_decl_namespaced @@ -1,6 +1,7 @@ #target=x86_64-linux-selfhosted #target=x86_64-linux-cbe #target=x86_64-windows-cbe +#target=wasm32-wasi-selfhosted #update=initial version #file=main.zig const std = @import("std"); diff --git a/test/incremental/change_generic_line_number b/test/incremental/change_generic_line_number index e59723306dcb..bed4372b3712 100644 --- a/test/incremental/change_generic_line_number +++ b/test/incremental/change_generic_line_number @@ -1,4 +1,5 @@ #target=x86_64-linux-selfhosted +#target=wasm32-wasi-selfhosted #update=initial version #file=main.zig const std = @import("std"); diff --git a/test/incremental/change_line_number b/test/incremental/change_line_number index c272bcb5dfa5..887e5ffd21d7 100644 --- a/test/incremental/change_line_number +++ b/test/incremental/change_line_number @@ -1,4 +1,5 @@ #target=x86_64-linux-selfhosted +#target=wasm32-wasi-selfhosted #update=initial version #file=main.zig const std = @import("std"); diff --git a/test/incremental/change_shift_op b/test/incremental/change_shift_op index bd88a70def9e..bface3a3838d 100644 --- 
a/test/incremental/change_shift_op +++ b/test/incremental/change_shift_op @@ -1,6 +1,7 @@ #target=x86_64-linux-selfhosted #target=x86_64-linux-cbe #target=x86_64-windows-cbe +#target=wasm32-wasi-selfhosted #update=initial version #file=main.zig pub fn main() !void { diff --git a/test/incremental/change_struct_same_fields b/test/incremental/change_struct_same_fields index f742bab87031..650bd8c9387c 100644 --- a/test/incremental/change_struct_same_fields +++ b/test/incremental/change_struct_same_fields @@ -1,6 +1,7 @@ #target=x86_64-linux-selfhosted #target=x86_64-linux-cbe #target=x86_64-windows-cbe +#target=wasm32-wasi-selfhosted #update=initial version #file=main.zig const S = extern struct { x: u8, y: u8 }; diff --git a/test/incremental/compile_error_then_log b/test/incremental/compile_error_then_log index 00ccef9290d1..9ab844bb4aa3 100644 --- a/test/incremental/compile_error_then_log +++ b/test/incremental/compile_error_then_log @@ -1,6 +1,7 @@ #target=x86_64-linux-selfhosted #target=x86_64-linux-cbe #target=x86_64-windows-cbe +#target=wasm32-wasi-selfhosted #update=initial version with compile error #file=main.zig comptime { diff --git a/test/incremental/delete_comptime_decls b/test/incremental/delete_comptime_decls index 03ddc6812857..45c77048eb4f 100644 --- a/test/incremental/delete_comptime_decls +++ b/test/incremental/delete_comptime_decls @@ -1,6 +1,7 @@ #target=x86_64-linux-selfhosted #target=x86_64-linux-cbe #target=x86_64-windows-cbe +#target=wasm32-wasi-selfhosted #update=initial version #file=main.zig pub fn main() void {} diff --git a/test/incremental/fix_astgen_failure b/test/incremental/fix_astgen_failure index 2298bc5248f6..a57b1ebde321 100644 --- a/test/incremental/fix_astgen_failure +++ b/test/incremental/fix_astgen_failure @@ -1,6 +1,7 @@ #target=x86_64-linux-selfhosted #target=x86_64-linux-cbe #target=x86_64-windows-cbe +#target=wasm32-wasi-selfhosted #update=initial version with error #file=main.zig pub fn main() !void { diff --git a/test/incremental/hello b/test/incremental/hello index 3526b47f7cb4..d1bc8760711c 100644 --- a/test/incremental/hello +++ b/test/incremental/hello @@ -1,6 +1,7 @@ #target=x86_64-linux-selfhosted #target=x86_64-linux-cbe #target=x86_64-windows-cbe +#target=wasm32-wasi-selfhosted #update=initial version #file=main.zig const std = @import("std"); diff --git a/test/incremental/modify_inline_fn b/test/incremental/modify_inline_fn index cd5361fb1776..726b2ca22ad6 100644 --- a/test/incremental/modify_inline_fn +++ b/test/incremental/modify_inline_fn @@ -1,6 +1,7 @@ #target=x86_64-linux-selfhosted #target=x86_64-linux-cbe #target=x86_64-windows-cbe +#target=wasm32-wasi-selfhosted #update=initial version #file=main.zig const std = @import("std"); diff --git a/test/incremental/move_src b/test/incremental/move_src index 908135485cac..3e935134305a 100644 --- a/test/incremental/move_src +++ b/test/incremental/move_src @@ -1,6 +1,7 @@ #target=x86_64-linux-selfhosted #target=x86_64-linux-cbe #target=x86_64-windows-cbe +#target=wasm32-wasi-selfhosted #update=initial version #file=main.zig const std = @import("std"); diff --git a/test/incremental/recursive_function_becomes_non_recursive b/test/incremental/recursive_function_becomes_non_recursive index e6f27bf2b416..2ec483e3e5b4 100644 --- a/test/incremental/recursive_function_becomes_non_recursive +++ b/test/incremental/recursive_function_becomes_non_recursive @@ -1,6 +1,7 @@ #target=x86_64-linux-selfhosted #target=x86_64-linux-cbe #target=x86_64-windows-cbe +#target=wasm32-wasi-selfhosted #update=initial 
version #file=main.zig pub fn main() !void { diff --git a/test/incremental/remove_enum_field b/test/incremental/remove_enum_field index 3a882ae0f1b8..a1e5e20fd33f 100644 --- a/test/incremental/remove_enum_field +++ b/test/incremental/remove_enum_field @@ -1,6 +1,7 @@ #target=x86_64-linux-selfhosted #target=x86_64-linux-cbe #target=x86_64-windows-cbe +#target=wasm32-wasi-selfhosted #update=initial version #file=main.zig const MyEnum = enum(u8) { diff --git a/test/incremental/remove_invalid_union_backing_enum b/test/incremental/remove_invalid_union_backing_enum index ded6304531cf..4308899f9aef 100644 --- a/test/incremental/remove_invalid_union_backing_enum +++ b/test/incremental/remove_invalid_union_backing_enum @@ -1,6 +1,7 @@ #target=x86_64-linux-selfhosted #target=x86_64-linux-cbe #target=x86_64-windows-cbe +#target=wasm32-wasi-selfhosted #update=initial version #file=main.zig const E = enum { a, b, c }; diff --git a/test/incremental/temporary_parse_error b/test/incremental/temporary_parse_error index 4668c53b32f0..675232ea94bb 100644 --- a/test/incremental/temporary_parse_error +++ b/test/incremental/temporary_parse_error @@ -1,6 +1,7 @@ #target=x86_64-linux-selfhosted #target=x86_64-linux-cbe #target=x86_64-windows-cbe +#target=wasm32-wasi-selfhosted #update=initial version #file=main.zig const std = @import("std"); diff --git a/test/incremental/type_becomes_comptime_only b/test/incremental/type_becomes_comptime_only index 2da31ec5f2ce..3bcae1cd2112 100644 --- a/test/incremental/type_becomes_comptime_only +++ b/test/incremental/type_becomes_comptime_only @@ -1,6 +1,7 @@ #target=x86_64-linux-selfhosted #target=x86_64-linux-cbe #target=x86_64-windows-cbe +#target=wasm32-wasi-selfhosted #update=initial version #file=main.zig const SomeType = u32; diff --git a/test/incremental/unreferenced_error b/test/incremental/unreferenced_error index 3dfe0ab758e8..29a9a34d9782 100644 --- a/test/incremental/unreferenced_error +++ b/test/incremental/unreferenced_error @@ -1,6 +1,7 @@ #target=x86_64-linux-selfhosted #target=x86_64-linux-cbe #target=x86_64-windows-cbe +#target=wasm32-wasi-selfhosted #update=initial version #file=main.zig const std = @import("std"); diff --git a/test/link/build.zig.zon b/test/link/build.zig.zon index d304212c051a..16bba08c4e01 100644 --- a/test/link/build.zig.zon +++ b/test/link/build.zig.zon @@ -24,9 +24,6 @@ .wasm_basic_features = .{ .path = "wasm/basic-features", }, - .wasm_bss = .{ - .path = "wasm/bss", - }, .wasm_export = .{ .path = "wasm/export", }, @@ -48,9 +45,6 @@ .wasm_producers = .{ .path = "wasm/producers", }, - .wasm_segments = .{ - .path = "wasm/segments", - }, .wasm_shared_memory = .{ .path = "wasm/shared-memory", }, diff --git a/test/link/wasm/archive/build.zig b/test/link/wasm/archive/build.zig index e57eb25fc663..4606f1d7cdb7 100644 --- a/test/link/wasm/archive/build.zig +++ b/test/link/wasm/archive/build.zig @@ -1,7 +1,5 @@ const std = @import("std"); -pub const requires_stage2 = true; - pub fn build(b: *std.Build) void { const test_step = b.step("test", "Test it"); b.default_step = test_step; diff --git a/test/link/wasm/basic-features/build.zig b/test/link/wasm/basic-features/build.zig index 1eaeeb5196ae..d8c854c407ce 100644 --- a/test/link/wasm/basic-features/build.zig +++ b/test/link/wasm/basic-features/build.zig @@ -1,7 +1,5 @@ const std = @import("std"); -pub const requires_stage2 = true; - pub fn build(b: *std.Build) void { // Library with explicitly set cpu features const lib = b.addExecutable(.{ diff --git a/test/link/wasm/bss/build.zig 
b/test/link/wasm/bss/build.zig deleted file mode 100644 index d73c7ed75784..000000000000 --- a/test/link/wasm/bss/build.zig +++ /dev/null @@ -1,95 +0,0 @@ -const std = @import("std"); - -pub const requires_stage2 = true; - -pub fn build(b: *std.Build) void { - const test_step = b.step("test", "Test"); - b.default_step = test_step; - - add(b, test_step, .Debug, true); - add(b, test_step, .ReleaseFast, false); - add(b, test_step, .ReleaseSmall, false); - add(b, test_step, .ReleaseSafe, true); -} - -fn add(b: *std.Build, test_step: *std.Build.Step, optimize_mode: std.builtin.OptimizeMode, is_safe: bool) void { - { - const lib = b.addExecutable(.{ - .name = "lib", - .root_module = b.createModule(.{ - .root_source_file = b.path("lib.zig"), - .target = b.resolveTargetQuery(.{ .cpu_arch = .wasm32, .os_tag = .freestanding }), - .optimize = optimize_mode, - .strip = false, - }), - }); - lib.entry = .disabled; - lib.use_llvm = false; - lib.use_lld = false; - // to make sure the bss segment is emitted, we must import memory - lib.import_memory = true; - lib.link_gc_sections = false; - - const check_lib = lib.checkObject(); - - // since we import memory, make sure it exists with the correct naming - check_lib.checkInHeaders(); - check_lib.checkExact("Section import"); - check_lib.checkExact("entries 1"); - check_lib.checkExact("module env"); // default module name is "env" - check_lib.checkExact("name memory"); // as per linker specification - - // since we are importing memory, ensure it's not exported - check_lib.checkInHeaders(); - check_lib.checkNotPresent("Section export"); - - // validate the name of the stack pointer - check_lib.checkInHeaders(); - check_lib.checkExact("Section custom"); - check_lib.checkExact("type data_segment"); - check_lib.checkExact("names 2"); - check_lib.checkExact("index 0"); - check_lib.checkExact("name .rodata"); - // for safe optimization modes `undefined` is stored in data instead of bss. - if (is_safe) { - check_lib.checkExact("index 1"); - check_lib.checkExact("name .data"); - check_lib.checkNotPresent("name .bss"); - } else { - check_lib.checkExact("index 1"); // bss section always last - check_lib.checkExact("name .bss"); - } - test_step.dependOn(&check_lib.step); - } - - // verify zero'd declaration is stored in bss for all optimization modes. 
- { - const lib = b.addExecutable(.{ - .name = "lib", - .root_module = b.createModule(.{ - .root_source_file = b.path("lib2.zig"), - .target = b.resolveTargetQuery(.{ .cpu_arch = .wasm32, .os_tag = .freestanding }), - .optimize = optimize_mode, - .strip = false, - }), - }); - lib.entry = .disabled; - lib.use_llvm = false; - lib.use_lld = false; - // to make sure the bss segment is emitted, we must import memory - lib.import_memory = true; - lib.link_gc_sections = false; - - const check_lib = lib.checkObject(); - check_lib.checkInHeaders(); - check_lib.checkExact("Section custom"); - check_lib.checkExact("type data_segment"); - check_lib.checkExact("names 2"); - check_lib.checkExact("index 0"); - check_lib.checkExact("name .rodata"); - check_lib.checkExact("index 1"); - check_lib.checkExact("name .bss"); - - test_step.dependOn(&check_lib.step); - } -} diff --git a/test/link/wasm/bss/lib.zig b/test/link/wasm/bss/lib.zig deleted file mode 100644 index c1691c608e27..000000000000 --- a/test/link/wasm/bss/lib.zig +++ /dev/null @@ -1,5 +0,0 @@ -pub var bss: u32 = undefined; - -export fn foo() void { - _ = bss; -} diff --git a/test/link/wasm/bss/lib2.zig b/test/link/wasm/bss/lib2.zig deleted file mode 100644 index 9f43128880b0..000000000000 --- a/test/link/wasm/bss/lib2.zig +++ /dev/null @@ -1,5 +0,0 @@ -pub var bss: u32 = 0; - -export fn foo() void { - _ = bss; -} diff --git a/test/link/wasm/export-data/build.zig b/test/link/wasm/export-data/build.zig index 726d6caba553..865017a22f8e 100644 --- a/test/link/wasm/export-data/build.zig +++ b/test/link/wasm/export-data/build.zig @@ -4,48 +4,24 @@ pub fn build(b: *std.Build) void { const test_step = b.step("test", "Test"); b.default_step = test_step; - if (@import("builtin").os.tag == .windows) { - // TODO: Fix open handle in wasm-linker refraining rename from working on Windows. - return; - } - const lib = b.addExecutable(.{ .name = "lib", .root_module = b.createModule(.{ .root_source_file = b.path("lib.zig"), - .optimize = .ReleaseSafe, // to make the output deterministic in address positions + .optimize = .Debug, .target = b.resolveTargetQuery(.{ .cpu_arch = .wasm32, .os_tag = .freestanding }), }), }); lib.entry = .disabled; lib.use_lld = false; lib.root_module.export_symbol_names = &.{ "foo", "bar" }; - lib.global_base = 0; // put data section at address 0 to make data symbols easier to parse - - const check_lib = lib.checkObject(); - - check_lib.checkInHeaders(); - check_lib.checkExact("Section global"); - check_lib.checkExact("entries 3"); - check_lib.checkExact("type i32"); // stack pointer so skip other fields - check_lib.checkExact("type i32"); - check_lib.checkExact("mutable false"); - check_lib.checkExtract("i32.const {foo_address}"); - check_lib.checkExact("type i32"); - check_lib.checkExact("mutable false"); - check_lib.checkExtract("i32.const {bar_address}"); - check_lib.checkComputeCompare("foo_address", .{ .op = .eq, .value = .{ .literal = 4 } }); - check_lib.checkComputeCompare("bar_address", .{ .op = .eq, .value = .{ .literal = 0 } }); - - check_lib.checkInHeaders(); - check_lib.checkExact("Section export"); - check_lib.checkExact("entries 3"); - check_lib.checkExact("name foo"); - check_lib.checkExact("kind global"); - check_lib.checkExact("index 1"); - check_lib.checkExact("name bar"); - check_lib.checkExact("kind global"); - check_lib.checkExact("index 2"); + // Object being linked has neither functions nor globals named "foo" or "bar" and + // so these names correctly fail to be exported when creating an executable. 
+ lib.expect_errors = .{ .exact = &.{ + "error: manually specified export name 'foo' undefined", + "error: manually specified export name 'bar' undefined", + } }; + _ = lib.getEmittedBin(); - test_step.dependOn(&check_lib.step); + test_step.dependOn(&lib.step); } diff --git a/test/link/wasm/export/build.zig b/test/link/wasm/export/build.zig index 5c40612ca29b..cf2c75e3b42a 100644 --- a/test/link/wasm/export/build.zig +++ b/test/link/wasm/export/build.zig @@ -1,22 +1,17 @@ const std = @import("std"); -pub const requires_stage2 = true; - pub fn build(b: *std.Build) void { const test_step = b.step("test", "Test it"); b.default_step = test_step; add(b, test_step, .Debug); - add(b, test_step, .ReleaseFast); - add(b, test_step, .ReleaseSmall); - add(b, test_step, .ReleaseSafe); } fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void { const no_export = b.addExecutable(.{ .name = "no-export", .root_module = b.createModule(.{ - .root_source_file = b.path("main.zig"), + .root_source_file = b.path("main-hidden.zig"), .optimize = optimize, .target = b.resolveTargetQuery(.{ .cpu_arch = .wasm32, .os_tag = .freestanding }), }), @@ -41,7 +36,7 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.Optimize const force_export = b.addExecutable(.{ .name = "force", .root_module = b.createModule(.{ - .root_source_file = b.path("main.zig"), + .root_source_file = b.path("main-hidden.zig"), .optimize = optimize, .target = b.resolveTargetQuery(.{ .cpu_arch = .wasm32, .os_tag = .freestanding }), }), diff --git a/test/link/wasm/export/main-hidden.zig b/test/link/wasm/export/main-hidden.zig new file mode 100644 index 000000000000..12589d064c75 --- /dev/null +++ b/test/link/wasm/export/main-hidden.zig @@ -0,0 +1,4 @@ +fn foo() callconv(.c) void {} +comptime { + @export(&foo, .{ .name = "foo", .visibility = .hidden }); +} diff --git a/test/link/wasm/extern/build.zig b/test/link/wasm/extern/build.zig index 236f01fd886e..4976c97b316a 100644 --- a/test/link/wasm/extern/build.zig +++ b/test/link/wasm/extern/build.zig @@ -1,15 +1,10 @@ const std = @import("std"); -pub const requires_stage2 = true; - pub fn build(b: *std.Build) void { const test_step = b.step("test", "Test it"); b.default_step = test_step; add(b, test_step, .Debug); - add(b, test_step, .ReleaseFast); - add(b, test_step, .ReleaseSmall); - add(b, test_step, .ReleaseSafe); } fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void { diff --git a/test/link/wasm/function-table/build.zig b/test/link/wasm/function-table/build.zig index 7fd306285cad..f922b06aecb3 100644 --- a/test/link/wasm/function-table/build.zig +++ b/test/link/wasm/function-table/build.zig @@ -1,32 +1,13 @@ const std = @import("std"); -pub const requires_stage2 = true; - pub fn build(b: *std.Build) void { const test_step = b.step("test", "Test it"); b.default_step = test_step; add(b, test_step, .Debug); - add(b, test_step, .ReleaseFast); - add(b, test_step, .ReleaseSmall); - add(b, test_step, .ReleaseSafe); } fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void { - const import_table = b.addExecutable(.{ - .name = "import_table", - .root_module = b.createModule(.{ - .root_source_file = b.path("lib.zig"), - .target = b.resolveTargetQuery(.{ .cpu_arch = .wasm32, .os_tag = .freestanding }), - .optimize = optimize, - }), - }); - import_table.entry = .disabled; - import_table.use_llvm = false; - import_table.use_lld = false; - import_table.import_table = true; - 
import_table.link_gc_sections = false; - const export_table = b.addExecutable(.{ .name = "export_table", .root_module = b.createModule(.{ @@ -54,24 +35,12 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.Optimize regular_table.use_lld = false; regular_table.link_gc_sections = false; // Ensure function table is not empty - const check_import = import_table.checkObject(); const check_export = export_table.checkObject(); const check_regular = regular_table.checkObject(); - check_import.checkInHeaders(); - check_import.checkExact("Section import"); - check_import.checkExact("entries 1"); - check_import.checkExact("module env"); - check_import.checkExact("name __indirect_function_table"); - check_import.checkExact("kind table"); - check_import.checkExact("type funcref"); - check_import.checkExact("min 1"); // 1 function pointer - check_import.checkNotPresent("max"); // when importing, we do not provide a max - check_import.checkNotPresent("Section table"); // we're importing it - check_export.checkInHeaders(); check_export.checkExact("Section export"); - check_export.checkExact("entries 2"); + check_export.checkExact("entries 3"); check_export.checkExact("name __indirect_function_table"); // as per linker specification check_export.checkExact("kind table"); @@ -89,7 +58,6 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.Optimize check_regular.checkExact("i32.const 1"); // we want to start function indexes at 1 check_regular.checkExact("indexes 1"); // 1 function pointer - test_step.dependOn(&check_import.step); test_step.dependOn(&check_export.step); test_step.dependOn(&check_regular.step); } diff --git a/test/link/wasm/infer-features/build.zig b/test/link/wasm/infer-features/build.zig index 83aab77841ff..60993af18349 100644 --- a/test/link/wasm/infer-features/build.zig +++ b/test/link/wasm/infer-features/build.zig @@ -1,7 +1,5 @@ const std = @import("std"); -pub const requires_stage2 = true; - pub fn build(b: *std.Build) void { // Wasm Object file which we will use to infer the features from const c_obj = b.addObject(.{ @@ -37,27 +35,10 @@ pub fn build(b: *std.Build) void { lib.use_lld = false; lib.root_module.addObject(c_obj); - // Verify the result contains the features from the C Object file. 
- const check = lib.checkObject(); - check.checkInHeaders(); - check.checkExact("name target_features"); - check.checkExact("features 14"); - check.checkExact("+ atomics"); - check.checkExact("+ bulk-memory"); - check.checkExact("+ exception-handling"); - check.checkExact("+ extended-const"); - check.checkExact("+ half-precision"); - check.checkExact("+ multimemory"); - check.checkExact("+ multivalue"); - check.checkExact("+ mutable-globals"); - check.checkExact("+ nontrapping-fptoint"); - check.checkExact("+ reference-types"); - check.checkExact("+ relaxed-simd"); - check.checkExact("+ sign-ext"); - check.checkExact("+ simd128"); - check.checkExact("+ tail-call"); + lib.expect_errors = .{ .contains = "error: object requires atomics but specified target features exclude atomics" }; + _ = lib.getEmittedBin(); const test_step = b.step("test", "Run linker test"); - test_step.dependOn(&check.step); + test_step.dependOn(&lib.step); b.default_step = test_step; } diff --git a/test/link/wasm/producers/build.zig b/test/link/wasm/producers/build.zig index e31b07ce8b3b..8989103fc3f8 100644 --- a/test/link/wasm/producers/build.zig +++ b/test/link/wasm/producers/build.zig @@ -1,8 +1,6 @@ const std = @import("std"); const builtin = @import("builtin"); -pub const requires_stage2 = true; - pub fn build(b: *std.Build) void { const test_step = b.step("test", "Test it"); b.default_step = test_step; diff --git a/test/link/wasm/segments/build.zig b/test/link/wasm/segments/build.zig deleted file mode 100644 index c2c62a3b8867..000000000000 --- a/test/link/wasm/segments/build.zig +++ /dev/null @@ -1,48 +0,0 @@ -const std = @import("std"); - -pub const requires_stage2 = true; - -pub fn build(b: *std.Build) void { - const test_step = b.step("test", "Test it"); - b.default_step = test_step; - - add(b, test_step, .Debug); - add(b, test_step, .ReleaseFast); - add(b, test_step, .ReleaseSmall); - add(b, test_step, .ReleaseSafe); -} - -fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void { - const lib = b.addExecutable(.{ - .name = "lib", - .root_module = b.createModule(.{ - .root_source_file = b.path("lib.zig"), - .target = b.resolveTargetQuery(.{ .cpu_arch = .wasm32, .os_tag = .freestanding }), - .optimize = optimize, - .strip = false, - }), - }); - lib.entry = .disabled; - lib.use_llvm = false; - lib.use_lld = false; - lib.link_gc_sections = false; // so data is not garbage collected and we can verify data section - b.installArtifact(lib); - - const check_lib = lib.checkObject(); - check_lib.checkInHeaders(); - check_lib.checkExact("Section data"); - check_lib.checkExact("entries 2"); // rodata & data, no bss because we're exporting memory - - check_lib.checkInHeaders(); - check_lib.checkExact("Section custom"); - check_lib.checkInHeaders(); - check_lib.checkExact("name name"); // names custom section - check_lib.checkInHeaders(); - check_lib.checkExact("type data_segment"); - check_lib.checkExact("names 2"); - check_lib.checkExact("index 0"); - check_lib.checkExact("name .rodata"); - check_lib.checkExact("index 1"); - check_lib.checkExact("name .data"); - test_step.dependOn(&check_lib.step); -} diff --git a/test/link/wasm/segments/lib.zig b/test/link/wasm/segments/lib.zig deleted file mode 100644 index 65bf7e32a284..000000000000 --- a/test/link/wasm/segments/lib.zig +++ /dev/null @@ -1,9 +0,0 @@ -pub const rodata: u32 = 5; -pub var data: u32 = 10; -pub var bss: u32 = undefined; - -export fn foo() void { - _ = rodata; - _ = data; - _ = bss; -} diff --git 
a/test/link/wasm/shared-memory/build.zig b/test/link/wasm/shared-memory/build.zig index 7725454f3f56..02dc08a282c7 100644 --- a/test/link/wasm/shared-memory/build.zig +++ b/test/link/wasm/shared-memory/build.zig @@ -6,8 +6,6 @@ pub fn build(b: *std.Build) void { add(b, test_step, .Debug); add(b, test_step, .ReleaseFast); - add(b, test_step, .ReleaseSmall); - add(b, test_step, .ReleaseSafe); } fn add(b: *std.Build, test_step: *std.Build.Step, optimize_mode: std.builtin.OptimizeMode) void { @@ -45,6 +43,7 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize_mode: std.builtin.Opt check_exe.checkInHeaders(); check_exe.checkExact("Section export"); check_exe.checkExact("entries 2"); + check_exe.checkExact("name foo"); check_exe.checkExact("name memory"); // ensure we also export memory again // This section *must* be emit as the start function is set to the index @@ -71,23 +70,27 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize_mode: std.builtin.Opt check_exe.checkExact("type function"); if (optimize_mode == .Debug) { check_exe.checkExact("name __wasm_init_memory"); + check_exe.checkExact("name __wasm_init_tls"); } - check_exe.checkExact("name __wasm_init_tls"); check_exe.checkExact("type global"); // In debug mode the symbol __tls_base is resolved to an undefined symbol // from the object file, hence its placement differs than in release modes // where the entire tls segment is optimized away, and tls_base will have // its original position. - check_exe.checkExact("name __tls_base"); - check_exe.checkExact("name __tls_size"); - check_exe.checkExact("name __tls_align"); - - check_exe.checkExact("type data_segment"); if (optimize_mode == .Debug) { + check_exe.checkExact("name __tls_base"); + check_exe.checkExact("name __tls_size"); + check_exe.checkExact("name __tls_align"); + + check_exe.checkExact("type data_segment"); check_exe.checkExact("names 1"); check_exe.checkExact("index 0"); check_exe.checkExact("name .tdata"); + } else { + check_exe.checkNotPresent("name __tls_base"); + check_exe.checkNotPresent("name __tls_size"); + check_exe.checkNotPresent("name __tls_align"); } test_step.dependOn(&check_exe.step); diff --git a/test/link/wasm/stack_pointer/build.zig b/test/link/wasm/stack_pointer/build.zig index 57ce5ddefdeb..793e3ae94f91 100644 --- a/test/link/wasm/stack_pointer/build.zig +++ b/test/link/wasm/stack_pointer/build.zig @@ -1,7 +1,5 @@ const std = @import("std"); -pub const requires_stage2 = true; - pub fn build(b: *std.Build) void { const test_step = b.step("test", "Test it"); b.default_step = test_step; diff --git a/test/link/wasm/type/build.zig b/test/link/wasm/type/build.zig index aa19d4d40393..063fd779b378 100644 --- a/test/link/wasm/type/build.zig +++ b/test/link/wasm/type/build.zig @@ -1,15 +1,10 @@ const std = @import("std"); -pub const requires_stage2 = true; - pub fn build(b: *std.Build) void { const test_step = b.step("test", "Test it"); b.default_step = test_step; add(b, test_step, .Debug); - add(b, test_step, .ReleaseFast); - add(b, test_step, .ReleaseSmall); - add(b, test_step, .ReleaseSafe); } fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void { diff --git a/test/standalone/test_runner_path/build.zig b/test/standalone/test_runner_path/build.zig index a7b8eaf32136..1666031ba39c 100644 --- a/test/standalone/test_runner_path/build.zig +++ b/test/standalone/test_runner_path/build.zig @@ -1,7 +1,5 @@ const std = @import("std"); -pub const requires_stage2 = true; - pub fn build(b: *std.Build) void { const 
test_step = b.step("test", "Test the program"); b.default_step = test_step; diff --git a/test/tests.zig b/test/tests.zig index ecbaa2456aa0..306b0d04f016 100644 --- a/test/tests.zig +++ b/test/tests.zig @@ -1375,6 +1375,7 @@ const ModuleTestOptions = struct { skip_single_threaded: bool, skip_non_native: bool, skip_libc: bool, + use_llvm: ?bool = null, max_rss: usize = 0, no_builtin: bool = false, build_options: ?*std.Build.Step.Options = null, @@ -1411,6 +1412,10 @@ pub fn addModuleTests(b: *std.Build, options: ModuleTestOptions) *Step { if (options.skip_single_threaded and test_target.single_threaded == true) continue; + if (options.use_llvm) |use_llvm| { + if (test_target.use_llvm != use_llvm) continue; + } + // TODO get compiler-rt tests passing for self-hosted backends. if ((target.cpu.arch != .x86_64 or target.ofmt != .elf) and test_target.use_llvm == false and mem.eql(u8, options.name, "compiler-rt"))
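A brief illustration of the tri-state filter added in the tests.zig hunk above: `use_llvm` defaults to null in `ModuleTestOptions`, meaning no filtering, while an explicit true/false drops every test target whose own backend choice disagrees. The sketch below is a minimal, self-contained restatement of that logic; `FakeTarget` and `keepTarget` are invented names for illustration, not part of the real harness.

const std = @import("std");

// Toy stand-in for the real test target description; only the field the
// filter inspects is modeled here.
const FakeTarget = struct { use_llvm: bool };

/// Null means "no preference": every target is kept. Otherwise only targets
/// whose backend choice matches the request survive.
fn keepTarget(requested_use_llvm: ?bool, target: FakeTarget) bool {
    const use_llvm = requested_use_llvm orelse return true;
    return target.use_llvm == use_llvm;
}

test "use_llvm filter semantics" {
    try std.testing.expect(keepTarget(null, .{ .use_llvm = false }));
    try std.testing.expect(keepTarget(true, .{ .use_llvm = true }));
    try std.testing.expect(!keepTarget(false, .{ .use_llvm = true }));
}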
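Earlier in this section, register_manager.zig collapses the old error set into `AllocationError` with four members (OutOfRegisters, OutOfMemory, Overflow, CodegenFail) and drops the `link.File.UpdateDebugInfoError` merge. The following toy sketch shows the caller-side pattern this shape supports: only OutOfRegisters is something a backend can recover from locally, everything else propagates. The register counter, `tryAlloc`, and the "spill" here are invented purely for illustration and are not taken from the compiler sources.

const std = @import("std");

// Same four-member error set as the diff introduces; the rest is hypothetical.
const AllocationError = error{ OutOfRegisters, OutOfMemory, Overflow, CodegenFail };

fn tryAlloc(free_regs: *u8) AllocationError!u8 {
    if (free_regs.* == 0) return error.OutOfRegisters;
    free_regs.* -= 1;
    return free_regs.*;
}

/// OutOfRegisters is handled locally (stand-in for spilling a live value);
/// the remaining errors are simply forwarded to the caller.
fn allocWithSpill(free_regs: *u8) AllocationError!u8 {
    return tryAlloc(free_regs) catch |err| switch (err) {
        error.OutOfRegisters => blk: {
            free_regs.* += 1; // stand-in for spilling a live value to memory
            break :blk try tryAlloc(free_regs);
        },
        else => |e| return e,
    };
}

test "recover from OutOfRegisters by spilling" {
    var free_regs: u8 = 0;
    try std.testing.expectEqual(@as(u8, 0), try allocWithSpill(&free_regs));
}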