diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index c772d8e6f950..430a29c9d72e 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -869,8 +869,10 @@ pub noinline fn returnError(st: *StackTrace) void { } pub inline fn addErrRetTraceAddr(st: *StackTrace, addr: usize) void { - st.instruction_addresses[st.index & (st.instruction_addresses.len - 1)] = addr; - st.index +%= 1; + if (st.index < st.instruction_addresses.len) + st.instruction_addresses[st.index] = addr; + + st.index += 1; } const std = @import("std.zig"); diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 93216f00586f..21b05249a1f2 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -411,6 +411,14 @@ pub fn writeStackTrace( const return_address = stack_trace.instruction_addresses[frame_index]; try printSourceAtAddress(debug_info, out_stream, return_address - 1, tty_config); } + + if (stack_trace.index > stack_trace.instruction_addresses.len) { + const dropped_frames = stack_trace.index - stack_trace.instruction_addresses.len; + + tty_config.setColor(out_stream, .Bold); + try out_stream.print("({d} additional stack frames skipped...)\n", .{dropped_frames}); + tty_config.setColor(out_stream, .Reset); + } } pub const StackIterator = struct { diff --git a/src/Air.zig b/src/Air.zig index 57479af5908d..3bcbdb8e98ab 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -733,6 +733,10 @@ pub const Inst = struct { /// Uses the `ty_op` field. addrspace_cast, + /// Saves the error return trace index, if any. Otherwise, returns 0. + /// Uses the `ty_pl` field. 
+ save_err_return_trace_index, + pub fn fromCmpOp(op: std.math.CompareOperator, optimized: bool) Tag { switch (op) { .lt => return if (optimized) .cmp_lt_optimized else .cmp_lt, @@ -1179,6 +1183,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .slice_len, .ret_addr, .frame_addr, + .save_err_return_trace_index, => return Type.usize, .wasm_memory_grow => return Type.i32, diff --git a/src/AstGen.zig b/src/AstGen.zig index d04608b30028..07a972eaab9b 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -213,123 +213,149 @@ pub fn deinit(astgen: *AstGen, gpa: Allocator) void { astgen.ref_table.deinit(gpa); } -pub const ResultLoc = union(enum) { - /// The expression is the right-hand side of assignment to `_`. Only the side-effects of the - /// expression should be generated. The result instruction from the expression must - /// be ignored. - discard, - /// The expression has an inferred type, and it will be evaluated as an rvalue. - none, - /// The expression must generate a pointer rather than a value. For example, the left hand side - /// of an assignment uses this kind of result location. - ref, - /// The expression will be coerced into this type, but it will be evaluated as an rvalue. - ty: Zir.Inst.Ref, - /// Same as `ty` but for shift operands. - ty_shift_operand: Zir.Inst.Ref, - /// Same as `ty` but it is guaranteed that Sema will additionally perform the coercion, - /// so no `as` instruction needs to be emitted. - coerced_ty: Zir.Inst.Ref, - /// The expression must store its result into this typed pointer. The result instruction - /// from the expression must be ignored. - ptr: PtrResultLoc, - /// The expression must store its result into this allocation, which has an inferred type. - /// The result instruction from the expression must be ignored. - /// Always an instruction with tag `alloc_inferred`. 
- inferred_ptr: Zir.Inst.Ref, - /// There is a pointer for the expression to store its result into, however, its type - /// is inferred based on peer type resolution for a `Zir.Inst.Block`. - /// The result instruction from the expression must be ignored. - block_ptr: *GenZir, - - const PtrResultLoc = struct { - inst: Zir.Inst.Ref, - src_node: ?Ast.Node.Index = null, - }; +pub const ResultInfo = struct { + /// The semantics requested for the result location + rl: Loc, - pub const Strategy = struct { - elide_store_to_block_ptr_instructions: bool, - tag: Tag, - - pub const Tag = enum { - /// Both branches will use break_void; result location is used to communicate the - /// result instruction. - break_void, - /// Use break statements to pass the block result value, and call rvalue() at - /// the end depending on rl. Also elide the store_to_block_ptr instructions - /// depending on rl. - break_operand, - }; - }; + /// The "operator" consuming the result location + ctx: Context = .none, - fn strategy(rl: ResultLoc, block_scope: *GenZir) Strategy { - switch (rl) { - // In this branch there will not be any store_to_block_ptr instructions. - .none, .ty, .ty_shift_operand, .coerced_ty, .ref => return .{ - .tag = .break_operand, - .elide_store_to_block_ptr_instructions = false, - }, - .discard => return .{ - .tag = .break_void, - .elide_store_to_block_ptr_instructions = false, - }, - // The pointer got passed through to the sub-expressions, so we will use - // break_void here. - // In this branch there will not be any store_to_block_ptr instructions. - .ptr => return .{ - .tag = .break_void, - .elide_store_to_block_ptr_instructions = false, + /// Turns a `coerced_ty` back into a `ty`. Should be called at branch points + /// such as if and switch expressions. 
+ fn br(ri: ResultInfo) ResultInfo { + return switch (ri.rl) { + .coerced_ty => |ty| .{ + .rl = .{ .ty = ty }, + .ctx = ri.ctx, }, - .inferred_ptr, .block_ptr => { - if (block_scope.rvalue_rl_count == block_scope.break_count) { - // Neither prong of the if consumed the result location, so we can - // use break instructions to create an rvalue. - return .{ - .tag = .break_operand, - .elide_store_to_block_ptr_instructions = true, - }; - } else { - // Allow the store_to_block_ptr instructions to remain so that - // semantic analysis can turn them into bitcasts. - return .{ - .tag = .break_void, - .elide_store_to_block_ptr_instructions = false, - }; - } + else => ri, + }; + } + + fn zirTag(ri: ResultInfo) Zir.Inst.Tag { + switch (ri.rl) { + .ty => return switch (ri.ctx) { + .shift_op => .as_shift_operand, + else => .as_node, }, + else => unreachable, } } - /// Turns a `coerced_ty` back into a `ty`. Should be called at branch points - /// such as if and switch expressions. - fn br(rl: ResultLoc) ResultLoc { - return switch (rl) { - .coerced_ty => |ty| .{ .ty = ty }, - else => rl, + pub const Loc = union(enum) { + /// The expression is the right-hand side of assignment to `_`. Only the side-effects of the + /// expression should be generated. The result instruction from the expression must + /// be ignored. + discard, + /// The expression has an inferred type, and it will be evaluated as an rvalue. + none, + /// The expression must generate a pointer rather than a value. For example, the left hand side + /// of an assignment uses this kind of result location. + ref, + /// The expression will be coerced into this type, but it will be evaluated as an rvalue. + ty: Zir.Inst.Ref, + /// Same as `ty` but it is guaranteed that Sema will additionally perform the coercion, + /// so no `as` instruction needs to be emitted. + coerced_ty: Zir.Inst.Ref, + /// The expression must store its result into this typed pointer. 
The result instruction + /// from the expression must be ignored. + ptr: PtrResultLoc, + /// The expression must store its result into this allocation, which has an inferred type. + /// The result instruction from the expression must be ignored. + /// Always an instruction with tag `alloc_inferred`. + inferred_ptr: Zir.Inst.Ref, + /// There is a pointer for the expression to store its result into, however, its type + /// is inferred based on peer type resolution for a `Zir.Inst.Block`. + /// The result instruction from the expression must be ignored. + block_ptr: *GenZir, + + const PtrResultLoc = struct { + inst: Zir.Inst.Ref, + src_node: ?Ast.Node.Index = null, }; - } - fn zirTag(rl: ResultLoc) Zir.Inst.Tag { - return switch (rl) { - .ty => .as_node, - .ty_shift_operand => .as_shift_operand, - else => unreachable, + pub const Strategy = struct { + elide_store_to_block_ptr_instructions: bool, + tag: Tag, + + pub const Tag = enum { + /// Both branches will use break_void; result location is used to communicate the + /// result instruction. + break_void, + /// Use break statements to pass the block result value, and call rvalue() at + /// the end depending on rl. Also elide the store_to_block_ptr instructions + /// depending on rl. + break_operand, + }; }; - } + + fn strategy(rl: Loc, block_scope: *GenZir) Strategy { + switch (rl) { + // In this branch there will not be any store_to_block_ptr instructions. + .none, .ty, .coerced_ty, .ref => return .{ + .tag = .break_operand, + .elide_store_to_block_ptr_instructions = false, + }, + .discard => return .{ + .tag = .break_void, + .elide_store_to_block_ptr_instructions = false, + }, + // The pointer got passed through to the sub-expressions, so we will use + // break_void here. + // In this branch there will not be any store_to_block_ptr instructions. 
+ .ptr => return .{ + .tag = .break_void, + .elide_store_to_block_ptr_instructions = false, + }, + .inferred_ptr, .block_ptr => { + if (block_scope.rvalue_rl_count == block_scope.break_count) { + // Neither prong of the if consumed the result location, so we can + // use break instructions to create an rvalue. + return .{ + .tag = .break_operand, + .elide_store_to_block_ptr_instructions = true, + }; + } else { + // Allow the store_to_block_ptr instructions to remain so that + // semantic analysis can turn them into bitcasts. + return .{ + .tag = .break_void, + .elide_store_to_block_ptr_instructions = false, + }; + } + }, + } + } + }; + + pub const Context = enum { + /// The expression is the operand to a return expression. + @"return", + /// The expression is the input to an error-handling operator (if-else, try, or catch). + error_handling_expr, + /// The expression is the right-hand side of a shift operation. + shift_op, + /// The expression is an argument in a function call. + fn_arg, + /// The expression is the right-hand side of an initializer for a `const` variable + const_init, + /// No specific operator in particular. 
+ none, + }; }; -pub const align_rl: ResultLoc = .{ .ty = .u29_type }; -pub const coerced_align_rl: ResultLoc = .{ .coerced_ty = .u29_type }; -pub const bool_rl: ResultLoc = .{ .ty = .bool_type }; -pub const type_rl: ResultLoc = .{ .ty = .type_type }; -pub const coerced_type_rl: ResultLoc = .{ .coerced_ty = .type_type }; +pub const align_ri: ResultInfo = .{ .rl = .{ .ty = .u29_type } }; +pub const coerced_align_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .u29_type } }; +pub const bool_ri: ResultInfo = .{ .rl = .{ .ty = .bool_type } }; +pub const type_ri: ResultInfo = .{ .rl = .{ .ty = .type_type } }; +pub const coerced_type_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .type_type } }; fn typeExpr(gz: *GenZir, scope: *Scope, type_node: Ast.Node.Index) InnerError!Zir.Inst.Ref { const prev_force_comptime = gz.force_comptime; gz.force_comptime = true; defer gz.force_comptime = prev_force_comptime; - return expr(gz, scope, coerced_type_rl, type_node); + return expr(gz, scope, coerced_type_ri, type_node); } fn reachableTypeExpr( @@ -342,24 +368,24 @@ fn reachableTypeExpr( gz.force_comptime = true; defer gz.force_comptime = prev_force_comptime; - return reachableExpr(gz, scope, coerced_type_rl, type_node, reachable_node); + return reachableExpr(gz, scope, coerced_type_ri, type_node, reachable_node); } /// Same as `expr` but fails with a compile error if the result type is `noreturn`. 
fn reachableExpr( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, reachable_node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { - return reachableExprComptime(gz, scope, rl, node, reachable_node, false); + return reachableExprComptime(gz, scope, ri, node, reachable_node, false); } fn reachableExprComptime( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, reachable_node: Ast.Node.Index, force_comptime: bool, @@ -368,7 +394,7 @@ fn reachableExprComptime( gz.force_comptime = prev_force_comptime or force_comptime; defer gz.force_comptime = prev_force_comptime; - const result_inst = try expr(gz, scope, rl, node); + const result_inst = try expr(gz, scope, ri, node); if (gz.refIsNoReturn(result_inst)) { try gz.astgen.appendErrorNodeNotes(reachable_node, "unreachable code", .{}, &[_]u32{ try gz.astgen.errNoteNode(node, "control flow is diverted here", .{}), @@ -569,14 +595,14 @@ fn lvalExpr(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Ins .@"orelse", => {}, } - return expr(gz, scope, .ref, node); + return expr(gz, scope, .{ .rl = .ref }, node); } /// Turn Zig AST into untyped ZIR instructions. /// When `rl` is discard, ptr, inferred_ptr, or inferred_ptr, the /// result instruction can be used to inspect whether it is isNoReturn() but that is it, /// it must otherwise not be used. 
-fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { +fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; const tree = astgen.tree; const main_tokens = tree.nodes.items(.main_token); @@ -617,161 +643,161 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr .assign => { try assign(gz, scope, node); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); }, .assign_shl => { try assignShift(gz, scope, node, .shl); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); }, .assign_shl_sat => { try assignShiftSat(gz, scope, node); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); }, .assign_shr => { try assignShift(gz, scope, node, .shr); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); }, .assign_bit_and => { try assignOp(gz, scope, node, .bit_and); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); }, .assign_bit_or => { try assignOp(gz, scope, node, .bit_or); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); }, .assign_bit_xor => { try assignOp(gz, scope, node, .xor); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); }, .assign_div => { try assignOp(gz, scope, node, .div); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); }, .assign_sub => { try assignOp(gz, scope, node, .sub); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); }, .assign_sub_wrap => { try assignOp(gz, scope, node, .subwrap); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); }, .assign_sub_sat => { try assignOp(gz, scope, node, .sub_sat); - return rvalue(gz, rl, .void_value, node); + return 
rvalue(gz, ri, .void_value, node); }, .assign_mod => { try assignOp(gz, scope, node, .mod_rem); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); }, .assign_add => { try assignOp(gz, scope, node, .add); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); }, .assign_add_wrap => { try assignOp(gz, scope, node, .addwrap); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); }, .assign_add_sat => { try assignOp(gz, scope, node, .add_sat); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); }, .assign_mul => { try assignOp(gz, scope, node, .mul); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); }, .assign_mul_wrap => { try assignOp(gz, scope, node, .mulwrap); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); }, .assign_mul_sat => { try assignOp(gz, scope, node, .mul_sat); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); }, // zig fmt: off - .shl => return shiftOp(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shl), - .shr => return shiftOp(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shr), - - .add => return simpleBinOp(gz, scope, rl, node, .add), - .add_wrap => return simpleBinOp(gz, scope, rl, node, .addwrap), - .add_sat => return simpleBinOp(gz, scope, rl, node, .add_sat), - .sub => return simpleBinOp(gz, scope, rl, node, .sub), - .sub_wrap => return simpleBinOp(gz, scope, rl, node, .subwrap), - .sub_sat => return simpleBinOp(gz, scope, rl, node, .sub_sat), - .mul => return simpleBinOp(gz, scope, rl, node, .mul), - .mul_wrap => return simpleBinOp(gz, scope, rl, node, .mulwrap), - .mul_sat => return simpleBinOp(gz, scope, rl, node, .mul_sat), - .div => return simpleBinOp(gz, scope, rl, node, .div), - .mod => return simpleBinOp(gz, scope, rl, node, .mod_rem), - .shl_sat => 
return simpleBinOp(gz, scope, rl, node, .shl_sat), - - .bit_and => return simpleBinOp(gz, scope, rl, node, .bit_and), - .bit_or => return simpleBinOp(gz, scope, rl, node, .bit_or), - .bit_xor => return simpleBinOp(gz, scope, rl, node, .xor), - .bang_equal => return simpleBinOp(gz, scope, rl, node, .cmp_neq), - .equal_equal => return simpleBinOp(gz, scope, rl, node, .cmp_eq), - .greater_than => return simpleBinOp(gz, scope, rl, node, .cmp_gt), - .greater_or_equal => return simpleBinOp(gz, scope, rl, node, .cmp_gte), - .less_than => return simpleBinOp(gz, scope, rl, node, .cmp_lt), - .less_or_equal => return simpleBinOp(gz, scope, rl, node, .cmp_lte), - .array_cat => return simpleBinOp(gz, scope, rl, node, .array_cat), + .shl => return shiftOp(gz, scope, ri, node, node_datas[node].lhs, node_datas[node].rhs, .shl), + .shr => return shiftOp(gz, scope, ri, node, node_datas[node].lhs, node_datas[node].rhs, .shr), + + .add => return simpleBinOp(gz, scope, ri, node, .add), + .add_wrap => return simpleBinOp(gz, scope, ri, node, .addwrap), + .add_sat => return simpleBinOp(gz, scope, ri, node, .add_sat), + .sub => return simpleBinOp(gz, scope, ri, node, .sub), + .sub_wrap => return simpleBinOp(gz, scope, ri, node, .subwrap), + .sub_sat => return simpleBinOp(gz, scope, ri, node, .sub_sat), + .mul => return simpleBinOp(gz, scope, ri, node, .mul), + .mul_wrap => return simpleBinOp(gz, scope, ri, node, .mulwrap), + .mul_sat => return simpleBinOp(gz, scope, ri, node, .mul_sat), + .div => return simpleBinOp(gz, scope, ri, node, .div), + .mod => return simpleBinOp(gz, scope, ri, node, .mod_rem), + .shl_sat => return simpleBinOp(gz, scope, ri, node, .shl_sat), + + .bit_and => return simpleBinOp(gz, scope, ri, node, .bit_and), + .bit_or => return simpleBinOp(gz, scope, ri, node, .bit_or), + .bit_xor => return simpleBinOp(gz, scope, ri, node, .xor), + .bang_equal => return simpleBinOp(gz, scope, ri, node, .cmp_neq), + .equal_equal => return simpleBinOp(gz, scope, ri, node, .cmp_eq), + 
.greater_than => return simpleBinOp(gz, scope, ri, node, .cmp_gt), + .greater_or_equal => return simpleBinOp(gz, scope, ri, node, .cmp_gte), + .less_than => return simpleBinOp(gz, scope, ri, node, .cmp_lt), + .less_or_equal => return simpleBinOp(gz, scope, ri, node, .cmp_lte), + .array_cat => return simpleBinOp(gz, scope, ri, node, .array_cat), .array_mult => { const result = try gz.addPlNode(.array_mul, node, Zir.Inst.Bin{ - .lhs = try expr(gz, scope, .none, node_datas[node].lhs), - .rhs = try comptimeExpr(gz, scope, .{ .coerced_ty = .usize_type }, node_datas[node].rhs), + .lhs = try expr(gz, scope, .{ .rl = .none }, node_datas[node].lhs), + .rhs = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, node_datas[node].rhs), }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, - .error_union => return simpleBinOp(gz, scope, rl, node, .error_union_type), - .merge_error_sets => return simpleBinOp(gz, scope, rl, node, .merge_error_sets), + .error_union => return simpleBinOp(gz, scope, ri, node, .error_union_type), + .merge_error_sets => return simpleBinOp(gz, scope, ri, node, .merge_error_sets), - .bool_and => return boolBinOp(gz, scope, rl, node, .bool_br_and), - .bool_or => return boolBinOp(gz, scope, rl, node, .bool_br_or), + .bool_and => return boolBinOp(gz, scope, ri, node, .bool_br_and), + .bool_or => return boolBinOp(gz, scope, ri, node, .bool_br_or), - .bool_not => return simpleUnOp(gz, scope, rl, node, bool_rl, node_datas[node].lhs, .bool_not), - .bit_not => return simpleUnOp(gz, scope, rl, node, .none, node_datas[node].lhs, .bit_not), + .bool_not => return simpleUnOp(gz, scope, ri, node, bool_ri, node_datas[node].lhs, .bool_not), + .bit_not => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, node_datas[node].lhs, .bit_not), - .negation => return negation(gz, scope, rl, node), - .negation_wrap => return simpleUnOp(gz, scope, rl, node, .none, node_datas[node].lhs, .negate_wrap), + .negation => return 
negation(gz, scope, ri, node), + .negation_wrap => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, node_datas[node].lhs, .negate_wrap), - .identifier => return identifier(gz, scope, rl, node), + .identifier => return identifier(gz, scope, ri, node), - .asm_simple => return asmExpr(gz, scope, rl, node, tree.asmSimple(node)), - .@"asm" => return asmExpr(gz, scope, rl, node, tree.asmFull(node)), + .asm_simple => return asmExpr(gz, scope, ri, node, tree.asmSimple(node)), + .@"asm" => return asmExpr(gz, scope, ri, node, tree.asmFull(node)), - .string_literal => return stringLiteral(gz, rl, node), - .multiline_string_literal => return multilineStringLiteral(gz, rl, node), + .string_literal => return stringLiteral(gz, ri, node), + .multiline_string_literal => return multilineStringLiteral(gz, ri, node), - .number_literal => return numberLiteral(gz, rl, node, node, .positive), + .number_literal => return numberLiteral(gz, ri, node, node, .positive), // zig fmt: on .builtin_call_two, .builtin_call_two_comma => { if (node_datas[node].lhs == 0) { const params = [_]Ast.Node.Index{}; - return builtinCall(gz, scope, rl, node, ¶ms); + return builtinCall(gz, scope, ri, node, ¶ms); } else if (node_datas[node].rhs == 0) { const params = [_]Ast.Node.Index{node_datas[node].lhs}; - return builtinCall(gz, scope, rl, node, ¶ms); + return builtinCall(gz, scope, ri, node, ¶ms); } else { const params = [_]Ast.Node.Index{ node_datas[node].lhs, node_datas[node].rhs }; - return builtinCall(gz, scope, rl, node, ¶ms); + return builtinCall(gz, scope, ri, node, ¶ms); } }, .builtin_call, .builtin_call_comma => { const params = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs]; - return builtinCall(gz, scope, rl, node, params); + return builtinCall(gz, scope, ri, node, params); }, .call_one, .call_one_comma, .async_call_one, .async_call_one_comma => { var params: [1]Ast.Node.Index = undefined; - return callExpr(gz, scope, rl, node, tree.callOne(¶ms, node)); + return callExpr(gz, 
scope, ri, node, tree.callOne(¶ms, node)); }, .call, .call_comma, .async_call, .async_call_comma => { - return callExpr(gz, scope, rl, node, tree.callFull(node)); + return callExpr(gz, scope, ri, node, tree.callFull(node)); }, .unreachable_literal => { @@ -786,112 +812,112 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr return Zir.Inst.Ref.unreachable_value; }, .@"return" => return ret(gz, scope, node), - .field_access => return fieldAccess(gz, scope, rl, node), + .field_access => return fieldAccess(gz, scope, ri, node), - .if_simple => return ifExpr(gz, scope, rl.br(), node, tree.ifSimple(node)), - .@"if" => return ifExpr(gz, scope, rl.br(), node, tree.ifFull(node)), + .if_simple => return ifExpr(gz, scope, ri.br(), node, tree.ifSimple(node)), + .@"if" => return ifExpr(gz, scope, ri.br(), node, tree.ifFull(node)), - .while_simple => return whileExpr(gz, scope, rl.br(), node, tree.whileSimple(node), false), - .while_cont => return whileExpr(gz, scope, rl.br(), node, tree.whileCont(node), false), - .@"while" => return whileExpr(gz, scope, rl.br(), node, tree.whileFull(node), false), + .while_simple => return whileExpr(gz, scope, ri.br(), node, tree.whileSimple(node), false), + .while_cont => return whileExpr(gz, scope, ri.br(), node, tree.whileCont(node), false), + .@"while" => return whileExpr(gz, scope, ri.br(), node, tree.whileFull(node), false), - .for_simple => return forExpr(gz, scope, rl.br(), node, tree.forSimple(node), false), - .@"for" => return forExpr(gz, scope, rl.br(), node, tree.forFull(node), false), + .for_simple => return forExpr(gz, scope, ri.br(), node, tree.forSimple(node), false), + .@"for" => return forExpr(gz, scope, ri.br(), node, tree.forFull(node), false), .slice_open => { - const lhs = try expr(gz, scope, .ref, node_datas[node].lhs); - const start = try expr(gz, scope, .{ .coerced_ty = .usize_type }, node_datas[node].rhs); + const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs); + const 
start = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, node_datas[node].rhs); const result = try gz.addPlNode(.slice_start, node, Zir.Inst.SliceStart{ .lhs = lhs, .start = start, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .slice => { - const lhs = try expr(gz, scope, .ref, node_datas[node].lhs); + const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs); const extra = tree.extraData(node_datas[node].rhs, Ast.Node.Slice); - const start = try expr(gz, scope, .{ .coerced_ty = .usize_type }, extra.start); - const end = try expr(gz, scope, .{ .coerced_ty = .usize_type }, extra.end); + const start = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, extra.start); + const end = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, extra.end); const result = try gz.addPlNode(.slice_end, node, Zir.Inst.SliceEnd{ .lhs = lhs, .start = start, .end = end, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .slice_sentinel => { - const lhs = try expr(gz, scope, .ref, node_datas[node].lhs); + const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs); const extra = tree.extraData(node_datas[node].rhs, Ast.Node.SliceSentinel); - const start = try expr(gz, scope, .{ .coerced_ty = .usize_type }, extra.start); - const end = if (extra.end != 0) try expr(gz, scope, .{ .coerced_ty = .usize_type }, extra.end) else .none; - const sentinel = try expr(gz, scope, .none, extra.sentinel); + const start = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, extra.start); + const end = if (extra.end != 0) try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, extra.end) else .none; + const sentinel = try expr(gz, scope, .{ .rl = .none }, extra.sentinel); const result = try gz.addPlNode(.slice_sentinel, node, Zir.Inst.SliceSentinel{ .lhs = lhs, .start = start, .end = end, .sentinel = sentinel, }); - return rvalue(gz, rl, result, node); + 
return rvalue(gz, ri, result, node); }, .deref => { - const lhs = try expr(gz, scope, .none, node_datas[node].lhs); + const lhs = try expr(gz, scope, .{ .rl = .none }, node_datas[node].lhs); _ = try gz.addUnNode(.validate_deref, lhs, node); - switch (rl) { + switch (ri.rl) { .ref => return lhs, else => { const result = try gz.addUnNode(.load, lhs, node); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, } }, .address_of => { - const result = try expr(gz, scope, .ref, node_datas[node].lhs); - return rvalue(gz, rl, result, node); + const result = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs); + return rvalue(gz, ri, result, node); }, .optional_type => { const operand = try typeExpr(gz, scope, node_datas[node].lhs); const result = try gz.addUnNode(.optional_type, operand, node); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, - .unwrap_optional => switch (rl) { + .unwrap_optional => switch (ri.rl) { .ref => return gz.addUnNode( .optional_payload_safe_ptr, - try expr(gz, scope, .ref, node_datas[node].lhs), + try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs), node, ), - else => return rvalue(gz, rl, try gz.addUnNode( + else => return rvalue(gz, ri, try gz.addUnNode( .optional_payload_safe, - try expr(gz, scope, .none, node_datas[node].lhs), + try expr(gz, scope, .{ .rl = .none }, node_datas[node].lhs), node, ), node), }, .block_two, .block_two_semicolon => { const statements = [2]Ast.Node.Index{ node_datas[node].lhs, node_datas[node].rhs }; if (node_datas[node].lhs == 0) { - return blockExpr(gz, scope, rl, node, statements[0..0]); + return blockExpr(gz, scope, ri, node, statements[0..0]); } else if (node_datas[node].rhs == 0) { - return blockExpr(gz, scope, rl, node, statements[0..1]); + return blockExpr(gz, scope, ri, node, statements[0..1]); } else { - return blockExpr(gz, scope, rl, node, statements[0..2]); + return blockExpr(gz, scope, ri, node, statements[0..2]); } }, .block, 
.block_semicolon => { const statements = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs]; - return blockExpr(gz, scope, rl, node, statements); + return blockExpr(gz, scope, ri, node, statements); }, - .enum_literal => return simpleStrTok(gz, rl, main_tokens[node], node, .enum_literal), - .error_value => return simpleStrTok(gz, rl, node_datas[node].rhs, node, .error_value), + .enum_literal => return simpleStrTok(gz, ri, main_tokens[node], node, .enum_literal), + .error_value => return simpleStrTok(gz, ri, node_datas[node].rhs, node, .error_value), // TODO restore this when implementing https://github.com/ziglang/zig/issues/6025 - // .anyframe_literal => return rvalue(gz, rl, .anyframe_type, node), + // .anyframe_literal => return rvalue(gz, ri, .anyframe_type, node), .anyframe_literal => { const result = try gz.addUnNode(.anyframe_type, .void_type, node); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .anyframe_type => { const return_type = try typeExpr(gz, scope, node_datas[node].rhs); const result = try gz.addUnNode(.anyframe_type, return_type, node); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .@"catch" => { const catch_token = main_tokens[node]; @@ -899,11 +925,11 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr catch_token + 2 else null; - switch (rl) { + switch (ri.rl) { .ref => return orelseCatchExpr( gz, scope, - rl, + ri, node, node_datas[node].lhs, .is_non_err_ptr, @@ -915,7 +941,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr else => return orelseCatchExpr( gz, scope, - rl, + ri, node, node_datas[node].lhs, .is_non_err, @@ -926,11 +952,11 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr ), } }, - .@"orelse" => switch (rl) { + .@"orelse" => switch (ri.rl) { .ref => return orelseCatchExpr( gz, scope, - rl, + ri, node, node_datas[node].lhs, .is_non_null_ptr, @@ -942,7 
+968,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr else => return orelseCatchExpr( gz, scope, - rl, + ri, node, node_datas[node].lhs, .is_non_null, @@ -953,94 +979,94 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr ), }, - .ptr_type_aligned => return ptrType(gz, scope, rl, node, tree.ptrTypeAligned(node)), - .ptr_type_sentinel => return ptrType(gz, scope, rl, node, tree.ptrTypeSentinel(node)), - .ptr_type => return ptrType(gz, scope, rl, node, tree.ptrType(node)), - .ptr_type_bit_range => return ptrType(gz, scope, rl, node, tree.ptrTypeBitRange(node)), + .ptr_type_aligned => return ptrType(gz, scope, ri, node, tree.ptrTypeAligned(node)), + .ptr_type_sentinel => return ptrType(gz, scope, ri, node, tree.ptrTypeSentinel(node)), + .ptr_type => return ptrType(gz, scope, ri, node, tree.ptrType(node)), + .ptr_type_bit_range => return ptrType(gz, scope, ri, node, tree.ptrTypeBitRange(node)), .container_decl, .container_decl_trailing, - => return containerDecl(gz, scope, rl, node, tree.containerDecl(node)), + => return containerDecl(gz, scope, ri, node, tree.containerDecl(node)), .container_decl_two, .container_decl_two_trailing => { var buffer: [2]Ast.Node.Index = undefined; - return containerDecl(gz, scope, rl, node, tree.containerDeclTwo(&buffer, node)); + return containerDecl(gz, scope, ri, node, tree.containerDeclTwo(&buffer, node)); }, .container_decl_arg, .container_decl_arg_trailing, - => return containerDecl(gz, scope, rl, node, tree.containerDeclArg(node)), + => return containerDecl(gz, scope, ri, node, tree.containerDeclArg(node)), .tagged_union, .tagged_union_trailing, - => return containerDecl(gz, scope, rl, node, tree.taggedUnion(node)), + => return containerDecl(gz, scope, ri, node, tree.taggedUnion(node)), .tagged_union_two, .tagged_union_two_trailing => { var buffer: [2]Ast.Node.Index = undefined; - return containerDecl(gz, scope, rl, node, tree.taggedUnionTwo(&buffer, node)); + 
return containerDecl(gz, scope, ri, node, tree.taggedUnionTwo(&buffer, node)); }, .tagged_union_enum_tag, .tagged_union_enum_tag_trailing, - => return containerDecl(gz, scope, rl, node, tree.taggedUnionEnumTag(node)), + => return containerDecl(gz, scope, ri, node, tree.taggedUnionEnumTag(node)), .@"break" => return breakExpr(gz, scope, node), .@"continue" => return continueExpr(gz, scope, node), - .grouped_expression => return expr(gz, scope, rl, node_datas[node].lhs), - .array_type => return arrayType(gz, scope, rl, node), - .array_type_sentinel => return arrayTypeSentinel(gz, scope, rl, node), - .char_literal => return charLiteral(gz, rl, node), - .error_set_decl => return errorSetDecl(gz, rl, node), - .array_access => return arrayAccess(gz, scope, rl, node), - .@"comptime" => return comptimeExprAst(gz, scope, rl, node), - .@"switch", .switch_comma => return switchExpr(gz, scope, rl.br(), node), - - .@"nosuspend" => return nosuspendExpr(gz, scope, rl, node), + .grouped_expression => return expr(gz, scope, ri, node_datas[node].lhs), + .array_type => return arrayType(gz, scope, ri, node), + .array_type_sentinel => return arrayTypeSentinel(gz, scope, ri, node), + .char_literal => return charLiteral(gz, ri, node), + .error_set_decl => return errorSetDecl(gz, ri, node), + .array_access => return arrayAccess(gz, scope, ri, node), + .@"comptime" => return comptimeExprAst(gz, scope, ri, node), + .@"switch", .switch_comma => return switchExpr(gz, scope, ri.br(), node), + + .@"nosuspend" => return nosuspendExpr(gz, scope, ri, node), .@"suspend" => return suspendExpr(gz, scope, node), - .@"await" => return awaitExpr(gz, scope, rl, node), - .@"resume" => return resumeExpr(gz, scope, rl, node), + .@"await" => return awaitExpr(gz, scope, ri, node), + .@"resume" => return resumeExpr(gz, scope, ri, node), - .@"try" => return tryExpr(gz, scope, rl, node, node_datas[node].lhs), + .@"try" => return tryExpr(gz, scope, ri, node, node_datas[node].lhs), .array_init_one, 
.array_init_one_comma => { var elements: [1]Ast.Node.Index = undefined; - return arrayInitExpr(gz, scope, rl, node, tree.arrayInitOne(&elements, node)); + return arrayInitExpr(gz, scope, ri, node, tree.arrayInitOne(&elements, node)); }, .array_init_dot_two, .array_init_dot_two_comma => { var elements: [2]Ast.Node.Index = undefined; - return arrayInitExpr(gz, scope, rl, node, tree.arrayInitDotTwo(&elements, node)); + return arrayInitExpr(gz, scope, ri, node, tree.arrayInitDotTwo(&elements, node)); }, .array_init_dot, .array_init_dot_comma, - => return arrayInitExpr(gz, scope, rl, node, tree.arrayInitDot(node)), + => return arrayInitExpr(gz, scope, ri, node, tree.arrayInitDot(node)), .array_init, .array_init_comma, - => return arrayInitExpr(gz, scope, rl, node, tree.arrayInit(node)), + => return arrayInitExpr(gz, scope, ri, node, tree.arrayInit(node)), .struct_init_one, .struct_init_one_comma => { var fields: [1]Ast.Node.Index = undefined; - return structInitExpr(gz, scope, rl, node, tree.structInitOne(&fields, node)); + return structInitExpr(gz, scope, ri, node, tree.structInitOne(&fields, node)); }, .struct_init_dot_two, .struct_init_dot_two_comma => { var fields: [2]Ast.Node.Index = undefined; - return structInitExpr(gz, scope, rl, node, tree.structInitDotTwo(&fields, node)); + return structInitExpr(gz, scope, ri, node, tree.structInitDotTwo(&fields, node)); }, .struct_init_dot, .struct_init_dot_comma, - => return structInitExpr(gz, scope, rl, node, tree.structInitDot(node)), + => return structInitExpr(gz, scope, ri, node, tree.structInitDot(node)), .struct_init, .struct_init_comma, - => return structInitExpr(gz, scope, rl, node, tree.structInit(node)), + => return structInitExpr(gz, scope, ri, node, tree.structInit(node)), .fn_proto_simple => { var params: [1]Ast.Node.Index = undefined; - return fnProtoExpr(gz, scope, rl, node, tree.fnProtoSimple(¶ms, node)); + return fnProtoExpr(gz, scope, ri, node, tree.fnProtoSimple(¶ms, node)); }, .fn_proto_multi => { - 
return fnProtoExpr(gz, scope, rl, node, tree.fnProtoMulti(node)); + return fnProtoExpr(gz, scope, ri, node, tree.fnProtoMulti(node)); }, .fn_proto_one => { var params: [1]Ast.Node.Index = undefined; - return fnProtoExpr(gz, scope, rl, node, tree.fnProtoOne(¶ms, node)); + return fnProtoExpr(gz, scope, ri, node, tree.fnProtoOne(¶ms, node)); }, .fn_proto => { - return fnProtoExpr(gz, scope, rl, node, tree.fnProto(node)); + return fnProtoExpr(gz, scope, ri, node, tree.fnProto(node)); }, } } @@ -1048,7 +1074,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr fn nosuspendExpr( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; @@ -1063,7 +1089,7 @@ fn nosuspendExpr( } gz.nosuspend_node = node; defer gz.nosuspend_node = 0; - return expr(gz, scope, rl, body_node); + return expr(gz, scope, ri, body_node); } fn suspendExpr( @@ -1096,7 +1122,7 @@ fn suspendExpr( suspend_scope.suspend_node = node; defer suspend_scope.unstack(); - const body_result = try expr(&suspend_scope, &suspend_scope.base, .none, body_node); + const body_result = try expr(&suspend_scope, &suspend_scope.base, .{ .rl = .none }, body_node); if (!gz.refIsNoReturn(body_result)) { _ = try suspend_scope.addBreak(.break_inline, suspend_inst, .void_value); } @@ -1108,7 +1134,7 @@ fn suspendExpr( fn awaitExpr( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; @@ -1121,7 +1147,7 @@ fn awaitExpr( try astgen.errNoteNode(gz.suspend_node, "suspend block here", .{}), }); } - const operand = try expr(gz, scope, .none, rhs_node); + const operand = try expr(gz, scope, .{ .rl = .none }, rhs_node); const result = if (gz.nosuspend_node != 0) try gz.addExtendedPayload(.await_nosuspend, Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(node), @@ -1130,28 +1156,28 @@ fn awaitExpr( else try gz.addUnNode(.@"await", operand, 
node); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } fn resumeExpr( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; const tree = astgen.tree; const node_datas = tree.nodes.items(.data); const rhs_node = node_datas[node].lhs; - const operand = try expr(gz, scope, .none, rhs_node); + const operand = try expr(gz, scope, .{ .rl = .none }, rhs_node); const result = try gz.addUnNode(.@"resume", operand, node); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } fn fnProtoExpr( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, fn_proto: Ast.full.FnProto, ) InnerError!Zir.Inst.Ref { @@ -1217,7 +1243,7 @@ fn fnProtoExpr( assert(param_type_node != 0); var param_gz = block_scope.makeSubBlock(scope); defer param_gz.unstack(); - const param_type = try expr(¶m_gz, scope, coerced_type_rl, param_type_node); + const param_type = try expr(¶m_gz, scope, coerced_type_ri, param_type_node); const param_inst_expected = @intCast(u32, astgen.instructions.len + 1); _ = try param_gz.addBreak(.break_inline, param_inst_expected, param_type); const main_tokens = tree.nodes.items(.main_token); @@ -1231,7 +1257,7 @@ fn fnProtoExpr( }; const align_ref: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: { - break :inst try expr(&block_scope, scope, align_rl, fn_proto.ast.align_expr); + break :inst try expr(&block_scope, scope, align_ri, fn_proto.ast.align_expr); }; if (fn_proto.ast.addrspace_expr != 0) { @@ -1246,7 +1272,7 @@ fn fnProtoExpr( try expr( &block_scope, scope, - .{ .ty = .calling_convention_type }, + .{ .rl = .{ .ty = .calling_convention_type } }, fn_proto.ast.callconv_expr, ) else @@ -1257,7 +1283,7 @@ fn fnProtoExpr( if (is_inferred_error) { return astgen.failTok(maybe_bang, "function prototype may not have inferred error set", .{}); } - const ret_ty = try expr(&block_scope, scope, 
coerced_type_rl, fn_proto.ast.return_type); + const ret_ty = try expr(&block_scope, scope, coerced_type_ri, fn_proto.ast.return_type); const result = try block_scope.addFunc(.{ .src_node = fn_proto.ast.proto_node, @@ -1288,13 +1314,13 @@ fn fnProtoExpr( try block_scope.setBlockBody(block_inst); try gz.instructions.append(astgen.gpa, block_inst); - return rvalue(gz, rl, indexToRef(block_inst), fn_proto.ast.proto_node); + return rvalue(gz, ri, indexToRef(block_inst), fn_proto.ast.proto_node); } fn arrayInitExpr( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, array_init: Ast.full.ArrayInit, ) InnerError!Zir.Inst.Ref { @@ -1336,7 +1362,7 @@ fn arrayInitExpr( .elem = elem_type, }; } else { - const sentinel = try comptimeExpr(gz, scope, .{ .ty = elem_type }, array_type.ast.sentinel); + const sentinel = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = elem_type } }, array_type.ast.sentinel); const array_type_inst = try gz.addPlNode( .array_type_sentinel, array_init.ast.type_expr, @@ -1364,11 +1390,11 @@ fn arrayInitExpr( }; }; - switch (rl) { + switch (ri.rl) { .discard => { // TODO elements should still be coerced if type is provided for (array_init.ast.elements) |elem_init| { - _ = try expr(gz, scope, .discard, elem_init); + _ = try expr(gz, scope, .{ .rl = .discard }, elem_init); } return Zir.Inst.Ref.void_value; }, @@ -1380,13 +1406,13 @@ fn arrayInitExpr( const tag: Zir.Inst.Tag = if (types.array != .none) .array_init else .array_init_anon; return arrayInitExprInner(gz, scope, node, array_init.ast.elements, types.array, types.elem, tag); }, - .ty, .ty_shift_operand, .coerced_ty => { + .ty, .coerced_ty => { const tag: Zir.Inst.Tag = if (types.array != .none) .array_init else .array_init_anon; const result = try arrayInitExprInner(gz, scope, node, array_init.ast.elements, types.array, types.elem, tag); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .ptr => |ptr_res| { - return arrayInitExprRlPtr(gz, 
scope, rl, node, ptr_res.inst, array_init.ast.elements, types.array); + return arrayInitExprRlPtr(gz, scope, ri, node, ptr_res.inst, array_init.ast.elements, types.array); }, .inferred_ptr => |ptr_inst| { if (types.array == .none) { @@ -1394,9 +1420,9 @@ fn arrayInitExpr( // analyzing array_base_ptr against an alloc_inferred_mut. // See corresponding logic in structInitExpr. const result = try arrayInitExprRlNone(gz, scope, node, array_init.ast.elements, .array_init_anon); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } else { - return arrayInitExprRlPtr(gz, scope, rl, node, ptr_inst, array_init.ast.elements, types.array); + return arrayInitExprRlPtr(gz, scope, ri, node, ptr_inst, array_init.ast.elements, types.array); } }, .block_ptr => |block_gz| { @@ -1404,9 +1430,9 @@ fn arrayInitExpr( // See corresponding logic in structInitExpr. if (types.array == .none and astgen.isInferred(block_gz.rl_ptr)) { const result = try arrayInitExprRlNone(gz, scope, node, array_init.ast.elements, .array_init_anon); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } - return arrayInitExprRlPtr(gz, scope, rl, node, block_gz.rl_ptr, array_init.ast.elements, types.array); + return arrayInitExprRlPtr(gz, scope, ri, node, block_gz.rl_ptr, array_init.ast.elements, types.array); }, } } @@ -1426,7 +1452,7 @@ fn arrayInitExprRlNone( var extra_index = try reserveExtra(astgen, elements.len); for (elements) |elem_init| { - const elem_ref = try expr(gz, scope, .none, elem_init); + const elem_ref = try expr(gz, scope, .{ .rl = .none }, elem_init); astgen.extra.items[extra_index] = @enumToInt(elem_ref); extra_index += 1; } @@ -1455,9 +1481,9 @@ fn arrayInitExprInner( } for (elements) |elem_init, i| { - const rl = if (elem_ty != .none) - ResultLoc{ .coerced_ty = elem_ty } - else if (array_ty_inst != .none and nodeMayNeedMemoryLocation(astgen.tree, elem_init, true)) rl: { + const ri = if (elem_ty != .none) + ResultInfo{ .rl = .{ 
.coerced_ty = elem_ty } } + else if (array_ty_inst != .none and nodeMayNeedMemoryLocation(astgen.tree, elem_init, true)) ri: { const ty_expr = try gz.add(.{ .tag = .elem_type_index, .data = .{ .bin = .{ @@ -1465,10 +1491,10 @@ fn arrayInitExprInner( .rhs = @intToEnum(Zir.Inst.Ref, i), } }, }); - break :rl ResultLoc{ .coerced_ty = ty_expr }; - } else ResultLoc{ .none = {} }; + break :ri ResultInfo{ .rl = .{ .coerced_ty = ty_expr } }; + } else ResultInfo{ .rl = .{ .none = {} } }; - const elem_ref = try expr(gz, scope, rl, elem_init); + const elem_ref = try expr(gz, scope, ri, elem_init); astgen.extra.items[extra_index] = @enumToInt(elem_ref); extra_index += 1; } @@ -1479,7 +1505,7 @@ fn arrayInitExprInner( fn arrayInitExprRlPtr( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, result_ptr: Zir.Inst.Ref, elements: []const Ast.Node.Index, @@ -1494,7 +1520,7 @@ fn arrayInitExprRlPtr( defer as_scope.unstack(); const result = try arrayInitExprRlPtrInner(&as_scope, scope, node, as_scope.rl_ptr, elements); - return as_scope.finishCoercion(gz, rl, node, result, array_ty); + return as_scope.finishCoercion(gz, ri, node, result, array_ty); } fn arrayInitExprRlPtrInner( @@ -1518,7 +1544,7 @@ fn arrayInitExprRlPtrInner( }); astgen.extra.items[extra_index] = refToIndex(elem_ptr).?; extra_index += 1; - _ = try expr(gz, scope, .{ .ptr = .{ .inst = elem_ptr } }, elem_init); + _ = try expr(gz, scope, .{ .rl = .{ .ptr = .{ .inst = elem_ptr } } }, elem_init); } const tag: Zir.Inst.Tag = if (gz.force_comptime) @@ -1533,7 +1559,7 @@ fn arrayInitExprRlPtrInner( fn structInitExpr( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, struct_init: Ast.full.StructInit, ) InnerError!Zir.Inst.Ref { @@ -1542,7 +1568,7 @@ fn structInitExpr( if (struct_init.ast.type_expr == 0) { if (struct_init.ast.fields.len == 0) { - return rvalue(gz, rl, .empty_struct, node); + return rvalue(gz, ri, .empty_struct, node); } } else array: { const 
node_tags = tree.nodes.items(.tag); @@ -1554,7 +1580,7 @@ fn structInitExpr( if (struct_init.ast.fields.len == 0) { const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr); const result = try gz.addUnNode(.struct_init_empty, ty_inst, node); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } break :array; }, @@ -1571,7 +1597,7 @@ fn structInitExpr( .rhs = elem_type, }); } else blk: { - const sentinel = try comptimeExpr(gz, scope, .{ .ty = elem_type }, array_type.ast.sentinel); + const sentinel = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = elem_type } }, array_type.ast.sentinel); break :blk try gz.addPlNode( .array_type_sentinel, struct_init.ast.type_expr, @@ -1583,11 +1609,11 @@ fn structInitExpr( ); }; const result = try gz.addUnNode(.struct_init_empty, array_type_inst, node); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr); const result = try gz.addUnNode(.struct_init_empty, ty_inst, node); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } else { return astgen.failNode( struct_init.ast.type_expr, @@ -1597,7 +1623,7 @@ fn structInitExpr( } } - switch (rl) { + switch (ri.rl) { .discard => { if (struct_init.ast.type_expr != 0) { const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr); @@ -1626,26 +1652,26 @@ fn structInitExpr( return structInitExprRlNone(gz, scope, node, struct_init, .none, .struct_init_anon); } }, - .ty, .ty_shift_operand, .coerced_ty => |ty_inst| { + .ty, .coerced_ty => |ty_inst| { if (struct_init.ast.type_expr == 0) { const result = try structInitExprRlNone(gz, scope, node, struct_init, ty_inst, .struct_init_anon); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } const inner_ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr); _ = try gz.addUnNode(.validate_struct_init_ty, inner_ty_inst, node); const result = try 
structInitExprRlTy(gz, scope, node, struct_init, inner_ty_inst, .struct_init); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, - .ptr => |ptr_res| return structInitExprRlPtr(gz, scope, rl, node, struct_init, ptr_res.inst), + .ptr => |ptr_res| return structInitExprRlPtr(gz, scope, ri, node, struct_init, ptr_res.inst), .inferred_ptr => |ptr_inst| { if (struct_init.ast.type_expr == 0) { // We treat this case differently so that we don't get a crash when // analyzing field_base_ptr against an alloc_inferred_mut. // See corresponding logic in arrayInitExpr. const result = try structInitExprRlNone(gz, scope, node, struct_init, .none, .struct_init_anon); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } else { - return structInitExprRlPtr(gz, scope, rl, node, struct_init, ptr_inst); + return structInitExprRlPtr(gz, scope, ri, node, struct_init, ptr_inst); } }, .block_ptr => |block_gz| { @@ -1653,10 +1679,10 @@ fn structInitExpr( // See corresponding logic in arrayInitExpr. 
if (struct_init.ast.type_expr == 0 and astgen.isInferred(block_gz.rl_ptr)) { const result = try structInitExprRlNone(gz, scope, node, struct_init, .none, .struct_init_anon); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } - return structInitExprRlPtr(gz, scope, rl, node, struct_init, block_gz.rl_ptr); + return structInitExprRlPtr(gz, scope, ri, node, struct_init, block_gz.rl_ptr); }, } } @@ -1681,16 +1707,15 @@ fn structInitExprRlNone( for (struct_init.ast.fields) |field_init| { const name_token = tree.firstToken(field_init) - 2; const str_index = try astgen.identAsString(name_token); - const sub_rl: ResultLoc = if (ty_inst != .none) - ResultLoc{ .ty = try gz.addPlNode(.field_type, field_init, Zir.Inst.FieldType{ + const sub_ri: ResultInfo = if (ty_inst != .none) + ResultInfo{ .rl = .{ .ty = try gz.addPlNode(.field_type, field_init, Zir.Inst.FieldType{ .container_type = ty_inst, .name_start = str_index, - }) } - else - .none; + }) } } + else .{ .rl = .none }; setExtra(astgen, extra_index, Zir.Inst.StructInitAnon.Item{ .field_name = str_index, - .init = try expr(gz, scope, sub_rl, field_init), + .init = try expr(gz, scope, sub_ri, field_init), }); extra_index += field_size; } @@ -1701,7 +1726,7 @@ fn structInitExprRlNone( fn structInitExprRlPtr( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, struct_init: Ast.full.StructInit, result_ptr: Zir.Inst.Ref, @@ -1717,7 +1742,7 @@ fn structInitExprRlPtr( defer as_scope.unstack(); const result = try structInitExprRlPtrInner(&as_scope, scope, node, struct_init, as_scope.rl_ptr); - return as_scope.finishCoercion(gz, rl, node, result, ty_inst); + return as_scope.finishCoercion(gz, ri, node, result, ty_inst); } fn structInitExprRlPtrInner( @@ -1744,7 +1769,7 @@ fn structInitExprRlPtrInner( }); astgen.extra.items[extra_index] = refToIndex(field_ptr).?; extra_index += 1; - _ = try expr(gz, scope, .{ .ptr = .{ .inst = field_ptr } }, field_init); + _ = try expr(gz, 
scope, .{ .rl = .{ .ptr = .{ .inst = field_ptr } } }, field_init); } const tag: Zir.Inst.Tag = if (gz.force_comptime) @@ -1782,7 +1807,7 @@ fn structInitExprRlTy( }); setExtra(astgen, extra_index, Zir.Inst.StructInit.Item{ .field_type = refToIndex(field_ty_inst).?, - .init = try expr(gz, scope, .{ .ty = field_ty_inst }, field_init), + .init = try expr(gz, scope, .{ .rl = .{ .ty = field_ty_inst } }, field_init), }); extra_index += field_size; } @@ -1795,14 +1820,14 @@ fn structInitExprRlTy( fn comptimeExpr( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const prev_force_comptime = gz.force_comptime; gz.force_comptime = true; defer gz.force_comptime = prev_force_comptime; - return expr(gz, scope, rl, node); + return expr(gz, scope, ri, node); } /// This one is for an actual `comptime` syntax, and will emit a compile error if @@ -1811,7 +1836,7 @@ fn comptimeExpr( fn comptimeExprAst( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; @@ -1822,11 +1847,50 @@ fn comptimeExprAst( const node_datas = tree.nodes.items(.data); const body_node = node_datas[node].lhs; gz.force_comptime = true; - const result = try expr(gz, scope, rl, body_node); + const result = try expr(gz, scope, ri, body_node); gz.force_comptime = false; return result; } +/// Restore the error return trace index. Performs the restore only if the result is a non-error or +/// if the result location is a non-error-handling expression. 
+fn restoreErrRetIndex( + gz: *GenZir, + bt: GenZir.BranchTarget, + ri: ResultInfo, + node: Ast.Node.Index, + result: Zir.Inst.Ref, +) !void { + const op = switch (nodeMayEvalToError(gz.astgen.tree, node)) { + .always => return, // never restore/pop + .never => .none, // always restore/pop + .maybe => switch (ri.ctx) { + .error_handling_expr, .@"return", .fn_arg, .const_init => switch (ri.rl) { + .ptr => |ptr_res| try gz.addUnNode(.load, ptr_res.inst, node), + .inferred_ptr => |ptr| try gz.addUnNode(.load, ptr, node), + .block_ptr => |block_scope| if (block_scope.rvalue_rl_count != block_scope.break_count) b: { + // The result location may have been used by this expression, in which case + // the operand is not the result and we need to load the rl ptr. + switch (gz.astgen.instructions.items(.tag)[Zir.refToIndex(block_scope.rl_ptr).?]) { + .alloc_inferred, .alloc_inferred_mut => { + // This is a terrible workaround for Sema's inability to load from a .alloc_inferred ptr + // before its type has been resolved. The operand we use here instead is not guaranteed + // to be valid, and when it's not, we will pop error traces prematurely. + // + // TODO: Update this to do a proper load from the rl_ptr, once Sema can support it. + break :b result; + }, + else => break :b try gz.addUnNode(.load, block_scope.rl_ptr, node), + } + } else result, + else => result, + }, + else => .none, // always restore/pop + }, + }; + _ = try gz.addRestoreErrRetIndex(bt, .{ .if_non_error = op }); +} + fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { const astgen = parent_gz.astgen; const tree = astgen.tree; @@ -1842,6 +1906,7 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) Inn const block_gz = scope.cast(GenZir).?; if (block_gz.cur_defer_node != 0) { + // We are breaking out of a `defer` block. 
return astgen.failNodeNotes(node, "cannot break out of defer expression", .{}, &.{ try astgen.errNoteNode( block_gz.cur_defer_node, @@ -1862,9 +1927,11 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) Inn } else if (block_gz.break_block != 0) { break :blk block_gz.break_block; } + // If not the target, start over with the parent scope = block_gz.parent; continue; }; + // If we made it here, this block is the target of the break expr const break_tag: Zir.Inst.Tag = if (block_gz.is_inline or block_gz.force_comptime) .break_inline @@ -1874,17 +1941,25 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) Inn if (rhs == 0) { try genDefers(parent_gz, scope, parent_scope, .normal_only); + // As our last action before the break, "pop" the error trace if needed + if (!block_gz.force_comptime) + _ = try parent_gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always); + _ = try parent_gz.addBreak(break_tag, block_inst, .void_value); return Zir.Inst.Ref.unreachable_value; } block_gz.break_count += 1; - const operand = try reachableExpr(parent_gz, parent_scope, block_gz.break_result_loc, rhs, node); + const operand = try reachableExpr(parent_gz, parent_scope, block_gz.break_result_info, rhs, node); const search_index = @intCast(Zir.Inst.Index, astgen.instructions.len); try genDefers(parent_gz, scope, parent_scope, .normal_only); - switch (block_gz.break_result_loc) { + // As our last action before the break, "pop" the error trace if needed + if (!block_gz.force_comptime) + try restoreErrRetIndex(parent_gz, .{ .block = block_inst }, block_gz.break_result_info, rhs, operand); + + switch (block_gz.break_result_info.rl) { .block_ptr => { const br = try parent_gz.addBreak(break_tag, block_inst, operand); try block_gz.labeled_breaks.append(astgen.gpa, .{ .br = br, .search = search_index }); @@ -1990,7 +2065,7 @@ fn continueExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) fn blockExpr( gz: *GenZir, scope: 
*Scope, - rl: ResultLoc, + ri: ResultInfo, block_node: Ast.Node.Index, statements: []const Ast.Node.Index, ) InnerError!Zir.Inst.Ref { @@ -2006,12 +2081,38 @@ fn blockExpr( if (token_tags[lbrace - 1] == .colon and token_tags[lbrace - 2] == .identifier) { - return labeledBlockExpr(gz, scope, rl, block_node, statements); + return labeledBlockExpr(gz, scope, ri, block_node, statements); + } + + if (!gz.force_comptime) { + // Since this block is unlabeled, its control flow is effectively linear and we + // can *almost* get away with inlining the block here. However, we actually need + // to preserve the .block for Sema, to properly pop the error return trace. + + const block_tag: Zir.Inst.Tag = .block; + const block_inst = try gz.makeBlockInst(block_tag, block_node); + try gz.instructions.append(astgen.gpa, block_inst); + + var block_scope = gz.makeSubBlock(scope); + defer block_scope.unstack(); + + try blockExprStmts(&block_scope, &block_scope.base, statements); + + if (!block_scope.endsWithNoReturn()) { + // As our last action before the break, "pop" the error trace if needed + _ = try gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always); + + const break_tag: Zir.Inst.Tag = if (block_scope.force_comptime) .break_inline else .@"break"; + _ = try block_scope.addBreak(break_tag, block_inst, .void_value); + } + + try block_scope.setBlockBody(block_inst); + } else { + var sub_gz = gz.makeSubBlock(scope); + try blockExprStmts(&sub_gz, &sub_gz.base, statements); } - var sub_gz = gz.makeSubBlock(scope); - try blockExprStmts(&sub_gz, &sub_gz.base, statements); - return rvalue(gz, rl, .void_value, block_node); + return rvalue(gz, ri, .void_value, block_node); } fn checkLabelRedefinition(astgen: *AstGen, parent_scope: *Scope, label: Ast.TokenIndex) !void { @@ -2049,7 +2150,7 @@ fn checkLabelRedefinition(astgen: *AstGen, parent_scope: *Scope, label: Ast.Toke fn labeledBlockExpr( gz: *GenZir, parent_scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, block_node: 
Ast.Node.Index, statements: []const Ast.Node.Index, ) InnerError!Zir.Inst.Ref { @@ -2078,12 +2179,15 @@ fn labeledBlockExpr( .token = label_token, .block_inst = block_inst, }; - block_scope.setBreakResultLoc(rl); + block_scope.setBreakResultInfo(ri); defer block_scope.unstack(); defer block_scope.labeled_breaks.deinit(astgen.gpa); try blockExprStmts(&block_scope, &block_scope.base, statements); if (!block_scope.endsWithNoReturn()) { + // As our last action before the break, "pop" the error trace if needed + _ = try gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always); + const break_tag: Zir.Inst.Tag = if (block_scope.force_comptime) .break_inline else .@"break"; _ = try block_scope.addBreak(break_tag, block_inst, .void_value); } @@ -2094,7 +2198,7 @@ fn labeledBlockExpr( const zir_datas = gz.astgen.instructions.items(.data); const zir_tags = gz.astgen.instructions.items(.tag); - const strat = rl.strategy(&block_scope); + const strat = ri.rl.strategy(&block_scope); switch (strat.tag) { .break_void => { // The code took advantage of the result location as a pointer. @@ -2107,7 +2211,8 @@ fn labeledBlockExpr( return indexToRef(block_inst); }, .break_operand => { - // All break operands are values that did not use the result location pointer. + // All break operands are values that did not use the result location pointer + // (except for a single .store_to_block_ptr inst which we re-write here). + // The break instructions need to have their operands coerced if the // block's result location is a `ty`. 
In this case we overwrite the // `store_to_block_ptr` instruction with an `as` instruction and repurpose @@ -2135,9 +2240,9 @@ fn labeledBlockExpr( } try block_scope.setBlockBody(block_inst); const block_ref = indexToRef(block_inst); - switch (rl) { + switch (ri.rl) { .ref => return block_ref, - else => return rvalue(gz, rl, block_ref, block_node), + else => return rvalue(gz, ri, block_ref, block_node), } }, } @@ -2208,12 +2313,12 @@ fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const Ast.Nod continue; }, - .while_simple => _ = try whileExpr(gz, scope, .discard, inner_node, tree.whileSimple(inner_node), true), - .while_cont => _ = try whileExpr(gz, scope, .discard, inner_node, tree.whileCont(inner_node), true), - .@"while" => _ = try whileExpr(gz, scope, .discard, inner_node, tree.whileFull(inner_node), true), + .while_simple => _ = try whileExpr(gz, scope, .{ .rl = .discard }, inner_node, tree.whileSimple(inner_node), true), + .while_cont => _ = try whileExpr(gz, scope, .{ .rl = .discard }, inner_node, tree.whileCont(inner_node), true), + .@"while" => _ = try whileExpr(gz, scope, .{ .rl = .discard }, inner_node, tree.whileFull(inner_node), true), - .for_simple => _ = try forExpr(gz, scope, .discard, inner_node, tree.forSimple(inner_node), true), - .@"for" => _ = try forExpr(gz, scope, .discard, inner_node, tree.forFull(inner_node), true), + .for_simple => _ = try forExpr(gz, scope, .{ .rl = .discard }, inner_node, tree.forSimple(inner_node), true), + .@"for" => _ = try forExpr(gz, scope, .{ .rl = .discard }, inner_node, tree.forFull(inner_node), true), else => noreturn_src_node = try unusedResultExpr(gz, scope, inner_node), // zig fmt: on @@ -2234,7 +2339,7 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner try emitDbgNode(gz, statement); // We need to emit an error if the result is not `noreturn` or `void`, but // we want to avoid adding the ZIR instruction if possible for performance. 
- const maybe_unused_result = try expr(gz, scope, .none, statement); + const maybe_unused_result = try expr(gz, scope, .{ .rl = .none }, statement); return addEnsureResult(gz, maybe_unused_result, statement); } @@ -2533,6 +2638,8 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As .validate_array_init_ty, .validate_struct_init_ty, .validate_deref, + .save_err_ret_index, + .restore_err_ret_index, => break :b true, .@"defer" => unreachable, @@ -2799,7 +2906,7 @@ fn varDecl( } const align_inst: Zir.Inst.Ref = if (var_decl.ast.align_node != 0) - try expr(gz, scope, align_rl, var_decl.ast.align_node) + try expr(gz, scope, align_ri, var_decl.ast.align_node) else .none; @@ -2816,16 +2923,22 @@ fn varDecl( if (align_inst == .none and !nodeMayNeedMemoryLocation(tree, var_decl.ast.init_node, type_node != 0)) { - const result_loc: ResultLoc = if (type_node != 0) .{ - .ty = try typeExpr(gz, scope, type_node), - } else .none; + const result_info: ResultInfo = if (type_node != 0) .{ + .rl = .{ .ty = try typeExpr(gz, scope, type_node) }, + .ctx = .const_init, + } else .{ .rl = .none, .ctx = .const_init }; const prev_anon_name_strategy = gz.anon_name_strategy; gz.anon_name_strategy = .dbg_var; - const init_inst = try reachableExpr(gz, scope, result_loc, var_decl.ast.init_node, node); + const init_inst = try reachableExpr(gz, scope, result_info, var_decl.ast.init_node, node); gz.anon_name_strategy = prev_anon_name_strategy; try gz.addDbgVar(.dbg_var_val, ident_name, init_inst); + // The const init expression may have modified the error return trace, so signal + // to Sema that it should save the new index for restoring later. 
+ if (nodeMayAppendToErrorTrace(tree, var_decl.ast.init_node)) + _ = try gz.addSaveErrRetIndex(.{ .if_of_error_type = init_inst }); + const sub_scope = try block_arena.create(Scope.LocalVal); sub_scope.* = .{ .parent = scope, @@ -2891,8 +3004,13 @@ fn varDecl( init_scope.rl_ptr = alloc; init_scope.rl_ty_inst = .none; } - const init_result_loc: ResultLoc = .{ .block_ptr = &init_scope }; - const init_inst = try reachableExpr(&init_scope, &init_scope.base, init_result_loc, var_decl.ast.init_node, node); + const init_result_info: ResultInfo = .{ .rl = .{ .block_ptr = &init_scope }, .ctx = .const_init }; + const init_inst = try reachableExpr(&init_scope, &init_scope.base, init_result_info, var_decl.ast.init_node, node); + + // The const init expression may have modified the error return trace, so signal + // to Sema that it should save the new index for restoring later. + if (nodeMayAppendToErrorTrace(tree, var_decl.ast.init_node)) + _ = try init_scope.addSaveErrRetIndex(.{ .if_of_error_type = init_inst }); const zir_tags = astgen.instructions.items(.tag); const zir_datas = astgen.instructions.items(.data); @@ -2981,7 +3099,7 @@ fn varDecl( const is_comptime = var_decl.comptime_token != null or gz.force_comptime; var resolve_inferred_alloc: Zir.Inst.Ref = .none; const var_data: struct { - result_loc: ResultLoc, + result_info: ResultInfo, alloc: Zir.Inst.Ref, } = if (var_decl.ast.type_node != 0) a: { const type_inst = try typeExpr(gz, scope, var_decl.ast.type_node); @@ -3003,7 +3121,7 @@ fn varDecl( } }; gz.rl_ty_inst = type_inst; - break :a .{ .alloc = alloc, .result_loc = .{ .ptr = .{ .inst = alloc } } }; + break :a .{ .alloc = alloc, .result_info = .{ .rl = .{ .ptr = .{ .inst = alloc } } } }; } else a: { const alloc = alloc: { if (align_inst == .none) { @@ -3024,11 +3142,11 @@ fn varDecl( }; gz.rl_ty_inst = .none; resolve_inferred_alloc = alloc; - break :a .{ .alloc = alloc, .result_loc = .{ .inferred_ptr = alloc } }; + break :a .{ .alloc = alloc, .result_info = .{ 
.rl = .{ .inferred_ptr = alloc } } }; }; const prev_anon_name_strategy = gz.anon_name_strategy; gz.anon_name_strategy = .dbg_var; - _ = try reachableExprComptime(gz, scope, var_data.result_loc, var_decl.ast.init_node, node, is_comptime); + _ = try reachableExprComptime(gz, scope, var_data.result_info, var_decl.ast.init_node, node, is_comptime); gz.anon_name_strategy = prev_anon_name_strategy; if (resolve_inferred_alloc != .none) { _ = try gz.addUnNode(.resolve_inferred_alloc, resolve_inferred_alloc, node); @@ -3098,15 +3216,15 @@ fn assign(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerError!voi // This intentionally does not support `@"_"` syntax. const ident_name = tree.tokenSlice(main_tokens[lhs]); if (mem.eql(u8, ident_name, "_")) { - _ = try expr(gz, scope, .discard, rhs); + _ = try expr(gz, scope, .{ .rl = .discard }, rhs); return; } } const lvalue = try lvalExpr(gz, scope, lhs); - _ = try expr(gz, scope, .{ .ptr = .{ + _ = try expr(gz, scope, .{ .rl = .{ .ptr = .{ .inst = lvalue, .src_node = infix_node, - } }, rhs); + } } }, rhs); } fn assignOp( @@ -3123,7 +3241,7 @@ fn assignOp( const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs); const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node); const lhs_type = try gz.addUnNode(.typeof, lhs, infix_node); - const rhs = try expr(gz, scope, .{ .coerced_ty = lhs_type }, node_datas[infix_node].rhs); + const rhs = try expr(gz, scope, .{ .rl = .{ .coerced_ty = lhs_type } }, node_datas[infix_node].rhs); const result = try gz.addPlNode(op_inst_tag, infix_node, Zir.Inst.Bin{ .lhs = lhs, @@ -3146,7 +3264,7 @@ fn assignShift( const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs); const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node); const rhs_type = try gz.addUnNode(.typeof_log2_int_type, lhs, infix_node); - const rhs = try expr(gz, scope, .{ .ty = rhs_type }, node_datas[infix_node].rhs); + const rhs = try expr(gz, scope, .{ .rl = .{ .ty = rhs_type } }, 
node_datas[infix_node].rhs); const result = try gz.addPlNode(op_inst_tag, infix_node, Zir.Inst.Bin{ .lhs = lhs, @@ -3164,7 +3282,7 @@ fn assignShiftSat(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerE const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs); const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node); // Saturating shift-left allows any integer type for both the LHS and RHS. - const rhs = try expr(gz, scope, .none, node_datas[infix_node].rhs); + const rhs = try expr(gz, scope, .{ .rl = .none }, node_datas[infix_node].rhs); const result = try gz.addPlNode(.shl_sat, infix_node, Zir.Inst.Bin{ .lhs = lhs, @@ -3176,7 +3294,7 @@ fn assignShiftSat(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerE fn ptrType( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, ptr_info: Ast.full.PtrType, ) InnerError!Zir.Inst.Ref { @@ -3194,21 +3312,21 @@ fn ptrType( var trailing_count: u32 = 0; if (ptr_info.ast.sentinel != 0) { - sentinel_ref = try expr(gz, scope, .{ .ty = elem_type }, ptr_info.ast.sentinel); + sentinel_ref = try expr(gz, scope, .{ .rl = .{ .ty = elem_type } }, ptr_info.ast.sentinel); trailing_count += 1; } if (ptr_info.ast.align_node != 0) { - align_ref = try expr(gz, scope, coerced_align_rl, ptr_info.ast.align_node); + align_ref = try expr(gz, scope, coerced_align_ri, ptr_info.ast.align_node); trailing_count += 1; } if (ptr_info.ast.addrspace_node != 0) { - addrspace_ref = try expr(gz, scope, .{ .ty = .address_space_type }, ptr_info.ast.addrspace_node); + addrspace_ref = try expr(gz, scope, .{ .rl = .{ .ty = .address_space_type } }, ptr_info.ast.addrspace_node); trailing_count += 1; } if (ptr_info.ast.bit_range_start != 0) { assert(ptr_info.ast.bit_range_end != 0); - bit_start_ref = try expr(gz, scope, .{ .coerced_ty = .u16_type }, ptr_info.ast.bit_range_start); - bit_end_ref = try expr(gz, scope, .{ .coerced_ty = .u16_type }, ptr_info.ast.bit_range_end); + bit_start_ref = try expr(gz, 
scope, .{ .rl = .{ .coerced_ty = .u16_type } }, ptr_info.ast.bit_range_start); + bit_end_ref = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u16_type } }, ptr_info.ast.bit_range_end); trailing_count += 2; } @@ -3255,10 +3373,10 @@ fn ptrType( } }); gz.instructions.appendAssumeCapacity(new_index); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } -fn arrayType(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) !Zir.Inst.Ref { +fn arrayType(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) !Zir.Inst.Ref { const astgen = gz.astgen; const tree = astgen.tree; const node_datas = tree.nodes.items(.data); @@ -3271,17 +3389,17 @@ fn arrayType(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) !Z { return astgen.failNode(len_node, "unable to infer array size", .{}); } - const len = try expr(gz, scope, .{ .coerced_ty = .usize_type }, len_node); + const len = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, len_node); const elem_type = try typeExpr(gz, scope, node_datas[node].rhs); const result = try gz.addPlNode(.array_type, node, Zir.Inst.Bin{ .lhs = len, .rhs = elem_type, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } -fn arrayTypeSentinel(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) !Zir.Inst.Ref { +fn arrayTypeSentinel(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) !Zir.Inst.Ref { const astgen = gz.astgen; const tree = astgen.tree; const node_datas = tree.nodes.items(.data); @@ -3295,16 +3413,16 @@ fn arrayTypeSentinel(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.I { return astgen.failNode(len_node, "unable to infer array size", .{}); } - const len = try reachableExpr(gz, scope, .{ .coerced_ty = .usize_type }, len_node, node); + const len = try reachableExpr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, len_node, node); const elem_type = try typeExpr(gz, scope, extra.elem_type); - const 
sentinel = try reachableExpr(gz, scope, .{ .coerced_ty = elem_type }, extra.sentinel, node); + const sentinel = try reachableExpr(gz, scope, .{ .rl = .{ .coerced_ty = elem_type } }, extra.sentinel, node); const result = try gz.addPlNode(.array_type_sentinel, node, Zir.Inst.ArrayTypeSentinel{ .len = len, .elem_type = elem_type, .sentinel = sentinel, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } const WipMembers = struct { @@ -3540,7 +3658,7 @@ fn fnDecl( assert(param_type_node != 0); var param_gz = decl_gz.makeSubBlock(scope); defer param_gz.unstack(); - const param_type = try expr(¶m_gz, params_scope, coerced_type_rl, param_type_node); + const param_type = try expr(¶m_gz, params_scope, coerced_type_ri, param_type_node); const param_inst_expected = @intCast(u32, astgen.instructions.len + 1); _ = try param_gz.addBreak(.break_inline, param_inst_expected, param_type); @@ -3589,7 +3707,7 @@ fn fnDecl( var align_gz = decl_gz.makeSubBlock(params_scope); defer align_gz.unstack(); const align_ref: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: { - const inst = try expr(&decl_gz, params_scope, coerced_align_rl, fn_proto.ast.align_expr); + const inst = try expr(&decl_gz, params_scope, coerced_align_ri, fn_proto.ast.align_expr); if (align_gz.instructionsSlice().len == 0) { // In this case we will send a len=0 body which can be encoded more efficiently. 
break :inst inst; @@ -3601,7 +3719,7 @@ fn fnDecl( var addrspace_gz = decl_gz.makeSubBlock(params_scope); defer addrspace_gz.unstack(); const addrspace_ref: Zir.Inst.Ref = if (fn_proto.ast.addrspace_expr == 0) .none else inst: { - const inst = try expr(&decl_gz, params_scope, .{ .coerced_ty = .address_space_type }, fn_proto.ast.addrspace_expr); + const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = .address_space_type } }, fn_proto.ast.addrspace_expr); if (addrspace_gz.instructionsSlice().len == 0) { // In this case we will send a len=0 body which can be encoded more efficiently. break :inst inst; @@ -3613,7 +3731,7 @@ fn fnDecl( var section_gz = decl_gz.makeSubBlock(params_scope); defer section_gz.unstack(); const section_ref: Zir.Inst.Ref = if (fn_proto.ast.section_expr == 0) .none else inst: { - const inst = try expr(&decl_gz, params_scope, .{ .coerced_ty = .const_slice_u8_type }, fn_proto.ast.section_expr); + const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = .const_slice_u8_type } }, fn_proto.ast.section_expr); if (section_gz.instructionsSlice().len == 0) { // In this case we will send a len=0 body which can be encoded more efficiently. break :inst inst; @@ -3636,7 +3754,7 @@ fn fnDecl( const inst = try expr( &decl_gz, params_scope, - .{ .coerced_ty = .calling_convention_type }, + .{ .rl = .{ .coerced_ty = .calling_convention_type } }, fn_proto.ast.callconv_expr, ); if (cc_gz.instructionsSlice().len == 0) { @@ -3658,7 +3776,7 @@ fn fnDecl( var ret_gz = decl_gz.makeSubBlock(params_scope); defer ret_gz.unstack(); const ret_ref: Zir.Inst.Ref = inst: { - const inst = try expr(&ret_gz, params_scope, coerced_type_rl, fn_proto.ast.return_type); + const inst = try expr(&ret_gz, params_scope, coerced_type_ri, fn_proto.ast.return_type); if (ret_gz.instructionsSlice().len == 0) { // In this case we will send a len=0 body which can be encoded more efficiently. 
break :inst inst; @@ -3712,10 +3830,13 @@ fn fnDecl( const lbrace_line = astgen.source_line - decl_gz.decl_line; const lbrace_column = astgen.source_column; - _ = try expr(&fn_gz, params_scope, .none, body_node); + _ = try expr(&fn_gz, params_scope, .{ .rl = .none }, body_node); try checkUsed(gz, &fn_gz.base, params_scope); if (!fn_gz.endsWithNoReturn()) { + // As our last action before the return, "pop" the error trace if needed + _ = try fn_gz.addRestoreErrRetIndex(.ret, .always); + // Since we are adding the return instruction here, we must handle the coercion. // We do this by using the `ret_tok` instruction. _ = try fn_gz.addUnTok(.ret_tok, .void_value, tree.lastToken(body_node)); } @@ -3808,13 +3929,13 @@ fn globalVarDecl( break :blk token_tags[maybe_extern_token] == .keyword_extern; }; const align_inst: Zir.Inst.Ref = if (var_decl.ast.align_node == 0) .none else inst: { - break :inst try expr(&block_scope, &block_scope.base, align_rl, var_decl.ast.align_node); + break :inst try expr(&block_scope, &block_scope.base, align_ri, var_decl.ast.align_node); }; const addrspace_inst: Zir.Inst.Ref = if (var_decl.ast.addrspace_node == 0) .none else inst: { - break :inst try expr(&block_scope, &block_scope.base, .{ .ty = .address_space_type }, var_decl.ast.addrspace_node); + break :inst try expr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .address_space_type } }, var_decl.ast.addrspace_node); }; const section_inst: Zir.Inst.Ref = if (var_decl.ast.section_node == 0) .none else inst: { - break :inst try comptimeExpr(&block_scope, &block_scope.base, .{ .ty = .const_slice_u8_type }, var_decl.ast.section_node); + break :inst try comptimeExpr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .const_slice_u8_type } }, var_decl.ast.section_node); }; const has_section_or_addrspace = section_inst != .none or addrspace_inst != .none; wip_members.nextDecl(is_pub, is_export, align_inst != .none, has_section_or_addrspace); @@ -3854,7 +3975,7 @@ fn globalVarDecl( try expr( 
&block_scope, &block_scope.base, - .{ .ty = .type_type }, + .{ .rl = .{ .ty = .type_type } }, var_decl.ast.type_node, ) else @@ -3863,7 +3984,7 @@ fn globalVarDecl( const init_inst = try expr( &block_scope, &block_scope.base, - if (type_inst != .none) .{ .ty = type_inst } else .none, + if (type_inst != .none) .{ .rl = .{ .ty = type_inst } } else .{ .rl = .none }, var_decl.ast.init_node, ); @@ -3952,7 +4073,7 @@ fn comptimeDecl( }; defer decl_block.unstack(); - const block_result = try expr(&decl_block, &decl_block.base, .none, body_node); + const block_result = try expr(&decl_block, &decl_block.base, .{ .rl = .none }, body_node); if (decl_block.isEmpty() or !decl_block.refIsNoReturn(block_result)) { _ = try decl_block.addBreak(.break_inline, block_inst, .void_value); } @@ -4156,8 +4277,12 @@ fn testDecl( const lbrace_line = astgen.source_line - decl_block.decl_line; const lbrace_column = astgen.source_column; - const block_result = try expr(&fn_block, &fn_block.base, .none, body_node); + const block_result = try expr(&fn_block, &fn_block.base, .{ .rl = .none }, body_node); if (fn_block.isEmpty() or !fn_block.refIsNoReturn(block_result)) { + + // As our last action before the return, "pop" the error trace if needed + _ = try fn_block.addRestoreErrRetIndex(.ret, .always); + // Since we are adding the return instruction here, we must handle the coercion. // We do this by using the `ret_tok` instruction. 
_ = try fn_block.addUnTok(.ret_tok, .void_value, tree.lastToken(body_node)); @@ -4370,7 +4495,7 @@ fn structDeclInner( if (layout == .Packed) { try astgen.appendErrorNode(member.ast.align_expr, "unable to override alignment of packed struct fields", .{}); } - const align_ref = try expr(&block_scope, &namespace.base, coerced_align_rl, member.ast.align_expr); + const align_ref = try expr(&block_scope, &namespace.base, coerced_align_ri, member.ast.align_expr); if (!block_scope.endsWithNoReturn()) { _ = try block_scope.addBreak(.break_inline, decl_inst, align_ref); } @@ -4383,9 +4508,9 @@ fn structDeclInner( } if (have_value) { - const rl: ResultLoc = if (field_type == .none) .none else .{ .coerced_ty = field_type }; + const ri: ResultInfo = .{ .rl = if (field_type == .none) .none else .{ .coerced_ty = field_type } }; - const default_inst = try expr(&block_scope, &namespace.base, rl, member.ast.value_expr); + const default_inst = try expr(&block_scope, &namespace.base, ri, member.ast.value_expr); if (!block_scope.endsWithNoReturn()) { _ = try block_scope.addBreak(.break_inline, decl_inst, default_inst); } @@ -4514,7 +4639,7 @@ fn unionDeclInner( return astgen.failNode(member_node, "union field missing type", .{}); } if (have_align) { - const align_inst = try expr(&block_scope, &block_scope.base, .{ .ty = .u32_type }, member.ast.align_expr); + const align_inst = try expr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .u32_type } }, member.ast.align_expr); wip_members.appendToField(@enumToInt(align_inst)); } if (have_value) { @@ -4546,7 +4671,7 @@ fn unionDeclInner( }, ); } - const tag_value = try expr(&block_scope, &block_scope.base, .{ .ty = arg_inst }, member.ast.value_expr); + const tag_value = try expr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = arg_inst } }, member.ast.value_expr); wip_members.appendToField(@enumToInt(tag_value)); } } @@ -4584,7 +4709,7 @@ fn unionDeclInner( fn containerDecl( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, 
node: Ast.Node.Index, container_decl: Ast.full.ContainerDecl, ) InnerError!Zir.Inst.Ref { @@ -4610,7 +4735,7 @@ fn containerDecl( } else std.builtin.Type.ContainerLayout.Auto; const result = try structDeclInner(gz, scope, node, container_decl, layout, container_decl.ast.arg); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .keyword_union => { const layout = if (container_decl.layout_token) |t| switch (token_tags[t]) { @@ -4620,7 +4745,7 @@ fn containerDecl( } else std.builtin.Type.ContainerLayout.Auto; const result = try unionDeclInner(gz, scope, node, container_decl.ast.members, layout, container_decl.ast.arg, container_decl.ast.enum_token); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .keyword_enum => { if (container_decl.layout_token) |t| { @@ -4750,7 +4875,7 @@ fn containerDecl( _ = try astgen.scanDecls(&namespace, container_decl.ast.members); const arg_inst: Zir.Inst.Ref = if (container_decl.ast.arg != 0) - try comptimeExpr(&block_scope, &namespace.base, .{ .ty = .type_type }, container_decl.ast.arg) + try comptimeExpr(&block_scope, &namespace.base, .{ .rl = .{ .ty = .type_type } }, container_decl.ast.arg) else .none; @@ -4794,7 +4919,7 @@ fn containerDecl( }, ); } - const tag_value_inst = try expr(&block_scope, &namespace.base, .{ .ty = arg_inst }, member.ast.value_expr); + const tag_value_inst = try expr(&block_scope, &namespace.base, .{ .rl = .{ .ty = arg_inst } }, member.ast.value_expr); wip_members.appendToField(@enumToInt(tag_value_inst)); } } @@ -4825,7 +4950,7 @@ fn containerDecl( block_scope.unstack(); try gz.addNamespaceCaptures(&namespace); - return rvalue(gz, rl, indexToRef(decl_inst), node); + return rvalue(gz, ri, indexToRef(decl_inst), node); }, .keyword_opaque => { assert(container_decl.ast.arg == 0); @@ -4875,7 +5000,7 @@ fn containerDecl( astgen.extra.appendSliceAssumeCapacity(decls_slice); try gz.addNamespaceCaptures(&namespace); - return rvalue(gz, rl, 
indexToRef(decl_inst), node); + return rvalue(gz, ri, indexToRef(decl_inst), node); }, else => unreachable, } @@ -5006,7 +5131,7 @@ fn containerMember( return .decl; } -fn errorSetDecl(gz: *GenZir, rl: ResultLoc, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { +fn errorSetDecl(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; const gpa = astgen.gpa; const tree = astgen.tree; @@ -5061,13 +5186,13 @@ fn errorSetDecl(gz: *GenZir, rl: ResultLoc, node: Ast.Node.Index) InnerError!Zir .fields_len = @intCast(u32, fields_len), }); const result = try gz.addPlNodePayloadIndex(.error_set_decl, node, payload_index); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } fn tryExpr( parent_gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, operand_node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { @@ -5097,15 +5222,15 @@ fn tryExpr( const try_line = astgen.source_line - parent_gz.decl_line; const try_column = astgen.source_column; - const operand_rl: ResultLoc = switch (rl) { - .ref => .ref, - else => .none, + const operand_ri: ResultInfo = switch (ri.rl) { + .ref => .{ .rl = .ref, .ctx = .error_handling_expr }, + else => .{ .rl = .none, .ctx = .error_handling_expr }, }; - // This could be a pointer or value depending on the `rl` parameter. - const operand = try reachableExpr(parent_gz, scope, operand_rl, operand_node, node); + // This could be a pointer or value depending on the `ri` parameter. 
+ const operand = try reachableExpr(parent_gz, scope, operand_ri, operand_node, node); const is_inline = parent_gz.force_comptime; const is_inline_bit = @as(u2, @boolToInt(is_inline)); - const is_ptr_bit = @as(u2, @boolToInt(operand_rl == .ref)) << 1; + const is_ptr_bit = @as(u2, @boolToInt(operand_ri.rl == .ref)) << 1; const block_tag: Zir.Inst.Tag = switch (is_inline_bit | is_ptr_bit) { 0b00 => .@"try", 0b01 => .@"try", @@ -5120,7 +5245,7 @@ fn tryExpr( var else_scope = parent_gz.makeSubBlock(scope); defer else_scope.unstack(); - const err_tag = switch (rl) { + const err_tag = switch (ri.rl) { .ref => Zir.Inst.Tag.err_union_code_ptr, else => Zir.Inst.Tag.err_union_code, }; @@ -5131,16 +5256,16 @@ fn tryExpr( try else_scope.setTryBody(try_inst, operand); const result = indexToRef(try_inst); - switch (rl) { + switch (ri.rl) { .ref => return result, - else => return rvalue(parent_gz, rl, result, node), + else => return rvalue(parent_gz, ri, result, node), } } fn orelseCatchExpr( parent_gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, lhs: Ast.Node.Index, cond_op: Zir.Inst.Tag, @@ -5152,20 +5277,22 @@ fn orelseCatchExpr( const astgen = parent_gz.astgen; const tree = astgen.tree; + const do_err_trace = astgen.fn_block != null and (cond_op == .is_non_err or cond_op == .is_non_err_ptr); + var block_scope = parent_gz.makeSubBlock(scope); - block_scope.setBreakResultLoc(rl); + block_scope.setBreakResultInfo(ri); defer block_scope.unstack(); - const operand_rl: ResultLoc = switch (block_scope.break_result_loc) { - .ref => .ref, - else => .none, + const operand_ri: ResultInfo = switch (block_scope.break_result_info.rl) { + .ref => .{ .rl = .ref, .ctx = if (do_err_trace) .error_handling_expr else .none }, + else => .{ .rl = .none, .ctx = if (do_err_trace) .error_handling_expr else .none }, }; block_scope.break_count += 1; - // This could be a pointer or value depending on the `operand_rl` parameter. 
- // We cannot use `block_scope.break_result_loc` because that has the bare + // This could be a pointer or value depending on the `operand_ri` parameter. + // We cannot use `block_scope.break_result_info` because that has the bare // type, whereas this expression has the optional type. Later we make // up for this fact by calling rvalue on the else branch. - const operand = try reachableExpr(&block_scope, &block_scope.base, operand_rl, lhs, rhs); + const operand = try reachableExpr(&block_scope, &block_scope.base, operand_ri, lhs, rhs); const cond = try block_scope.addUnNode(cond_op, operand, node); const condbr = try block_scope.addCondBr(.condbr, node); @@ -5179,14 +5306,19 @@ fn orelseCatchExpr( // This could be a pointer or value depending on `unwrap_op`. const unwrapped_payload = try then_scope.addUnNode(unwrap_op, operand, node); - const then_result = switch (rl) { + const then_result = switch (ri.rl) { .ref => unwrapped_payload, - else => try rvalue(&then_scope, block_scope.break_result_loc, unwrapped_payload, node), + else => try rvalue(&then_scope, block_scope.break_result_info, unwrapped_payload, node), }; var else_scope = block_scope.makeSubBlock(scope); defer else_scope.unstack(); + // We know that the operand (almost certainly) modified the error return trace, + // so signal to Sema that it should save the new index for restoring later. 
+ if (do_err_trace and nodeMayAppendToErrorTrace(tree, lhs)) + _ = try else_scope.addSaveErrRetIndex(.always); + var err_val_scope: Scope.LocalVal = undefined; const else_sub_scope = blk: { const payload = payload_token orelse break :blk &else_scope.base; @@ -5209,9 +5341,13 @@ fn orelseCatchExpr( break :blk &err_val_scope.base; }; - const else_result = try expr(&else_scope, else_sub_scope, block_scope.break_result_loc, rhs); + const else_result = try expr(&else_scope, else_sub_scope, block_scope.break_result_info, rhs); if (!else_scope.endsWithNoReturn()) { block_scope.break_count += 1; + + // As our last action before the break, "pop" the error trace if needed + if (do_err_trace) + try restoreErrRetIndex(&else_scope, .{ .block = block }, block_scope.break_result_info, rhs, else_result); } try checkUsed(parent_gz, &else_scope.base, else_sub_scope); @@ -5220,9 +5356,9 @@ fn orelseCatchExpr( // instructions or not. const break_tag: Zir.Inst.Tag = if (parent_gz.force_comptime) .break_inline else .@"break"; - return finishThenElseBlock( + const result = try finishThenElseBlock( parent_gz, - rl, + ri, node, &block_scope, &then_scope, @@ -5235,12 +5371,13 @@ fn orelseCatchExpr( block, break_tag, ); + return result; } /// Supports `else_scope` stacked on `then_scope` stacked on `block_scope`. Unstacks `else_scope` then `then_scope`. fn finishThenElseBlock( parent_gz: *GenZir, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, block_scope: *GenZir, then_scope: *GenZir, @@ -5255,7 +5392,7 @@ fn finishThenElseBlock( ) InnerError!Zir.Inst.Ref { // We now have enough information to decide whether the result instruction should // be communicated via result location pointer or break instructions. 
- const strat = rl.strategy(block_scope); + const strat = ri.rl.strategy(block_scope); // else_scope may be stacked on then_scope, so check for no-return on then_scope manually const tags = parent_gz.astgen.instructions.items(.tag); const then_slice = then_scope.instructionsSliceUpto(else_scope); @@ -5285,9 +5422,9 @@ fn finishThenElseBlock( try setCondBrPayload(condbr, cond, then_scope, then_break, else_scope, else_break); } const block_ref = indexToRef(main_block); - switch (rl) { + switch (ri.rl) { .ref => return block_ref, - else => return rvalue(parent_gz, rl, block_ref, node), + else => return rvalue(parent_gz, ri, block_ref, node), } }, } @@ -5306,14 +5443,14 @@ fn tokenIdentEql(astgen: *AstGen, token1: Ast.TokenIndex, token2: Ast.TokenIndex fn fieldAccess( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { - switch (rl) { - .ref => return addFieldAccess(.field_ptr, gz, scope, .ref, node), + switch (ri.rl) { + .ref => return addFieldAccess(.field_ptr, gz, scope, .{ .rl = .ref }, node), else => { - const access = try addFieldAccess(.field_val, gz, scope, .none, node); - return rvalue(gz, rl, access, node); + const access = try addFieldAccess(.field_val, gz, scope, .{ .rl = .none }, node); + return rvalue(gz, ri, access, node); }, } } @@ -5322,7 +5459,7 @@ fn addFieldAccess( tag: Zir.Inst.Tag, gz: *GenZir, scope: *Scope, - lhs_rl: ResultLoc, + lhs_ri: ResultInfo, node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; @@ -5336,7 +5473,7 @@ fn addFieldAccess( const str_index = try astgen.identAsString(field_ident); return gz.addPlNode(tag, node, Zir.Inst.Field{ - .lhs = try expr(gz, scope, lhs_rl, object_node), + .lhs = try expr(gz, scope, lhs_ri, object_node), .field_name_start = str_index, }); } @@ -5344,20 +5481,20 @@ fn addFieldAccess( fn arrayAccess( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const astgen = 
gz.astgen; const tree = astgen.tree; const node_datas = tree.nodes.items(.data); - switch (rl) { + switch (ri.rl) { .ref => return gz.addPlNode(.elem_ptr_node, node, Zir.Inst.Bin{ - .lhs = try expr(gz, scope, .ref, node_datas[node].lhs), - .rhs = try expr(gz, scope, .{ .ty = .usize_type }, node_datas[node].rhs), + .lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs), + .rhs = try expr(gz, scope, .{ .rl = .{ .ty = .usize_type } }, node_datas[node].rhs), }), - else => return rvalue(gz, rl, try gz.addPlNode(.elem_val_node, node, Zir.Inst.Bin{ - .lhs = try expr(gz, scope, .none, node_datas[node].lhs), - .rhs = try expr(gz, scope, .{ .ty = .usize_type }, node_datas[node].rhs), + else => return rvalue(gz, ri, try gz.addPlNode(.elem_val_node, node, Zir.Inst.Bin{ + .lhs = try expr(gz, scope, .{ .rl = .none }, node_datas[node].lhs), + .rhs = try expr(gz, scope, .{ .rl = .{ .ty = .usize_type } }, node_datas[node].rhs), }), node), } } @@ -5365,7 +5502,7 @@ fn arrayAccess( fn simpleBinOp( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, op_inst_tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { @@ -5374,15 +5511,15 @@ fn simpleBinOp( const node_datas = tree.nodes.items(.data); const result = try gz.addPlNode(op_inst_tag, node, Zir.Inst.Bin{ - .lhs = try reachableExpr(gz, scope, .none, node_datas[node].lhs, node), - .rhs = try reachableExpr(gz, scope, .none, node_datas[node].rhs, node), + .lhs = try reachableExpr(gz, scope, .{ .rl = .none }, node_datas[node].lhs, node), + .rhs = try reachableExpr(gz, scope, .{ .rl = .none }, node_datas[node].rhs, node), }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } fn simpleStrTok( gz: *GenZir, - rl: ResultLoc, + ri: ResultInfo, ident_token: Ast.TokenIndex, node: Ast.Node.Index, op_inst_tag: Zir.Inst.Tag, @@ -5390,13 +5527,13 @@ fn simpleStrTok( const astgen = gz.astgen; const str_index = try astgen.identAsString(ident_token); const result = try 
gz.addStrTok(op_inst_tag, str_index, ident_token); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } fn boolBinOp( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, zir_tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { @@ -5404,25 +5541,25 @@ fn boolBinOp( const tree = astgen.tree; const node_datas = tree.nodes.items(.data); - const lhs = try expr(gz, scope, bool_rl, node_datas[node].lhs); + const lhs = try expr(gz, scope, bool_ri, node_datas[node].lhs); const bool_br = try gz.addBoolBr(zir_tag, lhs); var rhs_scope = gz.makeSubBlock(scope); defer rhs_scope.unstack(); - const rhs = try expr(&rhs_scope, &rhs_scope.base, bool_rl, node_datas[node].rhs); + const rhs = try expr(&rhs_scope, &rhs_scope.base, bool_ri, node_datas[node].rhs); if (!gz.refIsNoReturn(rhs)) { _ = try rhs_scope.addBreak(.break_inline, bool_br, rhs); } try rhs_scope.setBoolBrBody(bool_br); const block_ref = indexToRef(bool_br); - return rvalue(gz, rl, block_ref, node); + return rvalue(gz, ri, block_ref, node); } fn ifExpr( parent_gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, if_full: Ast.full.If, ) InnerError!Zir.Inst.Ref { @@ -5430,8 +5567,10 @@ fn ifExpr( const tree = astgen.tree; const token_tags = tree.tokens.items(.tag); + const do_err_trace = astgen.fn_block != null and if_full.error_token != null; + var block_scope = parent_gz.makeSubBlock(scope); - block_scope.setBreakResultLoc(rl); + block_scope.setBreakResultInfo(ri); defer block_scope.unstack(); const payload_is_ref = if (if_full.payload_token) |payload_token| @@ -5445,23 +5584,23 @@ fn ifExpr( bool_bit: Zir.Inst.Ref, } = c: { if (if_full.error_token) |_| { - const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none; - const err_union = try expr(&block_scope, &block_scope.base, cond_rl, if_full.ast.cond_expr); + const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none, .ctx = .error_handling_expr }; + const err_union 
= try expr(&block_scope, &block_scope.base, cond_ri, if_full.ast.cond_expr); const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_err_ptr else .is_non_err; break :c .{ .inst = err_union, .bool_bit = try block_scope.addUnNode(tag, err_union, if_full.ast.cond_expr), }; } else if (if_full.payload_token) |_| { - const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none; - const optional = try expr(&block_scope, &block_scope.base, cond_rl, if_full.ast.cond_expr); + const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none }; + const optional = try expr(&block_scope, &block_scope.base, cond_ri, if_full.ast.cond_expr); const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_null_ptr else .is_non_null; break :c .{ .inst = optional, .bool_bit = try block_scope.addUnNode(tag, optional, if_full.ast.cond_expr), }; } else { - const cond = try expr(&block_scope, &block_scope.base, bool_rl, if_full.ast.cond_expr); + const cond = try expr(&block_scope, &block_scope.base, bool_ri, if_full.ast.cond_expr); break :c .{ .inst = cond, .bool_bit = cond, @@ -5537,7 +5676,7 @@ fn ifExpr( } }; - const then_result = try expr(&then_scope, then_sub_scope, block_scope.break_result_loc, if_full.ast.then_expr); + const then_result = try expr(&then_scope, then_sub_scope, block_scope.break_result_info, if_full.ast.then_expr); if (!then_scope.endsWithNoReturn()) { block_scope.break_count += 1; } @@ -5550,6 +5689,11 @@ fn ifExpr( var else_scope = parent_gz.makeSubBlock(scope); defer else_scope.unstack(); + // We know that the operand (almost certainly) modified the error return trace, + // so signal to Sema that it should save the new index for restoring later. 
+ if (do_err_trace and nodeMayAppendToErrorTrace(tree, if_full.ast.cond_expr)) + _ = try else_scope.addSaveErrRetIndex(.always); + const else_node = if_full.ast.else_expr; const else_info: struct { src: Ast.Node.Index, @@ -5582,9 +5726,13 @@ fn ifExpr( break :s &else_scope.base; } }; - const e = try expr(&else_scope, sub_scope, block_scope.break_result_loc, else_node); + const e = try expr(&else_scope, sub_scope, block_scope.break_result_info, else_node); if (!else_scope.endsWithNoReturn()) { block_scope.break_count += 1; + + // As our last action before the break, "pop" the error trace if needed + if (do_err_trace) + try restoreErrRetIndex(&else_scope, .{ .block = block }, block_scope.break_result_info, else_node, e); } try checkUsed(parent_gz, &else_scope.base, sub_scope); try else_scope.addDbgBlockEnd(); @@ -5594,17 +5742,17 @@ fn ifExpr( }; } else .{ .src = if_full.ast.then_expr, - .result = switch (rl) { + .result = switch (ri.rl) { // Explicitly store void to ptr result loc if there is no else branch - .ptr, .block_ptr => try rvalue(&else_scope, rl, .void_value, node), + .ptr, .block_ptr => try rvalue(&else_scope, ri, .void_value, node), else => .none, }, }; const break_tag: Zir.Inst.Tag = if (parent_gz.force_comptime) .break_inline else .@"break"; - return finishThenElseBlock( + const result = try finishThenElseBlock( parent_gz, - rl, + ri, node, &block_scope, &then_scope, @@ -5617,6 +5765,7 @@ fn ifExpr( block, break_tag, ); + return result; } /// Supports `else_scope` stacked on `then_scope`. Unstacks `else_scope` then `then_scope`. 
@@ -5737,7 +5886,7 @@ fn setCondBrPayloadElideBlockStorePtr( fn whileExpr( parent_gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, while_full: Ast.full.While, is_statement: bool, @@ -5757,7 +5906,7 @@ fn whileExpr( var loop_scope = parent_gz.makeSubBlock(scope); loop_scope.is_inline = is_inline; - loop_scope.setBreakResultLoc(rl); + loop_scope.setBreakResultInfo(ri); defer loop_scope.unstack(); defer loop_scope.labeled_breaks.deinit(astgen.gpa); @@ -5775,23 +5924,23 @@ fn whileExpr( bool_bit: Zir.Inst.Ref, } = c: { if (while_full.error_token) |_| { - const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none; - const err_union = try expr(&continue_scope, &continue_scope.base, cond_rl, while_full.ast.cond_expr); + const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none }; + const err_union = try expr(&continue_scope, &continue_scope.base, cond_ri, while_full.ast.cond_expr); const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_err_ptr else .is_non_err; break :c .{ .inst = err_union, .bool_bit = try continue_scope.addUnNode(tag, err_union, while_full.ast.then_expr), }; } else if (while_full.payload_token) |_| { - const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none; - const optional = try expr(&continue_scope, &continue_scope.base, cond_rl, while_full.ast.cond_expr); + const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none }; + const optional = try expr(&continue_scope, &continue_scope.base, cond_ri, while_full.ast.cond_expr); const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_null_ptr else .is_non_null; break :c .{ .inst = optional, .bool_bit = try continue_scope.addUnNode(tag, optional, while_full.ast.then_expr), }; } else { - const cond = try expr(&continue_scope, &continue_scope.base, bool_rl, while_full.ast.cond_expr); + const cond = try expr(&continue_scope, &continue_scope.base, bool_ri, while_full.ast.cond_expr); break :c .{ .inst = cond, .bool_bit = cond, @@ -5910,7 
+6059,7 @@ fn whileExpr( if (dbg_var_name) |some| { try then_scope.addDbgVar(.dbg_var_val, some, dbg_var_inst); } - const then_result = try expr(&then_scope, then_sub_scope, .none, while_full.ast.then_expr); + const then_result = try expr(&then_scope, then_sub_scope, .{ .rl = .none }, while_full.ast.then_expr); _ = try addEnsureResult(&then_scope, then_result, while_full.ast.then_expr); try checkUsed(parent_gz, &then_scope.base, then_sub_scope); @@ -5955,7 +6104,7 @@ fn whileExpr( // control flow apply to outer loops; not this one. loop_scope.continue_block = 0; loop_scope.break_block = 0; - const else_result = try expr(&else_scope, sub_scope, loop_scope.break_result_loc, else_node); + const else_result = try expr(&else_scope, sub_scope, loop_scope.break_result_info, else_node); if (is_statement) { _ = try addEnsureResult(&else_scope, else_result, else_node); } @@ -5982,7 +6131,7 @@ fn whileExpr( const break_tag: Zir.Inst.Tag = if (is_inline) .break_inline else .@"break"; const result = try finishThenElseBlock( parent_gz, - rl, + ri, node, &loop_scope, &then_scope, @@ -6004,7 +6153,7 @@ fn whileExpr( fn forExpr( parent_gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, for_full: Ast.full.While, is_statement: bool, @@ -6027,8 +6176,8 @@ fn forExpr( try emitDbgNode(parent_gz, for_full.ast.cond_expr); - const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none; - const array_ptr = try expr(parent_gz, scope, cond_rl, for_full.ast.cond_expr); + const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none }; + const array_ptr = try expr(parent_gz, scope, cond_ri, for_full.ast.cond_expr); const len = try parent_gz.addUnNode(.indexable_ptr_len, array_ptr, for_full.ast.cond_expr); const index_ptr = blk: { @@ -6045,7 +6194,7 @@ fn forExpr( var loop_scope = parent_gz.makeSubBlock(scope); loop_scope.is_inline = is_inline; - loop_scope.setBreakResultLoc(rl); + loop_scope.setBreakResultInfo(ri); defer loop_scope.unstack(); defer 
loop_scope.labeled_breaks.deinit(astgen.gpa); @@ -6149,7 +6298,7 @@ fn forExpr( break :blk &index_scope.base; }; - const then_result = try expr(&then_scope, then_sub_scope, .none, for_full.ast.then_expr); + const then_result = try expr(&then_scope, then_sub_scope, .{ .rl = .none }, for_full.ast.then_expr); _ = try addEnsureResult(&then_scope, then_result, for_full.ast.then_expr); try checkUsed(parent_gz, &then_scope.base, then_sub_scope); @@ -6168,7 +6317,7 @@ fn forExpr( // control flow apply to outer loops; not this one. loop_scope.continue_block = 0; loop_scope.break_block = 0; - const else_result = try expr(&else_scope, sub_scope, loop_scope.break_result_loc, else_node); + const else_result = try expr(&else_scope, sub_scope, loop_scope.break_result_info, else_node); if (is_statement) { _ = try addEnsureResult(&else_scope, else_result, else_node); } @@ -6193,7 +6342,7 @@ fn forExpr( const break_tag: Zir.Inst.Tag = if (is_inline) .break_inline else .@"break"; const result = try finishThenElseBlock( parent_gz, - rl, + ri, node, &loop_scope, &then_scope, @@ -6215,7 +6364,7 @@ fn forExpr( fn switchExpr( parent_gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, switch_node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const astgen = parent_gz.astgen; @@ -6346,13 +6495,13 @@ fn switchExpr( } } - const operand_rl: ResultLoc = if (any_payload_is_ref) .ref else .none; - const raw_operand = try expr(parent_gz, scope, operand_rl, operand_node); + const operand_ri: ResultInfo = .{ .rl = if (any_payload_is_ref) .ref else .none }; + const raw_operand = try expr(parent_gz, scope, operand_ri, operand_node); const cond_tag: Zir.Inst.Tag = if (any_payload_is_ref) .switch_cond_ref else .switch_cond; const cond = try parent_gz.addUnNode(cond_tag, raw_operand, operand_node); // We need the type of the operand to use as the result location for all the prong items. 
const cond_ty_inst = try parent_gz.addUnNode(.typeof, cond, operand_node); - const item_rl: ResultLoc = .{ .ty = cond_ty_inst }; + const item_ri: ResultInfo = .{ .rl = .{ .ty = cond_ty_inst } }; // This contains the data that goes into the `extra` array for the SwitchBlock/SwitchBlockMulti, // except the first cases_nodes.len slots are a table that indexes payloads later in the array, with @@ -6369,7 +6518,7 @@ fn switchExpr( var block_scope = parent_gz.makeSubBlock(scope); // block_scope not used for collecting instructions block_scope.instructions_top = GenZir.unstacked_top; - block_scope.setBreakResultLoc(rl); + block_scope.setBreakResultInfo(ri); // This gets added to the parent block later, after the item expressions. const switch_block = try parent_gz.makeBlockInst(.switch_block, switch_node); @@ -6510,7 +6659,7 @@ fn switchExpr( if (node_tags[item_node] == .switch_range) continue; items_len += 1; - const item_inst = try comptimeExpr(parent_gz, scope, item_rl, item_node); + const item_inst = try comptimeExpr(parent_gz, scope, item_ri, item_node); try payloads.append(gpa, @enumToInt(item_inst)); } @@ -6520,8 +6669,8 @@ fn switchExpr( if (node_tags[range] != .switch_range) continue; ranges_len += 1; - const first = try comptimeExpr(parent_gz, scope, item_rl, node_datas[range].lhs); - const last = try comptimeExpr(parent_gz, scope, item_rl, node_datas[range].rhs); + const first = try comptimeExpr(parent_gz, scope, item_ri, node_datas[range].lhs); + const last = try comptimeExpr(parent_gz, scope, item_ri, node_datas[range].rhs); try payloads.appendSlice(gpa, &[_]u32{ @enumToInt(first), @enumToInt(last), }); @@ -6539,7 +6688,7 @@ fn switchExpr( scalar_case_index += 1; try payloads.resize(gpa, header_index + 2); // item, body_len const item_node = case.ast.values[0]; - const item_inst = try comptimeExpr(parent_gz, scope, item_rl, item_node); + const item_inst = try comptimeExpr(parent_gz, scope, item_ri, item_node); payloads.items[header_index] = 
@enumToInt(item_inst); break :blk header_index + 1; }; @@ -6558,7 +6707,7 @@ fn switchExpr( if (dbg_var_tag_name) |some| { try case_scope.addDbgVar(.dbg_var_val, some, dbg_var_tag_inst); } - const case_result = try expr(&case_scope, sub_scope, block_scope.break_result_loc, case.ast.target_expr); + const case_result = try expr(&case_scope, sub_scope, block_scope.break_result_info, case.ast.target_expr); try checkUsed(parent_gz, &case_scope.base, sub_scope); try case_scope.addDbgBlockEnd(); if (!parent_gz.refIsNoReturn(case_result)) { @@ -6600,7 +6749,7 @@ fn switchExpr( zir_datas[switch_block].pl_node.payload_index = payload_index; - const strat = rl.strategy(&block_scope); + const strat = ri.rl.strategy(&block_scope); for (payloads.items[case_table_start..case_table_end]) |start_index, i| { var body_len_index = start_index; var end_index = start_index; @@ -6672,8 +6821,8 @@ fn switchExpr( } const block_ref = indexToRef(switch_block); - if (strat.tag == .break_operand and strat.elide_store_to_block_ptr_instructions and rl != .ref) - return rvalue(parent_gz, rl, block_ref, switch_node); + if (strat.tag == .break_operand and strat.elide_store_to_block_ptr_instructions and ri.rl != .ref) + return rvalue(parent_gz, ri, block_ref, switch_node); return block_ref; } @@ -6713,6 +6862,10 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref if (operand_node == 0) { // Returning a void value; skip error defers. 
try genDefers(gz, defer_outer, scope, .normal_only); + + // As our last action before the return, "pop" the error trace if needed + _ = try gz.addRestoreErrRetIndex(.ret, .always); + _ = try gz.addUnNode(.ret_node, .void_value, node); return Zir.Inst.Ref.unreachable_value; } @@ -6736,30 +6889,36 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref return Zir.Inst.Ref.unreachable_value; } - const rl: ResultLoc = if (nodeMayNeedMemoryLocation(tree, operand_node, true)) .{ - .ptr = .{ .inst = try gz.addNode(.ret_ptr, node) }, + const ri: ResultInfo = if (nodeMayNeedMemoryLocation(tree, operand_node, true)) .{ + .rl = .{ .ptr = .{ .inst = try gz.addNode(.ret_ptr, node) } }, + .ctx = .@"return", } else .{ - .ty = try gz.addNode(.ret_type, node), + .rl = .{ .ty = try gz.addNode(.ret_type, node) }, + .ctx = .@"return", }; const prev_anon_name_strategy = gz.anon_name_strategy; gz.anon_name_strategy = .func; - const operand = try reachableExpr(gz, scope, rl, operand_node, node); + const operand = try reachableExpr(gz, scope, ri, operand_node, node); gz.anon_name_strategy = prev_anon_name_strategy; switch (nodeMayEvalToError(tree, operand_node)) { .never => { // Returning a value that cannot be an error; skip error defers. try genDefers(gz, defer_outer, scope, .normal_only); + + // As our last action before the return, "pop" the error trace if needed + _ = try gz.addRestoreErrRetIndex(.ret, .always); + try emitDbgStmt(gz, ret_line, ret_column); - try gz.addRet(rl, operand, node); + try gz.addRet(ri, operand, node); return Zir.Inst.Ref.unreachable_value; }, .always => { // Value is always an error. Emit both error defers and regular defers. 
- const err_code = if (rl == .ptr) try gz.addUnNode(.load, rl.ptr.inst, node) else operand; + const err_code = if (ri.rl == .ptr) try gz.addUnNode(.load, ri.rl.ptr.inst, node) else operand; try genDefers(gz, defer_outer, scope, .{ .both = err_code }); try emitDbgStmt(gz, ret_line, ret_column); - try gz.addRet(rl, operand, node); + try gz.addRet(ri, operand, node); return Zir.Inst.Ref.unreachable_value; }, .maybe => { @@ -6768,12 +6927,17 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref // Only regular defers; no branch needed. try genDefers(gz, defer_outer, scope, .normal_only); try emitDbgStmt(gz, ret_line, ret_column); - try gz.addRet(rl, operand, node); + + // As our last action before the return, "pop" the error trace if needed + const result = if (ri.rl == .ptr) try gz.addUnNode(.load, ri.rl.ptr.inst, node) else operand; + _ = try gz.addRestoreErrRetIndex(.ret, .{ .if_non_error = result }); + + try gz.addRet(ri, operand, node); return Zir.Inst.Ref.unreachable_value; } // Emit conditional branch for generating errdefers. 
- const result = if (rl == .ptr) try gz.addUnNode(.load, rl.ptr.inst, node) else operand; + const result = if (ri.rl == .ptr) try gz.addUnNode(.load, ri.rl.ptr.inst, node) else operand; const is_non_err = try gz.addUnNode(.is_non_err, result, node); const condbr = try gz.addCondBr(.condbr, node); @@ -6781,8 +6945,12 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref defer then_scope.unstack(); try genDefers(&then_scope, defer_outer, scope, .normal_only); + + // As our last action before the return, "pop" the error trace if needed + _ = try then_scope.addRestoreErrRetIndex(.ret, .always); + try emitDbgStmt(&then_scope, ret_line, ret_column); - try then_scope.addRet(rl, operand, node); + try then_scope.addRet(ri, operand, node); var else_scope = gz.makeSubBlock(scope); defer else_scope.unstack(); @@ -6792,7 +6960,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref }; try genDefers(&else_scope, defer_outer, scope, which_ones); try emitDbgStmt(&else_scope, ret_line, ret_column); - try else_scope.addRet(rl, operand, node); + try else_scope.addRet(ri, operand, node); try setCondBrPayload(condbr, is_non_err, &then_scope, 0, &else_scope, 0); @@ -6825,7 +6993,7 @@ fn parseBitCount(buf: []const u8) std.fmt.ParseIntError!u16 { fn identifier( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, ident: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const tracy = trace(@src()); @@ -6844,7 +7012,7 @@ fn identifier( // if not @"" syntax, just use raw token slice if (ident_name_raw[0] != '@') { if (primitives.get(ident_name_raw)) |zir_const_ref| { - return rvalue(gz, rl, zir_const_ref, ident); + return rvalue(gz, ri, zir_const_ref, ident); } if (ident_name_raw.len >= 2) integer: { @@ -6877,19 +7045,19 @@ fn identifier( .bit_count = bit_count, } }, }); - return rvalue(gz, rl, result, ident); + return rvalue(gz, ri, result, ident); } } } // Local variables, including function parameters. 
- return localVarRef(gz, scope, rl, ident, ident_token); + return localVarRef(gz, scope, ri, ident, ident_token); } fn localVarRef( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, ident: Ast.Node.Index, ident_token: Ast.TokenIndex, ) InnerError!Zir.Inst.Ref { @@ -6907,7 +7075,7 @@ fn localVarRef( if (local_val.name == name_str_index) { // Locals cannot shadow anything, so we do not need to look for ambiguous // references in this case. - if (rl == .discard) { + if (ri.rl == .discard) { local_val.discarded = ident_token; } else { local_val.used = ident_token; @@ -6923,14 +7091,14 @@ fn localVarRef( gpa, ); - return rvalue(gz, rl, value_inst, ident); + return rvalue(gz, ri, value_inst, ident); } s = local_val.parent; }, .local_ptr => { const local_ptr = s.cast(Scope.LocalPtr).?; if (local_ptr.name == name_str_index) { - if (rl == .discard) { + if (ri.rl == .discard) { local_ptr.discarded = ident_token; } else { local_ptr.used = ident_token; @@ -6955,11 +7123,11 @@ fn localVarRef( gpa, ); - switch (rl) { + switch (ri.rl) { .ref => return ptr_inst, else => { const loaded = try gz.addUnNode(.load, ptr_inst, ident); - return rvalue(gz, rl, loaded, ident); + return rvalue(gz, ri, loaded, ident); }, } } @@ -6992,11 +7160,11 @@ fn localVarRef( // Decl references happen by name rather than ZIR index so that when unrelated // decls are modified, ZIR code containing references to them can be unmodified. 
- switch (rl) { + switch (ri.rl) { .ref => return gz.addStrTok(.decl_ref, name_str_index, ident_token), else => { const result = try gz.addStrTok(.decl_val, name_str_index, ident_token); - return rvalue(gz, rl, result, ident); + return rvalue(gz, ri, result, ident); }, } } @@ -7040,7 +7208,7 @@ fn tunnelThroughClosure( fn stringLiteral( gz: *GenZir, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; @@ -7055,12 +7223,12 @@ fn stringLiteral( .len = str.len, } }, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } fn multilineStringLiteral( gz: *GenZir, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; @@ -7072,10 +7240,10 @@ fn multilineStringLiteral( .len = str.len, } }, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } -fn charLiteral(gz: *GenZir, rl: ResultLoc, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { +fn charLiteral(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; const tree = astgen.tree; const main_tokens = tree.nodes.items(.main_token); @@ -7085,7 +7253,7 @@ fn charLiteral(gz: *GenZir, rl: ResultLoc, node: Ast.Node.Index) InnerError!Zir. switch (std.zig.parseCharLiteral(slice)) { .success => |codepoint| { const result = try gz.addInt(codepoint); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .failure => |err| return astgen.failWithStrLitError(err, main_token, slice, 0), } @@ -7093,7 +7261,7 @@ fn charLiteral(gz: *GenZir, rl: ResultLoc, node: Ast.Node.Index) InnerError!Zir. 
const Sign = enum { negative, positive }; -fn numberLiteral(gz: *GenZir, rl: ResultLoc, node: Ast.Node.Index, source_node: Ast.Node.Index, sign: Sign) InnerError!Zir.Inst.Ref { +fn numberLiteral(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index, source_node: Ast.Node.Index, sign: Sign) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; const tree = astgen.tree; const main_tokens = tree.nodes.items(.main_token); @@ -7135,7 +7303,7 @@ fn numberLiteral(gz: *GenZir, rl: ResultLoc, node: Ast.Node.Index, source_node: const bigger_again: f128 = smaller_float; if (bigger_again == float_number) { const result = try gz.addFloat(smaller_float); - return rvalue(gz, rl, result, source_node); + return rvalue(gz, ri, result, source_node); } // We need to use 128 bits. Break the float into 4 u32 values so we can // put it into the `extra` array. @@ -7146,16 +7314,16 @@ fn numberLiteral(gz: *GenZir, rl: ResultLoc, node: Ast.Node.Index, source_node: .piece2 = @truncate(u32, int_bits >> 64), .piece3 = @truncate(u32, int_bits >> 96), }); - return rvalue(gz, rl, result, source_node); + return rvalue(gz, ri, result, source_node); }, .failure => |err| return astgen.failWithNumberError(err, num_token, bytes), }; if (sign == .positive) { - return rvalue(gz, rl, result, source_node); + return rvalue(gz, ri, result, source_node); } else { const negated = try gz.addUnNode(.negate, result, source_node); - return rvalue(gz, rl, negated, source_node); + return rvalue(gz, ri, negated, source_node); } } @@ -7191,7 +7359,7 @@ fn failWithNumberError(astgen: *AstGen, err: std.zig.number_literal.Error, token fn asmExpr( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, full: Ast.full.Asm, ) InnerError!Zir.Inst.Ref { @@ -7214,7 +7382,7 @@ fn asmExpr( }, else => .{ .tag = .asm_expr, - .tmpl = @enumToInt(try comptimeExpr(gz, scope, .none, full.ast.template)), + .tmpl = @enumToInt(try comptimeExpr(gz, scope, .{ .rl = .none }, full.ast.template)), }, }; @@ -7266,7 
+7434,7 @@ fn asmExpr( outputs[i] = .{ .name = name, .constraint = constraint, - .operand = try localVarRef(gz, scope, .ref, node, ident_token), + .operand = try localVarRef(gz, scope, .{ .rl = .ref }, node, ident_token), }; } } @@ -7282,7 +7450,7 @@ fn asmExpr( const name = try astgen.identAsString(symbolic_name); const constraint_token = symbolic_name + 2; const constraint = (try astgen.strLitAsString(constraint_token)).index; - const operand = try expr(gz, scope, .none, node_datas[input_node].lhs); + const operand = try expr(gz, scope, .{ .rl = .none }, node_datas[input_node].lhs); inputs[i] = .{ .name = name, .constraint = constraint, @@ -7327,31 +7495,31 @@ fn asmExpr( .inputs = inputs, .clobbers = clobbers_buffer[0..clobber_i], }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } fn as( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, lhs: Ast.Node.Index, rhs: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const dest_type = try typeExpr(gz, scope, lhs); - switch (rl) { - .none, .discard, .ref, .ty, .ty_shift_operand, .coerced_ty => { - const result = try reachableExpr(gz, scope, .{ .ty = dest_type }, rhs, node); - return rvalue(gz, rl, result, node); + switch (ri.rl) { + .none, .discard, .ref, .ty, .coerced_ty => { + const result = try reachableExpr(gz, scope, .{ .rl = .{ .ty = dest_type } }, rhs, node); + return rvalue(gz, ri, result, node); }, .ptr => |result_ptr| { - return asRlPtr(gz, scope, rl, node, result_ptr.inst, rhs, dest_type); + return asRlPtr(gz, scope, ri, node, result_ptr.inst, rhs, dest_type); }, .inferred_ptr => |result_ptr| { - return asRlPtr(gz, scope, rl, node, result_ptr, rhs, dest_type); + return asRlPtr(gz, scope, ri, node, result_ptr, rhs, dest_type); }, .block_ptr => |block_scope| { - return asRlPtr(gz, scope, rl, node, block_scope.rl_ptr, rhs, dest_type); + return asRlPtr(gz, scope, ri, node, block_scope.rl_ptr, rhs, dest_type); }, } } @@ -7359,29 +7527,29 @@ fn as( fn 
unionInit( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, params: []const Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const union_type = try typeExpr(gz, scope, params[0]); - const field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]); const field_type = try gz.addPlNode(.field_type_ref, params[1], Zir.Inst.FieldTypeRef{ .container_type = union_type, .field_name = field_name, }); - const init = try reachableExpr(gz, scope, .{ .ty = field_type }, params[2], node); + const init = try reachableExpr(gz, scope, .{ .rl = .{ .ty = field_type } }, params[2], node); const result = try gz.addPlNode(.union_init, node, Zir.Inst.UnionInit{ .union_type = union_type, .init = init, .field_name = field_name, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } fn asRlPtr( parent_gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, src_node: Ast.Node.Index, result_ptr: Zir.Inst.Ref, operand_node: Ast.Node.Index, @@ -7390,31 +7558,31 @@ fn asRlPtr( var as_scope = try parent_gz.makeCoercionScope(scope, dest_type, result_ptr, src_node); defer as_scope.unstack(); - const result = try reachableExpr(&as_scope, &as_scope.base, .{ .block_ptr = &as_scope }, operand_node, src_node); - return as_scope.finishCoercion(parent_gz, rl, operand_node, result, dest_type); + const result = try reachableExpr(&as_scope, &as_scope.base, .{ .rl = .{ .block_ptr = &as_scope } }, operand_node, src_node); + return as_scope.finishCoercion(parent_gz, ri, operand_node, result, dest_type); } fn bitCast( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, lhs: Ast.Node.Index, rhs: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const dest_type = try reachableTypeExpr(gz, scope, lhs, node); - const operand = try reachableExpr(gz, scope, .none, rhs, node); + const operand = try 
reachableExpr(gz, scope, .{ .rl = .none }, rhs, node); const result = try gz.addPlNode(.bitcast, node, Zir.Inst.Bin{ .lhs = dest_type, .rhs = operand, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } fn typeOf( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, args: []const Ast.Node.Index, ) InnerError!Zir.Inst.Ref { @@ -7430,7 +7598,7 @@ fn typeOf( typeof_scope.force_comptime = false; defer typeof_scope.unstack(); - const ty_expr = try reachableExpr(&typeof_scope, &typeof_scope.base, .none, args[0], node); + const ty_expr = try reachableExpr(&typeof_scope, &typeof_scope.base, .{ .rl = .none }, args[0], node); if (!gz.refIsNoReturn(ty_expr)) { _ = try typeof_scope.addBreak(.break_inline, typeof_inst, ty_expr); } @@ -7438,7 +7606,7 @@ fn typeOf( // typeof_scope unstacked now, can add new instructions to gz try gz.instructions.append(gpa, typeof_inst); - return rvalue(gz, rl, indexToRef(typeof_inst), node); + return rvalue(gz, ri, indexToRef(typeof_inst), node); } const payload_size: u32 = std.meta.fields(Zir.Inst.TypeOfPeer).len; const payload_index = try reserveExtra(astgen, payload_size + args.len); @@ -7450,7 +7618,7 @@ fn typeOf( typeof_scope.force_comptime = false; for (args) |arg, i| { - const param_ref = try reachableExpr(&typeof_scope, &typeof_scope.base, .none, arg, node); + const param_ref = try reachableExpr(&typeof_scope, &typeof_scope.base, .{ .rl = .none }, arg, node); astgen.extra.items[args_index + i] = @enumToInt(param_ref); } _ = try typeof_scope.addBreak(.break_inline, refToIndex(typeof_inst).?, .void_value); @@ -7466,13 +7634,13 @@ fn typeOf( astgen.appendBodyWithFixups(body); typeof_scope.unstack(); - return rvalue(gz, rl, typeof_inst, node); + return rvalue(gz, ri, typeof_inst, node); } fn builtinCall( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, params: []const Ast.Node.Index, ) InnerError!Zir.Inst.Ref { @@ -7524,7 +7692,7 @@ fn 
builtinCall( if (!gop.found_existing) { gop.value_ptr.* = str_lit_token; } - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .compile_log => { const payload_index = try addExtra(gz.astgen, Zir.Inst.NodeMultiOp{ @@ -7532,32 +7700,32 @@ fn builtinCall( }); var extra_index = try reserveExtra(gz.astgen, params.len); for (params) |param| { - const param_ref = try expr(gz, scope, .none, param); + const param_ref = try expr(gz, scope, .{ .rl = .none }, param); astgen.extra.items[extra_index] = @enumToInt(param_ref); extra_index += 1; } const result = try gz.addExtendedMultiOpPayloadIndex(.compile_log, payload_index, params.len); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .field => { - if (rl == .ref) { + if (ri.rl == .ref) { return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{ - .lhs = try expr(gz, scope, .ref, params[0]), - .field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]), + .lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]), + .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]), }); } const result = try gz.addPlNode(.field_val_named, node, Zir.Inst.FieldNamed{ - .lhs = try expr(gz, scope, .none, params[0]), - .field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]), + .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]), + .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]), }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, // zig fmt: off - .as => return as( gz, scope, rl, node, params[0], params[1]), - .bit_cast => return bitCast( gz, scope, rl, node, params[0], params[1]), - .TypeOf => return typeOf( gz, scope, rl, node, params), - .union_init => return unionInit(gz, scope, rl, node, params), + .as => return as( gz, scope, ri, node, params[0], params[1]), + .bit_cast => return bitCast( gz, 
scope, ri, node, params[0], params[1]), + .TypeOf => return typeOf( gz, scope, ri, node, params), + .union_init => return unionInit(gz, scope, ri, node, params), .c_import => return cImport( gz, scope, node, params[0]), // zig fmt: on @@ -7582,9 +7750,9 @@ fn builtinCall( local_val.used = ident_token; _ = try gz.addPlNode(.export_value, node, Zir.Inst.ExportValue{ .operand = local_val.inst, - .options = try comptimeExpr(gz, scope, .{ .coerced_ty = .export_options_type }, params[1]), + .options = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .export_options_type } }, params[1]), }); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); } s = local_val.parent; }, @@ -7597,9 +7765,9 @@ fn builtinCall( const loaded = try gz.addUnNode(.load, local_ptr.ptr, node); _ = try gz.addPlNode(.export_value, node, Zir.Inst.ExportValue{ .operand = loaded, - .options = try comptimeExpr(gz, scope, .{ .coerced_ty = .export_options_type }, params[1]), + .options = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .export_options_type } }, params[1]), }); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); } s = local_ptr.parent; }, @@ -7631,47 +7799,47 @@ fn builtinCall( }, else => return astgen.failNode(params[0], "symbol to export must identify a declaration", .{}), } - const options = try comptimeExpr(gz, scope, .{ .ty = .export_options_type }, params[1]); + const options = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .export_options_type } }, params[1]); _ = try gz.addPlNode(.@"export", node, Zir.Inst.Export{ .namespace = namespace, .decl_name = decl_name, .options = options, }); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); }, .@"extern" => { const type_inst = try typeExpr(gz, scope, params[0]); - const options = try comptimeExpr(gz, scope, .{ .ty = .extern_options_type }, params[1]); + const options = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = 
.extern_options_type } }, params[1]); const result = try gz.addExtendedPayload(.builtin_extern, Zir.Inst.BinNode{ .node = gz.nodeIndexToRelative(node), .lhs = type_inst, .rhs = options, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .fence => { - const order = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[0]); + const order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[0]); const result = try gz.addExtendedPayload(.fence, Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(node), .operand = order, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .set_float_mode => { - const order = try expr(gz, scope, .{ .coerced_ty = .float_mode_type }, params[0]); + const order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .float_mode_type } }, params[0]); const result = try gz.addExtendedPayload(.set_float_mode, Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(node), .operand = order, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .set_align_stack => { - const order = try expr(gz, scope, align_rl, params[0]); + const order = try expr(gz, scope, align_ri, params[0]); const result = try gz.addExtendedPayload(.set_align_stack, Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(node), .operand = order, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .src => { @@ -7683,62 +7851,62 @@ fn builtinCall( .line = astgen.source_line, .column = astgen.source_column, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, // zig fmt: off - .This => return rvalue(gz, rl, try gz.addNodeExtended(.this, node), node), - .return_address => return rvalue(gz, rl, try gz.addNodeExtended(.ret_addr, node), node), - .error_return_trace => return rvalue(gz, rl, try gz.addNodeExtended(.error_return_trace, node), node), - .frame => return rvalue(gz, rl, try gz.addNodeExtended(.frame, 
node), node), - .frame_address => return rvalue(gz, rl, try gz.addNodeExtended(.frame_address, node), node), - .breakpoint => return rvalue(gz, rl, try gz.addNodeExtended(.breakpoint, node), node), - - .type_info => return simpleUnOpType(gz, scope, rl, node, params[0], .type_info), - .size_of => return simpleUnOpType(gz, scope, rl, node, params[0], .size_of), - .bit_size_of => return simpleUnOpType(gz, scope, rl, node, params[0], .bit_size_of), - .align_of => return simpleUnOpType(gz, scope, rl, node, params[0], .align_of), - - .ptr_to_int => return simpleUnOp(gz, scope, rl, node, .none, params[0], .ptr_to_int), - .compile_error => return simpleUnOp(gz, scope, rl, node, .{ .ty = .const_slice_u8_type }, params[0], .compile_error), - .set_eval_branch_quota => return simpleUnOp(gz, scope, rl, node, .{ .coerced_ty = .u32_type }, params[0], .set_eval_branch_quota), - .enum_to_int => return simpleUnOp(gz, scope, rl, node, .none, params[0], .enum_to_int), - .bool_to_int => return simpleUnOp(gz, scope, rl, node, bool_rl, params[0], .bool_to_int), - .embed_file => return simpleUnOp(gz, scope, rl, node, .{ .ty = .const_slice_u8_type }, params[0], .embed_file), - .error_name => return simpleUnOp(gz, scope, rl, node, .{ .ty = .anyerror_type }, params[0], .error_name), - .set_cold => return simpleUnOp(gz, scope, rl, node, bool_rl, params[0], .set_cold), - .set_runtime_safety => return simpleUnOp(gz, scope, rl, node, bool_rl, params[0], .set_runtime_safety), - .sqrt => return simpleUnOp(gz, scope, rl, node, .none, params[0], .sqrt), - .sin => return simpleUnOp(gz, scope, rl, node, .none, params[0], .sin), - .cos => return simpleUnOp(gz, scope, rl, node, .none, params[0], .cos), - .tan => return simpleUnOp(gz, scope, rl, node, .none, params[0], .tan), - .exp => return simpleUnOp(gz, scope, rl, node, .none, params[0], .exp), - .exp2 => return simpleUnOp(gz, scope, rl, node, .none, params[0], .exp2), - .log => return simpleUnOp(gz, scope, rl, node, .none, params[0], .log), - .log2 
=> return simpleUnOp(gz, scope, rl, node, .none, params[0], .log2), - .log10 => return simpleUnOp(gz, scope, rl, node, .none, params[0], .log10), - .fabs => return simpleUnOp(gz, scope, rl, node, .none, params[0], .fabs), - .floor => return simpleUnOp(gz, scope, rl, node, .none, params[0], .floor), - .ceil => return simpleUnOp(gz, scope, rl, node, .none, params[0], .ceil), - .trunc => return simpleUnOp(gz, scope, rl, node, .none, params[0], .trunc), - .round => return simpleUnOp(gz, scope, rl, node, .none, params[0], .round), - .tag_name => return simpleUnOp(gz, scope, rl, node, .none, params[0], .tag_name), - .type_name => return simpleUnOp(gz, scope, rl, node, .none, params[0], .type_name), - .Frame => return simpleUnOp(gz, scope, rl, node, .none, params[0], .frame_type), - .frame_size => return simpleUnOp(gz, scope, rl, node, .none, params[0], .frame_size), - - .float_to_int => return typeCast(gz, scope, rl, node, params[0], params[1], .float_to_int), - .int_to_float => return typeCast(gz, scope, rl, node, params[0], params[1], .int_to_float), - .int_to_ptr => return typeCast(gz, scope, rl, node, params[0], params[1], .int_to_ptr), - .int_to_enum => return typeCast(gz, scope, rl, node, params[0], params[1], .int_to_enum), - .float_cast => return typeCast(gz, scope, rl, node, params[0], params[1], .float_cast), - .int_cast => return typeCast(gz, scope, rl, node, params[0], params[1], .int_cast), - .ptr_cast => return typeCast(gz, scope, rl, node, params[0], params[1], .ptr_cast), - .truncate => return typeCast(gz, scope, rl, node, params[0], params[1], .truncate), + .This => return rvalue(gz, ri, try gz.addNodeExtended(.this, node), node), + .return_address => return rvalue(gz, ri, try gz.addNodeExtended(.ret_addr, node), node), + .error_return_trace => return rvalue(gz, ri, try gz.addNodeExtended(.error_return_trace, node), node), + .frame => return rvalue(gz, ri, try gz.addNodeExtended(.frame, node), node), + .frame_address => return rvalue(gz, ri, try 
gz.addNodeExtended(.frame_address, node), node), + .breakpoint => return rvalue(gz, ri, try gz.addNodeExtended(.breakpoint, node), node), + + .type_info => return simpleUnOpType(gz, scope, ri, node, params[0], .type_info), + .size_of => return simpleUnOpType(gz, scope, ri, node, params[0], .size_of), + .bit_size_of => return simpleUnOpType(gz, scope, ri, node, params[0], .bit_size_of), + .align_of => return simpleUnOpType(gz, scope, ri, node, params[0], .align_of), + + .ptr_to_int => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .ptr_to_int), + .compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .compile_error), + .set_eval_branch_quota => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0], .set_eval_branch_quota), + .enum_to_int => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .enum_to_int), + .bool_to_int => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .bool_to_int), + .embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .embed_file), + .error_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .anyerror_type } }, params[0], .error_name), + .set_cold => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .set_cold), + .set_runtime_safety => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .set_runtime_safety), + .sqrt => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sqrt), + .sin => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sin), + .cos => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .cos), + .tan => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .tan), + .exp => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .exp), + .exp2 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .exp2), + .log => 
return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log), + .log2 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log2), + .log10 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log10), + .fabs => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .fabs), + .floor => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .floor), + .ceil => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .ceil), + .trunc => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .trunc), + .round => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .round), + .tag_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .tag_name), + .type_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .type_name), + .Frame => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .frame_type), + .frame_size => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .frame_size), + + .float_to_int => return typeCast(gz, scope, ri, node, params[0], params[1], .float_to_int), + .int_to_float => return typeCast(gz, scope, ri, node, params[0], params[1], .int_to_float), + .int_to_ptr => return typeCast(gz, scope, ri, node, params[0], params[1], .int_to_ptr), + .int_to_enum => return typeCast(gz, scope, ri, node, params[0], params[1], .int_to_enum), + .float_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .float_cast), + .int_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .int_cast), + .ptr_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .ptr_cast), + .truncate => return typeCast(gz, scope, ri, node, params[0], params[1], .truncate), // zig fmt: on .Type => { - const operand = try expr(gz, scope, .{ .coerced_ty = .type_info_type }, params[0]); + const operand = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .type_info_type } }, 
params[0]); const gpa = gz.astgen.gpa; @@ -7760,219 +7928,219 @@ fn builtinCall( }); gz.instructions.appendAssumeCapacity(new_index); const result = indexToRef(new_index); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .panic => { try emitDbgNode(gz, node); - return simpleUnOp(gz, scope, rl, node, .{ .ty = .const_slice_u8_type }, params[0], if (gz.force_comptime) .panic_comptime else .panic); + return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], if (gz.force_comptime) .panic_comptime else .panic); }, .error_to_int => { - const operand = try expr(gz, scope, .none, params[0]); + const operand = try expr(gz, scope, .{ .rl = .none }, params[0]); const result = try gz.addExtendedPayload(.error_to_int, Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(node), .operand = operand, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .int_to_error => { - const operand = try expr(gz, scope, .{ .coerced_ty = .u16_type }, params[0]); + const operand = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u16_type } }, params[0]); const result = try gz.addExtendedPayload(.int_to_error, Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(node), .operand = operand, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .align_cast => { - const dest_align = try comptimeExpr(gz, scope, align_rl, params[0]); - const rhs = try expr(gz, scope, .none, params[1]); + const dest_align = try comptimeExpr(gz, scope, align_ri, params[0]); + const rhs = try expr(gz, scope, .{ .rl = .none }, params[1]); const result = try gz.addPlNode(.align_cast, node, Zir.Inst.Bin{ .lhs = dest_align, .rhs = rhs, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .err_set_cast => { const result = try gz.addExtendedPayload(.err_set_cast, Zir.Inst.BinNode{ .lhs = try typeExpr(gz, scope, params[0]), - .rhs = try expr(gz, scope, .none, params[1]), + .rhs = 
try expr(gz, scope, .{ .rl = .none }, params[1]), .node = gz.nodeIndexToRelative(node), }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .addrspace_cast => { const result = try gz.addExtendedPayload(.addrspace_cast, Zir.Inst.BinNode{ - .lhs = try comptimeExpr(gz, scope, .{ .ty = .address_space_type }, params[0]), - .rhs = try expr(gz, scope, .none, params[1]), + .lhs = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .address_space_type } }, params[0]), + .rhs = try expr(gz, scope, .{ .rl = .none }, params[1]), .node = gz.nodeIndexToRelative(node), }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, // zig fmt: off - .has_decl => return hasDeclOrField(gz, scope, rl, node, params[0], params[1], .has_decl), - .has_field => return hasDeclOrField(gz, scope, rl, node, params[0], params[1], .has_field), + .has_decl => return hasDeclOrField(gz, scope, ri, node, params[0], params[1], .has_decl), + .has_field => return hasDeclOrField(gz, scope, ri, node, params[0], params[1], .has_field), - .clz => return bitBuiltin(gz, scope, rl, node, params[0], .clz), - .ctz => return bitBuiltin(gz, scope, rl, node, params[0], .ctz), - .pop_count => return bitBuiltin(gz, scope, rl, node, params[0], .pop_count), - .byte_swap => return bitBuiltin(gz, scope, rl, node, params[0], .byte_swap), - .bit_reverse => return bitBuiltin(gz, scope, rl, node, params[0], .bit_reverse), + .clz => return bitBuiltin(gz, scope, ri, node, params[0], .clz), + .ctz => return bitBuiltin(gz, scope, ri, node, params[0], .ctz), + .pop_count => return bitBuiltin(gz, scope, ri, node, params[0], .pop_count), + .byte_swap => return bitBuiltin(gz, scope, ri, node, params[0], .byte_swap), + .bit_reverse => return bitBuiltin(gz, scope, ri, node, params[0], .bit_reverse), - .div_exact => return divBuiltin(gz, scope, rl, node, params[0], params[1], .div_exact), - .div_floor => return divBuiltin(gz, scope, rl, node, params[0], params[1], .div_floor), - 
.div_trunc => return divBuiltin(gz, scope, rl, node, params[0], params[1], .div_trunc), - .mod => return divBuiltin(gz, scope, rl, node, params[0], params[1], .mod), - .rem => return divBuiltin(gz, scope, rl, node, params[0], params[1], .rem), + .div_exact => return divBuiltin(gz, scope, ri, node, params[0], params[1], .div_exact), + .div_floor => return divBuiltin(gz, scope, ri, node, params[0], params[1], .div_floor), + .div_trunc => return divBuiltin(gz, scope, ri, node, params[0], params[1], .div_trunc), + .mod => return divBuiltin(gz, scope, ri, node, params[0], params[1], .mod), + .rem => return divBuiltin(gz, scope, ri, node, params[0], params[1], .rem), - .shl_exact => return shiftOp(gz, scope, rl, node, params[0], params[1], .shl_exact), - .shr_exact => return shiftOp(gz, scope, rl, node, params[0], params[1], .shr_exact), + .shl_exact => return shiftOp(gz, scope, ri, node, params[0], params[1], .shl_exact), + .shr_exact => return shiftOp(gz, scope, ri, node, params[0], params[1], .shr_exact), - .bit_offset_of => return offsetOf(gz, scope, rl, node, params[0], params[1], .bit_offset_of), - .offset_of => return offsetOf(gz, scope, rl, node, params[0], params[1], .offset_of), + .bit_offset_of => return offsetOf(gz, scope, ri, node, params[0], params[1], .bit_offset_of), + .offset_of => return offsetOf(gz, scope, ri, node, params[0], params[1], .offset_of), - .c_undef => return simpleCBuiltin(gz, scope, rl, node, params[0], .c_undef), - .c_include => return simpleCBuiltin(gz, scope, rl, node, params[0], .c_include), + .c_undef => return simpleCBuiltin(gz, scope, ri, node, params[0], .c_undef), + .c_include => return simpleCBuiltin(gz, scope, ri, node, params[0], .c_include), - .cmpxchg_strong => return cmpxchg(gz, scope, rl, node, params, 1), - .cmpxchg_weak => return cmpxchg(gz, scope, rl, node, params, 0), + .cmpxchg_strong => return cmpxchg(gz, scope, ri, node, params, 1), + .cmpxchg_weak => return cmpxchg(gz, scope, ri, node, params, 0), // zig fmt: on 
.wasm_memory_size => { - const operand = try comptimeExpr(gz, scope, .{ .coerced_ty = .u32_type }, params[0]); + const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]); const result = try gz.addExtendedPayload(.wasm_memory_size, Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(node), .operand = operand, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .wasm_memory_grow => { - const index_arg = try comptimeExpr(gz, scope, .{ .coerced_ty = .u32_type }, params[0]); - const delta_arg = try expr(gz, scope, .{ .coerced_ty = .u32_type }, params[1]); + const index_arg = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]); + const delta_arg = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[1]); const result = try gz.addExtendedPayload(.wasm_memory_grow, Zir.Inst.BinNode{ .node = gz.nodeIndexToRelative(node), .lhs = index_arg, .rhs = delta_arg, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .c_define => { if (!gz.c_import) return gz.astgen.failNode(node, "C define valid only inside C import block", .{}); - const name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[0]); - const value = try comptimeExpr(gz, scope, .none, params[1]); + const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0]); + const value = try comptimeExpr(gz, scope, .{ .rl = .none }, params[1]); const result = try gz.addExtendedPayload(.c_define, Zir.Inst.BinNode{ .node = gz.nodeIndexToRelative(node), .lhs = name, .rhs = value, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .splat => { - const len = try expr(gz, scope, .{ .coerced_ty = .u32_type }, params[0]); - const scalar = try expr(gz, scope, .none, params[1]); + const len = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]); + const scalar = try expr(gz, scope, .{ .rl = .none 
}, params[1]); const result = try gz.addPlNode(.splat, node, Zir.Inst.Bin{ .lhs = len, .rhs = scalar, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .reduce => { - const op = try expr(gz, scope, .{ .ty = .reduce_op_type }, params[0]); - const scalar = try expr(gz, scope, .none, params[1]); + const op = try expr(gz, scope, .{ .rl = .{ .ty = .reduce_op_type } }, params[0]); + const scalar = try expr(gz, scope, .{ .rl = .none }, params[1]); const result = try gz.addPlNode(.reduce, node, Zir.Inst.Bin{ .lhs = op, .rhs = scalar, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .max => { - const a = try expr(gz, scope, .none, params[0]); - const b = try expr(gz, scope, .none, params[1]); + const a = try expr(gz, scope, .{ .rl = .none }, params[0]); + const b = try expr(gz, scope, .{ .rl = .none }, params[1]); const result = try gz.addPlNode(.max, node, Zir.Inst.Bin{ .lhs = a, .rhs = b, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .min => { - const a = try expr(gz, scope, .none, params[0]); - const b = try expr(gz, scope, .none, params[1]); + const a = try expr(gz, scope, .{ .rl = .none }, params[0]); + const b = try expr(gz, scope, .{ .rl = .none }, params[1]); const result = try gz.addPlNode(.min, node, Zir.Inst.Bin{ .lhs = a, .rhs = b, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, - .add_with_overflow => return overflowArithmetic(gz, scope, rl, node, params, .add_with_overflow), - .sub_with_overflow => return overflowArithmetic(gz, scope, rl, node, params, .sub_with_overflow), - .mul_with_overflow => return overflowArithmetic(gz, scope, rl, node, params, .mul_with_overflow), + .add_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .add_with_overflow), + .sub_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .sub_with_overflow), + .mul_with_overflow => return 
overflowArithmetic(gz, scope, ri, node, params, .mul_with_overflow), .shl_with_overflow => { const int_type = try typeExpr(gz, scope, params[0]); const log2_int_type = try gz.addUnNode(.log2_int_type, int_type, params[0]); const ptr_type = try gz.addUnNode(.overflow_arithmetic_ptr, int_type, params[0]); - const lhs = try expr(gz, scope, .{ .ty = int_type }, params[1]); - const rhs = try expr(gz, scope, .{ .ty = log2_int_type }, params[2]); - const ptr = try expr(gz, scope, .{ .ty = ptr_type }, params[3]); + const lhs = try expr(gz, scope, .{ .rl = .{ .ty = int_type } }, params[1]); + const rhs = try expr(gz, scope, .{ .rl = .{ .ty = log2_int_type } }, params[2]); + const ptr = try expr(gz, scope, .{ .rl = .{ .ty = ptr_type } }, params[3]); const result = try gz.addExtendedPayload(.shl_with_overflow, Zir.Inst.OverflowArithmetic{ .node = gz.nodeIndexToRelative(node), .lhs = lhs, .rhs = rhs, .ptr = ptr, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .atomic_load => { const result = try gz.addPlNode(.atomic_load, node, Zir.Inst.AtomicLoad{ // zig fmt: off - .elem_type = try typeExpr(gz, scope, params[0]), - .ptr = try expr (gz, scope, .none, params[1]), - .ordering = try expr (gz, scope, .{ .coerced_ty = .atomic_order_type }, params[2]), + .elem_type = try typeExpr(gz, scope, params[0]), + .ptr = try expr (gz, scope, .{ .rl = .none }, params[1]), + .ordering = try expr (gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[2]), // zig fmt: on }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .atomic_rmw => { const int_type = try typeExpr(gz, scope, params[0]); const result = try gz.addPlNode(.atomic_rmw, node, Zir.Inst.AtomicRmw{ // zig fmt: off - .ptr = try expr(gz, scope, .none, params[1]), - .operation = try expr(gz, scope, .{ .coerced_ty = .atomic_rmw_op_type }, params[2]), - .operand = try expr(gz, scope, .{ .ty = int_type }, params[3]), - .ordering = try expr(gz, scope, .{ 
.coerced_ty = .atomic_order_type }, params[4]), + .ptr = try expr(gz, scope, .{ .rl = .none }, params[1]), + .operation = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_rmw_op_type } }, params[2]), + .operand = try expr(gz, scope, .{ .rl = .{ .ty = int_type } }, params[3]), + .ordering = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[4]), // zig fmt: on }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .atomic_store => { const int_type = try typeExpr(gz, scope, params[0]); const result = try gz.addPlNode(.atomic_store, node, Zir.Inst.AtomicStore{ // zig fmt: off - .ptr = try expr(gz, scope, .none, params[1]), - .operand = try expr(gz, scope, .{ .ty = int_type }, params[2]), - .ordering = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[3]), + .ptr = try expr(gz, scope, .{ .rl = .none }, params[1]), + .operand = try expr(gz, scope, .{ .rl = .{ .ty = int_type } }, params[2]), + .ordering = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[3]), // zig fmt: on }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .mul_add => { const float_type = try typeExpr(gz, scope, params[0]); - const mulend1 = try expr(gz, scope, .{ .coerced_ty = float_type }, params[1]); - const mulend2 = try expr(gz, scope, .{ .coerced_ty = float_type }, params[2]); - const addend = try expr(gz, scope, .{ .ty = float_type }, params[3]); + const mulend1 = try expr(gz, scope, .{ .rl = .{ .coerced_ty = float_type } }, params[1]); + const mulend2 = try expr(gz, scope, .{ .rl = .{ .coerced_ty = float_type } }, params[2]); + const addend = try expr(gz, scope, .{ .rl = .{ .ty = float_type } }, params[3]); const result = try gz.addPlNode(.mul_add, node, Zir.Inst.MulAdd{ .mulend1 = mulend1, .mulend2 = mulend2, .addend = addend, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .call => { - const options = try comptimeExpr(gz, 
scope, .{ .ty = .call_options_type }, params[0]); + const options = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .call_options_type } }, params[0]); const callee = try calleeExpr(gz, scope, params[1]); - const args = try expr(gz, scope, .none, params[2]); + const args = try expr(gz, scope, .{ .rl = .none }, params[2]); const result = try gz.addPlNode(.builtin_call, node, Zir.Inst.BuiltinCall{ .options = options, .callee = callee, @@ -7983,115 +8151,115 @@ fn builtinCall( .ensure_result_used = false, }, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .field_parent_ptr => { const parent_type = try typeExpr(gz, scope, params[0]); - const field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]); const result = try gz.addPlNode(.field_parent_ptr, node, Zir.Inst.FieldParentPtr{ .parent_type = parent_type, .field_name = field_name, - .field_ptr = try expr(gz, scope, .none, params[2]), + .field_ptr = try expr(gz, scope, .{ .rl = .none }, params[2]), }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .memcpy => { const result = try gz.addPlNode(.memcpy, node, Zir.Inst.Memcpy{ - .dest = try expr(gz, scope, .{ .coerced_ty = .manyptr_u8_type }, params[0]), - .source = try expr(gz, scope, .{ .coerced_ty = .manyptr_const_u8_type }, params[1]), - .byte_count = try expr(gz, scope, .{ .coerced_ty = .usize_type }, params[2]), + .dest = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_u8_type } }, params[0]), + .source = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_const_u8_type } }, params[1]), + .byte_count = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, params[2]), }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .memset => { const result = try gz.addPlNode(.memset, node, Zir.Inst.Memset{ - .dest = try expr(gz, 
scope, .{ .coerced_ty = .manyptr_u8_type }, params[0]), - .byte = try expr(gz, scope, .{ .coerced_ty = .u8_type }, params[1]), - .byte_count = try expr(gz, scope, .{ .coerced_ty = .usize_type }, params[2]), + .dest = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_u8_type } }, params[0]), + .byte = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u8_type } }, params[1]), + .byte_count = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, params[2]), }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .shuffle => { const result = try gz.addPlNode(.shuffle, node, Zir.Inst.Shuffle{ .elem_type = try typeExpr(gz, scope, params[0]), - .a = try expr(gz, scope, .none, params[1]), - .b = try expr(gz, scope, .none, params[2]), - .mask = try comptimeExpr(gz, scope, .none, params[3]), + .a = try expr(gz, scope, .{ .rl = .none }, params[1]), + .b = try expr(gz, scope, .{ .rl = .none }, params[2]), + .mask = try comptimeExpr(gz, scope, .{ .rl = .none }, params[3]), }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .select => { const result = try gz.addExtendedPayload(.select, Zir.Inst.Select{ .node = gz.nodeIndexToRelative(node), .elem_type = try typeExpr(gz, scope, params[0]), - .pred = try expr(gz, scope, .none, params[1]), - .a = try expr(gz, scope, .none, params[2]), - .b = try expr(gz, scope, .none, params[3]), + .pred = try expr(gz, scope, .{ .rl = .none }, params[1]), + .a = try expr(gz, scope, .{ .rl = .none }, params[2]), + .b = try expr(gz, scope, .{ .rl = .none }, params[3]), }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .async_call => { const result = try gz.addExtendedPayload(.builtin_async_call, Zir.Inst.AsyncCall{ .node = gz.nodeIndexToRelative(node), - .frame_buffer = try expr(gz, scope, .none, params[0]), - .result_ptr = try expr(gz, scope, .none, params[1]), - .fn_ptr = try expr(gz, scope, .none, params[2]), - .args = try expr(gz, scope, 
.none, params[3]), + .frame_buffer = try expr(gz, scope, .{ .rl = .none }, params[0]), + .result_ptr = try expr(gz, scope, .{ .rl = .none }, params[1]), + .fn_ptr = try expr(gz, scope, .{ .rl = .none }, params[2]), + .args = try expr(gz, scope, .{ .rl = .none }, params[3]), }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .Vector => { const result = try gz.addPlNode(.vector_type, node, Zir.Inst.Bin{ - .lhs = try comptimeExpr(gz, scope, .{ .coerced_ty = .u32_type }, params[0]), + .lhs = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]), .rhs = try typeExpr(gz, scope, params[1]), }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, .prefetch => { - const ptr = try expr(gz, scope, .none, params[0]); - const options = try comptimeExpr(gz, scope, .{ .ty = .prefetch_options_type }, params[1]); + const ptr = try expr(gz, scope, .{ .rl = .none }, params[0]); + const options = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .prefetch_options_type } }, params[1]); const result = try gz.addExtendedPayload(.prefetch, Zir.Inst.BinNode{ .node = gz.nodeIndexToRelative(node), .lhs = ptr, .rhs = options, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); }, } } fn simpleNoOpVoid( gz: *GenZir, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { _ = try gz.addNode(tag, node); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); } fn hasDeclOrField( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, lhs_node: Ast.Node.Index, rhs_node: Ast.Node.Index, tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const container_type = try typeExpr(gz, scope, lhs_node); - const name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, rhs_node); + const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, rhs_node); const 
result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = container_type, .rhs = name, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } fn typeCast( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, lhs_node: Ast.Node.Index, rhs_node: Ast.Node.Index, @@ -8099,42 +8267,42 @@ fn typeCast( ) InnerError!Zir.Inst.Ref { const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = try typeExpr(gz, scope, lhs_node), - .rhs = try expr(gz, scope, .none, rhs_node), + .rhs = try expr(gz, scope, .{ .rl = .none }, rhs_node), }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } fn simpleUnOpType( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, operand_node: Ast.Node.Index, tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const operand = try typeExpr(gz, scope, operand_node); const result = try gz.addUnNode(tag, operand, node); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } fn simpleUnOp( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, - operand_rl: ResultLoc, + operand_ri: ResultInfo, operand_node: Ast.Node.Index, tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { - const operand = try expr(gz, scope, operand_rl, operand_node); + const operand = try expr(gz, scope, operand_ri, operand_node); const result = try gz.addUnNode(tag, operand, node); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } fn negation( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; @@ -8146,18 +8314,18 @@ fn negation( // its negativity rather than having it go through comptime subtraction. 
const operand_node = node_datas[node].lhs; if (node_tags[operand_node] == .number_literal) { - return numberLiteral(gz, rl, operand_node, node, .negative); + return numberLiteral(gz, ri, operand_node, node, .negative); } - const operand = try expr(gz, scope, .none, operand_node); + const operand = try expr(gz, scope, .{ .rl = .none }, operand_node); const result = try gz.addUnNode(.negate, operand, node); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } fn cmpxchg( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, params: []const Ast.Node.Index, small: u16, @@ -8166,98 +8334,98 @@ fn cmpxchg( const result = try gz.addExtendedPayloadSmall(.cmpxchg, small, Zir.Inst.Cmpxchg{ // zig fmt: off .node = gz.nodeIndexToRelative(node), - .ptr = try expr(gz, scope, .none, params[1]), - .expected_value = try expr(gz, scope, .{ .ty = int_type }, params[2]), - .new_value = try expr(gz, scope, .{ .coerced_ty = int_type }, params[3]), - .success_order = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[4]), - .failure_order = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[5]), + .ptr = try expr(gz, scope, .{ .rl = .none }, params[1]), + .expected_value = try expr(gz, scope, .{ .rl = .{ .ty = int_type } }, params[2]), + .new_value = try expr(gz, scope, .{ .rl = .{ .coerced_ty = int_type } }, params[3]), + .success_order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[4]), + .failure_order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[5]), // zig fmt: on }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } fn bitBuiltin( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, operand_node: Ast.Node.Index, tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { - const operand = try expr(gz, scope, .none, operand_node); + const operand = try expr(gz, scope, .{ .rl = .none 
}, operand_node); const result = try gz.addUnNode(tag, operand, node); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } fn divBuiltin( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, lhs_node: Ast.Node.Index, rhs_node: Ast.Node.Index, tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ - .lhs = try expr(gz, scope, .none, lhs_node), - .rhs = try expr(gz, scope, .none, rhs_node), + .lhs = try expr(gz, scope, .{ .rl = .none }, lhs_node), + .rhs = try expr(gz, scope, .{ .rl = .none }, rhs_node), }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } fn simpleCBuiltin( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, operand_node: Ast.Node.Index, tag: Zir.Inst.Extended, ) InnerError!Zir.Inst.Ref { const name: []const u8 = if (tag == .c_undef) "C undef" else "C include"; if (!gz.c_import) return gz.astgen.failNode(node, "{s} valid only inside C import block", .{name}); - const operand = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, operand_node); + const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, operand_node); _ = try gz.addExtendedPayload(tag, Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(node), .operand = operand, }); - return rvalue(gz, rl, .void_value, node); + return rvalue(gz, ri, .void_value, node); } fn offsetOf( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, lhs_node: Ast.Node.Index, rhs_node: Ast.Node.Index, tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const type_inst = try typeExpr(gz, scope, lhs_node); - const field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, rhs_node); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, rhs_node); const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = type_inst, .rhs = 
field_name, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } fn shiftOp( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, lhs_node: Ast.Node.Index, rhs_node: Ast.Node.Index, tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { - const lhs = try expr(gz, scope, .none, lhs_node); + const lhs = try expr(gz, scope, .{ .rl = .none }, lhs_node); const log2_int_type = try gz.addUnNode(.typeof_log2_int_type, lhs, lhs_node); - const rhs = try expr(gz, scope, .{ .ty_shift_operand = log2_int_type }, rhs_node); + const rhs = try expr(gz, scope, .{ .rl = .{ .ty = log2_int_type }, .ctx = .shift_op }, rhs_node); const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } fn cImport( @@ -8275,7 +8443,7 @@ fn cImport( defer block_scope.unstack(); const block_inst = try gz.makeBlockInst(.c_import, node); - const block_result = try expr(&block_scope, &block_scope.base, .none, body_node); + const block_result = try expr(&block_scope, &block_scope.base, .{ .rl = .none }, body_node); _ = try gz.addUnNode(.ensure_result_used, block_result, node); if (!gz.refIsNoReturn(block_result)) { _ = try block_scope.addBreak(.break_inline, block_inst, .void_value); @@ -8290,29 +8458,29 @@ fn cImport( fn overflowArithmetic( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, params: []const Ast.Node.Index, tag: Zir.Inst.Extended, ) InnerError!Zir.Inst.Ref { const int_type = try typeExpr(gz, scope, params[0]); const ptr_type = try gz.addUnNode(.overflow_arithmetic_ptr, int_type, params[0]); - const lhs = try expr(gz, scope, .{ .ty = int_type }, params[1]); - const rhs = try expr(gz, scope, .{ .ty = int_type }, params[2]); - const ptr = try expr(gz, scope, .{ .ty = ptr_type }, params[3]); + const lhs = try expr(gz, scope, .{ .rl = .{ .ty = int_type } }, params[1]); + const rhs = try expr(gz, scope, .{ 
.rl = .{ .ty = int_type } }, params[2]); + const ptr = try expr(gz, scope, .{ .rl = .{ .ty = ptr_type } }, params[3]); const result = try gz.addExtendedPayload(tag, Zir.Inst.OverflowArithmetic{ .node = gz.nodeIndexToRelative(node), .lhs = lhs, .rhs = rhs, .ptr = ptr, }); - return rvalue(gz, rl, result, node); + return rvalue(gz, ri, result, node); } fn callExpr( gz: *GenZir, scope: *Scope, - rl: ResultLoc, + ri: ResultInfo, node: Ast.Node.Index, call: Ast.full.Call, ) InnerError!Zir.Inst.Ref { @@ -8364,7 +8532,7 @@ fn callExpr( defer arg_block.unstack(); // `call_inst` is reused to provide the param type. - const arg_ref = try expr(&arg_block, &arg_block.base, .{ .coerced_ty = call_inst }, param_node); + const arg_ref = try expr(&arg_block, &arg_block.base, .{ .rl = .{ .coerced_ty = call_inst }, .ctx = .fn_arg }, param_node); _ = try arg_block.addBreak(.break_inline, call_index, arg_ref); const body = arg_block.instructionsSlice(); @@ -8375,9 +8543,18 @@ fn callExpr( scratch_index += 1; } + // If our result location is a try/catch/error-union-if/return, a function argument, + // or an initializer for a `const` variable, the error trace propagates. + // Otherwise, it should always be popped (handled in Sema). 
+ const propagate_error_trace = switch (ri.ctx) { + .error_handling_expr, .@"return", .fn_arg, .const_init => true, + else => false, + }; + const payload_index = try addExtra(astgen, Zir.Inst.Call{ .callee = callee, .flags = .{ + .pop_error_return_trace = !propagate_error_trace, .packed_modifier = @intCast(Zir.Inst.Call.Flags.PackedModifier, @enumToInt(modifier)), .args_len = @intCast(Zir.Inst.Call.Flags.PackedArgsLen, call.ast.params.len), }, @@ -8392,7 +8569,7 @@ fn callExpr( .payload_index = payload_index, } }, }); - return rvalue(gz, rl, call_inst, node); // TODO function call with result location + return rvalue(gz, ri, call_inst, node); // TODO function call with result location } /// calleeExpr generates the function part of a call expression (f in f(x)), or the @@ -8413,7 +8590,7 @@ fn calleeExpr( const tag = tree.nodes.items(.tag)[node]; switch (tag) { - .field_access => return addFieldAccess(.field_call_bind, gz, scope, .ref, node), + .field_access => return addFieldAccess(.field_call_bind, gz, scope, .{ .rl = .ref }, node), .builtin_call_two, .builtin_call_two_comma, @@ -8445,8 +8622,8 @@ fn calleeExpr( // If anything is wrong, fall back to builtinCall. // It will emit any necessary compile errors and notes. 
if (std.mem.eql(u8, builtin_name, "@field") and params.len == 2) { - const lhs = try expr(gz, scope, .ref, params[0]); - const field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]); + const lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]); return gz.addExtendedPayload(.field_call_bind_named, Zir.Inst.FieldNamedNode{ .node = gz.nodeIndexToRelative(node), .lhs = lhs, @@ -8454,9 +8631,9 @@ fn calleeExpr( }); } - return builtinCall(gz, scope, .none, node, params); + return builtinCall(gz, scope, .{ .rl = .none }, node, params); }, - else => return expr(gz, scope, .none, node), + else => return expr(gz, scope, .{ .rl = .none }, node), } } @@ -8738,6 +8915,33 @@ fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index, have_ } } +fn nodeMayAppendToErrorTrace(tree: *const Ast, start_node: Ast.Node.Index) bool { + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + + var node = start_node; + while (true) { + switch (node_tags[node]) { + // These don't have the opportunity to call any runtime functions. + .error_value, + .identifier, + .@"comptime", + => return false, + + // Forward the question to the LHS sub-expression. + .grouped_expression, + .@"try", + .@"nosuspend", + .unwrap_optional, + => node = node_datas[node].lhs, + + // Anything that does not eval to an error is guaranteed to pop any + // additions to the error trace, so it effectively does not append. + else => return nodeMayEvalToError(tree, start_node) != .never, + } + } +} + fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) BuiltinFn.EvalToError { const node_tags = tree.nodes.items(.tag); const node_datas = tree.nodes.items(.data); @@ -9472,7 +9676,7 @@ fn nodeUsesAnonNameStrategy(tree: *const Ast, node: Ast.Node.Index) bool { /// Assumes nothing stacked on `gz`. 
fn rvalue( gz: *GenZir, - rl: ResultLoc, + ri: ResultInfo, raw_result: Zir.Inst.Ref, src_node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { @@ -9487,7 +9691,7 @@ fn rvalue( break :r raw_result; }; if (gz.endsWithNoReturn()) return result; - switch (rl) { + switch (ri.rl) { .none, .coerced_ty => return result, .discard => { // Emit a compile error for discarding error values. @@ -9513,7 +9717,7 @@ fn rvalue( } return indexToRef(gop.value_ptr.*); }, - .ty, .ty_shift_operand => |ty_inst| { + .ty => |ty_inst| { // Quickly eliminate some common, unnecessary type coercion. const as_ty = @as(u64, @enumToInt(Zir.Inst.Ref.type_type)) << 32; const as_comptime_int = @as(u64, @enumToInt(Zir.Inst.Ref.comptime_int_type)) << 32; @@ -9574,7 +9778,7 @@ fn rvalue( => return result, // type of result is already correct // Need an explicit type coercion instruction. - else => return gz.addPlNode(rl.zirTag(), src_node, Zir.Inst.As{ + else => return gz.addPlNode(ri.zirTag(), src_node, Zir.Inst.As{ .dest_type = ty_inst, .operand = result, }), @@ -10268,8 +10472,8 @@ const GenZir = struct { label: ?Label = null, break_block: Zir.Inst.Index = 0, continue_block: Zir.Inst.Index = 0, - /// Only valid when setBreakResultLoc is called. - break_result_loc: AstGen.ResultLoc = undefined, + /// Only valid when setBreakResultInfo is called. + break_result_info: AstGen.ResultInfo = undefined, /// When a block has a pointer result location, here it is. rl_ptr: Zir.Inst.Ref = .none, /// When a block has a type result location, here it is. 
@@ -10371,7 +10575,7 @@ const GenZir = struct { fn finishCoercion( as_scope: *GenZir, parent_gz: *GenZir, - rl: ResultLoc, + ri: ResultInfo, src_node: Ast.Node.Index, result: Zir.Inst.Ref, dest_type: Zir.Inst.Ref, @@ -10397,7 +10601,7 @@ const GenZir = struct { as_scope.instructions_top = GenZir.unstacked_top; // as_scope now unstacked, can add new instructions to parent_gz const casted_result = try parent_gz.addBin(.as, dest_type, result); - return rvalue(parent_gz, rl, casted_result, src_node); + return rvalue(parent_gz, ri, casted_result, src_node); } else { // implicitly move all as_scope instructions to parent_gz as_scope.instructions_top = GenZir.unstacked_top; @@ -10440,7 +10644,7 @@ const GenZir = struct { return gz.astgen.tree.firstToken(gz.decl_node_index); } - fn setBreakResultLoc(gz: *GenZir, parent_rl: AstGen.ResultLoc) void { + fn setBreakResultInfo(gz: *GenZir, parent_ri: AstGen.ResultInfo) void { // Depending on whether the result location is a pointer or value, different // ZIR needs to be generated. In the former case we rely on storing to the // pointer to communicate the result, and use breakvoid; in the latter case @@ -10449,32 +10653,32 @@ const GenZir = struct { // the scenario where the result location is not consumed. In this case // we emit ZIR for the block break instructions to have the result values, // and then rvalue() on that to pass the value to the result location. 
- switch (parent_rl) { - .ty, .ty_shift_operand, .coerced_ty => |ty_inst| { + switch (parent_ri.rl) { + .ty, .coerced_ty => |ty_inst| { gz.rl_ty_inst = ty_inst; - gz.break_result_loc = parent_rl; + gz.break_result_info = parent_ri; }, .discard, .none, .ref => { gz.rl_ty_inst = .none; - gz.break_result_loc = parent_rl; + gz.break_result_info = parent_ri; }, .ptr => |ptr_res| { gz.rl_ty_inst = .none; - gz.break_result_loc = .{ .ptr = .{ .inst = ptr_res.inst } }; + gz.break_result_info = .{ .rl = .{ .ptr = .{ .inst = ptr_res.inst } } }; }, .inferred_ptr => |ptr| { gz.rl_ty_inst = .none; gz.rl_ptr = ptr; - gz.break_result_loc = .{ .block_ptr = gz }; + gz.break_result_info = .{ .rl = .{ .block_ptr = gz }, .ctx = parent_ri.ctx }; }, .block_ptr => |parent_block_scope| { gz.rl_ty_inst = parent_block_scope.rl_ty_inst; gz.rl_ptr = parent_block_scope.rl_ptr; - gz.break_result_loc = .{ .block_ptr = gz }; + gz.break_result_info = .{ .rl = .{ .block_ptr = gz }, .ctx = parent_ri.ctx }; }, } } @@ -11157,6 +11361,46 @@ const GenZir = struct { }); } + fn addSaveErrRetIndex( + gz: *GenZir, + cond: union(enum) { + always: void, + if_of_error_type: Zir.Inst.Ref, + }, + ) !Zir.Inst.Index { + return gz.addAsIndex(.{ + .tag = .save_err_ret_index, + .data = .{ .save_err_ret_index = .{ + .operand = if (cond == .if_of_error_type) cond.if_of_error_type else .none, + } }, + }); + } + + const BranchTarget = union(enum) { + ret, + block: Zir.Inst.Index, + }; + + fn addRestoreErrRetIndex( + gz: *GenZir, + bt: BranchTarget, + cond: union(enum) { + always: void, + if_non_error: Zir.Inst.Ref, + }, + ) !Zir.Inst.Index { + return gz.addAsIndex(.{ + .tag = .restore_err_ret_index, + .data = .{ .restore_err_ret_index = .{ + .block = switch (bt) { + .ret => .none, + .block => |b| Zir.indexToRef(b), + }, + .operand = if (cond == .if_non_error) cond.if_non_error else .none, + } }, + }); + } + fn addBreak( gz: *GenZir, tag: Zir.Inst.Tag, @@ -11624,10 +11868,10 @@ const GenZir = struct { return new_index; } - 
fn addRet(gz: *GenZir, rl: ResultLoc, operand: Zir.Inst.Ref, node: Ast.Node.Index) !void { - switch (rl) { + fn addRet(gz: *GenZir, ri: ResultInfo, operand: Zir.Inst.Ref, node: Ast.Node.Index) !void { + switch (ri.rl) { .ptr => |ptr_res| _ = try gz.addUnNode(.ret_load, ptr_res.inst, node), - .ty, .ty_shift_operand => _ = try gz.addUnNode(.ret_node, operand, node), + .ty => _ = try gz.addUnNode(.ret_node, operand, node), else => unreachable, } } diff --git a/src/Liveness.zig b/src/Liveness.zig index 54a5041e8b06..ff8afb83070a 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -228,6 +228,7 @@ pub fn categorizeOperand( .frame_addr, .wasm_memory_size, .err_return_trace, + .save_err_return_trace_index, => return .none, .fence => return .write, @@ -805,6 +806,7 @@ fn analyzeInst( .frame_addr, .wasm_memory_size, .err_return_trace, + .save_err_return_trace_index, => return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none }), .not, diff --git a/src/Module.zig b/src/Module.zig index 8483c41ae840..4f150b01487f 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5633,6 +5633,12 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { const last_arg_index = inner_block.instructions.items.len; + // Save the error trace as our first action in the function. + // If this is unnecessary after all, Liveness will clean it up for us. 
+ const error_return_trace_index = try sema.analyzeSaveErrRetIndex(&inner_block); + sema.error_return_trace_index_on_fn_entry = error_return_trace_index; + inner_block.error_return_trace_index = error_return_trace_index; + sema.analyzeBody(&inner_block, fn_info.body) catch |err| switch (err) { // TODO make these unreachable instead of @panic error.NeededSourceLocation => @panic("zig compiler bug: NeededSourceLocation"), diff --git a/src/Sema.zig b/src/Sema.zig index 972a19de4fbe..4c2f72034e3d 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -32,6 +32,8 @@ owner_func: ?*Module.Fn, /// This starts out the same as `owner_func` and then diverges in the case of /// an inline or comptime function call. func: ?*Module.Fn, +/// Used to restore the error return trace when returning a non-error from a function. +error_return_trace_index_on_fn_entry: Air.Inst.Ref = .none, /// When semantic analysis needs to know the return type of the function whose body /// is being analyzed, this `Type` should be used instead of going through `func`. /// This will correctly handle the case of a comptime/inline function call of a @@ -153,6 +155,10 @@ pub const Block = struct { is_typeof: bool = false, is_coerce_result_ptr: bool = false, + /// Keep track of the active error return trace index around blocks so that we can correctly + /// pop the error trace upon block exit. + error_return_trace_index: Air.Inst.Ref = .none, + /// when null, it is determined by build mode, changed by @setRuntimeSafety want_safety: ?bool = null, @@ -226,6 +232,7 @@ pub const Block = struct { .float_mode = parent.float_mode, .c_import_buf = parent.c_import_buf, .switch_else_err_ty = parent.switch_else_err_ty, + .error_return_trace_index = parent.error_return_trace_index, }; } @@ -499,6 +506,25 @@ pub const Block = struct { return result_index; } + /// Insert an instruction into the block at `index`. Moves all following + /// instructions forward in the block to make room. Operation is O(N). 
+ pub fn insertInst(block: *Block, index: Air.Inst.Index, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref { + return Air.indexToRef(try block.insertInstAsIndex(index, inst)); + } + + pub fn insertInstAsIndex(block: *Block, index: Air.Inst.Index, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Index { + const sema = block.sema; + const gpa = sema.gpa; + + try sema.air_instructions.ensureUnusedCapacity(gpa, 1); + + const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len); + sema.air_instructions.appendAssumeCapacity(inst); + + try block.instructions.insert(gpa, index, result_index); + return result_index; + } + fn addUnreachable(block: *Block, src: LazySrcLoc, safety_check: bool) !void { if (safety_check and block.wantSafety()) { _ = try block.sema.safetyPanic(block, src, .unreach); @@ -1208,6 +1234,16 @@ fn analyzeBodyInner( i += 1; continue; }, + .save_err_ret_index => { + try sema.zirSaveErrRetIndex(block, inst); + i += 1; + continue; + }, + .restore_err_ret_index => { + try sema.zirRestoreErrRetIndex(block, inst); + i += 1; + continue; + }, // Special case instructions to handle comptime control flow. .@"break" => { @@ -1300,31 +1336,32 @@ fn analyzeBodyInner( const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; const gpa = sema.gpa; - // If this block contains a function prototype, we need to reset the - // current list of parameters and restore it later. - // Note: this probably needs to be resolved in a more general manner. - const prev_params = block.params; - const need_sub_block = tags[inline_body[inline_body.len - 1]] == .repeat_inline; - var sub_block = block; - var block_space: Block = undefined; - // NOTE: this has to be done like this because branching in - // defers here breaks stage1. 
- block_space.instructions = .{}; - if (need_sub_block) { - block_space = block.makeSubBlock(); - block_space.inline_block = inline_body[0]; - sub_block = &block_space; - } - block.params = .{}; - defer { - block.params.deinit(gpa); - block.params = prev_params; - block_space.instructions.deinit(gpa); - } - const opt_break_data = try sema.analyzeBodyBreak(sub_block, inline_body); - if (need_sub_block) { - try block.instructions.appendSlice(gpa, block_space.instructions.items); - } + + const opt_break_data = b: { + // Create a temporary child block so that this inline block is properly + // labeled for any .restore_err_ret_index instructions + var child_block = block.makeSubBlock(); + + // If this block contains a function prototype, we need to reset the + // current list of parameters and restore it later. + // Note: this probably needs to be resolved in a more general manner. + if (tags[inline_body[inline_body.len - 1]] == .repeat_inline) { + child_block.inline_block = inline_body[0]; + } else child_block.inline_block = block.inline_block; + + var label: Block.Label = .{ + .zir_block = inst, + .merges = undefined, + }; + child_block.label = &label; + defer child_block.params.deinit(gpa); + + // Write these instructions directly into the parent block + child_block.instructions = block.instructions; + defer block.instructions = child_block.instructions; + + break :b try sema.analyzeBodyBreak(&child_block, inline_body); + }; // A runtime conditional branch that needs a post-hoc block to be // emitted communicates this by mapping the block index into the inst map. @@ -4968,7 +5005,7 @@ fn zirBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErro // Reserve space for a Block instruction so that generated Break instructions can // point to it, even if it doesn't end up getting used because the code ends up being - // comptime evaluated. + // comptime evaluated or is an unlabeled block. 
const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); try sema.air_instructions.append(gpa, .{ .tag = .block, @@ -4999,6 +5036,7 @@ fn zirBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErro .runtime_cond = parent_block.runtime_cond, .runtime_loop = parent_block.runtime_loop, .runtime_index = parent_block.runtime_index, + .error_return_trace_index = parent_block.error_return_trace_index, }; defer child_block.instructions.deinit(gpa); @@ -5641,6 +5679,117 @@ fn funcDeclSrc(sema: *Sema, block: *Block, src: LazySrcLoc, func_inst: Air.Inst. return owner_decl.srcLoc(); } +pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref { + const src = sema.src; + + const backend_supports_error_return_tracing = sema.mod.comp.bin_file.options.use_llvm; + if (!backend_supports_error_return_tracing or !sema.mod.comp.bin_file.options.error_return_tracing) + return .none; + + if (block.is_comptime) + return .none; + + const unresolved_stack_trace_ty = sema.getBuiltinType(block, src, "StackTrace") catch |err| switch (err) { + error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, + else => |e| return e, + }; + const stack_trace_ty = sema.resolveTypeFields(block, src, unresolved_stack_trace_ty) catch |err| switch (err) { + error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, + else => |e| return e, + }; + const field_index = sema.structFieldIndex(block, stack_trace_ty, "index", src) catch |err| switch (err) { + error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, + else => |e| return e, + }; + + return try block.addInst(.{ + .tag = .save_err_return_trace_index, + .data = .{ .ty_pl = .{ + .ty = try sema.addType(stack_trace_ty), + .payload = @intCast(u32, field_index), + } }, + }); +} + +/// Add instructions to block to "pop" the error return trace. 
+/// If `operand` is provided, only pops if operand is non-error. +fn popErrorReturnTrace( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + operand: Air.Inst.Ref, + saved_error_trace_index: Air.Inst.Ref, +) CompileError!void { + var is_non_error: ?bool = null; + var is_non_error_inst: Air.Inst.Ref = undefined; + if (operand != .none) { + is_non_error_inst = try sema.analyzeIsNonErr(block, src, operand); + if (try sema.resolveDefinedValue(block, src, is_non_error_inst)) |cond_val| + is_non_error = cond_val.toBool(); + } else is_non_error = true; // no operand means pop unconditionally + + if (is_non_error == true) { + // AstGen determined this result does not go to an error-handling expr (try/catch/return etc.), or + // the result is comptime-known to be a non-error. Either way, pop unconditionally. + + const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace"); + const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty); + const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty); + const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); + const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, "index", src, stack_trace_ty, true); + try sema.storePtr2(block, src, field_ptr, src, saved_error_trace_index, src, .store); + } else if (is_non_error == null) { + // The result might be an error. If it is, we leave the error trace alone. If it isn't, we need + // to pop any error trace that may have been propagated from our arguments. 
+ + try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Block).Struct.fields.len); + const cond_block_inst = try block.addInstAsIndex(.{ + .tag = .block, + .data = .{ + .ty_pl = .{ + .ty = Air.Inst.Ref.void_type, + .payload = undefined, // updated below + }, + }, + }); + + var then_block = block.makeSubBlock(); + defer then_block.instructions.deinit(sema.gpa); + + // If non-error, then pop the error return trace by restoring the index. + const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace"); + const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty); + const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty); + const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty); + const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, "index", src, stack_trace_ty, true); + try sema.storePtr2(&then_block, src, field_ptr, src, saved_error_trace_index, src, .store); + _ = try then_block.addBr(cond_block_inst, Air.Inst.Ref.void_value); + + // Otherwise, do nothing + var else_block = block.makeSubBlock(); + defer else_block.instructions.deinit(sema.gpa); + _ = try else_block.addBr(cond_block_inst, Air.Inst.Ref.void_value); + + try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.CondBr).Struct.fields.len + + then_block.instructions.items.len + else_block.instructions.items.len + + @typeInfo(Air.Block).Struct.fields.len + 1); // +1 for the sole .cond_br instruction in the .block + + const cond_br_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + try sema.air_instructions.append(sema.gpa, .{ .tag = .cond_br, .data = .{ .pl_op = .{ + .operand = is_non_error_inst, + .payload = sema.addExtraAssumeCapacity(Air.CondBr{ + .then_body_len = @intCast(u32, then_block.instructions.items.len), + .else_body_len = @intCast(u32, else_block.instructions.items.len), + }), + } } }); + 
sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items); + sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items); + + sema.air_instructions.items(.data)[cond_block_inst].ty_pl.payload = sema.addExtraAssumeCapacity(Air.Block{ .body_len = 1 }); + sema.air_extra.appendAssumeCapacity(cond_br_inst); + } +} + fn zirCall( sema: *Sema, block: *Block, @@ -5657,6 +5806,7 @@ fn zirCall( const modifier = @intToEnum(std.builtin.CallOptions.Modifier, extra.data.flags.packed_modifier); const ensure_result_used = extra.data.flags.ensure_result_used; + const pop_error_return_trace = extra.data.flags.pop_error_return_trace; var func = try sema.resolveInst(extra.data.callee); var resolved_args: []Air.Inst.Ref = undefined; @@ -5729,6 +5879,9 @@ fn zirCall( const args_body = sema.code.extra[extra.end..]; + var input_is_error = false; + const block_index = @intCast(Air.Inst.Index, block.instructions.items.len); + const parent_comptime = block.is_comptime; // `extra_index` and `arg_index` are separate since the bound function is passed as the first argument. var extra_index: usize = 0; @@ -5746,10 +5899,8 @@ fn zirCall( else func_ty_info.param_types[arg_index]; - const old_comptime = block.is_comptime; - defer block.is_comptime = old_comptime; // Generate args to comptime params in comptime block. 
- block.is_comptime = parent_comptime; + defer block.is_comptime = parent_comptime; if (arg_index < fn_params_len and func_ty_info.comptime_params[arg_index]) { block.is_comptime = true; } @@ -5758,13 +5909,58 @@ fn zirCall( try sema.inst_map.put(sema.gpa, inst, param_ty_inst); const resolved = try sema.resolveBody(block, args_body[arg_start..arg_end], inst); - if (sema.typeOf(resolved).zigTypeTag() == .NoReturn) { + const resolved_ty = sema.typeOf(resolved); + if (resolved_ty.zigTypeTag() == .NoReturn) { return resolved; } + if (resolved_ty.isError()) { + input_is_error = true; + } resolved_args[arg_index] = resolved; } + if (sema.owner_func == null or !sema.owner_func.?.calls_or_awaits_errorable_fn) + input_is_error = false; // input was an error type, but no errorable fn's were actually called - return sema.analyzeCall(block, func, func_src, call_src, modifier, ensure_result_used, resolved_args, bound_arg_src); + const backend_supports_error_return_tracing = sema.mod.comp.bin_file.options.use_llvm; + if (backend_supports_error_return_tracing and sema.mod.comp.bin_file.options.error_return_tracing and + !block.is_comptime and (input_is_error or pop_error_return_trace)) + { + const call_inst: Air.Inst.Ref = if (modifier == .always_tail) undefined else b: { + break :b try sema.analyzeCall(block, func, func_src, call_src, modifier, ensure_result_used, resolved_args, bound_arg_src); + }; + + const return_ty = sema.typeOf(call_inst); + if (modifier != .always_tail and return_ty.isNoReturn()) + return call_inst; // call to "fn(...) noreturn", don't pop + + // If any input is an error-type, we might need to pop any trace it generated. Otherwise, we only + // need to clean-up our own trace if we were passed to a non-error-handling expression. 
+ if (input_is_error or (pop_error_return_trace and modifier != .always_tail and return_ty.isError())) { + const unresolved_stack_trace_ty = try sema.getBuiltinType(block, call_src, "StackTrace"); + const stack_trace_ty = try sema.resolveTypeFields(block, call_src, unresolved_stack_trace_ty); + const field_index = try sema.structFieldIndex(block, stack_trace_ty, "index", call_src); + + // Insert a save instruction before the arg resolution + call instructions we just generated + const save_inst = try block.insertInst(block_index, .{ + .tag = .save_err_return_trace_index, + .data = .{ .ty_pl = .{ + .ty = try sema.addType(stack_trace_ty), + .payload = @intCast(u32, field_index), + } }, + }); + + // Pop the error return trace, testing the result for non-error if necessary + const operand = if (pop_error_return_trace or modifier == .always_tail) .none else call_inst; + try sema.popErrorReturnTrace(block, call_src, operand, save_inst); + } + + if (modifier == .always_tail) // Perform the call *after* the restore, so that a tail call is possible. 
+ return sema.analyzeCall(block, func, func_src, call_src, modifier, ensure_result_used, resolved_args, bound_arg_src); + + return call_inst; + } else { + return sema.analyzeCall(block, func, func_src, call_src, modifier, ensure_result_used, resolved_args, bound_arg_src); + } } const GenericCallAdapter = struct { @@ -6056,6 +6252,10 @@ fn analyzeCall( sema.func = module_fn; defer sema.func = parent_func; + const parent_err_ret_index = sema.error_return_trace_index_on_fn_entry; + sema.error_return_trace_index_on_fn_entry = block.error_return_trace_index; + defer sema.error_return_trace_index_on_fn_entry = parent_err_ret_index; + var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, fn_owner_decl.src_scope); defer wip_captures.deinit(); @@ -6069,6 +6269,7 @@ fn analyzeCall( .label = null, .inlining = &inlining, .is_comptime = is_comptime_call, + .error_return_trace_index = block.error_return_trace_index, }; const merges = &child_block.inlining.?.merges; @@ -6814,6 +7015,13 @@ fn instantiateGenericCall( } arg_i += 1; } + + // Save the error trace as our first action in the function. + // If this is unnecessary after all, Liveness will clean it up for us. + const error_return_trace_index = try sema.analyzeSaveErrRetIndex(&child_block); + child_sema.error_return_trace_index_on_fn_entry = error_return_trace_index; + child_block.error_return_trace_index = error_return_trace_index; + const new_func_inst = child_sema.resolveBody(&child_block, fn_info.param_body, fn_info.param_body_inst) catch |err| { // TODO look up the compile error that happened here and attach a note to it // pointing here, at the generic instantiation callsite. 
@@ -9703,6 +9911,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError .defer_err_code, .err_union_code, .ret_err_value_code, + .restore_err_ret_index, .is_non_err, .condbr, => {}, @@ -10005,6 +10214,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError .runtime_cond = block.runtime_cond, .runtime_loop = block.runtime_loop, .runtime_index = block.runtime_index, + .error_return_trace_index = block.error_return_trace_index, }; const merges = &child_block.label.?.merges; defer child_block.instructions.deinit(gpa); @@ -10888,6 +11098,7 @@ fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, op const tags = sema.code.instructions.items(.tag); for (body) |inst| { switch (tags[inst]) { + .save_err_ret_index, .dbg_block_begin, .dbg_block_end, .dbg_stmt, @@ -10910,6 +11121,10 @@ fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, op try sema.zirDbgStmt(block, inst); continue; }, + .save_err_ret_index => { + try sema.zirSaveErrRetIndex(block, inst); + continue; + }, .str => try sema.zirStr(block, inst), .as_node => try sema.zirAsNode(block, inst), .field_val => try sema.zirFieldVal(block, inst), @@ -10955,6 +11170,7 @@ fn maybeErrorUnwrapCondbr(sema: *Sema, block: *Block, body: []const Zir.Inst.Ind return; } if (try sema.resolveDefinedValue(block, cond_src, err_operand)) |val| { + if (!operand_ty.isError()) return; if (val.getError() == null) return; try sema.maybeErrorUnwrapComptime(block, body, err_operand); } @@ -15519,6 +15735,7 @@ fn zirTypeofBuiltin(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr .is_comptime = false, .is_typeof = true, .want_safety = false, + .error_return_trace_index = block.error_return_trace_index, }; defer child_block.instructions.deinit(sema.gpa); @@ -16176,6 +16393,75 @@ fn wantErrorReturnTracing(sema: *Sema, fn_ret_ty: Type) bool { backend_supports_error_return_tracing; } +fn zirSaveErrRetIndex(sema: *Sema, block: 
*Block, inst: Zir.Inst.Index) CompileError!void { + const inst_data = sema.code.instructions.items(.data)[inst].save_err_ret_index; + + const backend_supports_error_return_tracing = sema.mod.comp.bin_file.options.use_llvm; + const ok = backend_supports_error_return_tracing and sema.mod.comp.bin_file.options.error_return_tracing; + if (!ok) return; + + // This is only relevant at runtime. + if (block.is_comptime) return; + + // This is only relevant within functions. + if (sema.func == null) return; + + const save_index = inst_data.operand == .none or b: { + const operand = try sema.resolveInst(inst_data.operand); + const operand_ty = sema.typeOf(operand); + break :b operand_ty.isError(); + }; + + if (save_index) + block.error_return_trace_index = try sema.analyzeSaveErrRetIndex(block); +} + +fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) CompileError!void { + const inst_data = sema.code.instructions.items(.data)[inst].restore_err_ret_index; + const src = sema.src; // TODO + + // This is only relevant at runtime. 
+ if (start_block.is_comptime) return; + + const backend_supports_error_return_tracing = sema.mod.comp.bin_file.options.use_llvm; + const ok = sema.owner_func.?.calls_or_awaits_errorable_fn and + sema.mod.comp.bin_file.options.error_return_tracing and + backend_supports_error_return_tracing; + if (!ok) return; + + const tracy = trace(@src()); + defer tracy.end(); + + const saved_index = if (Zir.refToIndex(inst_data.block)) |zir_block| b: { + var block = start_block; + while (true) { + if (block.label) |label| { + if (label.zir_block == zir_block) { + const target_trace_index = if (block.parent) |parent_block| tgt: { + break :tgt parent_block.error_return_trace_index; + } else sema.error_return_trace_index_on_fn_entry; + + if (start_block.error_return_trace_index != target_trace_index) + break :b target_trace_index; + + return; // No need to restore + } + } + block = block.parent.?; + } + } else b: { + if (start_block.error_return_trace_index != sema.error_return_trace_index_on_fn_entry) + break :b sema.error_return_trace_index_on_fn_entry; + + return; // No need to restore + }; + + assert(saved_index != .none); // The .error_return_trace_index field was dropped somewhere + + const operand = try sema.resolveInst(inst_data.operand); + return sema.popErrorReturnTrace(start_block, src, operand, saved_index); +} + fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void { assert(sema.fn_ret_ty.zigTypeTag() == .ErrorUnion); @@ -17181,8 +17467,6 @@ fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const src = inst_data.src(); - _ = src; const operand = try sema.resolveInst(inst_data.operand); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; diff --git a/src/Zir.zig b/src/Zir.zig index 9881ee161750..b17d6d7fec6c 100644 
--- a/src/Zir.zig +++ b/src/Zir.zig @@ -988,6 +988,15 @@ pub const Inst = struct { /// Uses the `err_defer_code` union field. defer_err_code, + /// Requests that Sema update the saved error return trace index for the enclosing + /// block, if the operand is .none or of an error/error-union type. + /// Uses the `save_err_ret_index` field. + save_err_ret_index, + /// Sets error return trace to zero if no operand is given, + /// otherwise sets the value to the given amount. + /// Uses the `restore_err_ret_index` union field. + restore_err_ret_index, + /// The ZIR instruction tag is one of the `Extended` ones. /// Uses the `extended` union field. extended, @@ -1236,6 +1245,8 @@ pub const Inst = struct { //.try_ptr_inline, .@"defer", .defer_err_code, + .save_err_ret_index, + .restore_err_ret_index, => false, .@"break", @@ -1305,6 +1316,8 @@ pub const Inst = struct { .check_comptime_control_flow, .@"defer", .defer_err_code, + .restore_err_ret_index, + .save_err_ret_index, => true, .param, @@ -1810,6 +1823,9 @@ pub const Inst = struct { .@"defer" = .@"defer", .defer_err_code = .defer_err_code, + .save_err_ret_index = .save_err_ret_index, + .restore_err_ret_index = .restore_err_ret_index, + .extended = .extended, }); }; @@ -2586,6 +2602,13 @@ pub const Inst = struct { err_code: Ref, payload_index: u32, }, + save_err_ret_index: struct { + operand: Ref, // If error type (or .none), save new trace index + }, + restore_err_ret_index: struct { + block: Ref, // If restored, the index is from this block's entrypoint + operand: Ref, // If non-error (or .none), then restore the index + }, // Make sure we don't accidentally add a field to make this union // bigger than expected. 
Note that in Debug builds, Zig is allowed @@ -2624,6 +2647,8 @@ pub const Inst = struct { str_op, @"defer", defer_err_code, + save_err_ret_index, + restore_err_ret_index, }; }; @@ -2809,10 +2834,11 @@ pub const Inst = struct { pub const Flags = packed struct { /// std.builtin.CallOptions.Modifier in packed form pub const PackedModifier = u3; - pub const PackedArgsLen = u28; + pub const PackedArgsLen = u27; packed_modifier: PackedModifier, ensure_result_used: bool = false, + pop_error_return_trace: bool, args_len: PackedArgsLen, comptime { diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 8da94f2e9cc8..eb8ca8e8f120 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -702,6 +702,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .errunion_payload_ptr_set => try self.airErrUnionPayloadPtrSet(inst), .err_return_trace => try self.airErrReturnTrace(inst), .set_err_return_trace => try self.airSetErrReturnTrace(inst), + .save_err_return_trace_index=> try self.airSaveErrReturnTraceIndex(inst), .wrap_optional => try self.airWrapOptional(inst), .wrap_errunion_payload => try self.airWrapErrUnionPayload(inst), @@ -2867,6 +2868,11 @@ fn airSetErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void { return self.fail("TODO implement airSetErrReturnTrace for {}", .{self.target.cpu.arch}); } +fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airSaveErrReturnTraceIndex for {}", .{self.target.cpu.arch}); +} + fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 530d5c2b0456..1ebc348fc21c 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -751,6 +751,7 @@ fn genBody(self: *Self, body: []const 
Air.Inst.Index) InnerError!void { .errunion_payload_ptr_set => try self.airErrUnionPayloadPtrSet(inst), .err_return_trace => try self.airErrReturnTrace(inst), .set_err_return_trace => try self.airSetErrReturnTrace(inst), + .save_err_return_trace_index=> try self.airSaveErrReturnTraceIndex(inst), .wrap_optional => try self.airWrapOptional(inst), .wrap_errunion_payload => try self.airWrapErrUnionPayload(inst), @@ -2116,6 +2117,11 @@ fn airSetErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void { return self.fail("TODO implement airSetErrReturnTrace for {}", .{self.target.cpu.arch}); } +fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airSaveErrReturnTraceIndex for {}", .{self.target.cpu.arch}); +} + /// T to E!T fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index dd31bfb6f772..003d2c7e5ff1 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -665,6 +665,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .errunion_payload_ptr_set => try self.airErrUnionPayloadPtrSet(inst), .err_return_trace => try self.airErrReturnTrace(inst), .set_err_return_trace => try self.airSetErrReturnTrace(inst), + .save_err_return_trace_index=> try self.airSaveErrReturnTraceIndex(inst), .wrap_optional => try self.airWrapOptional(inst), .wrap_errunion_payload => try self.airWrapErrUnionPayload(inst), @@ -1329,6 +1330,11 @@ fn airSetErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void { return self.fail("TODO implement airSetErrReturnTrace for {}", .{self.target.cpu.arch}); } +fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airSaveErrReturnTraceIndex for {}", .{self.target.cpu.arch}); +} + fn airWrapOptional(self: *Self, inst: 
Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 6217119f341c..9d37cd9d1b52 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -679,6 +679,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .errunion_payload_ptr_set => try self.airErrUnionPayloadPtrSet(inst), .err_return_trace => @panic("TODO try self.airErrReturnTrace(inst)"), .set_err_return_trace => @panic("TODO try self.airSetErrReturnTrace(inst)"), + .save_err_return_trace_index=> @panic("TODO try self.airSaveErrReturnTraceIndex(inst)"), .wrap_optional => try self.airWrapOptional(inst), .wrap_errunion_payload => @panic("TODO try self.airWrapErrUnionPayload(inst)"), diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index b47bb71e9843..538fcb13c10d 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1857,6 +1857,7 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { .tag_name, .err_return_trace, .set_err_return_trace, + .save_err_return_trace_index, .is_named_enum_value, .error_set_has_value, .addrspace_cast, diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index a3888b4173e4..a1b354482bba 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -756,6 +756,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .errunion_payload_ptr_set => try self.airErrUnionPayloadPtrSet(inst), .err_return_trace => try self.airErrReturnTrace(inst), .set_err_return_trace => try self.airSetErrReturnTrace(inst), + .save_err_return_trace_index=> try self.airSaveErrReturnTraceIndex(inst), .wrap_optional => try self.airWrapOptional(inst), .wrap_errunion_payload => try self.airWrapErrUnionPayload(inst), @@ -1973,6 +1974,11 @@ fn airSetErrReturnTrace(self: *Self, inst: 
Air.Inst.Index) !void { return self.fail("TODO implement airSetErrReturnTrace for {}", .{self.target.cpu.arch}); } +fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airSaveErrReturnTraceIndex for {}", .{self.target.cpu.arch}); +} + fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; if (self.liveness.isUnused(inst)) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 072091d9b24a..d6584d75aed0 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1935,6 +1935,7 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO .errunion_payload_ptr_set => try airErrUnionPayloadPtrSet(f, inst), .err_return_trace => try airErrReturnTrace(f, inst), .set_err_return_trace => try airSetErrReturnTrace(f, inst), + .save_err_return_trace_index => try airSaveErrReturnTraceIndex(f, inst), .wasm_memory_size => try airWasmMemorySize(f, inst), .wasm_memory_grow => try airWasmMemoryGrow(f, inst), @@ -3625,6 +3626,11 @@ fn airSetErrReturnTrace(f: *Function, inst: Air.Inst.Index) !CValue { return f.fail("TODO: C backend: implement airSetErrReturnTrace", .{}); } +fn airSaveErrReturnTraceIndex(f: *Function, inst: Air.Inst.Index) !CValue { + _ = inst; + return f.fail("TODO: C backend: implement airSaveErrReturnTraceIndex", .{}); +} + fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) return CValue.none; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index b0d1588007f8..3ebca13c2022 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -4592,6 +4592,7 @@ pub const FuncGen = struct { .errunion_payload_ptr_set => try self.airErrUnionPayloadPtrSet(inst), .err_return_trace => try self.airErrReturnTrace(inst), .set_err_return_trace => try self.airSetErrReturnTrace(inst), + .save_err_return_trace_index => try self.airSaveErrReturnTraceIndex(inst), 
.wrap_optional => try self.airWrapOptional(inst), .wrap_errunion_payload => try self.airWrapErrUnionPayload(inst), @@ -6543,6 +6544,24 @@ pub const FuncGen = struct { return null; } + fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + if (self.liveness.isUnused(inst)) return null; + + const target = self.dg.module.getTarget(); + + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + //const struct_ty = try self.resolveInst(ty_pl.ty); + const struct_ty = self.air.getRefType(ty_pl.ty); + const field_index = ty_pl.payload; + + var ptr_ty_buf: Type.Payload.Pointer = undefined; + const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?; + const struct_llvm_ty = try self.dg.lowerType(struct_ty); + const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field_index, ""); + const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base); + return self.load(field_ptr, field_ptr_ty); + } + fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { if (self.liveness.isUnused(inst)) return null; diff --git a/src/print_air.zig b/src/print_air.zig index d3523c0fc665..0bbc1100f7b1 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -197,6 +197,7 @@ const Writer = struct { .unreach, .ret_addr, .frame_addr, + .save_err_return_trace_index, => try w.writeNoOp(s, inst), .const_ty, diff --git a/src/print_zir.zig b/src/print_zir.zig index aab7444e0844..f1b106892040 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -254,6 +254,9 @@ const Writer = struct { .str => try self.writeStr(stream, inst), .int_type => try self.writeIntType(stream, inst), + .save_err_ret_index => try self.writeSaveErrRetIndex(stream, inst), + .restore_err_ret_index => try self.writeRestoreErrRetIndex(stream, inst), + .@"break", .break_inline, => try self.writeBreak(stream, inst), @@ -440,7 +443,7 @@ const Writer = struct { .dbg_block_begin, .dbg_block_end, - => try stream.writeAll("))"), 
+ => try stream.writeAll(")"), .closure_get => try self.writeInstNode(stream, inst), @@ -2272,6 +2275,22 @@ const Writer = struct { try self.writeSrc(stream, int_type.src()); } + fn writeSaveErrRetIndex(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void { + const inst_data = self.code.instructions.items(.data)[inst].save_err_ret_index; + + try self.writeInstRef(stream, inst_data.operand); + try stream.writeAll(")"); + } + + fn writeRestoreErrRetIndex(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void { + const inst_data = self.code.instructions.items(.data)[inst].restore_err_ret_index; + + try self.writeInstRef(stream, inst_data.block); + try stream.writeAll(", "); + try self.writeInstRef(stream, inst_data.operand); + try stream.writeAll(")"); + } + fn writeBreak(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void { const inst_data = self.code.instructions.items(.data)[inst].@"break"; diff --git a/src/value.zig b/src/value.zig index ee5b357a70ff..d24c5a1c1762 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2971,9 +2971,10 @@ pub const Value = extern union { }; } - /// Valid for all types. Asserts the value is not undefined and not unreachable. - /// Prefer `errorUnionIsPayload` to find out whether something is an error or not - /// because it works without having to figure out the string. + /// Valid only for error (union) types. Asserts the value is not undefined and not + /// unreachable. For error unions, prefer `errorUnionIsPayload` to find out whether + /// something is an error or not because it works without having to figure out the + /// string. 
pub fn getError(self: Value) ?[]const u8 { return switch (self.tag()) { .@"error" => self.castTag(.@"error").?.data.name, diff --git a/test/behavior/bugs/12891.zig b/test/behavior/bugs/12891.zig index 97126bde4a96..78947d1776b8 100644 --- a/test/behavior/bugs/12891.zig +++ b/test/behavior/bugs/12891.zig @@ -7,6 +7,7 @@ test "issue12891" { try std.testing.expect(i < f); } test "nan" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO const f = comptime std.math.nan(f64); diff --git a/test/behavior/error.zig b/test/behavior/error.zig index c93d7aebf749..6b64a0dc0142 100644 --- a/test/behavior/error.zig +++ b/test/behavior/error.zig @@ -830,3 +830,16 @@ test "compare error union and error set" { try expect(a != b); try expect(b != a); } + +fn non_errorable() void { + // Make sure catch works even in a function that does not call any errorable functions. + // + // This test is needed because stage 2's fix for #1923 means that catch blocks interact + // with the error return trace index. 
+ var x: error{Foo}!void = {}; + return x catch {}; +} + +test "catch within a function that calls no errorable functions" { + non_errorable(); +} diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index aa92f42e2405..2fa07d0de797 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -1401,7 +1401,21 @@ test "continue in inline for inside a comptime switch" { try expect(count == 4); } +test "length of global array is determinable at comptime" { + const S = struct { + var bytes: [1024]u8 = undefined; + + fn foo() !void { + try std.testing.expect(bytes.len == 1024); + } + }; + comptime try S.foo(); +} + test "continue nested inline for loop" { + // TODO: https://github.com/ziglang/zig/issues/13175 + if (builtin.zig_backend != .stage1) return error.SkipZigTest; + var a: u8 = 0; loop: inline for ([_]u8{ 1, 2 }) |x| { inline for ([_]u8{1}) |y| { @@ -1415,13 +1429,21 @@ test "continue nested inline for loop" { try expect(a == 2); } -test "length of global array is determinable at comptime" { - const S = struct { - var bytes: [1024]u8 = undefined; +test "continue nested inline for loop in named block expr" { + // TODO: https://github.com/ziglang/zig/issues/13175 + if (builtin.zig_backend != .stage1) return error.SkipZigTest; - fn foo() !void { - try std.testing.expect(bytes.len == 1024); - } - }; - comptime try S.foo(); + var a: u8 = 0; + loop: inline for ([_]u8{ 1, 2 }) |x| { + a = b: { + inline for ([_]u8{1}) |y| { + if (x == y) { + continue :loop; + } + } + break :b x; + }; + try expect(x == 2); + } + try expect(a == 2); } diff --git a/test/stack_traces.zig b/test/stack_traces.zig index 3a8682b5a5dc..ebd910563b1b 100644 --- a/test/stack_traces.zig +++ b/test/stack_traces.zig @@ -97,6 +97,547 @@ pub fn addCases(cases: *tests.StackTracesContext) void { , }, }); + cases.addCase(.{ + .name = "non-error return pops error trace", + .source = + \\fn bar() !void { + \\ return error.UhOh; + \\} + \\ + \\fn foo() !void { + \\ bar() catch { + \\ return; // 
non-error result: success + \\ }; + \\} + \\ + \\pub fn main() !void { + \\ try foo(); + \\ return error.UnrelatedError; + \\} + , + .Debug = .{ + .expect = + \\error: UnrelatedError + \\source.zig:13:5: [address] in main (test) + \\ return error.UnrelatedError; + \\ ^ + \\ + , + }, + .ReleaseSafe = .{ + .exclude_os = .{ + .windows, // TODO + .linux, // defeated by aggressive inlining + }, + .expect = + \\error: UnrelatedError + \\source.zig:13:5: [address] in [function] + \\ return error.UnrelatedError; + \\ ^ + \\ + , + }, + .ReleaseFast = .{ + .expect = + \\error: UnrelatedError + \\ + , + }, + .ReleaseSmall = .{ + .expect = + \\error: UnrelatedError + \\ + , + }, + }); + + cases.addCase(.{ + .name = "try return + handled catch/if-else", + .source = + \\fn foo() !void { + \\ return error.TheSkyIsFalling; + \\} + \\ + \\pub fn main() !void { + \\ foo() catch {}; // should not affect error trace + \\ if (foo()) |_| {} else |_| { + \\ // should also not affect error trace + \\ } + \\ try foo(); + \\} + , + .Debug = .{ + .expect = + \\error: TheSkyIsFalling + \\source.zig:2:5: [address] in foo (test) + \\ return error.TheSkyIsFalling; + \\ ^ + \\source.zig:10:5: [address] in main (test) + \\ try foo(); + \\ ^ + \\ + , + }, + .ReleaseSafe = .{ + .exclude_os = .{ + .windows, // TODO + .linux, // defeated by aggressive inlining + }, + .expect = + \\error: TheSkyIsFalling + \\source.zig:2:5: [address] in [function] + \\ return error.TheSkyIsFalling; + \\ ^ + \\source.zig:10:5: [address] in [function] + \\ try foo(); + \\ ^ + \\ + , + }, + .ReleaseFast = .{ + .expect = + \\error: TheSkyIsFalling + \\ + , + }, + .ReleaseSmall = .{ + .expect = + \\error: TheSkyIsFalling + \\ + , + }, + }); + + cases.addCase(.{ + .name = "break from inline loop pops error return trace", + .source = + \\fn foo() !void { return error.FooBar; } + \\ + \\pub fn main() !void { + \\ comptime var i: usize = 0; + \\ b: inline while (i < 5) : (i += 1) { + \\ foo() catch { + \\ break :b; // non-error 
break, success + \\ }; + \\ } + \\ // foo() was successfully handled, should not appear in trace + \\ + \\ return error.BadTime; + \\} + , + .Debug = .{ + .expect = + \\error: BadTime + \\source.zig:12:5: [address] in main (test) + \\ return error.BadTime; + \\ ^ + \\ + , + }, + .ReleaseSafe = .{ + .exclude_os = .{ + .windows, // TODO + .linux, // defeated by aggressive inlining + }, + .expect = + \\error: BadTime + \\source.zig:12:5: [address] in [function] + \\ return error.BadTime; + \\ ^ + \\ + , + }, + .ReleaseFast = .{ + .expect = + \\error: BadTime + \\ + , + }, + .ReleaseSmall = .{ + .expect = + \\error: BadTime + \\ + , + }, + }); + + cases.addCase(.{ + .name = "catch and re-throw error", + .source = + \\fn foo() !void { + \\ return error.TheSkyIsFalling; + \\} + \\ + \\pub fn main() !void { + \\ return foo() catch error.AndMyCarIsOutOfGas; + \\} + , + .Debug = .{ + .expect = + \\error: AndMyCarIsOutOfGas + \\source.zig:2:5: [address] in foo (test) + \\ return error.TheSkyIsFalling; + \\ ^ + \\source.zig:6:5: [address] in main (test) + \\ return foo() catch error.AndMyCarIsOutOfGas; + \\ ^ + \\ + , + }, + .ReleaseSafe = .{ + .exclude_os = .{ + .windows, // TODO + .linux, // defeated by aggressive inlining + }, + .expect = + \\error: AndMyCarIsOutOfGas + \\source.zig:2:5: [address] in [function] + \\ return error.TheSkyIsFalling; + \\ ^ + \\source.zig:6:5: [address] in [function] + \\ return foo() catch error.AndMyCarIsOutOfGas; + \\ ^ + \\ + , + }, + .ReleaseFast = .{ + .expect = + \\error: AndMyCarIsOutOfGas + \\ + , + }, + .ReleaseSmall = .{ + .expect = + \\error: AndMyCarIsOutOfGas + \\ + , + }, + }); + + cases.addCase(.{ + .name = "errors stored in var do not contribute to error trace", + .source = + \\fn foo() !void { + \\ return error.TheSkyIsFalling; + \\} + \\ + \\pub fn main() !void { + \\ // Once an error is stored in a variable, it is popped from the trace + \\ var x = foo(); + \\ x = {}; + \\ + \\ // As a result, this error trace will still be 
clean + \\ return error.SomethingUnrelatedWentWrong; + \\} + , + .Debug = .{ + .expect = + \\error: SomethingUnrelatedWentWrong + \\source.zig:11:5: [address] in main (test) + \\ return error.SomethingUnrelatedWentWrong; + \\ ^ + \\ + , + }, + .ReleaseSafe = .{ + .exclude_os = .{ + .windows, // TODO + .linux, // defeated by aggressive inlining + }, + .expect = + \\error: SomethingUnrelatedWentWrong + \\source.zig:11:5: [address] in [function] + \\ return error.SomethingUnrelatedWentWrong; + \\ ^ + \\ + , + }, + .ReleaseFast = .{ + .expect = + \\error: SomethingUnrelatedWentWrong + \\ + , + }, + .ReleaseSmall = .{ + .expect = + \\error: SomethingUnrelatedWentWrong + \\ + , + }, + }); + + cases.addCase(.{ + .name = "error stored in const has trace preserved for duration of block", + .source = + \\fn foo() !void { return error.TheSkyIsFalling; } + \\fn bar() !void { return error.InternalError; } + \\fn baz() !void { return error.UnexpectedReality; } + \\ + \\pub fn main() !void { + \\ const x = foo(); + \\ const y = b: { + \\ if (true) + \\ break :b bar(); + \\ + \\ break :b {}; + \\ }; + \\ x catch {}; + \\ y catch {}; + \\ // foo()/bar() error traces not popped until end of block + \\ + \\ { + \\ const z = baz(); + \\ z catch {}; + \\ // baz() error trace still alive here + \\ } + \\ // baz() error trace popped, foo(), bar() still alive + \\ return error.StillUnresolved; + \\} + , + .Debug = .{ + .expect = + \\error: StillUnresolved + \\source.zig:1:18: [address] in foo (test) + \\fn foo() !void { return error.TheSkyIsFalling; } + \\ ^ + \\source.zig:2:18: [address] in bar (test) + \\fn bar() !void { return error.InternalError; } + \\ ^ + \\source.zig:23:5: [address] in main (test) + \\ return error.StillUnresolved; + \\ ^ + \\ + , + }, + .ReleaseSafe = .{ + .exclude_os = .{ + .windows, // TODO + .linux, // defeated by aggressive inlining + }, + .expect = + \\error: StillUnresolved + \\source.zig:1:18: [address] in [function] + \\fn foo() !void { return 
error.TheSkyIsFalling; } + \\ ^ + \\source.zig:2:18: [address] in [function] + \\fn bar() !void { return error.InternalError; } + \\ ^ + \\source.zig:23:5: [address] in [function] + \\ return error.StillUnresolved; + \\ ^ + \\ + , + }, + .ReleaseFast = .{ + .expect = + \\error: StillUnresolved + \\ + , + }, + .ReleaseSmall = .{ + .expect = + \\error: StillUnresolved + \\ + , + }, + }); + + cases.addCase(.{ + .name = "error passed to function has its trace preserved for duration of the call", + .source = + \\pub fn expectError(expected_error: anyerror, actual_error: anyerror!void) !void { + \\ actual_error catch |err| { + \\ if (err == expected_error) return {}; + \\ }; + \\ return error.TestExpectedError; + \\} + \\ + \\fn alwaysErrors() !void { return error.ThisErrorShouldNotAppearInAnyTrace; } + \\fn foo() !void { return error.Foo; } + \\ + \\pub fn main() !void { + \\ try expectError(error.ThisErrorShouldNotAppearInAnyTrace, alwaysErrors()); + \\ try expectError(error.ThisErrorShouldNotAppearInAnyTrace, alwaysErrors()); + \\ try expectError(error.Foo, foo()); + \\ + \\ // Only the error trace for this failing check should appear: + \\ try expectError(error.Bar, foo()); + \\} + , + .Debug = .{ + .expect = + \\error: TestExpectedError + \\source.zig:9:18: [address] in foo (test) + \\fn foo() !void { return error.Foo; } + \\ ^ + \\source.zig:5:5: [address] in expectError (test) + \\ return error.TestExpectedError; + \\ ^ + \\source.zig:17:5: [address] in main (test) + \\ try expectError(error.Bar, foo()); + \\ ^ + \\ + , + }, + .ReleaseSafe = .{ + .exclude_os = .{ + .windows, // TODO + }, + .expect = + \\error: TestExpectedError + \\source.zig:9:18: [address] in [function] + \\fn foo() !void { return error.Foo; } + \\ ^ + \\source.zig:5:5: [address] in [function] + \\ return error.TestExpectedError; + \\ ^ + \\source.zig:17:5: [address] in [function] + \\ try expectError(error.Bar, foo()); + \\ ^ + \\ + , + }, + .ReleaseFast = .{ + .expect = + \\error: 
TestExpectedError + \\ + , + }, + .ReleaseSmall = .{ + .expect = + \\error: TestExpectedError + \\ + , + }, + }); + + cases.addCase(.{ + .name = "try return from within catch", + .source = + \\fn foo() !void { + \\ return error.TheSkyIsFalling; + \\} + \\ + \\fn bar() !void { + \\ return error.AndMyCarIsOutOfGas; + \\} + \\ + \\pub fn main() !void { + \\ foo() catch { // error trace should include foo() + \\ try bar(); + \\ }; + \\} + , + .Debug = .{ + .expect = + \\error: AndMyCarIsOutOfGas + \\source.zig:2:5: [address] in foo (test) + \\ return error.TheSkyIsFalling; + \\ ^ + \\source.zig:6:5: [address] in bar (test) + \\ return error.AndMyCarIsOutOfGas; + \\ ^ + \\source.zig:11:9: [address] in main (test) + \\ try bar(); + \\ ^ + \\ + , + }, + .ReleaseSafe = .{ + .exclude_os = .{ + .windows, // TODO + }, + .expect = + \\error: AndMyCarIsOutOfGas + \\source.zig:2:5: [address] in [function] + \\ return error.TheSkyIsFalling; + \\ ^ + \\source.zig:6:5: [address] in [function] + \\ return error.AndMyCarIsOutOfGas; + \\ ^ + \\source.zig:11:9: [address] in [function] + \\ try bar(); + \\ ^ + \\ + , + }, + .ReleaseFast = .{ + .expect = + \\error: AndMyCarIsOutOfGas + \\ + , + }, + .ReleaseSmall = .{ + .expect = + \\error: AndMyCarIsOutOfGas + \\ + , + }, + }); + + cases.addCase(.{ + .name = "try return from within if-else", + .source = + \\fn foo() !void { + \\ return error.TheSkyIsFalling; + \\} + \\ + \\fn bar() !void { + \\ return error.AndMyCarIsOutOfGas; + \\} + \\ + \\pub fn main() !void { + \\ if (foo()) |_| {} else |_| { // error trace should include foo() + \\ try bar(); + \\ } + \\} + , + .Debug = .{ + .expect = + \\error: AndMyCarIsOutOfGas + \\source.zig:2:5: [address] in foo (test) + \\ return error.TheSkyIsFalling; + \\ ^ + \\source.zig:6:5: [address] in bar (test) + \\ return error.AndMyCarIsOutOfGas; + \\ ^ + \\source.zig:11:9: [address] in main (test) + \\ try bar(); + \\ ^ + \\ + , + }, + .ReleaseSafe = .{ + .exclude_os = .{ + .windows, // TODO + }, + 
.expect = + \\error: AndMyCarIsOutOfGas + \\source.zig:2:5: [address] in [function] + \\ return error.TheSkyIsFalling; + \\ ^ + \\source.zig:6:5: [address] in [function] + \\ return error.AndMyCarIsOutOfGas; + \\ ^ + \\source.zig:11:9: [address] in [function] + \\ try bar(); + \\ ^ + \\ + , + }, + .ReleaseFast = .{ + .expect = + \\error: AndMyCarIsOutOfGas + \\ + , + }, + .ReleaseSmall = .{ + .expect = + \\error: AndMyCarIsOutOfGas + \\ + , + }, + }); cases.addCase(.{ .name = "try try return return",