diff --git a/doc/langref.html.in b/doc/langref.html.in
index 865f4ecf8787..311d373a877d 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -6801,6 +6801,32 @@ test "peer type resolution: *const T and ?*T" {
try expect(a == b);
try expect(b == a);
}
+
+test "peer type resolution: error union switch" {
+    // The non-error and error cases are peers only if the error case is just a switch expression;
+    // the pattern `if (x) {...} else |err| blk: { switch (err) {...} }` does not consider the
+    // non-error and error cases to be peers.
+ var a: error{ A, B, C }!u32 = 0;
+ _ = &a;
+ const b = if (a) |x|
+ x + 3
+ else |err| switch (err) {
+ error.A => 0,
+ error.B => 1,
+ error.C => null,
+ };
+ try expect(@TypeOf(b) == ?u32);
+
+    // The unwrapped `x` and the error case are peers only if the error case is just a switch
+    // expression; the pattern `x catch |err| blk: { switch (err) {...} }` does not consider
+    // the unwrapped `x` and the error case to be peers.
+ const c = a catch |err| switch (err) {
+ error.A => 0,
+ error.B => 1,
+ error.C => null,
+ };
+ try expect(@TypeOf(c) == ?u32);
+}
{#code_end#}
{#header_close#}
{#header_close#}
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 3941f5a3781c..6b93f9567ce2 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -93,6 +93,7 @@ fn setExtra(astgen: *AstGen, index: usize, extra: anytype) void {
Zir.Inst.Call.Flags,
Zir.Inst.BuiltinCall.Flags,
Zir.Inst.SwitchBlock.Bits,
+ Zir.Inst.SwitchBlockErrUnion.Bits,
Zir.Inst.FuncFancy.Bits,
=> @bitCast(@field(extra, field.name)),
@@ -838,7 +839,18 @@ fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerE
.if_simple,
.@"if",
- => return ifExpr(gz, scope, ri.br(), node, tree.fullIf(node).?),
+ => {
+ const if_full = tree.fullIf(node).?;
+ if (if_full.error_token) |error_token| {
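+                // Detect `if (x) |v| a else |err| switch (err) { ... }` where the switch
+                // operand is the error capture itself: the tokens after `error_token` are
+                // `|`, `switch`, `(`, so `error_token + 4` is the switch operand identifier.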
+ const tag = node_tags[if_full.ast.else_expr];
+ if ((tag == .@"switch" or tag == .switch_comma) and
+ std.mem.eql(u8, tree.tokenSlice(error_token), tree.tokenSlice(error_token + 4)))
+ {
+ return switchExprErrUnion(gz, scope, ri.br(), node, .@"if");
+ }
+ }
+ return ifExpr(gz, scope, ri.br(), node, if_full);
+ },
.while_simple,
.while_cont,
@@ -1014,10 +1026,16 @@ fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerE
},
.@"catch" => {
const catch_token = main_tokens[node];
- const payload_token: ?Ast.TokenIndex = if (token_tags[catch_token + 1] == .pipe)
- catch_token + 2
- else
- null;
+ const payload_token: ?Ast.TokenIndex = if (token_tags[catch_token + 1] == .pipe) blk: {
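+                // Detect `x catch |err| switch (err) { ... }`: the tokens after `catch` are
+                // `|`, err, `|`, `switch`, `(`, err, so `catch_token + 2` is the error capture
+                // and `catch_token + 6` is the switch operand. If they name the same
+                // identifier, lower the whole expression as one switch.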
+ if (token_tags.len > catch_token + 6 and
+ token_tags[catch_token + 4] == .keyword_switch)
+ {
+ if (std.mem.eql(u8, tree.tokenSlice(catch_token + 2), tree.tokenSlice(catch_token + 6))) {
+ return switchExprErrUnion(gz, scope, ri.br(), node, .@"catch");
+ }
+ }
+ break :blk catch_token + 2;
+ } else null;
switch (ri.rl) {
.ref, .ref_coerced_ty => return orelseCatchExpr(
gz,
@@ -2556,7 +2574,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.vector_type,
.indexable_ptr_len,
.anyframe_type,
- .as,
.as_node,
.as_shift_operand,
.bit_and,
@@ -2641,6 +2658,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.import,
.switch_block,
.switch_block_ref,
+ .switch_block_err_union,
.union_init,
.field_type_ref,
.error_set_decl,
@@ -6858,6 +6876,538 @@ fn forExpr(
return result;
}
+fn switchExprErrUnion(
+ parent_gz: *GenZir,
+ scope: *Scope,
+ ri: ResultInfo,
+ catch_or_if_node: Ast.Node.Index,
+ node_ty: enum { @"catch", @"if" },
+) InnerError!Zir.Inst.Ref {
+ const astgen = parent_gz.astgen;
+ const gpa = astgen.gpa;
+ const tree = astgen.tree;
+ const node_datas = tree.nodes.items(.data);
+ const node_tags = tree.nodes.items(.tag);
+ const main_tokens = tree.nodes.items(.main_token);
+ const token_tags = tree.tokens.items(.tag);
+
+ const if_full = switch (node_ty) {
+ .@"catch" => undefined,
+ .@"if" => tree.fullIf(catch_or_if_node).?,
+ };
+
+ const switch_node, const operand_node, const error_payload = switch (node_ty) {
+ .@"catch" => .{
+ node_datas[catch_or_if_node].rhs,
+ node_datas[catch_or_if_node].lhs,
+ main_tokens[catch_or_if_node] + 2,
+ },
+ .@"if" => .{
+ if_full.ast.else_expr,
+ if_full.ast.cond_expr,
+ if_full.error_token.?,
+ },
+ };
+ assert(node_tags[switch_node] == .@"switch" or node_tags[switch_node] == .switch_comma);
+
+ const extra = tree.extraData(node_datas[switch_node].rhs, Ast.Node.SubRange);
+ const case_nodes = tree.extra_data[extra.start..extra.end];
+
+ const need_rl = astgen.nodes_need_rl.contains(catch_or_if_node);
+ const block_ri: ResultInfo = if (need_rl) ri else .{
+ .rl = switch (ri.rl) {
+ .ptr => .{ .ty = (try ri.rl.resultType(parent_gz, catch_or_if_node)).? },
+ .inferred_ptr => .none,
+ else => ri.rl,
+ },
+ .ctx = ri.ctx,
+ };
+
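+    // A `|*x|` payload capture in the `if` form means we operate on a pointer to the
+    // error union; the `catch` form has no payload capture.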
+ const payload_is_ref = node_ty == .@"if" and
+ if_full.payload_token != null and token_tags[if_full.payload_token.?] == .asterisk;
+
+ // We need to call `rvalue` to write through to the pointer only if we had a
+ // result pointer and aren't forwarding it.
+ const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?;
+ const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl);
+ var scalar_cases_len: u32 = 0;
+ var multi_cases_len: u32 = 0;
+ var inline_cases_len: u32 = 0;
+ var has_else = false;
+ var else_node: Ast.Node.Index = 0;
+ var else_src: ?Ast.TokenIndex = null;
+ for (case_nodes) |case_node| {
+ const case = tree.fullSwitchCase(case_node).?;
+
+ if (case.ast.values.len == 0) {
+ const case_src = case.ast.arrow_token - 1;
+ if (else_src) |src| {
+ return astgen.failTokNotes(
+ case_src,
+ "multiple else prongs in switch expression",
+ .{},
+ &[_]u32{
+ try astgen.errNoteTok(
+ src,
+ "previous else prong here",
+ .{},
+ ),
+ },
+ );
+ }
+ has_else = true;
+ else_node = case_node;
+ else_src = case_src;
+ continue;
+ } else if (case.ast.values.len == 1 and
+ node_tags[case.ast.values[0]] == .identifier and
+ mem.eql(u8, tree.tokenSlice(main_tokens[case.ast.values[0]]), "_"))
+ {
+ const case_src = case.ast.arrow_token - 1;
+ return astgen.failTokNotes(
+ case_src,
+ "'_' prong is not allowed when switching on errors",
+ .{},
+ &[_]u32{
+ try astgen.errNoteTok(
+ case_src,
+ "consider using 'else'",
+ .{},
+ ),
+ },
+ );
+ }
+
+ for (case.ast.values) |val| {
+ if (node_tags[val] == .string_literal)
+ return astgen.failNode(val, "cannot switch on strings", .{});
+ }
+
+ if (case.ast.values.len == 1 and node_tags[case.ast.values[0]] != .switch_range) {
+ scalar_cases_len += 1;
+ } else {
+ multi_cases_len += 1;
+ }
+ if (case.inline_token != null) {
+ inline_cases_len += 1;
+ }
+ }
+
+ const operand_ri: ResultInfo = .{
+ .rl = if (payload_is_ref) .ref else .none,
+ .ctx = .error_handling_expr,
+ };
+
+ astgen.advanceSourceCursorToNode(operand_node);
+ const operand_lc = LineColumn{ astgen.source_line - parent_gz.decl_line, astgen.source_column };
+
+ const raw_operand = try reachableExpr(parent_gz, scope, operand_ri, operand_node, switch_node);
+ const item_ri: ResultInfo = .{ .rl = .none };
+
+    // This contains the data that goes into the `extra` array for the SwitchBlockErrUnion, except
+    // that the first `1 + case_nodes.len` slots are a table that indexes payloads later in the
+    // array: the non-error case index comes first, then the else case index (if any), then
+    // scalar_cases_len indexes, then multi_cases_len indexes.
+ const payloads = &astgen.scratch;
+ const scratch_top = astgen.scratch.items.len;
+ const case_table_start = scratch_top;
+ const scalar_case_table = case_table_start + 1 + @intFromBool(has_else);
+ const multi_case_table = scalar_case_table + scalar_cases_len;
+ const case_table_end = multi_case_table + multi_cases_len;
+
+ try astgen.scratch.resize(gpa, case_table_end);
+ defer astgen.scratch.items.len = scratch_top;
+
+ var block_scope = parent_gz.makeSubBlock(scope);
+ // block_scope not used for collecting instructions
+ block_scope.instructions_top = GenZir.unstacked_top;
+ block_scope.setBreakResultInfo(block_ri);
+
+ // Sema expects a dbg_stmt immediately before switch_block_err_union
+ try emitDbgStmt(parent_gz, operand_lc);
+ // This gets added to the parent block later, after the item expressions.
+ const switch_block = try parent_gz.makeBlockInst(.switch_block_err_union, switch_node);
+
+ // We re-use this same scope for all cases, including the special prong, if any.
+ var case_scope = parent_gz.makeSubBlock(&block_scope.base);
+ case_scope.instructions_top = GenZir.unstacked_top;
+
+ {
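+        // Generate the non-error prong first: for `catch` the result is simply the
+        // unwrapped payload; for `if` it is the `then` branch, with the payload
+        // capture (if any) in scope.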
+ const body_len_index: u32 = @intCast(payloads.items.len);
+ payloads.items[case_table_start] = body_len_index;
+ try payloads.resize(gpa, body_len_index + 1); // body_len
+
+ case_scope.instructions_top = parent_gz.instructions.items.len;
+ defer case_scope.unstack();
+
+ try case_scope.addDbgBlockBegin();
+
+ const unwrap_payload_tag: Zir.Inst.Tag = if (payload_is_ref)
+ .err_union_payload_unsafe_ptr
+ else
+ .err_union_payload_unsafe;
+
+ const unwrapped_payload = try case_scope.addUnNode(
+ unwrap_payload_tag,
+ raw_operand,
+ catch_or_if_node,
+ );
+
+ switch (node_ty) {
+ .@"catch" => {
+ const case_result = switch (ri.rl) {
+ .ref, .ref_coerced_ty => unwrapped_payload,
+ else => try rvalue(
+ &case_scope,
+ block_scope.break_result_info,
+ unwrapped_payload,
+ catch_or_if_node,
+ ),
+ };
+ try case_scope.addDbgBlockEnd();
+ _ = try case_scope.addBreakWithSrcNode(
+ .@"break",
+ switch_block,
+ case_result,
+ catch_or_if_node,
+ );
+ },
+ .@"if" => {
+ var payload_val_scope: Scope.LocalVal = undefined;
+
+ try case_scope.addDbgBlockBegin();
+ const then_node = if_full.ast.then_expr;
+ const then_sub_scope = s: {
+ assert(if_full.error_token != null);
+ if (if_full.payload_token) |payload_token| {
+ const token_name_index = payload_token + @intFromBool(payload_is_ref);
+ const ident_name = try astgen.identAsString(token_name_index);
+ const token_name_str = tree.tokenSlice(token_name_index);
+ if (mem.eql(u8, "_", token_name_str))
+ break :s &case_scope.base;
+ try astgen.detectLocalShadowing(
+ &case_scope.base,
+ ident_name,
+ token_name_index,
+ token_name_str,
+ .capture,
+ );
+ payload_val_scope = .{
+ .parent = &case_scope.base,
+ .gen_zir = &case_scope,
+ .name = ident_name,
+ .inst = unwrapped_payload,
+ .token_src = payload_token,
+ .id_cat = .capture,
+ };
+ try case_scope.addDbgVar(.dbg_var_val, ident_name, unwrapped_payload);
+ break :s &payload_val_scope.base;
+ } else {
+ _ = try case_scope.addUnNode(
+ .ensure_err_union_payload_void,
+ raw_operand,
+ catch_or_if_node,
+ );
+ break :s &case_scope.base;
+ }
+ };
+ const then_result = try expr(
+ &case_scope,
+ then_sub_scope,
+ block_scope.break_result_info,
+ then_node,
+ );
+ try checkUsed(parent_gz, &case_scope.base, then_sub_scope);
+ if (!case_scope.endsWithNoReturn()) {
+ try case_scope.addDbgBlockEnd();
+ _ = try case_scope.addBreakWithSrcNode(
+ .@"break",
+ switch_block,
+ then_result,
+ then_node,
+ );
+ }
+ },
+ }
+
+ const case_slice = case_scope.instructionsSlice();
+ // Since we use the switch_block_err_union instruction itself to refer
+ // to the capture, which will not be added to the child block, we need
+ // to handle ref_table manually.
+ const refs_len = refs: {
+ var n: usize = 0;
+ var check_inst = switch_block;
+ while (astgen.ref_table.get(check_inst)) |ref_inst| {
+ n += 1;
+ check_inst = ref_inst;
+ }
+ break :refs n;
+ };
+ const body_len = refs_len + astgen.countBodyLenAfterFixups(case_slice);
+ try payloads.ensureUnusedCapacity(gpa, body_len);
+ const capture: Zir.Inst.SwitchBlock.ProngInfo.Capture = switch (node_ty) {
+ .@"catch" => .none,
+ .@"if" => if (if_full.payload_token == null)
+ .none
+ else if (payload_is_ref)
+ .by_ref
+ else
+ .by_val,
+ };
+ payloads.items[body_len_index] = @bitCast(Zir.Inst.SwitchBlock.ProngInfo{
+ .body_len = @intCast(body_len),
+ .capture = capture,
+ .is_inline = false,
+ .has_tag_capture = false,
+ });
+ if (astgen.ref_table.fetchRemove(switch_block)) |kv| {
+ appendPossiblyRefdBodyInst(astgen, payloads, kv.value);
+ }
+ appendBodyWithFixupsArrayList(astgen, payloads, case_slice);
+ }
+
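+    // The error capture name from `|err|` is shared by every error prong; reject `_`
+    // and shadowing of existing locals up front.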
+ const err_name = blk: {
+ const err_str = tree.tokenSlice(error_payload);
+ if (mem.eql(u8, err_str, "_")) {
+ return astgen.failTok(error_payload, "discard of error capture; omit it instead", .{});
+ }
+ const err_name = try astgen.identAsString(error_payload);
+ try astgen.detectLocalShadowing(scope, err_name, error_payload, err_str, .capture);
+
+ break :blk err_name;
+ };
+
+ // allocate a shared dummy instruction for the error capture
+ const err_inst = err_inst: {
+ const inst: Zir.Inst.Index = @enumFromInt(astgen.instructions.len);
+ try astgen.instructions.append(astgen.gpa, .{
+ .tag = .extended,
+ .data = .{ .extended = .{
+ .opcode = .value_placeholder,
+ .small = undefined,
+ .operand = undefined,
+ } },
+ });
+ break :err_inst inst;
+ };
+
+ // In this pass we generate all the item and prong expressions for error cases.
+ var multi_case_index: u32 = 0;
+ var scalar_case_index: u32 = 0;
+ var any_uses_err_capture = false;
+ for (case_nodes) |case_node| {
+ const case = tree.fullSwitchCase(case_node).?;
+
+ const is_multi_case = case.ast.values.len > 1 or
+ (case.ast.values.len == 1 and node_tags[case.ast.values[0]] == .switch_range);
+
+ var dbg_var_name: Zir.NullTerminatedString = .empty;
+ var dbg_var_inst: Zir.Inst.Ref = undefined;
+ var err_scope: Scope.LocalVal = undefined;
+ var capture_scope: Scope.LocalVal = undefined;
+
+ const sub_scope = blk: {
+ err_scope = .{
+ .parent = &case_scope.base,
+ .gen_zir = &case_scope,
+ .name = err_name,
+ .inst = err_inst.toRef(),
+ .token_src = error_payload,
+ .id_cat = .capture,
+ };
+
+ const capture_token = case.payload_token orelse break :blk &err_scope.base;
+ assert(token_tags[capture_token] == .identifier);
+
+ const capture_slice = tree.tokenSlice(capture_token);
+ if (mem.eql(u8, capture_slice, "_")) {
+ return astgen.failTok(capture_token, "discard of error capture; omit it instead", .{});
+ }
+ const tag_name = try astgen.identAsString(capture_token);
+ try astgen.detectLocalShadowing(&case_scope.base, tag_name, capture_token, capture_slice, .capture);
+
+ capture_scope = .{
+ .parent = &case_scope.base,
+ .gen_zir = &case_scope,
+ .name = tag_name,
+ .inst = switch_block.toRef(),
+ .token_src = capture_token,
+ .id_cat = .capture,
+ };
+ dbg_var_name = tag_name;
+ dbg_var_inst = switch_block.toRef();
+
+ err_scope.parent = &capture_scope.base;
+
+ break :blk &err_scope.base;
+ };
+
+ const header_index: u32 = @intCast(payloads.items.len);
+ const body_len_index = if (is_multi_case) blk: {
+ payloads.items[multi_case_table + multi_case_index] = header_index;
+ multi_case_index += 1;
+ try payloads.resize(gpa, header_index + 3); // items_len, ranges_len, body_len
+
+ // items
+ var items_len: u32 = 0;
+ for (case.ast.values) |item_node| {
+ if (node_tags[item_node] == .switch_range) continue;
+ items_len += 1;
+
+ const item_inst = try comptimeExpr(parent_gz, scope, item_ri, item_node);
+ try payloads.append(gpa, @intFromEnum(item_inst));
+ }
+
+ // ranges
+ var ranges_len: u32 = 0;
+ for (case.ast.values) |range| {
+ if (node_tags[range] != .switch_range) continue;
+ ranges_len += 1;
+
+ const first = try comptimeExpr(parent_gz, scope, item_ri, node_datas[range].lhs);
+ const last = try comptimeExpr(parent_gz, scope, item_ri, node_datas[range].rhs);
+ try payloads.appendSlice(gpa, &[_]u32{
+ @intFromEnum(first), @intFromEnum(last),
+ });
+ }
+
+ payloads.items[header_index] = items_len;
+ payloads.items[header_index + 1] = ranges_len;
+ break :blk header_index + 2;
+ } else if (case_node == else_node) blk: {
+ payloads.items[case_table_start + 1] = header_index;
+ try payloads.resize(gpa, header_index + 1); // body_len
+ break :blk header_index;
+ } else blk: {
+ payloads.items[scalar_case_table + scalar_case_index] = header_index;
+ scalar_case_index += 1;
+ try payloads.resize(gpa, header_index + 2); // item, body_len
+ const item_node = case.ast.values[0];
+ const item_inst = try comptimeExpr(parent_gz, scope, item_ri, item_node);
+ payloads.items[header_index] = @intFromEnum(item_inst);
+ break :blk header_index + 1;
+ };
+
+ {
+ // temporarily stack case_scope on parent_gz
+ case_scope.instructions_top = parent_gz.instructions.items.len;
+ defer case_scope.unstack();
+
+ try case_scope.addDbgBlockBegin();
+ if (dbg_var_name != .empty) {
+ try case_scope.addDbgVar(.dbg_var_val, dbg_var_name, dbg_var_inst);
+ }
+ const target_expr_node = case.ast.target_expr;
+ const case_result = try expr(&case_scope, sub_scope, block_scope.break_result_info, target_expr_node);
+            // Check from `capture_scope` (not `err_scope`) so that a prong which does not
+            // use the shared error capture does not produce a false positive unused-capture error.
+ try checkUsed(parent_gz, &case_scope.base, err_scope.parent);
+ const uses_err = err_scope.used != 0 or err_scope.discarded != 0;
+ if (uses_err) {
+ try case_scope.addDbgVar(.dbg_var_val, err_name, err_inst.toRef());
+ any_uses_err_capture = true;
+ }
+ try case_scope.addDbgBlockEnd();
+ if (!parent_gz.refIsNoReturn(case_result)) {
+ _ = try case_scope.addBreakWithSrcNode(.@"break", switch_block, case_result, target_expr_node);
+ }
+
+ const case_slice = case_scope.instructionsSlice();
+ // Since we use the switch_block_err_union instruction itself to refer
+ // to the capture, which will not be added to the child block, we need
+ // to handle ref_table manually.
+ const refs_len = refs: {
+ var n: usize = 0;
+ var check_inst = switch_block;
+ while (astgen.ref_table.get(check_inst)) |ref_inst| {
+ n += 1;
+ check_inst = ref_inst;
+ }
+ if (uses_err) {
+ check_inst = err_inst;
+ while (astgen.ref_table.get(check_inst)) |ref_inst| {
+ n += 1;
+ check_inst = ref_inst;
+ }
+ }
+ break :refs n;
+ };
+ const body_len = refs_len + astgen.countBodyLenAfterFixups(case_slice);
+ try payloads.ensureUnusedCapacity(gpa, body_len);
+ payloads.items[body_len_index] = @bitCast(Zir.Inst.SwitchBlock.ProngInfo{
+ .body_len = @intCast(body_len),
+ .capture = if (case.payload_token != null) .by_val else .none,
+ .is_inline = case.inline_token != null,
+ .has_tag_capture = false,
+ });
+ if (astgen.ref_table.fetchRemove(switch_block)) |kv| {
+ appendPossiblyRefdBodyInst(astgen, payloads, kv.value);
+ }
+ if (uses_err) {
+ if (astgen.ref_table.fetchRemove(err_inst)) |kv| {
+ appendPossiblyRefdBodyInst(astgen, payloads, kv.value);
+ }
+ }
+ appendBodyWithFixupsArrayList(astgen, payloads, case_slice);
+ }
+ }
+ // Now that the item expressions are generated we can add this.
+ try parent_gz.instructions.append(gpa, switch_block);
+
+ try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.SwitchBlockErrUnion).Struct.fields.len +
+ @intFromBool(multi_cases_len != 0) +
+ payloads.items.len - case_table_end +
+ (case_table_end - case_table_start) * @typeInfo(Zir.Inst.As).Struct.fields.len);
+
+ const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.SwitchBlockErrUnion{
+ .operand = raw_operand,
+ .bits = Zir.Inst.SwitchBlockErrUnion.Bits{
+ .has_multi_cases = multi_cases_len != 0,
+ .has_else = has_else,
+ .scalar_cases_len = @intCast(scalar_cases_len),
+ .any_uses_err_capture = any_uses_err_capture,
+ .payload_is_ref = payload_is_ref,
+ },
+ .main_src_node_offset = parent_gz.nodeIndexToRelative(catch_or_if_node),
+ });
+
+ if (multi_cases_len != 0) {
+ astgen.extra.appendAssumeCapacity(multi_cases_len);
+ }
+
+ if (any_uses_err_capture) {
+ astgen.extra.appendAssumeCapacity(@intFromEnum(err_inst));
+ }
+
+ const zir_datas = astgen.instructions.items(.data);
+ zir_datas[@intFromEnum(switch_block)].pl_node.payload_index = payload_index;
+
+ for (payloads.items[case_table_start..case_table_end], 0..) |start_index, i| {
+ var body_len_index = start_index;
+ var end_index = start_index;
+ const table_index = case_table_start + i;
+ if (table_index < scalar_case_table) {
+ end_index += 1;
+ } else if (table_index < multi_case_table) {
+ body_len_index += 1;
+ end_index += 2;
+ } else {
+ body_len_index += 2;
+ const items_len = payloads.items[start_index];
+ const ranges_len = payloads.items[start_index + 1];
+ end_index += 3 + items_len + 2 * ranges_len;
+ }
+ const prong_info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(payloads.items[body_len_index]);
+ end_index += prong_info.body_len;
+ astgen.extra.appendSliceAssumeCapacity(payloads.items[start_index..end_index]);
+ }
+
+ if (need_result_rvalue) {
+ return rvalue(parent_gz, ri, switch_block.toRef(), switch_node);
+ } else {
+ return switch_block.toRef();
+ }
+}
+
fn switchExpr(
parent_gz: *GenZir,
scope: *Scope,
diff --git a/src/Sema.zig b/src/Sema.zig
index a4f6f4f7bbd2..b8dcd0a1a556 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1018,7 +1018,6 @@ fn analyzeBodyInner(
.array_type => try sema.zirArrayType(block, inst),
.array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst),
.vector_type => try sema.zirVectorType(block, inst),
- .as => try sema.zirAs(block, inst),
.as_node => try sema.zirAsNode(block, inst),
.as_shift_operand => try sema.zirAsShiftOperand(block, inst),
.bit_and => try sema.zirBitwise(block, inst, .bit_and),
@@ -1098,6 +1097,7 @@ fn analyzeBodyInner(
.str => try sema.zirStr(inst),
.switch_block => try sema.zirSwitchBlock(block, inst, false),
.switch_block_ref => try sema.zirSwitchBlock(block, inst, true),
+ .switch_block_err_union => try sema.zirSwitchBlockErrUnion(block, inst),
.type_info => try sema.zirTypeInfo(block, inst),
.size_of => try sema.zirSizeOf(block, inst),
.bit_size_of => try sema.zirBitSizeOf(block, inst),
@@ -8939,10 +8939,14 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = inst_data.src();
const operand = try sema.resolveInst(inst_data.operand);
+ return sema.analyzeErrUnionCodePtr(block, src, operand);
+}
+
+fn analyzeErrUnionCodePtr(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
assert(operand_ty.zigTypeTag(mod) == .Pointer);
@@ -8957,7 +8961,10 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| {
if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| {
assert(val.getErrorName(mod) != .none);
- return Air.internedToRef(val.toIntern());
+ return Air.internedToRef((try mod.intern(.{ .err = .{
+ .ty = result_ty.toIntern(),
+ .name = mod.intern_pool.indexToKey(val.toIntern()).error_union.val.err_name,
+ } })));
}
}
@@ -9794,14 +9801,6 @@ fn zirParamAnytype(
sema.inst_map.putAssumeCapacity(inst, .generic_poison);
}
-fn zirAs(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const tracy = trace(@src());
- defer tracy.end();
-
- const bin_inst = sema.code.instructions.items(.data)[@intFromEnum(inst)].bin;
- return sema.analyzeAs(block, sema.src, bin_inst.lhs, bin_inst.rhs, false);
-}
-
fn zirAsNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -11168,13 +11167,312 @@ fn switchCond(
const SwitchErrorSet = std.AutoHashMap(InternPool.NullTerminatedString, Module.SwitchProngSrc);
+fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const mod = sema.mod;
+ const gpa = sema.gpa;
+ const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
+ const switch_src = inst_data.src();
+ const switch_src_node_offset = inst_data.src_node;
+ const switch_operand_src: LazySrcLoc = .{ .node_offset_switch_operand = switch_src_node_offset };
+ const else_prong_src: LazySrcLoc = .{ .node_offset_switch_special_prong = switch_src_node_offset };
+ const extra = sema.code.extraData(Zir.Inst.SwitchBlockErrUnion, inst_data.payload_index);
+ const main_operand_src: LazySrcLoc = .{ .node_offset_if_cond = extra.data.main_src_node_offset };
+ const main_src: LazySrcLoc = .{ .node_offset_main_token = extra.data.main_src_node_offset };
+
+ const raw_operand_val = try sema.resolveInst(extra.data.operand);
+
+ // AstGen guarantees that the instruction immediately preceding
+ // switch_block_err_union is a dbg_stmt
+ const cond_dbg_node_index: Zir.Inst.Index = @enumFromInt(@intFromEnum(inst) - 1);
+
+ var header_extra_index: usize = extra.end;
+
+ const scalar_cases_len = extra.data.bits.scalar_cases_len;
+ const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: {
+ const multi_cases_len = sema.code.extra[header_extra_index];
+ header_extra_index += 1;
+ break :blk multi_cases_len;
+ } else 0;
+
+ const err_capture_inst: Zir.Inst.Index = if (extra.data.bits.any_uses_err_capture) blk: {
+ const err_capture_inst: Zir.Inst.Index = @enumFromInt(sema.code.extra[header_extra_index]);
+ header_extra_index += 1;
+        // `inst_map` must have space reserved for the error capture instruction.
+        // Note that the normal capture is referred to via the switch block
+        // index, which there is already necessarily space for.
+ try sema.inst_map.ensureSpaceForInstructions(gpa, &.{err_capture_inst});
+ break :blk err_capture_inst;
+ } else undefined;
+
+ var case_vals = try std.ArrayListUnmanaged(Air.Inst.Ref).initCapacity(gpa, scalar_cases_len + 2 * multi_cases_len);
+ defer case_vals.deinit(gpa);
+
+ const NonError = struct {
+ body: []const Zir.Inst.Index,
+ end: usize,
+ capture: Zir.Inst.SwitchBlock.ProngInfo.Capture,
+ };
+
+ const non_error_case: NonError = non_error: {
+ const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[header_extra_index]);
+ const extra_body_start = header_extra_index + 1;
+ break :non_error .{
+ .body = sema.code.bodySlice(extra_body_start, info.body_len),
+ .end = extra_body_start + info.body_len,
+ .capture = info.capture,
+ };
+ };
+
+ const Else = struct {
+ body: []const Zir.Inst.Index,
+ end: usize,
+ is_inline: bool,
+ has_capture: bool,
+ };
+
+ const else_case: Else = if (!extra.data.bits.has_else) .{
+ .body = &.{},
+ .end = non_error_case.end,
+ .is_inline = false,
+ .has_capture = false,
+ } else special: {
+ const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[non_error_case.end]);
+ const extra_body_start = non_error_case.end + 1;
+ assert(info.capture != .by_ref);
+ assert(!info.has_tag_capture);
+ break :special .{
+ .body = sema.code.bodySlice(extra_body_start, info.body_len),
+ .end = extra_body_start + info.body_len,
+ .is_inline = info.is_inline,
+ .has_capture = info.capture != .none,
+ };
+ };
+
+ var seen_errors = SwitchErrorSet.init(gpa);
+ defer seen_errors.deinit();
+
+ const operand_ty = sema.typeOf(raw_operand_val);
+ const operand_err_set_ty = if (extra.data.bits.payload_is_ref)
+ operand_ty.childType(mod).errorUnionSet(mod)
+ else
+ operand_ty.errorUnionSet(mod);
+
+ const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
+ try sema.air_instructions.append(gpa, .{
+ .tag = .block,
+ .data = undefined,
+ });
+ var label: Block.Label = .{
+ .zir_block = inst,
+ .merges = .{
+ .src_locs = .{},
+ .results = .{},
+ .br_list = .{},
+ .block_inst = block_inst,
+ },
+ };
+
+ var child_block: Block = .{
+ .parent = block,
+ .sema = sema,
+ .src_decl = block.src_decl,
+ .namespace = block.namespace,
+ .wip_capture_scope = block.wip_capture_scope,
+ .instructions = .{},
+ .label = &label,
+ .inlining = block.inlining,
+ .is_comptime = block.is_comptime,
+ .comptime_reason = block.comptime_reason,
+ .is_typeof = block.is_typeof,
+ .c_import_buf = block.c_import_buf,
+ .runtime_cond = block.runtime_cond,
+ .runtime_loop = block.runtime_loop,
+ .runtime_index = block.runtime_index,
+ .error_return_trace_index = block.error_return_trace_index,
+ .want_safety = block.want_safety,
+ };
+ const merges = &child_block.label.?.merges;
+ defer child_block.instructions.deinit(gpa);
+ defer merges.deinit(gpa);
+
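+    // If the operand's error set resolves to the empty set, only the non-error prong
+    // is reachable, so lower to just its body.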
+ const resolved_err_set = try sema.resolveInferredErrorSetTy(block, main_src, operand_err_set_ty.toIntern());
+ if (Type.fromInterned(resolved_err_set).errorSetIsEmpty(mod)) {
+ return sema.resolveBlockBody(block, main_operand_src, &child_block, non_error_case.body, inst, merges);
+ }
+
+ const else_error_ty: ?Type = try validateErrSetSwitch(
+ sema,
+ block,
+ &seen_errors,
+ &case_vals,
+ operand_err_set_ty,
+ inst_data,
+ scalar_cases_len,
+ multi_cases_len,
+ .{ .body = else_case.body, .end = else_case.end, .src = else_prong_src },
+ extra.data.bits.has_else,
+ );
+
+ var spa: SwitchProngAnalysis = .{
+ .sema = sema,
+ .parent_block = block,
+ .operand = undefined, // must be set to the unwrapped error code before use
+ .operand_ptr = .none,
+ .cond = raw_operand_val,
+ .else_error_ty = else_error_ty,
+ .switch_block_inst = inst,
+ .tag_capture_inst = undefined,
+ };
+
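+    // If the operand is comptime-known, select the prong now: take the non-error body
+    // when it holds a payload, otherwise resolve the error switch at comptime.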
+ if (try sema.resolveDefinedValue(&child_block, main_src, raw_operand_val)) |ov| {
+ const operand_val = if (extra.data.bits.payload_is_ref)
+ (try sema.pointerDeref(&child_block, main_src, ov, operand_ty)).?
+ else
+ ov;
+
+ if (operand_val.errorUnionIsPayload(mod)) {
+ return sema.resolveBlockBody(block, main_operand_src, &child_block, non_error_case.body, inst, merges);
+ } else {
+ const err_val = Value.fromInterned(try mod.intern(.{
+ .err = .{
+ .ty = operand_err_set_ty.toIntern(),
+ .name = operand_val.getErrorName(mod).unwrap().?,
+ },
+ }));
+ spa.operand = if (extra.data.bits.payload_is_ref)
+ try sema.analyzeErrUnionCodePtr(block, switch_operand_src, raw_operand_val)
+ else
+ try sema.analyzeErrUnionCode(block, switch_operand_src, raw_operand_val);
+
+ if (extra.data.bits.any_uses_err_capture) {
+ sema.inst_map.putAssumeCapacity(err_capture_inst, spa.operand);
+ }
+ defer if (extra.data.bits.any_uses_err_capture) assert(sema.inst_map.remove(err_capture_inst));
+
+ return resolveSwitchComptime(
+ sema,
+ spa,
+ &child_block,
+ try sema.switchCond(block, switch_operand_src, spa.operand),
+ err_val,
+ operand_err_set_ty,
+ .{
+ .body = else_case.body,
+ .end = else_case.end,
+ .capture = if (else_case.has_capture) .by_val else .none,
+ .is_inline = else_case.is_inline,
+ .has_tag_capture = false,
+ },
+ case_vals,
+ scalar_cases_len,
+ multi_cases_len,
+ true,
+ false,
+ );
+ }
+ }
+
+ if (scalar_cases_len + multi_cases_len == 0) {
+ if (else_error_ty) |ty| if (ty.errorSetIsEmpty(mod)) {
+ return sema.resolveBlockBody(block, main_operand_src, &child_block, non_error_case.body, inst, merges);
+ };
+ }
+
+ if (child_block.is_comptime) {
+ _ = try sema.resolveConstDefinedValue(&child_block, main_operand_src, raw_operand_val, .{
+ .needed_comptime_reason = "condition in comptime switch must be comptime-known",
+ .block_comptime_reason = child_block.comptime_reason,
+ });
+ unreachable;
+ }
+
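+    // Runtime lowering: emit a `cond_br` on `is_non_err(operand)`, with the non-error
+    // body as the "then" branch and the error switch as the "else" branch.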
+ const cond = if (extra.data.bits.payload_is_ref) blk: {
+ try sema.checkErrorType(block, main_src, sema.typeOf(raw_operand_val).elemType2(mod));
+ const loaded = try sema.analyzeLoad(block, main_src, raw_operand_val, main_src);
+ break :blk try sema.analyzeIsNonErr(block, main_src, loaded);
+ } else blk: {
+ try sema.checkErrorType(block, main_src, sema.typeOf(raw_operand_val));
+ break :blk try sema.analyzeIsNonErr(block, main_src, raw_operand_val);
+ };
+
+ var sub_block = child_block.makeSubBlock();
+ sub_block.runtime_loop = null;
+ sub_block.runtime_cond = main_operand_src;
+ sub_block.runtime_index.increment();
+ defer sub_block.instructions.deinit(gpa);
+
+ try sema.analyzeBodyRuntimeBreak(&sub_block, non_error_case.body);
+ const true_instructions = try sub_block.instructions.toOwnedSlice(gpa);
+ defer gpa.free(true_instructions);
+
+ spa.operand = if (extra.data.bits.payload_is_ref)
+ try sema.analyzeErrUnionCodePtr(&sub_block, switch_operand_src, raw_operand_val)
+ else
+ try sema.analyzeErrUnionCode(&sub_block, switch_operand_src, raw_operand_val);
+
+ if (extra.data.bits.any_uses_err_capture) {
+ sema.inst_map.putAssumeCapacity(err_capture_inst, spa.operand);
+ }
+ defer if (extra.data.bits.any_uses_err_capture) assert(sema.inst_map.remove(err_capture_inst));
+ _ = try sema.analyzeSwitchRuntimeBlock(
+ spa,
+ &sub_block,
+ switch_src,
+ try sema.switchCond(block, switch_operand_src, spa.operand),
+ operand_err_set_ty,
+ switch_operand_src,
+ case_vals,
+ .{
+ .body = else_case.body,
+ .end = else_case.end,
+ .capture = if (else_case.has_capture) .by_val else .none,
+ .is_inline = else_case.is_inline,
+ .has_tag_capture = false,
+ },
+ scalar_cases_len,
+ multi_cases_len,
+ false,
+ undefined,
+ true,
+ switch_src_node_offset,
+ else_prong_src,
+ undefined,
+ seen_errors,
+ undefined,
+ undefined,
+ undefined,
+ cond_dbg_node_index,
+ true,
+ );
+
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
+ true_instructions.len + sub_block.instructions.items.len);
+
+ _ = try child_block.addInst(.{
+ .tag = .cond_br,
+ .data = .{ .pl_op = .{
+ .operand = cond,
+ .payload = sema.addExtraAssumeCapacity(Air.CondBr{
+ .then_body_len = @intCast(true_instructions.len),
+ .else_body_len = @intCast(sub_block.instructions.items.len),
+ }),
+ } },
+ });
+ sema.air_extra.appendSliceAssumeCapacity(@ptrCast(true_instructions));
+ sema.air_extra.appendSliceAssumeCapacity(@ptrCast(sub_block.instructions.items));
+
+ return sema.analyzeBlockBody(block, main_src, &child_block, merges);
+}
+
fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_ref: bool) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const gpa = sema.gpa;
- const ip = &mod.intern_pool;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = inst_data.src();
const src_node_offset = inst_data.src_node;
@@ -11220,16 +11518,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
var case_vals = try std.ArrayListUnmanaged(Air.Inst.Ref).initCapacity(gpa, scalar_cases_len + 2 * multi_cases_len);
defer case_vals.deinit(gpa);
- const Special = struct {
- body: []const Zir.Inst.Index,
- end: usize,
- capture: Zir.Inst.SwitchBlock.ProngInfo.Capture,
- is_inline: bool,
- has_tag_capture: bool,
- };
-
const special_prong = extra.data.bits.specialProng();
- const special: Special = switch (special_prong) {
+ const special: SpecialProng = switch (special_prong) {
.none => .{
.body = &.{},
.end = header_extra_index,
@@ -11409,150 +11699,18 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
);
}
},
- .ErrorSet => {
- var extra_index: usize = special.end;
- {
- var scalar_i: u32 = 0;
- while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
- const item_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
- extra_index += 1;
- const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
- extra_index += 1 + info.body_len;
-
- case_vals.appendAssumeCapacity(try sema.validateSwitchItemError(
- block,
- &seen_errors,
- item_ref,
- operand_ty,
- src_node_offset,
- .{ .scalar = scalar_i },
- ));
- }
- }
- {
- var multi_i: u32 = 0;
- while (multi_i < multi_cases_len) : (multi_i += 1) {
- const items_len = sema.code.extra[extra_index];
- extra_index += 1;
- const ranges_len = sema.code.extra[extra_index];
- extra_index += 1;
- const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
- extra_index += 1;
- const items = sema.code.refSlice(extra_index, items_len);
- extra_index += items_len + info.body_len;
-
- try case_vals.ensureUnusedCapacity(gpa, items.len);
- for (items, 0..) |item_ref, item_i| {
- case_vals.appendAssumeCapacity(try sema.validateSwitchItemError(
- block,
- &seen_errors,
- item_ref,
- operand_ty,
- src_node_offset,
- .{ .multi = .{ .prong = multi_i, .item = @intCast(item_i) } },
- ));
- }
-
- try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset);
- }
- }
-
- switch (try sema.resolveInferredErrorSetTy(block, src, operand_ty.toIntern())) {
- .anyerror_type => {
- if (special_prong != .@"else") {
- return sema.fail(
- block,
- src,
- "else prong required when switching on type 'anyerror'",
- .{},
- );
- }
- else_error_ty = Type.anyerror;
- },
- else => |err_set_ty_index| else_validation: {
- const error_names = ip.indexToKey(err_set_ty_index).error_set_type.names;
- var maybe_msg: ?*Module.ErrorMsg = null;
- errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa);
-
- for (error_names.get(ip)) |error_name| {
- if (!seen_errors.contains(error_name) and special_prong != .@"else") {
- const msg = maybe_msg orelse blk: {
- maybe_msg = try sema.errMsg(
- block,
- src,
- "switch must handle all possibilities",
- .{},
- );
- break :blk maybe_msg.?;
- };
-
- try sema.errNote(
- block,
- src,
- msg,
- "unhandled error value: 'error.{}'",
- .{error_name.fmt(ip)},
- );
- }
- }
-
- if (maybe_msg) |msg| {
- maybe_msg = null;
- try sema.addDeclaredHereNote(msg, operand_ty);
- return sema.failWithOwnedErrorMsg(block, msg);
- }
-
- if (special_prong == .@"else" and
- seen_errors.count() == error_names.len)
- {
- // In order to enable common patterns for generic code allow simple else bodies
- // else => unreachable,
- // else => return,
- // else => |e| return e,
- // even if all the possible errors were already handled.
- const tags = sema.code.instructions.items(.tag);
- for (special.body) |else_inst| switch (tags[@intFromEnum(else_inst)]) {
- .dbg_block_begin,
- .dbg_block_end,
- .dbg_stmt,
- .dbg_var_val,
- .ret_type,
- .as_node,
- .ret_node,
- .@"unreachable",
- .@"defer",
- .defer_err_code,
- .err_union_code,
- .ret_err_value_code,
- .restore_err_ret_index,
- .is_non_err,
- .ret_is_non_err,
- .condbr,
- => {},
- else => break,
- } else break :else_validation;
-
- return sema.fail(
- block,
- special_prong_src,
- "unreachable else prong; all cases already handled",
- .{},
- );
- }
-
- var names: InferredErrorSet.NameMap = .{};
- try names.ensureUnusedCapacity(sema.arena, error_names.len);
- for (error_names.get(ip)) |error_name| {
- if (seen_errors.contains(error_name)) continue;
-
- names.putAssumeCapacityNoClobber(error_name, {});
- }
- // No need to keep the hash map metadata correct; here we
- // extract the (sorted) keys only.
- else_error_ty = try mod.errorSetFromUnsortedNames(names.keys());
- },
- }
- },
+ .ErrorSet => else_error_ty = try validateErrSetSwitch(
+ sema,
+ block,
+ &seen_errors,
+ &case_vals,
+ operand_ty,
+ inst_data,
+ scalar_cases_len,
+ multi_cases_len,
+ .{ .body = special.body, .end = special.end, .src = special_prong_src },
+ special_prong == .@"else",
+ ),
.Int, .ComptimeInt => {
var extra_index: usize = special.end;
{
@@ -11848,114 +12006,19 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
defer merges.deinit(gpa);
if (try sema.resolveDefinedValue(&child_block, src, operand)) |operand_val| {
- const resolved_operand_val = try sema.resolveLazyValue(operand_val);
- var extra_index: usize = special.end;
- {
- var scalar_i: usize = 0;
- while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
- extra_index += 1;
- const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
- extra_index += 1;
- const body = sema.code.bodySlice(extra_index, info.body_len);
- extra_index += info.body_len;
-
- const item = case_vals.items[scalar_i];
- const item_val = sema.resolveConstDefinedValue(&child_block, .unneeded, item, undefined) catch unreachable;
- if (operand_val.eql(item_val, operand_ty, sema.mod)) {
- if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand);
- return spa.resolveProngComptime(
- &child_block,
- .normal,
- body,
- info.capture,
- .{ .scalar_capture = @intCast(scalar_i) },
- &.{item},
- if (info.is_inline) operand else .none,
- info.has_tag_capture,
- merges,
- );
- }
- }
- }
- {
- var multi_i: usize = 0;
- var case_val_idx: usize = scalar_cases_len;
- while (multi_i < multi_cases_len) : (multi_i += 1) {
- const items_len = sema.code.extra[extra_index];
- extra_index += 1;
- const ranges_len = sema.code.extra[extra_index];
- extra_index += 1;
- const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
- extra_index += 1 + items_len;
- const body = sema.code.bodySlice(extra_index + 2 * ranges_len, info.body_len);
-
- const items = case_vals.items[case_val_idx..][0..items_len];
- case_val_idx += items_len;
-
- for (items) |item| {
- // Validation above ensured these will succeed.
- const item_val = sema.resolveConstDefinedValue(&child_block, .unneeded, item, undefined) catch unreachable;
- if (operand_val.eql(item_val, operand_ty, sema.mod)) {
- if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand);
- return spa.resolveProngComptime(
- &child_block,
- .normal,
- body,
- info.capture,
- .{ .multi_capture = @intCast(multi_i) },
- items,
- if (info.is_inline) operand else .none,
- info.has_tag_capture,
- merges,
- );
- }
- }
-
- var range_i: usize = 0;
- while (range_i < ranges_len) : (range_i += 1) {
- const range_items = case_vals.items[case_val_idx..][0..2];
- extra_index += 2;
- case_val_idx += 2;
-
- // Validation above ensured these will succeed.
- const first_val = sema.resolveConstDefinedValue(&child_block, .unneeded, range_items[0], undefined) catch unreachable;
- const last_val = sema.resolveConstDefinedValue(&child_block, .unneeded, range_items[1], undefined) catch unreachable;
- if ((try sema.compareAll(resolved_operand_val, .gte, first_val, operand_ty)) and
- (try sema.compareAll(resolved_operand_val, .lte, last_val, operand_ty)))
- {
- if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand);
- return spa.resolveProngComptime(
- &child_block,
- .normal,
- body,
- info.capture,
- .{ .multi_capture = @intCast(multi_i) },
- undefined, // case_vals may be undefined for ranges
- if (info.is_inline) operand else .none,
- info.has_tag_capture,
- merges,
- );
- }
- }
-
- extra_index += info.body_len;
- }
- }
- if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, special.body, operand);
- if (empty_enum) {
- return .void_value;
- }
-
- return spa.resolveProngComptime(
+ return resolveSwitchComptime(
+ sema,
+ spa,
&child_block,
- .special,
- special.body,
- special.capture,
- .special_capture,
- undefined, // case_vals may be undefined for special prongs
- if (special.is_inline) operand else .none,
- special.has_tag_capture,
- merges,
+ operand,
+ operand_val,
+ operand_ty,
+ special,
+ case_vals,
+ scalar_cases_len,
+ multi_cases_len,
+ err_set,
+ empty_enum,
);
}
@@ -11966,7 +12029,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
if (special_prong == .none) {
return sema.fail(block, src, "switch must handle all possibilities", .{});
}
- if (err_set and try sema.maybeErrorUnwrap(block, special.body, operand, operand_src)) {
+ if (err_set and try sema.maybeErrorUnwrap(block, special.body, operand, operand_src, false)) {
return .unreachable_value;
}
if (mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and
@@ -11998,6 +12061,73 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
unreachable;
}
+ _ = try sema.analyzeSwitchRuntimeBlock(
+ spa,
+ &child_block,
+ src,
+ operand,
+ operand_ty,
+ operand_src,
+ case_vals,
+ special,
+ scalar_cases_len,
+ multi_cases_len,
+ union_originally,
+ maybe_union_ty,
+ err_set,
+ src_node_offset,
+ special_prong_src,
+ seen_enum_fields,
+ seen_errors,
+ range_set,
+ true_count,
+ false_count,
+ cond_dbg_node_index,
+ false,
+ );
+
+ return sema.analyzeBlockBody(block, src, &child_block, merges);
+}
+
+const SpecialProng = struct {
+ body: []const Zir.Inst.Index,
+ end: usize,
+ capture: Zir.Inst.SwitchBlock.ProngInfo.Capture,
+ is_inline: bool,
+ has_tag_capture: bool,
+};
+
+fn analyzeSwitchRuntimeBlock(
+ sema: *Sema,
+ spa: SwitchProngAnalysis,
+ child_block: *Block,
+ src: LazySrcLoc,
+ operand: Air.Inst.Ref,
+ operand_ty: Type,
+ operand_src: LazySrcLoc,
+ case_vals: std.ArrayListUnmanaged(Air.Inst.Ref),
+ special: SpecialProng,
+ scalar_cases_len: usize,
+ multi_cases_len: usize,
+ union_originally: bool,
+ maybe_union_ty: Type,
+ err_set: bool,
+ src_node_offset: i32,
+ special_prong_src: LazySrcLoc,
+ seen_enum_fields: []?Module.SwitchProngSrc,
+ seen_errors: SwitchErrorSet,
+ range_set: RangeSet,
+ true_count: u8,
+ false_count: u8,
+ cond_dbg_node_index: Zir.Inst.Index,
+ allow_err_code_unwrap: bool,
+) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
+ const gpa = sema.gpa;
+ const ip = &mod.intern_pool;
+
+ const block = child_block.parent.?;
+
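+    // The remainder of this function is the runtime switch lowering that previously
+    // lived inline in `zirSwitchBlock`; it is now shared with `zirSwitchBlockErrUnion`.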
const estimated_cases_extra = (scalar_cases_len + multi_cases_len) *
@typeInfo(Air.SwitchBr.Case).Struct.fields.len + 2;
var cases_extra = try std.ArrayListUnmanaged(u32).initCapacity(gpa, estimated_cases_extra);
@@ -12032,7 +12162,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
break :blk field_ty.zigTypeTag(mod) != .NoReturn;
} else true;
- if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src)) {
+ if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src, allow_err_code_unwrap)) {
// nothing to do here
} else if (analyze_body) {
try spa.analyzeProngRuntime(
@@ -12216,7 +12346,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
const body = sema.code.bodySlice(extra_index, info.body_len);
extra_index += info.body_len;
- if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src)) {
+ if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src, allow_err_code_unwrap)) {
// nothing to do here
} else if (analyze_body) {
try spa.analyzeProngRuntime(
@@ -12300,7 +12430,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
const body = sema.code.bodySlice(extra_index, info.body_len);
extra_index += info.body_len;
- if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src)) {
+ if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src, allow_err_code_unwrap)) {
// nothing to do here
} else {
try spa.analyzeProngRuntime(
@@ -12543,7 +12673,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
else
true;
if (special.body.len != 0 and err_set and
- try sema.maybeErrorUnwrap(&case_block, special.body, operand, operand_src))
+ try sema.maybeErrorUnwrap(&case_block, special.body, operand, operand_src, allow_err_code_unwrap))
{
// nothing to do here
} else if (special.body.len != 0 and analyze_body and !special.is_inline) {
@@ -12588,17 +12718,147 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr).Struct.fields.len +
cases_extra.items.len + final_else_body.len);
- _ = try child_block.addInst(.{ .tag = .switch_br, .data = .{ .pl_op = .{
- .operand = operand,
- .payload = sema.addExtraAssumeCapacity(Air.SwitchBr{
- .cases_len = @intCast(cases_len),
- .else_body_len = @intCast(final_else_body.len),
- }),
- } } });
+ const payload_index = sema.addExtraAssumeCapacity(Air.SwitchBr{
+ .cases_len = @intCast(cases_len),
+ .else_body_len = @intCast(final_else_body.len),
+ });
+
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(cases_extra.items));
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(final_else_body));
- return sema.analyzeBlockBody(block, src, &child_block, merges);
+ return try child_block.addInst(.{
+ .tag = .switch_br,
+ .data = .{ .pl_op = .{
+ .operand = operand,
+ .payload = payload_index,
+ } },
+ });
+}
+
+fn resolveSwitchComptime(
+ sema: *Sema,
+ spa: SwitchProngAnalysis,
+ child_block: *Block,
+ cond_operand: Air.Inst.Ref,
+ operand_val: Value,
+ operand_ty: Type,
+ special: SpecialProng,
+ case_vals: std.ArrayListUnmanaged(Air.Inst.Ref),
+ scalar_cases_len: u32,
+ multi_cases_len: u32,
+ err_set: bool,
+ empty_enum: bool,
+) CompileError!Air.Inst.Ref {
+ const merges = &child_block.label.?.merges;
+ const resolved_operand_val = try sema.resolveLazyValue(operand_val);
+ var extra_index: usize = special.end;
+ {
+ var scalar_i: usize = 0;
+ while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
+ extra_index += 1;
+ const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
+ extra_index += 1;
+ const body = sema.code.bodySlice(extra_index, info.body_len);
+ extra_index += info.body_len;
+
+ const item = case_vals.items[scalar_i];
+ const item_val = sema.resolveConstDefinedValue(child_block, .unneeded, item, undefined) catch unreachable;
+ if (operand_val.eql(item_val, operand_ty, sema.mod)) {
+ if (err_set) try sema.maybeErrorUnwrapComptime(child_block, body, cond_operand);
+ return spa.resolveProngComptime(
+ child_block,
+ .normal,
+ body,
+ info.capture,
+ .{ .scalar_capture = @intCast(scalar_i) },
+ &.{item},
+ if (info.is_inline) cond_operand else .none,
+ info.has_tag_capture,
+ merges,
+ );
+ }
+ }
+ }
+ {
+ var multi_i: usize = 0;
+ var case_val_idx: usize = scalar_cases_len;
+ while (multi_i < multi_cases_len) : (multi_i += 1) {
+ const items_len = sema.code.extra[extra_index];
+ extra_index += 1;
+ const ranges_len = sema.code.extra[extra_index];
+ extra_index += 1;
+ const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
+ extra_index += 1 + items_len;
+ const body = sema.code.bodySlice(extra_index + 2 * ranges_len, info.body_len);
+
+ const items = case_vals.items[case_val_idx..][0..items_len];
+ case_val_idx += items_len;
+
+ for (items) |item| {
+ // Validation above ensured these will succeed.
+ const item_val = sema.resolveConstDefinedValue(child_block, .unneeded, item, undefined) catch unreachable;
+ if (operand_val.eql(item_val, operand_ty, sema.mod)) {
+ if (err_set) try sema.maybeErrorUnwrapComptime(child_block, body, cond_operand);
+ return spa.resolveProngComptime(
+ child_block,
+ .normal,
+ body,
+ info.capture,
+ .{ .multi_capture = @intCast(multi_i) },
+ items,
+ if (info.is_inline) cond_operand else .none,
+ info.has_tag_capture,
+ merges,
+ );
+ }
+ }
+
+ var range_i: usize = 0;
+ while (range_i < ranges_len) : (range_i += 1) {
+ const range_items = case_vals.items[case_val_idx..][0..2];
+ extra_index += 2;
+ case_val_idx += 2;
+
+ // Validation above ensured these will succeed.
+ const first_val = sema.resolveConstDefinedValue(child_block, .unneeded, range_items[0], undefined) catch unreachable;
+ const last_val = sema.resolveConstDefinedValue(child_block, .unneeded, range_items[1], undefined) catch unreachable;
+ if ((try sema.compareAll(resolved_operand_val, .gte, first_val, operand_ty)) and
+ (try sema.compareAll(resolved_operand_val, .lte, last_val, operand_ty)))
+ {
+ if (err_set) try sema.maybeErrorUnwrapComptime(child_block, body, cond_operand);
+ return spa.resolveProngComptime(
+ child_block,
+ .normal,
+ body,
+ info.capture,
+ .{ .multi_capture = @intCast(multi_i) },
+ undefined, // case_vals may be undefined for ranges
+ if (info.is_inline) cond_operand else .none,
+ info.has_tag_capture,
+ merges,
+ );
+ }
+ }
+
+ extra_index += info.body_len;
+ }
+ }
+ if (err_set) try sema.maybeErrorUnwrapComptime(child_block, special.body, cond_operand);
+ if (empty_enum) {
+ return .void_value;
+ }
+
+ return spa.resolveProngComptime(
+ child_block,
+ .special,
+ special.body,
+ special.capture,
+ .special_capture,
+ undefined, // case_vals may be undefined for special prongs
+ if (special.is_inline) cond_operand else .none,
+ special.has_tag_capture,
+ merges,
+ );
}
const RangeSetUnhandledIterator = struct {
@@ -12718,6 +12978,168 @@ fn resolveSwitchItemVal(
return .{ .ref = new_item, .val = val.toIntern() };
}
+fn validateErrSetSwitch(
+ sema: *Sema,
+ block: *Block,
+ seen_errors: *SwitchErrorSet,
+ case_vals: *std.ArrayListUnmanaged(Air.Inst.Ref),
+ operand_ty: Type,
+ inst_data: std.meta.FieldType(Zir.Inst.Data, .pl_node),
+ scalar_cases_len: u32,
+ multi_cases_len: u32,
+ else_case: struct { body: []const Zir.Inst.Index, end: usize, src: LazySrcLoc },
+ has_else: bool,
+) CompileError!?Type {
+ const gpa = sema.gpa;
+ const mod = sema.mod;
+ const ip = &mod.intern_pool;
+
+ const src_node_offset = inst_data.src_node;
+ const src = inst_data.src();
+
+ var extra_index: usize = else_case.end;
+ {
+ var scalar_i: u32 = 0;
+ while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
+ const item_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
+ extra_index += 1;
+ const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
+ extra_index += 1 + info.body_len;
+
+ case_vals.appendAssumeCapacity(try sema.validateSwitchItemError(
+ block,
+ seen_errors,
+ item_ref,
+ operand_ty,
+ src_node_offset,
+ .{ .scalar = scalar_i },
+ ));
+ }
+ }
+ {
+ var multi_i: u32 = 0;
+ while (multi_i < multi_cases_len) : (multi_i += 1) {
+ const items_len = sema.code.extra[extra_index];
+ extra_index += 1;
+ const ranges_len = sema.code.extra[extra_index];
+ extra_index += 1;
+ const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
+ extra_index += 1;
+ const items = sema.code.refSlice(extra_index, items_len);
+ extra_index += items_len + info.body_len;
+
+ try case_vals.ensureUnusedCapacity(gpa, items.len);
+ for (items, 0..) |item_ref, item_i| {
+ case_vals.appendAssumeCapacity(try sema.validateSwitchItemError(
+ block,
+ seen_errors,
+ item_ref,
+ operand_ty,
+ src_node_offset,
+ .{ .multi = .{ .prong = multi_i, .item = @intCast(item_i) } },
+ ));
+ }
+
+ try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset);
+ }
+ }
+
+ switch (try sema.resolveInferredErrorSetTy(block, src, operand_ty.toIntern())) {
+ .anyerror_type => {
+ if (!has_else) {
+ return sema.fail(
+ block,
+ src,
+ "else prong required when switching on type 'anyerror'",
+ .{},
+ );
+ }
+ return Type.anyerror;
+ },
+ else => |err_set_ty_index| else_validation: {
+ const error_names = ip.indexToKey(err_set_ty_index).error_set_type.names;
+ var maybe_msg: ?*Module.ErrorMsg = null;
+ errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa);
+
+ for (error_names.get(ip)) |error_name| {
+ if (!seen_errors.contains(error_name) and !has_else) {
+ const msg = maybe_msg orelse blk: {
+ maybe_msg = try sema.errMsg(
+ block,
+ src,
+ "switch must handle all possibilities",
+ .{},
+ );
+ break :blk maybe_msg.?;
+ };
+
+ try sema.errNote(
+ block,
+ src,
+ msg,
+ "unhandled error value: 'error.{}'",
+ .{error_name.fmt(ip)},
+ );
+ }
+ }
+
+ if (maybe_msg) |msg| {
+ maybe_msg = null;
+ try sema.addDeclaredHereNote(msg, operand_ty);
+ return sema.failWithOwnedErrorMsg(block, msg);
+ }
+
+ if (has_else and seen_errors.count() == error_names.len) {
+ // In order to enable common patterns for generic code allow simple else bodies
+ // else => unreachable,
+ // else => return,
+ // else => |e| return e,
+ // even if all the possible errors were already handled.
+ const tags = sema.code.instructions.items(.tag);
+ for (else_case.body) |else_inst| switch (tags[@intFromEnum(else_inst)]) {
+ .dbg_block_begin,
+ .dbg_block_end,
+ .dbg_stmt,
+ .dbg_var_val,
+ .ret_type,
+ .as_node,
+ .ret_node,
+ .@"unreachable",
+ .@"defer",
+ .defer_err_code,
+ .err_union_code,
+ .ret_err_value_code,
+ .restore_err_ret_index,
+ .is_non_err,
+ .ret_is_non_err,
+ .condbr,
+ => {},
+ else => break,
+ } else break :else_validation;
+
+ return sema.fail(
+ block,
+ else_case.src,
+ "unreachable else prong; all cases already handled",
+ .{},
+ );
+ }
+
+ var names: InferredErrorSet.NameMap = .{};
+ try names.ensureUnusedCapacity(sema.arena, error_names.len);
+ for (error_names.get(ip)) |error_name| {
+ if (seen_errors.contains(error_name)) continue;
+
+ names.putAssumeCapacityNoClobber(error_name, {});
+ }
+ // No need to keep the hash map metadata correct; here we
+ // extract the (sorted) keys only.
+ return try mod.errorSetFromUnsortedNames(names.keys());
+ },
+ }
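+    // Only reached via `break :else_validation`: an else prong exists, every error is
+    // already handled, and the else body is one of the allowed trivial forms.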
+ return null;
+}
+
fn validateSwitchRange(
sema: *Sema,
block: *Block,
@@ -12905,7 +13327,14 @@ fn validateSwitchNoRange(
return sema.failWithOwnedErrorMsg(block, msg);
}
-fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, operand: Air.Inst.Ref, operand_src: LazySrcLoc) !bool {
+fn maybeErrorUnwrap(
+ sema: *Sema,
+ block: *Block,
+ body: []const Zir.Inst.Index,
+ operand: Air.Inst.Ref,
+ operand_src: LazySrcLoc,
+ allow_err_code_inst: bool,
+) !bool {
const mod = sema.mod;
if (!mod.backendSupportsFeature(.panic_unwrap_error)) return false;
@@ -12913,6 +13342,7 @@ fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, op
for (body) |inst| {
switch (tags[@intFromEnum(inst)]) {
.@"unreachable" => if (!block.wantSafety()) return false,
+ .err_union_code => if (!allow_err_code_inst) return false,
.save_err_ret_index,
.dbg_block_begin,
.dbg_block_end,
@@ -12930,6 +13360,7 @@ fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, op
const air_inst = switch (tags[@intFromEnum(inst)]) {
.dbg_block_begin,
.dbg_block_end,
+ .err_union_code,
=> continue,
.dbg_stmt => {
try sema.zirDbgStmt(block, inst);
@@ -18393,7 +18824,7 @@ fn zirCondbr(
break :blk try sub_block.addTyOp(.unwrap_errunion_err, result_ty, err_operand);
};
- if (err_cond != null and try sema.maybeErrorUnwrap(&sub_block, else_body, err_cond.?, cond_src)) {
+ if (err_cond != null and try sema.maybeErrorUnwrap(&sub_block, else_body, err_cond.?, cond_src, false)) {
// nothing to do
} else {
try sema.analyzeBodyRuntimeBreak(&sub_block, else_body);
diff --git a/src/Zir.zig b/src/Zir.zig
index 1ecd8ff484f7..3737467e47be 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -100,6 +100,7 @@ pub fn extraData(code: Zir, comptime T: type, index: usize) ExtraData(T) {
Inst.Call.Flags,
Inst.BuiltinCall.Flags,
Inst.SwitchBlock.Bits,
+ Inst.SwitchBlockErrUnion.Bits,
Inst.FuncFancy.Bits,
=> @bitCast(code.extra[i]),
@@ -277,9 +278,6 @@ pub const Inst = struct {
/// Create a `anyframe->T` type.
/// Uses the `un_node` field.
anyframe_type,
- /// Type coercion. No source location attached.
- /// Uses the `bin` field.
- as,
/// Type coercion to the function's return type.
/// Uses the `pl_node` field. Payload is `As`. AST node could be many things.
as_node,
@@ -688,6 +686,9 @@ pub const Inst = struct {
/// A switch expression. Uses the `pl_node` union field.
/// AST node is the switch, payload is `SwitchBlock`. Operand is a pointer.
switch_block_ref,
+ /// A switch on the error of an error union: `a catch |err| switch (err) {...}` or `if (a) |x| ... else |err| switch (err) {...}`.
+ /// Uses the `pl_node` union field. AST node is the `catch` or the `if`, payload is `SwitchBlockErrUnion`.
+ switch_block_err_union,
/// Check that operand type supports the dereference operand (.*).
/// Uses the `un_node` field.
validate_deref,
@@ -1083,7 +1084,6 @@ pub const Inst = struct {
.vector_elem_type,
.indexable_ptr_len,
.anyframe_type,
- .as,
.as_node,
.as_shift_operand,
.bit_and,
@@ -1190,6 +1190,7 @@ pub const Inst = struct {
.set_eval_branch_quota,
.switch_block,
.switch_block_ref,
+ .switch_block_err_union,
.validate_deref,
.validate_destructure,
.union_init,
@@ -1396,7 +1397,6 @@ pub const Inst = struct {
.vector_elem_type,
.indexable_ptr_len,
.anyframe_type,
- .as,
.as_node,
.as_shift_operand,
.bit_and,
@@ -1488,6 +1488,7 @@ pub const Inst = struct {
.typeof_log2_int_type,
.switch_block,
.switch_block_ref,
+ .switch_block_err_union,
.union_init,
.field_type_ref,
.enum_from_int,
@@ -1629,7 +1630,6 @@ pub const Inst = struct {
.vector_elem_type = .un_node,
.indexable_ptr_len = .un_node,
.anyframe_type = .un_node,
- .as = .bin,
.as_node = .pl_node,
.as_shift_operand = .pl_node,
.bit_and = .pl_node,
@@ -1741,6 +1741,7 @@ pub const Inst = struct {
.enum_literal = .str_tok,
.switch_block = .pl_node,
.switch_block_ref = .pl_node,
+ .switch_block_err_union = .pl_node,
.validate_deref = .un_node,
.validate_destructure = .pl_node,
.field_type_ref = .pl_node,
@@ -2782,6 +2783,29 @@ pub const Inst = struct {
index: u32,
};
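+ /// Payload of the `switch_block_err_union` instruction.
+ /// Trailing data, in the order consumed by `print_zir.writeSwitchBlockErrUnion`:
+ /// 0. multi_cases_len: u32 // If has_multi_cases is set.
+ /// 1. err_capture_inst: u32 // If any_uses_err_capture is set.
+ /// 2. non_err_body: { ProngInfo, body }
+ /// 3. else_body: { ProngInfo, body } // If has_else is set.
+ /// 4. scalar_cases: { item: Ref, ProngInfo, body } * scalar_cases_len
+ /// 5. multi_cases: { items_len: u32, ranges_len: u32, ProngInfo, item: Ref * items_len, ranges: Ref * 2 * ranges_len, body } * multi_cases_len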
+ pub const SwitchBlockErrUnion = struct {
+ operand: Ref,
+ bits: Bits,
+ main_src_node_offset: i32,
+
+ pub const Bits = packed struct(u32) {
+ /// If true, one or more prongs have multiple items.
+ has_multi_cases: bool,
+ /// If true, there is an else prong.
+ has_else: bool,
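+ /// If true, at least one prong makes use of the error capture (`|err|`).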
+ any_uses_err_capture: bool,
+ payload_is_ref: bool,
+ scalar_cases_len: ScalarCasesLen,
+
+ pub const ScalarCasesLen = u28;
+ };
+
+ pub const MultiProng = struct {
+ items: []const Ref,
+ body: []const Index,
+ };
+ };
+
/// 0. multi_cases_len: u32 // If has_multi_cases is set.
/// 1. tag_capture_inst: u32 // If any_has_tag_capture is set. Index of instruction prongs use to refer to the inline tag capture.
/// 2. else_body { // If has_else or has_under is set.
@@ -2830,7 +2854,7 @@ pub const Inst = struct {
};
};
- pub const Bits = packed struct {
+ pub const Bits = packed struct(u32) {
/// If true, one or more prongs have multiple items.
has_multi_cases: bool,
/// If true, there is an else prong. This is mutually exclusive with `has_under`.
diff --git a/src/print_zir.zig b/src/print_zir.zig
index efa0775291ae..a6e3ca91a89d 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -199,7 +199,6 @@ const Writer = struct {
const tag = tags[@intFromEnum(inst)];
try stream.print("= {s}(", .{@tagName(tags[@intFromEnum(inst)])});
switch (tag) {
- .as,
.store,
.store_to_inferred_ptr,
=> try self.writeBin(stream, inst),
@@ -465,6 +464,8 @@ const Writer = struct {
.switch_block_ref,
=> try self.writeSwitchBlock(stream, inst),
+ .switch_block_err_union => try self.writeSwitchBlockErrUnion(stream, inst),
+
.field_val,
.field_ptr,
=> try self.writePlNodeField(stream, inst),
@@ -2027,6 +2028,143 @@ const Writer = struct {
try self.writeSrc(stream, inst_data.src());
}
+ fn writeSwitchBlockErrUnion(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
+ const extra = self.code.extraData(Zir.Inst.SwitchBlockErrUnion, inst_data.payload_index);
+
+ var extra_index: usize = extra.end;
+
+ const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: {
+ const multi_cases_len = self.code.extra[extra_index];
+ extra_index += 1;
+ break :blk multi_cases_len;
+ } else 0;
+
+ const err_capture_inst: Zir.Inst.Index = if (extra.data.bits.any_uses_err_capture) blk: {
+ const err_capture_inst = self.code.extra[extra_index];
+ extra_index += 1;
+ break :blk @enumFromInt(err_capture_inst);
+ } else undefined;
+
+ try self.writeInstRef(stream, extra.data.operand);
+
+ if (extra.data.bits.any_uses_err_capture) {
+ try stream.writeAll(", err_capture=");
+ try self.writeInstIndex(stream, err_capture_inst);
+ }
+
+ self.indent += 2;
+
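+ // Non-error prong: the body that receives the unwrapped payload.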
+ {
+ const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(self.code.extra[extra_index]));
+ extra_index += 1;
+
+ assert(!info.is_inline);
+ const body = self.code.bodySlice(extra_index, info.body_len);
+ extra_index += body.len;
+
+ try stream.writeAll(",\n");
+ try stream.writeByteNTimes(' ', self.indent);
+ try stream.writeAll("non_err => ");
+ try self.writeBracedBody(stream, body);
+ }
+
+ if (extra.data.bits.has_else) {
+ const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(self.code.extra[extra_index]));
+ extra_index += 1;
+ const capture_text = switch (info.capture) {
+ .none => "",
+ .by_val => "by_val ",
+ .by_ref => "by_ref ",
+ };
+ const inline_text = if (info.is_inline) "inline " else "";
+ const body = self.code.bodySlice(extra_index, info.body_len);
+ extra_index += body.len;
+
+ try stream.writeAll(",\n");
+ try stream.writeByteNTimes(' ', self.indent);
+ try stream.print("{s}{s}else => ", .{ capture_text, inline_text });
+ try self.writeBracedBody(stream, body);
+ }
+
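+ // Scalar prongs: a single error value per prong.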
+ {
+ const scalar_cases_len = extra.data.bits.scalar_cases_len;
+ var scalar_i: usize = 0;
+ while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
+ const item_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
+ extra_index += 1;
+ const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(self.code.extra[extra_index]));
+ extra_index += 1;
+ const body = self.code.bodySlice(extra_index, info.body_len);
+ extra_index += info.body_len;
+
+ try stream.writeAll(",\n");
+ try stream.writeByteNTimes(' ', self.indent);
+ switch (info.capture) {
+ .none => {},
+ .by_val => try stream.writeAll("by_val "),
+ .by_ref => try stream.writeAll("by_ref "),
+ }
+ if (info.is_inline) try stream.writeAll("inline ");
+ try self.writeInstRef(stream, item_ref);
+ try stream.writeAll(" => ");
+ try self.writeBracedBody(stream, body);
+ }
+ }
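+ // Multi prongs: several items (and possibly ranges) per prong.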
+ {
+ var multi_i: usize = 0;
+ while (multi_i < multi_cases_len) : (multi_i += 1) {
+ const items_len = self.code.extra[extra_index];
+ extra_index += 1;
+ const ranges_len = self.code.extra[extra_index];
+ extra_index += 1;
+ const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(self.code.extra[extra_index]));
+ extra_index += 1;
+ const items = self.code.refSlice(extra_index, items_len);
+ extra_index += items_len;
+
+ try stream.writeAll(",\n");
+ try stream.writeByteNTimes(' ', self.indent);
+ switch (info.capture) {
+ .none => {},
+ .by_val => try stream.writeAll("by_val "),
+ .by_ref => try stream.writeAll("by_ref "),
+ }
+ if (info.is_inline) try stream.writeAll("inline ");
+
+ for (items, 0..) |item_ref, item_i| {
+ if (item_i != 0) try stream.writeAll(", ");
+ try self.writeInstRef(stream, item_ref);
+ }
+
+ var range_i: usize = 0;
+ while (range_i < ranges_len) : (range_i += 1) {
+ const item_first = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
+ extra_index += 1;
+ const item_last = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
+ extra_index += 1;
+
+ if (range_i != 0 or items.len != 0) {
+ try stream.writeAll(", ");
+ }
+ try self.writeInstRef(stream, item_first);
+ try stream.writeAll("...");
+ try self.writeInstRef(stream, item_last);
+ }
+
+ const body = self.code.bodySlice(extra_index, info.body_len);
+ extra_index += info.body_len;
+ try stream.writeAll(" => ");
+ try self.writeBracedBody(stream, body);
+ }
+ }
+
+ self.indent -= 2;
+
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
fn writeSwitchBlock(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = self.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index);
diff --git a/test/behavior/switch_on_captured_error.zig b/test/behavior/switch_on_captured_error.zig
new file mode 100644
index 000000000000..b6b422ba93cd
--- /dev/null
+++ b/test/behavior/switch_on_captured_error.zig
@@ -0,0 +1,750 @@
+const std = @import("std");
+const assert = std.debug.assert;
+const expect = std.testing.expect;
+const expectError = std.testing.expectError;
+const expectEqual = std.testing.expectEqual;
+
+test "switch on error union catch capture" {
+ const S = struct {
+ const Error = error{ A, B, C };
+ fn doTheTest() !void {
+ try testScalar();
+ try testMulti();
+ try testElse();
+ try testCapture();
+ try testInline();
+ try testEmptyErrSet();
+ }
+
+ fn testScalar() !void {
+ {
+ var a: Error!u64 = 3;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A => 0,
+ error.B => 1,
+ error.C => 2,
+ };
+ try expectEqual(@as(u64, 3), b);
+ }
+ {
+ var a: Error!u64 = 3;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A => 0,
+ error.B => @intFromError(err) + 4,
+ error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 3), b);
+ }
+ {
+ var a: Error!u64 = error.A;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A => 0,
+ error.B => @intFromError(err) + 4,
+ error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 0), b);
+ }
+ }
+
+ fn testMulti() !void {
+ {
+ var a: Error!u64 = 3;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A, error.B => 0,
+ error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 3), b);
+ }
+ {
+ var a: Error!u64 = 3;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A => 0,
+ error.B, error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 3), b);
+ }
+ {
+ var a: Error!u64 = error.A;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A, error.B => 0,
+ error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 0), b);
+ }
+ {
+ var a: Error!u64 = error.A;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A => 0,
+ error.B, error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 0), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A => 0,
+ error.B, error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ }
+
+ fn testElse() !void {
+ {
+ var a: Error!u64 = 3;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A => 0,
+ else => 1,
+ };
+ try expectEqual(@as(u64, 3), b);
+ }
+ {
+ var a: Error!u64 = 3;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A => 0,
+ else => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 3), b);
+ }
+ {
+ var a: Error!u64 = error.A;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A => 1,
+ else => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 1), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A => 0,
+ else => 1,
+ };
+ try expectEqual(@as(u64, 1), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A => 0,
+ else => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ }
+
+ fn testCapture() !void {
+ {
+ var a: Error!u64 = error.A;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A => |e| @intFromError(e) + 4,
+ else => 0,
+ };
+ try expectEqual(@as(u64, @intFromError(error.A) + 4), b);
+ }
+ {
+ var a: Error!u64 = error.A;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A => 0,
+ else => |e| @intFromError(e) + 4,
+ };
+ try expectEqual(@as(u64, 0), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A => 0,
+ else => |e| @intFromError(e) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A => |e| @intFromError(e) + 4,
+ else => |e| @intFromError(e) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A => 0,
+ error.B, error.C => |e| @intFromError(e) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ }
+
+ fn testInline() !void {
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A => 0,
+ inline else => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A => |e| @intFromError(e) + 4,
+ inline else => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ inline else => |e| @intFromError(e) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.A => 0,
+ inline error.B, error.C => |e| @intFromError(e) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ }
+
+ fn testEmptyErrSet() !void {
+ {
+ var a: error{}!u64 = 0;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ else => |e| return e,
+ };
+ try expectEqual(@as(u64, 0), b);
+ }
+ {
+ var a: error{}!u64 = 0;
+ _ = &a;
+ const b: u64 = a catch |err| switch (err) {
+ error.UnknownError => return error.Fail,
+ else => |e| return e,
+ };
+ try expectEqual(@as(u64, 0), b);
+ }
+ }
+ };
+
+ try comptime S.doTheTest();
+ try S.doTheTest();
+}
+
+test "switch on error union if else capture" {
+ const S = struct {
+ const Error = error{ A, B, C };
+ fn doTheTest() !void {
+ try testScalar();
+ try testScalarPtr();
+ try testMulti();
+ try testMultiPtr();
+ try testElse();
+ try testElsePtr();
+ try testCapture();
+ try testCapturePtr();
+ try testInline();
+ try testInlinePtr();
+ try testEmptyErrSet();
+ try testEmptyErrSetPtr();
+ }
+
+ fn testScalar() !void {
+ {
+ var a: Error!u64 = 3;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A => 0,
+ error.B => 1,
+ error.C => 2,
+ };
+ try expectEqual(@as(u64, 3), b);
+ }
+ {
+ var a: Error!u64 = 3;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A => 0,
+ error.B => @intFromError(err) + 4,
+ error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 3), b);
+ }
+ {
+ var a: Error!u64 = error.A;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A => 0,
+ error.B => @intFromError(err) + 4,
+ error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 0), b);
+ }
+ }
+
+ fn testScalarPtr() !void {
+ {
+ var a: Error!u64 = 3;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A => 0,
+ error.B => 1,
+ error.C => 2,
+ };
+ try expectEqual(@as(u64, 3), b);
+ }
+ {
+ var a: Error!u64 = 3;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A => 0,
+ error.B => @intFromError(err) + 4,
+ error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 3), b);
+ }
+ {
+ var a: Error!u64 = error.A;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A => 0,
+ error.B => @intFromError(err) + 4,
+ error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 0), b);
+ }
+ }
+
+ fn testMulti() !void {
+ {
+ var a: Error!u64 = 3;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A, error.B => 0,
+ error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 3), b);
+ }
+ {
+ var a: Error!u64 = 3;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A => 0,
+ error.B, error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 3), b);
+ }
+ {
+ var a: Error!u64 = error.A;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A, error.B => 0,
+ error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 0), b);
+ }
+ {
+ var a: Error!u64 = error.A;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A => 0,
+ error.B, error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 0), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A => 0,
+ error.B, error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ }
+
+ fn testMultiPtr() !void {
+ {
+ var a: Error!u64 = 3;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A, error.B => 0,
+ error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 3), b);
+ }
+ {
+ var a: Error!u64 = 3;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A => 0,
+ error.B, error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 3), b);
+ }
+ {
+ var a: Error!u64 = error.A;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A, error.B => 0,
+ error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 0), b);
+ }
+ {
+ var a: Error!u64 = error.A;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A => 0,
+ error.B, error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 0), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A => 0,
+ error.B, error.C => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ }
+
+ fn testElse() !void {
+ {
+ var a: Error!u64 = 3;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A => 0,
+ else => 1,
+ };
+ try expectEqual(@as(u64, 3), b);
+ }
+ {
+ var a: Error!u64 = 3;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A => 0,
+ else => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 3), b);
+ }
+ {
+ var a: Error!u64 = error.A;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A => 1,
+ else => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 1), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A => 0,
+ else => 1,
+ };
+ try expectEqual(@as(u64, 1), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A => 0,
+ else => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ }
+
+ fn testElsePtr() !void {
+ {
+ var a: Error!u64 = 3;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A => 0,
+ else => 1,
+ };
+ try expectEqual(@as(u64, 3), b);
+ }
+ {
+ var a: Error!u64 = 3;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A => 0,
+ else => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 3), b);
+ }
+ {
+ var a: Error!u64 = error.A;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A => 1,
+ else => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, 1), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A => 0,
+ else => 1,
+ };
+ try expectEqual(@as(u64, 1), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A => 0,
+ else => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ }
+
+ fn testCapture() !void {
+ {
+ var a: Error!u64 = error.A;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A => |e| @intFromError(e) + 4,
+ else => 0,
+ };
+ try expectEqual(@as(u64, @intFromError(error.A) + 4), b);
+ }
+ {
+ var a: Error!u64 = error.A;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A => 0,
+ else => |e| @intFromError(e) + 4,
+ };
+ try expectEqual(@as(u64, 0), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A => 0,
+ else => |e| @intFromError(e) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A => |e| @intFromError(e) + 4,
+ else => |e| @intFromError(e) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A => 0,
+ error.B, error.C => |e| @intFromError(e) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ }
+
+ fn testCapturePtr() !void {
+ {
+ var a: Error!u64 = error.A;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A => |e| @intFromError(e) + 4,
+ else => 0,
+ };
+ try expectEqual(@as(u64, @intFromError(error.A) + 4), b);
+ }
+ {
+ var a: Error!u64 = error.A;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A => 0,
+ else => |e| @intFromError(e) + 4,
+ };
+ try expectEqual(@as(u64, 0), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A => 0,
+ else => |e| @intFromError(e) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A => |e| @intFromError(e) + 4,
+ else => |e| @intFromError(e) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A => 0,
+ error.B, error.C => |e| @intFromError(e) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ }
+
+ fn testInline() !void {
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A => 0,
+ inline else => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A => |e| @intFromError(e) + 4,
+ inline else => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ inline else => |e| @intFromError(e) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.A => 0,
+ inline error.B, error.C => |e| @intFromError(e) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ }
+
+ fn testInlinePtr() !void {
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A => 0,
+ inline else => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A => |e| @intFromError(e) + 4,
+ inline else => @intFromError(err) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ inline else => |e| @intFromError(e) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ {
+ var a: Error!u64 = error.B;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.A => 0,
+ inline error.B, error.C => |e| @intFromError(e) + 4,
+ };
+ try expectEqual(@as(u64, @intFromError(error.B) + 4), b);
+ }
+ }
+
+ fn testEmptyErrSet() !void {
+ {
+ var a: error{}!u64 = 0;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ else => |e| return e,
+ };
+ try expectEqual(@as(u64, 0), b);
+ }
+ {
+ var a: error{}!u64 = 0;
+ _ = &a;
+ const b: u64 = if (a) |x| x else |err| switch (err) {
+ error.UnknownError => return error.Fail,
+ else => |e| return e,
+ };
+ try expectEqual(@as(u64, 0), b);
+ }
+ }
+
+ fn testEmptyErrSetPtr() !void {
+ {
+ var a: error{}!u64 = 0;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ else => |e| return e,
+ };
+ try expectEqual(@as(u64, 0), b);
+ }
+ {
+ var a: error{}!u64 = 0;
+ _ = &a;
+ const b: u64 = if (a) |*x| x.* else |err| switch (err) {
+ error.UnknownError => return error.Fail,
+ else => |e| return e,
+ };
+ try expectEqual(@as(u64, 0), b);
+ }
+ }
+ };
+
+ try comptime S.doTheTest();
+ try S.doTheTest();
+}
diff --git a/test/cases/compile_errors/switch_expression-duplicate_error_prong.zig b/test/cases/compile_errors/switch_expression-duplicate_error_prong.zig
new file mode 100644
index 000000000000..bcebd25a5849
--- /dev/null
+++ b/test/cases/compile_errors/switch_expression-duplicate_error_prong.zig
@@ -0,0 +1,33 @@
+fn f(n: Error!i32) i32 {
+ if (n) |x|
+ _ = x
+ else |e| switch (e) {
+ error.Foo => 1,
+ error.Bar => 2,
+ error.Baz => 3,
+ error.Foo => 2,
+ }
+}
+fn g(n: Error!i32) i32 {
+ n catch |e| switch (e) {
+ error.Foo => 1,
+ error.Bar => 2,
+ error.Baz => 3,
+ error.Foo => 2,
+ };
+}
+
+const Error = error{ Foo, Bar, Baz };
+
+export fn entry() usize {
+ return @sizeOf(@TypeOf(&f)) + @sizeOf(@TypeOf(&g));
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :8:9: error: duplicate switch value
+// :5:9: note: previous value here
+// :16:9: error: duplicate switch value
+// :13:9: note: previous value here
diff --git a/test/cases/compile_errors/switch_expression-duplicate_error_prong_when_else_present.zig b/test/cases/compile_errors/switch_expression-duplicate_error_prong_when_else_present.zig
new file mode 100644
index 000000000000..ca156183e056
--- /dev/null
+++ b/test/cases/compile_errors/switch_expression-duplicate_error_prong_when_else_present.zig
@@ -0,0 +1,35 @@
+fn f(n: Error!i32) i32 {
+ if (n) |x|
+ _ = x
+ else |e| switch (e) {
+ error.Foo => 1,
+ error.Bar => 2,
+ error.Baz => 3,
+ error.Foo => 2,
+ else => 10,
+ }
+}
+fn g(n: Error!i32) i32 {
+ n catch |e| switch (e) {
+ error.Foo => 1,
+ error.Bar => 2,
+ error.Baz => 3,
+ error.Foo => 2,
+ else => 10,
+ };
+}
+
+const Error = error{ Foo, Bar, Baz };
+
+export fn entry() usize {
+ return @sizeOf(@TypeOf(&f)) + @sizeOf(@TypeOf(&g));
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :8:9: error: duplicate switch value
+// :5:9: note: previous value here
+// :17:9: error: duplicate switch value
+// :14:9: note: previous value here
diff --git a/test/cases/compile_errors/switch_expression-missing_error_prong.zig b/test/cases/compile_errors/switch_expression-missing_error_prong.zig
new file mode 100644
index 000000000000..ee28057c4362
--- /dev/null
+++ b/test/cases/compile_errors/switch_expression-missing_error_prong.zig
@@ -0,0 +1,33 @@
+const Error = error{
+ One,
+ Two,
+ Three,
+ Four,
+};
+fn f(n: Error!i32) i32 {
+ if (n) |x| x else |e| switch (e) {
+ error.One => 1,
+ error.Two => 2,
+ error.Three => 3,
+ }
+}
+fn h(n: Error!i32) i32 {
+ n catch |e| switch (e) {
+ error.One => 1,
+ error.Two => 2,
+ error.Three => 3,
+ };
+}
+
+export fn entry() usize {
+ return @sizeOf(@TypeOf(&f)) + @sizeOf(@TypeOf(&h));
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :8:27: error: switch must handle all possibilities
+// :8:27: note: unhandled error value: 'error.Four'
+// :15:17: error: switch must handle all possibilities
+// :15:17: note: unhandled error value: 'error.Four'
diff --git a/test/cases/compile_errors/switch_expression-multiple_else_prongs.zig b/test/cases/compile_errors/switch_expression-multiple_else_prongs.zig
index f4cdb3b125c4..a6bb48db1740 100644
--- a/test/cases/compile_errors/switch_expression-multiple_else_prongs.zig
+++ b/test/cases/compile_errors/switch_expression-multiple_else_prongs.zig
@@ -5,8 +5,24 @@ fn f(x: u32) void {
else => true,
};
}
+fn g(x: error{Foo, Bar, Baz}!u32) void {
+ const value: bool = if (x) |_| true else |e| switch (e) {
+ error.Foo => false,
+ else => true,
+ else => true,
+ };
+}
+fn h(x: error{Foo, Bar, Baz}!u32) void {
+ const value: u32 = x catch |e| switch (e) {
+ error.Foo => 1,
+ else => 2,
+ else => 3,
+ };
+}
export fn entry() void {
f(1234);
+ g(1234);
+ h(1234);
}
// error
@@ -15,3 +31,7 @@ export fn entry() void {
//
// :5:9: error: multiple else prongs in switch expression
// :4:9: note: previous else prong here
+// :12:9: error: multiple else prongs in switch expression
+// :11:9: note: previous else prong here
+// :19:9: error: multiple else prongs in switch expression
+// :18:9: note: previous else prong here
diff --git a/test/cases/compile_errors/switch_expression-unreachable_else_prong_error.zig b/test/cases/compile_errors/switch_expression-unreachable_else_prong_error.zig
new file mode 100644
index 000000000000..520723d9278a
--- /dev/null
+++ b/test/cases/compile_errors/switch_expression-unreachable_else_prong_error.zig
@@ -0,0 +1,32 @@
+fn foo(x: u2) void {
+ const y: Error!u2 = x;
+ if (y) |_| {} else |e| switch (e) {
+ error.Foo => {},
+ error.Bar => {},
+ error.Baz => {},
+ else => {},
+ }
+}
+
+fn bar(x: u2) void {
+ const y: Error!u2 = x;
+ y catch |e| switch (e) {
+ error.Foo => {},
+ error.Bar => {},
+ error.Baz => {},
+ else => {},
+ };
+}
+
+const Error = error{ Foo, Bar, Baz };
+
+export fn entry() usize {
+ return @sizeOf(@TypeOf(&foo)) + @sizeOf(@TypeOf(&bar));
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :7:14: error: unreachable else prong; all cases already handled
+// :17:14: error: unreachable else prong; all cases already handled
diff --git a/test/cases/compile_errors/switch_on_error_union_discard.zig b/test/cases/compile_errors/switch_on_error_union_discard.zig
new file mode 100644
index 000000000000..34a607cfef2e
--- /dev/null
+++ b/test/cases/compile_errors/switch_on_error_union_discard.zig
@@ -0,0 +1,12 @@
+export fn entry() void {
+ const x: error{}!u32 = 0;
+ if (x) |v| v else |_| switch (_) {
+ }
+}
+
+
+// error
+// backend=stage2
+// target=native
+//
+// :3:24: error: discard of error capture; omit it instead
diff --git a/test/cases/compile_errors/switch_on_error_with_1_field_with_no_prongs.zig b/test/cases/compile_errors/switch_on_error_with_1_field_with_no_prongs.zig
new file mode 100644
index 000000000000..504df0374f69
--- /dev/null
+++ b/test/cases/compile_errors/switch_on_error_with_1_field_with_no_prongs.zig
@@ -0,0 +1,20 @@
+const Error = error{M};
+
+export fn entry() void {
+ const f: Error!void = void{};
+ if (f) {} else |e| switch (e) {}
+}
+
+export fn entry2() void {
+ const f: Error!void = void{};
+ f catch |e| switch (e) {};
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :5:24: error: switch must handle all possibilities
+// :5:24: note: unhandled error value: 'error.M'
+// :10:17: error: switch must handle all possibilities
+// :10:17: note: unhandled error value: 'error.M'
diff --git a/test/cases/inherit_want_safety.zig b/test/cases/inherit_want_safety.zig
index 6a70e603a67a..a0c79952b8d1 100644
--- a/test/cases/inherit_want_safety.zig
+++ b/test/cases/inherit_want_safety.zig
@@ -23,6 +23,13 @@ pub export fn entry() usize {
u += 1;
},
}
+ if (@as(error{}!usize, u)) |_| {
+ u += 1;
+ } else |e| switch (e) {
+ else => {
+ u += 1;
+ }
+ }
return u;
}