
Andrew Kelley parent 377ecc6a 38331b1c 97aa5f7b
Merge pull request #19190 from mlugg/struct-equivalence

compiler: namespace type equivalence based on AST node + captures
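
For illustration, a minimal Zig sketch of the rule the title describes; this example is not part of the commit. Under AST-node-plus-captures equivalence, instantiating the same container decl with equal captured values yields the same type, and different captured values yield distinct types:

const std = @import("std");

fn Pair(comptime T: type) type {
    return struct { first: T, second: T }; // this struct captures `T`
}

comptime {
    std.debug.assert(Pair(u32) == Pair(u32)); // same AST node, equal captures: same type
    std.debug.assert(Pair(u32) != Pair(u8)); // same AST node, different captures: distinct types
}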

lib/std/zig/AstGen.zig added: 4090, removed: 2946, total 1144
@@ -44,6 +44,9 @@ compile_errors: ArrayListUnmanaged(Zir.Inst.CompileErrors.Item) = .{},
/// The topmost block of the current function.
fn_block: ?*GenZir = null,
fn_var_args: bool = false,
/// Whether we are somewhere within a function. If `true`, any container decls may be
/// generic and thus must be tunneled through closure.
within_fn: bool = false,
/// The return type of the current function. This may be a trivial `Ref`, or
/// otherwise it refers to a `ret_type` instruction.
fn_ret_ty: Zir.Inst.Ref = .none,
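
As a hedged illustration of the `within_fn` comment above (example source, not from the diff): a container decl in a function body may capture function-level values and so may be generic.

fn List(comptime T: type) type {
    // Declared while `within_fn` is true: this struct captures `T` from the
    // enclosing function, so references to it tunnel through the closure.
    return struct { items: []T, len: usize };
}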
@@ -2205,7 +2208,7 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) Inn
},
.local_val => scope = scope.cast(Scope.LocalVal).?.parent,
.local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent,
.namespace, .enum_namespace => break,
.namespace => break,
.defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent,
.top => unreachable,
}
@@ -2279,7 +2282,7 @@ fn continueExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index)
try parent_gz.addDefer(defer_scope.index, defer_scope.len);
},
.defer_error => scope = scope.cast(Scope.Defer).?.parent,
.namespace, .enum_namespace => break,
.namespace => break,
.top => unreachable,
}
}
@@ -2412,7 +2415,7 @@ fn checkLabelRedefinition(astgen: *AstGen, parent_scope: *Scope, label: Ast.Toke
.local_val => scope = scope.cast(Scope.LocalVal).?.parent,
.local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent,
.defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent,
.namespace, .enum_namespace => break,
.namespace => break,
.top => unreachable,
}
}
@@ -2790,7 +2793,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.@"resume",
.@"await",
.ret_err_value_code,
.closure_get,
.ret_ptr,
.ret_type,
.for_len,
@@ -2860,7 +2862,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.store_to_inferred_ptr,
.resolve_inferred_alloc,
.set_runtime_safety,
.closure_capture,
.memcpy,
.memset,
.validate_deref,
@@ -2928,7 +2929,7 @@ fn countDefers(outer_scope: *Scope, inner_scope: *Scope) struct {
const have_err_payload = defer_scope.remapped_err_code != .none;
need_err_code = need_err_code or have_err_payload;
},
.namespace, .enum_namespace => unreachable,
.namespace => unreachable,
.top => unreachable,
}
}
@@ -2998,7 +2999,7 @@ fn genDefers(
.normal_only => continue,
}
},
.namespace, .enum_namespace => unreachable,
.namespace => unreachable,
.top => unreachable,
}
}
@@ -3042,7 +3043,7 @@ fn checkUsed(gz: *GenZir, outer_scope: *Scope, inner_scope: *Scope) InnerError!v
scope = s.parent;
},
.defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent,
.namespace, .enum_namespace => unreachable,
.namespace => unreachable,
.top => unreachable,
}
}
@@ -4052,6 +4053,11 @@ fn fnDecl(
};
defer fn_gz.unstack();
 
// Set this now, since parameter types, return type, etc. may be generic.
const prev_within_fn = astgen.within_fn;
defer astgen.within_fn = prev_within_fn;
astgen.within_fn = true;
 
const is_pub = fn_proto.visib_token != null;
const is_export = blk: {
const maybe_export_token = fn_proto.extern_export_inline_token orelse break :blk false;
@@ -4313,6 +4319,10 @@ fn fnDecl(
 
const prev_fn_block = astgen.fn_block;
const prev_fn_ret_ty = astgen.fn_ret_ty;
defer {
astgen.fn_block = prev_fn_block;
astgen.fn_ret_ty = prev_fn_ret_ty;
}
astgen.fn_block = &fn_gz;
astgen.fn_ret_ty = if (is_inferred_error or ret_ref.toIndex() != null) r: {
// We're essentially guaranteed to need the return type at some point,
@@ -4321,10 +4331,6 @@ fn fnDecl(
// return type now so the rest of the function can use it.
break :r try fn_gz.addNode(.ret_type, decl_node);
} else ret_ref;
defer {
astgen.fn_block = prev_fn_block;
astgen.fn_ret_ty = prev_fn_ret_ty;
}
 
const prev_var_args = astgen.fn_var_args;
astgen.fn_var_args = is_var_args;
@@ -4732,7 +4738,7 @@ fn testDecl(
},
.gen_zir => s = s.cast(GenZir).?.parent,
.defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
.namespace, .enum_namespace => {
.namespace => {
const ns = s.cast(Scope.Namespace).?;
if (ns.decls.get(name_str_index)) |i| {
if (found_already) |f| {
@@ -4770,11 +4776,14 @@ fn testDecl(
};
defer fn_block.unstack();
 
const prev_within_fn = astgen.within_fn;
const prev_fn_block = astgen.fn_block;
const prev_fn_ret_ty = astgen.fn_ret_ty;
astgen.within_fn = true;
astgen.fn_block = &fn_block;
astgen.fn_ret_ty = .anyerror_void_error_union_type;
defer {
astgen.within_fn = prev_within_fn;
astgen.fn_block = prev_fn_block;
astgen.fn_ret_ty = prev_fn_ret_ty;
}
@@ -4849,10 +4858,10 @@ fn structDeclInner(
try gz.setStruct(decl_inst, .{
.src_node = node,
.layout = layout,
.captures_len = 0,
.fields_len = 0,
.decls_len = 0,
.backing_int_ref = .none,
.backing_int_body_len = 0,
.has_backing_int = false,
.known_non_opv = false,
.known_comptime_only = false,
.is_tuple = false,
@@ -4873,6 +4882,7 @@ fn structDeclInner(
.node = node,
.inst = decl_inst,
.declaring_gz = gz,
.maybe_generic = astgen.within_fn,
};
defer namespace.deinit(gpa);
 
@@ -5142,10 +5152,10 @@ fn structDeclInner(
try gz.setStruct(decl_inst, .{
.src_node = node,
.layout = layout,
.captures_len = @intCast(namespace.captures.count()),
.fields_len = field_count,
.decls_len = decl_count,
.backing_int_ref = backing_int_ref,
.backing_int_body_len = @intCast(backing_int_body_len),
.has_backing_int = backing_int_ref != .none,
.known_non_opv = known_non_opv,
.known_comptime_only = known_comptime_only,
.is_tuple = is_tuple,
@@ -5159,15 +5169,22 @@ fn structDeclInner(
const decls_slice = wip_members.declsSlice();
const fields_slice = wip_members.fieldsSlice();
const bodies_slice = astgen.scratch.items[bodies_start..];
try astgen.extra.ensureUnusedCapacity(gpa, backing_int_body_len +
decls_slice.len + fields_slice.len + bodies_slice.len);
astgen.extra.appendSliceAssumeCapacity(astgen.scratch.items[scratch_top..][0..backing_int_body_len]);
try astgen.extra.ensureUnusedCapacity(gpa, backing_int_body_len + 2 +
decls_slice.len + namespace.captures.count() + fields_slice.len + bodies_slice.len);
astgen.extra.appendSliceAssumeCapacity(@ptrCast(namespace.captures.keys()));
if (backing_int_ref != .none) {
astgen.extra.appendAssumeCapacity(@intCast(backing_int_body_len));
if (backing_int_body_len == 0) {
astgen.extra.appendAssumeCapacity(@intFromEnum(backing_int_ref));
} else {
astgen.extra.appendSliceAssumeCapacity(astgen.scratch.items[scratch_top..][0..backing_int_body_len]);
}
}
astgen.extra.appendSliceAssumeCapacity(decls_slice);
astgen.extra.appendSliceAssumeCapacity(fields_slice);
astgen.extra.appendSliceAssumeCapacity(bodies_slice);
 
block_scope.unstack();
try gz.addNamespaceCaptures(&namespace);
return decl_inst.toRef();
}
 
@@ -5190,6 +5207,7 @@ fn unionDeclInner(
.node = node,
.inst = decl_inst,
.declaring_gz = gz,
.maybe_generic = astgen.within_fn,
};
defer namespace.deinit(gpa);
 
@@ -5368,6 +5386,7 @@ fn unionDeclInner(
.src_node = node,
.layout = layout,
.tag_type = arg_inst,
.captures_len = @intCast(namespace.captures.count()),
.body_len = body_len,
.fields_len = field_count,
.decls_len = decl_count,
@@ -5379,13 +5398,13 @@ fn unionDeclInner(
wip_members.finishBits(bits_per_field);
const decls_slice = wip_members.declsSlice();
const fields_slice = wip_members.fieldsSlice();
try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len + body_len + fields_slice.len);
try astgen.extra.ensureUnusedCapacity(gpa, namespace.captures.count() + decls_slice.len + body_len + fields_slice.len);
astgen.extra.appendSliceAssumeCapacity(@ptrCast(namespace.captures.keys()));
astgen.extra.appendSliceAssumeCapacity(decls_slice);
astgen.appendBodyWithFixups(body);
astgen.extra.appendSliceAssumeCapacity(fields_slice);
 
block_scope.unstack();
try gz.addNamespaceCaptures(&namespace);
return decl_inst.toRef();
}
 
@@ -5537,6 +5556,7 @@ fn containerDecl(
.node = node,
.inst = decl_inst,
.declaring_gz = gz,
.maybe_generic = astgen.within_fn,
};
defer namespace.deinit(gpa);
 
@@ -5555,7 +5575,7 @@ fn containerDecl(
defer block_scope.unstack();
 
_ = try astgen.scanDecls(&namespace, container_decl.ast.members);
namespace.base.tag = .enum_namespace;
namespace.base.tag = .namespace;
 
const arg_inst: Zir.Inst.Ref = if (container_decl.ast.arg != 0)
try comptimeExpr(&block_scope, &namespace.base, coerced_type_ri, container_decl.ast.arg)
@@ -5586,7 +5606,6 @@ fn containerDecl(
if (member_node == counts.nonexhaustive_node)
continue;
fields_hasher.update(tree.getNodeSource(member_node));
namespace.base.tag = .namespace;
var member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) {
.decl => continue,
.field => |field| field,
@@ -5630,7 +5649,6 @@ fn containerDecl(
},
);
}
namespace.base.tag = .enum_namespace;
const tag_value_inst = try expr(&block_scope, &namespace.base, .{ .rl = .{ .ty = arg_inst } }, member.ast.value_expr);
wip_members.appendToField(@intFromEnum(tag_value_inst));
}
@@ -5676,6 +5694,7 @@ fn containerDecl(
.src_node = node,
.nonexhaustive = nonexhaustive,
.tag_type = arg_inst,
.captures_len = @intCast(namespace.captures.count()),
.body_len = body_len,
.fields_len = @intCast(counts.total_fields),
.decls_len = @intCast(counts.decls),
@@ -5685,13 +5704,13 @@ fn containerDecl(
wip_members.finishBits(bits_per_field);
const decls_slice = wip_members.declsSlice();
const fields_slice = wip_members.fieldsSlice();
try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len + body_len + fields_slice.len);
try astgen.extra.ensureUnusedCapacity(gpa, namespace.captures.count() + decls_slice.len + body_len + fields_slice.len);
astgen.extra.appendSliceAssumeCapacity(@ptrCast(namespace.captures.keys()));
astgen.extra.appendSliceAssumeCapacity(decls_slice);
astgen.appendBodyWithFixups(body);
astgen.extra.appendSliceAssumeCapacity(fields_slice);
 
block_scope.unstack();
try gz.addNamespaceCaptures(&namespace);
return rvalue(gz, ri, decl_inst.toRef(), node);
},
.keyword_opaque => {
@@ -5704,6 +5723,7 @@ fn containerDecl(
.node = node,
.inst = decl_inst,
.declaring_gz = gz,
.maybe_generic = astgen.within_fn,
};
defer namespace.deinit(gpa);
 
@@ -5733,16 +5753,17 @@ fn containerDecl(
 
try gz.setOpaque(decl_inst, .{
.src_node = node,
.captures_len = @intCast(namespace.captures.count()),
.decls_len = decl_count,
});
 
wip_members.finishBits(0);
const decls_slice = wip_members.declsSlice();
try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len);
try astgen.extra.ensureUnusedCapacity(gpa, namespace.captures.count() + decls_slice.len);
astgen.extra.appendSliceAssumeCapacity(@ptrCast(namespace.captures.keys()));
astgen.extra.appendSliceAssumeCapacity(decls_slice);
 
block_scope.unstack();
try gz.addNamespaceCaptures(&namespace);
return rvalue(gz, ri, decl_inst.toRef(), node);
},
else => unreachable,
@@ -8238,12 +8259,17 @@ fn localVarRef(
ident_token: Ast.TokenIndex,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const gpa = astgen.gpa;
const name_str_index = try astgen.identAsString(ident_token);
var s = scope;
var found_already: ?Ast.Node.Index = null; // we have found a decl with the same name already
var found_needs_tunnel: bool = undefined; // defined when `found_already != null`
var found_namespaces_out: u32 = undefined; // defined when `found_already != null`
 
// The number of namespaces we have passed through so far, walking up from `gz`
var num_namespaces_out: u32 = 0;
var capturing_namespace: ?*Scope.Namespace = null;
// defined when `num_namespaces_out != 0`
var capturing_namespace: *Scope.Namespace = undefined;
 
while (true) switch (s.tag) {
.local_val => {
const local_val = s.cast(Scope.LocalVal).?;
@@ -8257,15 +8283,13 @@ fn localVarRef(
local_val.used = ident_token;
}
 
const value_inst = try tunnelThroughClosure(
const value_inst = if (num_namespaces_out != 0) try tunnelThroughClosure(
gz,
ident,
num_namespaces_out,
capturing_namespace,
local_val.inst,
local_val.token_src,
gpa,
);
.{ .ref = local_val.inst },
.{ .token = local_val.token_src },
) else local_val.inst;
 
return rvalueNoCoercePreRef(gz, ri, value_inst, ident);
}
@@ -8285,19 +8309,17 @@ fn localVarRef(
const ident_name = try astgen.identifierTokenString(ident_token);
return astgen.failNodeNotes(ident, "mutable '{s}' not accessible from here", .{ident_name}, &.{
try astgen.errNoteTok(local_ptr.token_src, "declared mutable here", .{}),
try astgen.errNoteNode(capturing_namespace.?.node, "crosses namespace boundary here", .{}),
try astgen.errNoteNode(capturing_namespace.node, "crosses namespace boundary here", .{}),
});
}
 
const ptr_inst = try tunnelThroughClosure(
const ptr_inst = if (num_namespaces_out != 0) try tunnelThroughClosure(
gz,
ident,
num_namespaces_out,
capturing_namespace,
local_ptr.ptr,
local_ptr.token_src,
gpa,
);
.{ .ref = local_ptr.ptr },
.{ .token = local_ptr.token_src },
) else local_ptr.ptr;
 
switch (ri.rl) {
.ref, .ref_coerced_ty => {
@@ -8314,7 +8336,7 @@ fn localVarRef(
},
.gen_zir => s = s.cast(GenZir).?.parent,
.defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
.namespace, .enum_namespace => {
.namespace => {
const ns = s.cast(Scope.Namespace).?;
if (ns.decls.get(name_str_index)) |i| {
if (found_already) |f| {
@@ -8325,8 +8347,10 @@ fn localVarRef(
}
// We found a match but must continue looking for ambiguous references to decls.
found_already = i;
found_needs_tunnel = ns.maybe_generic;
found_namespaces_out = num_namespaces_out;
}
if (s.tag == .namespace) num_namespaces_out += 1;
num_namespaces_out += 1;
capturing_namespace = ns;
s = ns.parent;
},
@@ -8339,6 +8363,29 @@ fn localVarRef(
 
// Decl references happen by name rather than ZIR index so that when unrelated
// decls are modified, ZIR code containing references to them can be unmodified.
 
if (found_namespaces_out > 0 and found_needs_tunnel) {
switch (ri.rl) {
.ref, .ref_coerced_ty => return tunnelThroughClosure(
gz,
ident,
found_namespaces_out,
.{ .decl_ref = name_str_index },
.{ .node = found_already.? },
),
else => {
const result = try tunnelThroughClosure(
gz,
ident,
found_namespaces_out,
.{ .decl_val = name_str_index },
.{ .node = found_already.? },
);
return rvalueNoCoercePreRef(gz, ri, result, ident);
},
}
}
 
switch (ri.rl) {
.ref, .ref_coerced_ty => return gz.addStrTok(.decl_ref, name_str_index, ident_token),
else => {
@@ -8348,41 +8395,90 @@ fn localVarRef(
}
}
 
/// Adds a capture to a namespace, if needed.
/// Returns the index of the closure_capture instruction.
/// Access a ZIR instruction through closure. May tunnel through arbitrarily
/// many namespaces, adding closure captures as required.
/// Returns the index of the `closure_get` instruction added to `gz`.
fn tunnelThroughClosure(
gz: *GenZir,
/// The node which references the value to be captured.
inner_ref_node: Ast.Node.Index,
/// The number of namespaces being tunnelled through. At least 1.
num_tunnels: u32,
ns: ?*Scope.Namespace,
value: Zir.Inst.Ref,
token: Ast.TokenIndex,
gpa: Allocator,
/// The value being captured.
value: union(enum) {
ref: Zir.Inst.Ref,
decl_val: Zir.NullTerminatedString,
decl_ref: Zir.NullTerminatedString,
},
/// The location of the value's declaration.
decl_src: union(enum) {
token: Ast.TokenIndex,
node: Ast.Node.Index,
},
) !Zir.Inst.Ref {
// For trivial values, we don't need a tunnel.
// Just return the ref.
if (num_tunnels == 0 or value.toIndex() == null) {
return value;
switch (value) {
.ref => |v| if (v.toIndex() == null) return v, // trivial value; no tunnel needed
.decl_val, .decl_ref => {},
}
 
// Otherwise we need a tunnel. Check if this namespace
// already has one for this value.
const gop = try ns.?.captures.getOrPut(gpa, value.toIndex().?);
if (!gop.found_existing) {
// Make a new capture for this value but don't add it to the declaring_gz yet
try gz.astgen.instructions.append(gz.astgen.gpa, .{
.tag = .closure_capture,
.data = .{ .un_tok = .{
.operand = value,
.src_tok = ns.?.declaring_gz.?.tokenIndexToRelative(token),
} },
const astgen = gz.astgen;
const gpa = astgen.gpa;
 
// Otherwise we need a tunnel. First, figure out the path of namespaces we
// are tunneling through. This is usually only going to be one or two, so
// use an SFBA to optimize for the common case.
var sfba = std.heap.stackFallback(@sizeOf(usize) * 2, astgen.arena);
const intermediate_tunnels = try sfba.get().alloc(*Scope.Namespace, num_tunnels - 1);
 
const root_ns = ns: {
var i: usize = num_tunnels - 1;
var scope: *Scope = gz.parent;
while (i > 0) {
if (scope.cast(Scope.Namespace)) |mid_ns| {
i -= 1;
intermediate_tunnels[i] = mid_ns;
}
scope = scope.parent().?;
}
while (true) {
if (scope.cast(Scope.Namespace)) |ns| break :ns ns;
scope = scope.parent().?;
}
};
 
// Now that we know the scopes we're tunneling through, begin adding
// captures as required, starting with the outermost namespace.
const root_capture = Zir.Inst.Capture.wrap(switch (value) {
.ref => |v| .{ .instruction = v.toIndex().? },
.decl_val => |str| .{ .decl_val = str },
.decl_ref => |str| .{ .decl_ref = str },
});
var cur_capture_index = std.math.cast(
u16,
(try root_ns.captures.getOrPut(gpa, root_capture)).index,
) orelse return astgen.failNodeNotes(root_ns.node, "this compiler implementation only supports up to 65536 captures per namespace", .{}, &.{
switch (decl_src) {
.token => |t| try astgen.errNoteTok(t, "captured value here", .{}),
.node => |n| try astgen.errNoteNode(n, "captured value here", .{}),
},
try astgen.errNoteNode(inner_ref_node, "value used here", .{}),
});
 
for (intermediate_tunnels) |tunnel_ns| {
cur_capture_index = std.math.cast(
u16,
(try tunnel_ns.captures.getOrPut(gpa, Zir.Inst.Capture.wrap(.{ .nested = cur_capture_index }))).index,
) orelse return astgen.failNodeNotes(tunnel_ns.node, "this compiler implementation only supports up to 65536 captures per namespace", .{}, &.{
switch (decl_src) {
.token => |t| try astgen.errNoteTok(t, "captured value here", .{}),
.node => |n| try astgen.errNoteNode(n, "captured value here", .{}),
},
try astgen.errNoteNode(inner_ref_node, "value used here", .{}),
});
gop.value_ptr.* = @enumFromInt(gz.astgen.instructions.len - 1);
}
 
// Add an instruction to get the value from the closure into
// our current context
return try gz.addInstNode(.closure_get, gop.value_ptr.*, inner_ref_node);
// Add an instruction to get the value from the closure.
return gz.addExtendedNodeSmall(.closure_get, inner_ref_node, cur_capture_index);
}
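
A hedged sketch of an input this function handles (illustrative source, not from the diff): a reference crossing two namespaces records a capture in each. The outer namespace captures the instruction for `x`; the inner one captures `.{ .nested = <outer capture index> }`; the use site is lowered to a `closure_get` whose `small` field holds the innermost capture index.

fn f(comptime x: u32) type {
    return struct { // outer namespace: captures the `x` instruction
        const Inner = struct { // inner namespace: captures `.nested`, pointing at the outer capture
            const y = x; // becomes `closure_get` with the capture index in `small`
        };
    };
}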
 
fn stringLiteral(
@@ -9095,7 +9191,7 @@ fn builtinCall(
},
.gen_zir => s = s.cast(GenZir).?.parent,
.defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
.namespace, .enum_namespace => {
.namespace => {
const ns = s.cast(Scope.Namespace).?;
if (ns.decls.get(decl_name)) |i| {
if (found_already) |f| {
@@ -11605,7 +11701,7 @@ const Scope = struct {
}
if (T == Namespace) {
switch (base.tag) {
.namespace, .enum_namespace => return @fieldParentPtr(T, "base", base),
.namespace => return @fieldParentPtr(T, "base", base),
else => return null,
}
}
@@ -11621,7 +11717,7 @@ const Scope = struct {
.local_val => base.cast(LocalVal).?.parent,
.local_ptr => base.cast(LocalPtr).?.parent,
.defer_normal, .defer_error => base.cast(Defer).?.parent,
.namespace, .enum_namespace => base.cast(Namespace).?.parent,
.namespace => base.cast(Namespace).?.parent,
.top => null,
};
}
@@ -11633,7 +11729,6 @@ const Scope = struct {
defer_normal,
defer_error,
namespace,
enum_namespace,
top,
};
 
@@ -11720,14 +11815,14 @@ const Scope = struct {
decls: std.AutoHashMapUnmanaged(Zir.NullTerminatedString, Ast.Node.Index) = .{},
node: Ast.Node.Index,
inst: Zir.Inst.Index,
maybe_generic: bool,
 
/// The astgen scope containing this namespace.
/// Only valid during astgen.
declaring_gz: ?*GenZir,
 
/// Map from the raw captured value to the instruction
/// ref of the capture for decls in this namespace
captures: std.AutoArrayHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{},
/// Set of captures used by this namespace.
captures: std.AutoArrayHashMapUnmanaged(Zir.Inst.Capture, void) = .{},
 
fn deinit(self: *Namespace, gpa: Allocator) void {
self.decls.deinit(gpa);
@@ -11787,12 +11882,6 @@ const GenZir = struct {
// Set if this GenZir is a defer or it is inside a defer.
any_defer_node: Ast.Node.Index = 0,
 
/// Namespace members are lazy. When executing a decl within a namespace,
/// any references to external instructions need to be treated specially.
/// This list tracks those references. See also .closure_capture and .closure_get.
/// Keys are the raw instruction index, values are the closure_capture instruction.
captures: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{},
 
const unstacked_top = std.math.maxInt(usize);
/// Call unstack before adding any new instructions to containing GenZir.
fn unstack(self: *GenZir) void {
@@ -12534,6 +12623,30 @@ const GenZir = struct {
return new_index.toRef();
}
 
fn addExtendedNodeSmall(
gz: *GenZir,
opcode: Zir.Inst.Extended,
src_node: Ast.Node.Index,
small: u16,
) !Zir.Inst.Ref {
const astgen = gz.astgen;
const gpa = astgen.gpa;
 
try gz.instructions.ensureUnusedCapacity(gpa, 1);
try astgen.instructions.ensureUnusedCapacity(gpa, 1);
const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len);
astgen.instructions.appendAssumeCapacity(.{
.tag = .extended,
.data = .{ .extended = .{
.opcode = opcode,
.small = small,
.operand = @bitCast(gz.nodeIndexToRelative(src_node)),
} },
});
gz.instructions.appendAssumeCapacity(new_index);
return new_index.toRef();
}
 
fn addUnTok(
gz: *GenZir,
tag: Zir.Inst.Tag,
@@ -12957,10 +13070,10 @@ const GenZir = struct {
 
fn setStruct(gz: *GenZir, inst: Zir.Inst.Index, args: struct {
src_node: Ast.Node.Index,
captures_len: u32,
fields_len: u32,
decls_len: u32,
backing_int_ref: Zir.Inst.Ref,
backing_int_body_len: u32,
has_backing_int: bool,
layout: std.builtin.Type.ContainerLayout,
known_non_opv: bool,
known_comptime_only: bool,
@@ -12978,7 +13091,7 @@ const GenZir = struct {
 
const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash);
 
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.StructDecl).Struct.fields.len + 4);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.StructDecl).Struct.fields.len + 3);
const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.StructDecl{
.fields_hash_0 = fields_hash_arr[0],
.fields_hash_1 = fields_hash_arr[1],
@@ -12987,26 +13100,24 @@ const GenZir = struct {
.src_node = gz.nodeIndexToRelative(args.src_node),
});
 
if (args.captures_len != 0) {
astgen.extra.appendAssumeCapacity(args.captures_len);
}
if (args.fields_len != 0) {
astgen.extra.appendAssumeCapacity(args.fields_len);
}
if (args.decls_len != 0) {
astgen.extra.appendAssumeCapacity(args.decls_len);
}
if (args.backing_int_ref != .none) {
astgen.extra.appendAssumeCapacity(args.backing_int_body_len);
if (args.backing_int_body_len == 0) {
astgen.extra.appendAssumeCapacity(@intFromEnum(args.backing_int_ref));
}
}
astgen.instructions.set(@intFromEnum(inst), .{
.tag = .extended,
.data = .{ .extended = .{
.opcode = .struct_decl,
.small = @bitCast(Zir.Inst.StructDecl.Small{
.has_captures_len = args.captures_len != 0,
.has_fields_len = args.fields_len != 0,
.has_decls_len = args.decls_len != 0,
.has_backing_int = args.backing_int_ref != .none,
.has_backing_int = args.has_backing_int,
.known_non_opv = args.known_non_opv,
.known_comptime_only = args.known_comptime_only,
.is_tuple = args.is_tuple,
@@ -13024,6 +13135,7 @@ const GenZir = struct {
fn setUnion(gz: *GenZir, inst: Zir.Inst.Index, args: struct {
src_node: Ast.Node.Index,
tag_type: Zir.Inst.Ref,
captures_len: u32,
body_len: u32,
fields_len: u32,
decls_len: u32,
@@ -13039,7 +13151,7 @@ const GenZir = struct {
 
const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash);
 
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len + 4);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len + 5);
const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.UnionDecl{
.fields_hash_0 = fields_hash_arr[0],
.fields_hash_1 = fields_hash_arr[1],
@@ -13051,6 +13163,9 @@ const GenZir = struct {
if (args.tag_type != .none) {
astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type));
}
if (args.captures_len != 0) {
astgen.extra.appendAssumeCapacity(args.captures_len);
}
if (args.body_len != 0) {
astgen.extra.appendAssumeCapacity(args.body_len);
}
@@ -13066,6 +13181,7 @@ const GenZir = struct {
.opcode = .union_decl,
.small = @bitCast(Zir.Inst.UnionDecl.Small{
.has_tag_type = args.tag_type != .none,
.has_captures_len = args.captures_len != 0,
.has_body_len = args.body_len != 0,
.has_fields_len = args.fields_len != 0,
.has_decls_len = args.decls_len != 0,
@@ -13082,6 +13198,7 @@ const GenZir = struct {
fn setEnum(gz: *GenZir, inst: Zir.Inst.Index, args: struct {
src_node: Ast.Node.Index,
tag_type: Zir.Inst.Ref,
captures_len: u32,
body_len: u32,
fields_len: u32,
decls_len: u32,
@@ -13095,7 +13212,7 @@ const GenZir = struct {
 
const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash);
 
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.EnumDecl).Struct.fields.len + 4);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.EnumDecl).Struct.fields.len + 5);
const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.EnumDecl{
.fields_hash_0 = fields_hash_arr[0],
.fields_hash_1 = fields_hash_arr[1],
@@ -13107,6 +13224,9 @@ const GenZir = struct {
if (args.tag_type != .none) {
astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type));
}
if (args.captures_len != 0) {
astgen.extra.appendAssumeCapacity(args.captures_len);
}
if (args.body_len != 0) {
astgen.extra.appendAssumeCapacity(args.body_len);
}
@@ -13122,6 +13242,7 @@ const GenZir = struct {
.opcode = .enum_decl,
.small = @bitCast(Zir.Inst.EnumDecl.Small{
.has_tag_type = args.tag_type != .none,
.has_captures_len = args.captures_len != 0,
.has_body_len = args.body_len != 0,
.has_fields_len = args.fields_len != 0,
.has_decls_len = args.decls_len != 0,
@@ -13135,6 +13256,7 @@ const GenZir = struct {
 
fn setOpaque(gz: *GenZir, inst: Zir.Inst.Index, args: struct {
src_node: Ast.Node.Index,
captures_len: u32,
decls_len: u32,
}) !void {
const astgen = gz.astgen;
@@ -13142,11 +13264,14 @@ const GenZir = struct {
 
assert(args.src_node != 0);
 
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.OpaqueDecl).Struct.fields.len + 1);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.OpaqueDecl).Struct.fields.len + 2);
const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.OpaqueDecl{
.src_node = gz.nodeIndexToRelative(args.src_node),
});
 
if (args.captures_len != 0) {
astgen.extra.appendAssumeCapacity(args.captures_len);
}
if (args.decls_len != 0) {
astgen.extra.appendAssumeCapacity(args.decls_len);
}
@@ -13155,6 +13280,7 @@ const GenZir = struct {
.data = .{ .extended = .{
.opcode = .opaque_decl,
.small = @bitCast(Zir.Inst.OpaqueDecl.Small{
.has_captures_len = args.captures_len != 0,
.has_decls_len = args.decls_len != 0,
.name_strategy = gz.anon_name_strategy,
}),
@@ -13197,15 +13323,6 @@ const GenZir = struct {
}
}
 
fn addNamespaceCaptures(gz: *GenZir, namespace: *Scope.Namespace) !void {
if (namespace.captures.count() > 0) {
try gz.instructions.ensureUnusedCapacity(gz.astgen.gpa, namespace.captures.count());
for (namespace.captures.values()) |capture| {
gz.instructions.appendAssumeCapacity(capture);
}
}
}
 
fn addDbgVar(gz: *GenZir, tag: Zir.Inst.Tag, name: Zir.NullTerminatedString, inst: Zir.Inst.Ref) !void {
if (gz.is_comptime) return;
 
@@ -13305,7 +13422,7 @@ fn detectLocalShadowing(
}
s = local_ptr.parent;
},
.namespace, .enum_namespace => {
.namespace => {
outer_scope = true;
const ns = s.cast(Scope.Namespace).?;
const decl_node = ns.decls.get(ident_name) orelse {
@@ -13478,7 +13595,7 @@ fn scanDecls(astgen: *AstGen, namespace: *Scope.Namespace, members: []const Ast.
}
s = local_ptr.parent;
},
.namespace, .enum_namespace => s = s.cast(Scope.Namespace).?.parent,
.namespace => s = s.cast(Scope.Namespace).?.parent,
.gen_zir => s = s.cast(GenZir).?.parent,
.defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
.top => break,
 
lib/std/zig/Zir.zig added: 4090, removed: 2946, total 1144
@@ -1004,17 +1004,6 @@ pub const Inst = struct {
@"resume",
@"await",
 
/// When a type or function refers to a comptime value from an outer
/// scope, that forms a closure over comptime value. The outer scope
/// will record a capture of that value, which encodes its current state
/// and marks it to persist. Uses `un_tok` field. Operand is the
/// instruction value to capture.
closure_capture,
/// The inner scope of a closure uses closure_get to retrieve the value
/// stored by the outer scope. Uses `inst_node` field. Operand is the
/// closure_capture instruction ref.
closure_get,
 
/// A defer statement.
/// Uses the `defer` union field.
@"defer",
@@ -1251,8 +1240,6 @@ pub const Inst = struct {
.@"await",
.ret_err_value_code,
.extended,
.closure_get,
.closure_capture,
.ret_ptr,
.ret_type,
.@"try",
@@ -1542,8 +1529,6 @@ pub const Inst = struct {
.@"resume",
.@"await",
.ret_err_value_code,
.closure_get,
.closure_capture,
.@"break",
.break_inline,
.condbr,
@@ -1829,9 +1814,6 @@ pub const Inst = struct {
.@"resume" = .un_node,
.@"await" = .un_node,
 
.closure_capture = .un_tok,
.closure_get = .inst_node,
 
.@"defer" = .@"defer",
.defer_err_code = .defer_err_code,
 
@@ -2074,6 +2056,10 @@ pub const Inst = struct {
/// `operand` is payload index to `RestoreErrRetIndex`.
/// `small` is undefined.
restore_err_ret_index,
/// Retrieves a value from the current type declaration scope's closure.
/// `operand` is `src_node: i32`.
/// `small` is the index of the capture in the closure.
closure_get,
/// Used as a placeholder instruction which is just a dummy index for Sema to replace
/// with a specific value. For instance, this is used for the capture of an `errdefer`.
/// This should never appear in a body.
@@ -2949,7 +2935,7 @@ pub const Inst = struct {
/// These are stored in trailing data in `extra` for each prong.
pub const ProngInfo = packed struct(u32) {
body_len: u28,
capture: Capture,
capture: ProngInfo.Capture,
is_inline: bool,
has_tag_capture: bool,
 
@@ -3013,19 +2999,21 @@ pub const Inst = struct {
};
 
/// Trailing:
/// 0. fields_len: u32, // if has_fields_len
/// 1. decls_len: u32, // if has_decls_len
/// 2. backing_int_body_len: u32, // if has_backing_int
/// 3. backing_int_ref: Ref, // if has_backing_int and backing_int_body_len is 0
/// 4. backing_int_body_inst: Inst, // if has_backing_int and backing_int_body_len is > 0
/// 5. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 6. flags: u32 // for every 8 fields
/// 0. captures_len: u32 // if has_captures_len
/// 1. fields_len: u32, // if has_fields_len
/// 2. decls_len: u32, // if has_decls_len
/// 3. capture: Capture // for every captures_len
/// 4. backing_int_body_len: u32, // if has_backing_int
/// 5. backing_int_ref: Ref, // if has_backing_int and backing_int_body_len is 0
/// 6. backing_int_body_inst: Inst, // if has_backing_int and backing_int_body_len is > 0
/// 7. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 8. flags: u32 // for every 8 fields
/// - sets of 4 bits:
/// 0b000X: whether corresponding field has an align expression
/// 0b00X0: whether corresponding field has a default expression
/// 0b0X00: whether corresponding field is comptime
/// 0bX000: whether corresponding field has a type expression
/// 7. fields: { // for every fields_len
/// 9. fields: { // for every fields_len
/// field_name: u32, // if !is_tuple
/// doc_comment: NullTerminatedString, // .empty if no doc comment
/// field_type: Ref, // if corresponding bit is not set. none means anytype.
@@ -3033,7 +3021,7 @@ pub const Inst = struct {
/// align_body_len: u32, // if corresponding bit is set
/// init_body_len: u32, // if corresponding bit is set
/// }
/// 8. bodies: { // for every fields_len
/// 10. bodies: { // for every fields_len
/// field_type_body_inst: Inst, // for each field_type_body_len
/// align_body_inst: Inst, // for each align_body_len
/// init_body_inst: Inst, // for each init_body_len
@@ -3052,6 +3040,7 @@ pub const Inst = struct {
}
 
pub const Small = packed struct {
has_captures_len: bool,
has_fields_len: bool,
has_decls_len: bool,
has_backing_int: bool,
@@ -3063,10 +3052,59 @@ pub const Inst = struct {
any_default_inits: bool,
any_comptime_fields: bool,
any_aligned_fields: bool,
_: u3 = undefined,
_: u2 = undefined,
};
};
 
/// Represents a single value being captured in a type declaration's closure.
pub const Capture = packed struct(u32) {
tag: enum(u2) {
/// `data` is a `u16` index into the parent closure.
nested,
/// `data` is a `Zir.Inst.Index` to an instruction whose value is being captured.
instruction,
/// `data` is a `NullTerminatedString` to a decl name.
decl_val,
/// `data` is a `NullTerminatedString` to a decl name.
decl_ref,
},
data: u30,
pub const Unwrapped = union(enum) {
nested: u16,
instruction: Zir.Inst.Index,
decl_val: NullTerminatedString,
decl_ref: NullTerminatedString,
};
pub fn wrap(cap: Unwrapped) Capture {
return switch (cap) {
.nested => |idx| .{
.tag = .nested,
.data = idx,
},
.instruction => |inst| .{
.tag = .instruction,
.data = @intCast(@intFromEnum(inst)),
},
.decl_val => |str| .{
.tag = .decl_val,
.data = @intCast(@intFromEnum(str)),
},
.decl_ref => |str| .{
.tag = .decl_ref,
.data = @intCast(@intFromEnum(str)),
},
};
}
pub fn unwrap(cap: Capture) Unwrapped {
return switch (cap.tag) {
.nested => .{ .nested = @intCast(cap.data) },
.instruction => .{ .instruction = @enumFromInt(cap.data) },
.decl_val => .{ .decl_val = @enumFromInt(cap.data) },
.decl_ref => .{ .decl_ref = @enumFromInt(cap.data) },
};
}
};
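
A usage sketch under assumptions (the test and import path below are hypothetical, not from the commit): `wrap` and `unwrap` round-trip between the packed u32 encoding and the tagged-union view.

const std = @import("std");
const Zir = @import("Zir.zig"); // assumed import path

test "Capture wrap/unwrap round-trips" {
    const cap = Zir.Inst.Capture.wrap(.{ .nested = 3 });
    try std.testing.expectEqual(@as(u16, 3), cap.unwrap().nested);
}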
 
pub const NameStrategy = enum(u2) {
/// Use the same name as the parent declaration name.
/// e.g. `const Foo = struct {...};`.
@@ -3098,14 +3136,16 @@ pub const Inst = struct {
 
/// Trailing:
/// 0. tag_type: Ref, // if has_tag_type
/// 1. body_len: u32, // if has_body_len
/// 2. fields_len: u32, // if has_fields_len
/// 3. decls_len: u32, // if has_decls_len
/// 4. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 5. inst: Index // for every body_len
/// 6. has_bits: u32 // for every 32 fields
/// 1. captures_len: u32, // if has_captures_len
/// 2. body_len: u32, // if has_body_len
/// 3. fields_len: u32, // if has_fields_len
/// 4. decls_len: u32, // if has_decls_len
/// 5. capture: Capture // for every captures_len
/// 6. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 7. inst: Index // for every body_len
/// 8. has_bits: u32 // for every 32 fields
/// - the bit is whether corresponding field has a value expression
/// 7. fields: { // for every fields_len
/// 9. fields: { // for every fields_len
/// field_name: u32,
/// doc_comment: u32, // .empty if no doc_comment
/// value: Ref, // if corresponding bit is set
@@ -3125,29 +3165,32 @@ pub const Inst = struct {
 
pub const Small = packed struct {
has_tag_type: bool,
has_captures_len: bool,
has_body_len: bool,
has_fields_len: bool,
has_decls_len: bool,
name_strategy: NameStrategy,
nonexhaustive: bool,
_: u9 = undefined,
_: u8 = undefined,
};
};
 
/// Trailing:
/// 0. tag_type: Ref, // if has_tag_type
/// 1. body_len: u32, // if has_body_len
/// 2. fields_len: u32, // if has_fields_len
/// 3. decls_len: u32, // if has_decls_len
/// 4. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 5. inst: Index // for every body_len
/// 6. has_bits: u32 // for every 8 fields
/// 1. captures_len: u32 // if has_captures_len
/// 2. body_len: u32, // if has_body_len
/// 3. fields_len: u32, // if has_fields_len
/// 4. decls_len: u32, // if has_decls_len
/// 5. capture: Capture // for every captures_len
/// 6. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 7. inst: Index // for every body_len
/// 8. has_bits: u32 // for every 8 fields
/// - sets of 4 bits:
/// 0b000X: whether corresponding field has a type expression
/// 0b00X0: whether corresponding field has an align expression
/// 0b0X00: whether corresponding field has a tag value expression
/// 0bX000: unused
/// 7. fields: { // for every fields_len
/// 9. fields: { // for every fields_len
/// field_name: NullTerminatedString, // null terminated string index
/// doc_comment: NullTerminatedString, // .empty if no doc comment
/// field_type: Ref, // if corresponding bit is set
@@ -3170,6 +3213,7 @@ pub const Inst = struct {
 
pub const Small = packed struct {
has_tag_type: bool,
has_captures_len: bool,
has_body_len: bool,
has_fields_len: bool,
has_decls_len: bool,
@@ -3183,13 +3227,15 @@ pub const Inst = struct {
/// true | false | union(T) { }
auto_enum_tag: bool,
any_aligned_fields: bool,
_: u6 = undefined,
_: u5 = undefined,
};
};
 
/// Trailing:
/// 0. decls_len: u32, // if has_decls_len
/// 1. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 0. captures_len: u32, // if has_captures_len
/// 1. decls_len: u32, // if has_decls_len
/// 2. capture: Capture, // for every captures_len
/// 3. decl: Index, // for every decls_len; points to a `declaration` instruction
pub const OpaqueDecl = struct {
src_node: i32,
 
@@ -3198,9 +3244,10 @@ pub const Inst = struct {
}
 
pub const Small = packed struct {
has_captures_len: bool,
has_decls_len: bool,
name_strategy: NameStrategy,
_: u13 = undefined,
_: u12 = undefined,
};
};
 
@@ -3502,6 +3549,11 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
.struct_decl => {
const small: Inst.StructDecl.Small = @bitCast(extended.small);
var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.StructDecl).Struct.fields.len);
const captures_len = if (small.has_captures_len) captures_len: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :captures_len captures_len;
} else 0;
extra_index += @intFromBool(small.has_fields_len);
const decls_len = if (small.has_decls_len) decls_len: {
const decls_len = zir.extra[extra_index];
@@ -3509,6 +3561,8 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
break :decls_len decls_len;
} else 0;
 
extra_index += captures_len;
 
if (small.has_backing_int) {
const backing_int_body_len = zir.extra[extra_index];
extra_index += 1; // backing_int_body_len
@@ -3529,6 +3583,11 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
const small: Inst.EnumDecl.Small = @bitCast(extended.small);
var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.EnumDecl).Struct.fields.len);
extra_index += @intFromBool(small.has_tag_type);
const captures_len = if (small.has_captures_len) captures_len: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :captures_len captures_len;
} else 0;
extra_index += @intFromBool(small.has_body_len);
extra_index += @intFromBool(small.has_fields_len);
const decls_len = if (small.has_decls_len) decls_len: {
@@ -3537,6 +3596,8 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
break :decls_len decls_len;
} else 0;
 
extra_index += captures_len;
 
return .{
.extra_index = extra_index,
.decls_remaining = decls_len,
@@ -3547,6 +3608,11 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
const small: Inst.UnionDecl.Small = @bitCast(extended.small);
var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.UnionDecl).Struct.fields.len);
extra_index += @intFromBool(small.has_tag_type);
const captures_len = if (small.has_captures_len) captures_len: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :captures_len captures_len;
} else 0;
extra_index += @intFromBool(small.has_body_len);
extra_index += @intFromBool(small.has_fields_len);
const decls_len = if (small.has_decls_len) decls_len: {
@@ -3555,6 +3621,8 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
break :decls_len decls_len;
} else 0;
 
extra_index += captures_len;
 
return .{
.extra_index = extra_index,
.decls_remaining = decls_len,
@@ -3569,6 +3637,13 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
extra_index += 1;
break :decls_len decls_len;
} else 0;
const captures_len = if (small.has_captures_len) captures_len: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :captures_len captures_len;
} else 0;
 
extra_index += captures_len;
 
return .{
.extra_index = extra_index,
 
src/Autodoc.zig added: 4090, removed: 2946, total 1144
@@ -450,7 +450,7 @@ const Scope = struct {
Zir.NullTerminatedString, // index into the current file's string table (decl name)
*DeclStatus,
) = .{},
 
captures: []const Zir.Inst.Capture = &.{},
enclosing_type: ?usize, // index into `types`, null = file top-level struct
 
pub const DeclStatus = union(enum) {
@@ -459,6 +459,24 @@ const Scope = struct {
NotRequested: u32, // instr_index
};
 
fn getCapture(scope: Scope, idx: u16) struct {
union(enum) { inst: Zir.Inst.Index, decl: Zir.NullTerminatedString },
*Scope,
} {
const parent = scope.parent.?;
return switch (scope.captures[idx].unwrap()) {
.nested => |parent_idx| parent.getCapture(parent_idx),
.instruction => |inst| .{
.{ .inst = inst },
parent,
},
.decl_val, .decl_ref => |str| .{
.{ .decl = str },
parent,
},
};
}
 
/// Returns a pointer so that the caller has a chance to modify the value
/// in case they decide to start analyzing a previously not requested decl.
/// Another reason is that in some places we use the pointer to uniquely
@@ -1151,29 +1169,6 @@ fn walkInstruction(
.expr = .{ .comptimeExpr = 0 },
};
},
.closure_get => {
const inst_node = data[@intFromEnum(inst)].inst_node;
 
const code = try self.getBlockSource(file, parent_src, inst_node.src_node);
const idx = self.comptime_exprs.items.len;
try self.exprs.append(self.arena, .{ .comptimeExpr = idx });
try self.comptime_exprs.append(self.arena, .{ .code = code });
 
return DocData.WalkResult{
.expr = .{ .comptimeExpr = idx },
};
},
.closure_capture => {
const un_tok = data[@intFromEnum(inst)].un_tok;
return try self.walkRef(
file,
parent_scope,
parent_src,
un_tok.operand,
need_type,
call_ctx,
);
},
.str => {
const str = data[@intFromEnum(inst)].str.get(file.zir);
 
@@ -3395,11 +3390,23 @@ fn walkInstruction(
.enclosing_type = type_slot_index,
};
 
const small: Zir.Inst.OpaqueDecl.Small = @bitCast(extended.small);
const extra = file.zir.extraData(Zir.Inst.OpaqueDecl, extended.operand);
var extra_index: usize = extra.end;
 
const src_info = try self.srcLocInfo(file, extra.data.src_node, parent_src);
 
const captures_len = if (small.has_captures_len) blk: {
const captures_len = file.zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
 
if (small.has_decls_len) extra_index += 1;
 
scope.captures = @ptrCast(file.zir.extra[extra_index..][0..captures_len]);
extra_index += captures_len;
 
var decl_indexes: std.ArrayListUnmanaged(usize) = .{};
var priv_decl_indexes: std.ArrayListUnmanaged(usize) = .{};
 
@@ -3503,6 +3510,12 @@ fn walkInstruction(
break :blk tag_ref;
} else null;
 
const captures_len = if (small.has_captures_len) blk: {
const captures_len = file.zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
 
const body_len = if (small.has_body_len) blk: {
const body_len = file.zir.extra[extra_index];
extra_index += 1;
@@ -3520,6 +3533,11 @@ fn walkInstruction(
else => .{ .enumLiteral = @tagName(small.layout) },
};
 
if (small.has_decls_len) extra_index += 1;
 
scope.captures = @ptrCast(file.zir.extra[extra_index..][0..captures_len]);
extra_index += captures_len;
 
var decl_indexes: std.ArrayListUnmanaged(usize) = .{};
var priv_decl_indexes: std.ArrayListUnmanaged(usize) = .{};
 
@@ -3631,6 +3649,12 @@ fn walkInstruction(
break :blk wr.expr;
} else null;
 
const captures_len = if (small.has_captures_len) blk: {
const captures_len = file.zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
 
const body_len = if (small.has_body_len) blk: {
const body_len = file.zir.extra[extra_index];
extra_index += 1;
@@ -3643,6 +3667,11 @@ fn walkInstruction(
break :blk fields_len;
} else 0;
 
if (small.has_decls_len) extra_index += 1;
 
scope.captures = @ptrCast(file.zir.extra[extra_index..][0..captures_len]);
extra_index += captures_len;
 
var decl_indexes: std.ArrayListUnmanaged(usize) = .{};
var priv_decl_indexes: std.ArrayListUnmanaged(usize) = .{};
 
@@ -3759,6 +3788,12 @@ fn walkInstruction(
 
const src_info = try self.srcLocInfo(file, extra.data.src_node, parent_src);
 
const captures_len = if (small.has_captures_len) blk: {
const captures_len = file.zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
 
const fields_len = if (small.has_fields_len) blk: {
const fields_len = file.zir.extra[extra_index];
extra_index += 1;
@@ -3768,6 +3803,9 @@ fn walkInstruction(
// We don't care about decls yet
if (small.has_decls_len) extra_index += 1;
 
scope.captures = @ptrCast(file.zir.extra[extra_index..][0..captures_len]);
extra_index += captures_len;
 
var backing_int: ?DocData.Expr = null;
if (small.has_backing_int) {
const backing_int_body_len = file.zir.extra[extra_index];
@@ -4018,6 +4056,16 @@ fn walkInstruction(
.expr = .{ .cmpxchgIndex = cmpxchg_index },
};
},
.closure_get => {
const captured, const scope = parent_scope.getCapture(extended.small);
switch (captured) {
.inst => |cap_inst| return self.walkInstruction(file, scope, parent_src, cap_inst, need_type, call_ctx),
.decl => |str| {
const decl_status = parent_scope.resolveDeclName(str, file, inst.toOptional());
return .{ .expr = .{ .declRef = decl_status } };
},
}
},
}
},
}
 
src/Builtin.zig added: 4090, removed: 2946, total 1144
@@ -264,6 +264,8 @@ pub fn populateFile(comp: *Compilation, mod: *Module, file: *File) !void {
assert(!file.zir.hasCompileErrors()); // builtin.zig must not have astgen errors
file.zir_loaded = true;
file.status = .success_zir;
// Note that whilst we set `zir_loaded` here, we populated `path_digest`
// all the way back in `Package.Module.create`.
}
 
fn writeFile(file: *File, mod: *Module) !void {
 
src/Compilation.zig added: 4090, removed: 2946, total 1144
@@ -1326,6 +1326,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.global = options.config,
.parent = options.root_mod,
.builtin_mod = options.root_mod.getBuiltinDependency(),
.builtin_modules = null, // `builtin_mod` is set
});
try options.root_mod.deps.putNoClobber(arena, "compiler_rt", compiler_rt_mod);
}
@@ -1430,6 +1431,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.global = options.config,
.parent = options.root_mod,
.builtin_mod = options.root_mod.getBuiltinDependency(),
.builtin_modules = null, // `builtin_mod` is set
});
 
const zcu = try arena.create(Module);
@@ -6107,6 +6109,7 @@ fn buildOutputFromZig(
.cc_argv = &.{},
.parent = null,
.builtin_mod = null,
.builtin_modules = null, // there is only one module in this compilation
});
const root_name = src_basename[0 .. src_basename.len - std.fs.path.extension(src_basename).len];
const target = comp.getTarget();
@@ -6219,6 +6222,7 @@ pub fn build_crt_file(
.cc_argv = &.{},
.parent = null,
.builtin_mod = null,
.builtin_modules = null, // there is only one module in this compilation
});
 
for (c_source_files) |*item| {
 
src/InternPool.zig added: 4090, removed: 2946, total 1144
@@ -1,7 +1,6 @@
//! All interned objects have both a value and a type.
//! This data structure is self-contained, with the following exceptions:
//! * Module.Namespace has a pointer to Module.File
//! * Module.Decl has a pointer to Module.CaptureScope
 
/// Maps `Key` to `Index`. `Key` objects are not stored anywhere; they are
/// constructed lazily.
@@ -345,6 +344,7 @@ const KeyAdapter = struct {
 
pub fn eql(ctx: @This(), a: Key, b_void: void, b_map_index: usize) bool {
_ = b_void;
if (ctx.intern_pool.items.items(.tag)[b_map_index] == .removed) return false;
return ctx.intern_pool.indexToKey(@as(Index, @enumFromInt(b_map_index))).eql(a, ctx.intern_pool);
}
 
@@ -502,6 +502,51 @@ pub const OptionalNullTerminatedString = enum(u32) {
}
};
 
/// A single value captured in the closure of a namespace type. This is not a plain
/// `Index` because we must differentiate between the following cases:
/// * runtime-known value (where we store the type)
/// * comptime-known value (where we store the value)
/// * decl val (so that we can analyze the value lazily)
/// * decl ref (so that we can analyze the reference lazily)
pub const CaptureValue = packed struct(u32) {
tag: enum(u2) { @"comptime", runtime, decl_val, decl_ref },
idx: u30,
 
pub fn wrap(val: Unwrapped) CaptureValue {
return switch (val) {
.@"comptime" => |i| .{ .tag = .@"comptime", .idx = @intCast(@intFromEnum(i)) },
.runtime => |i| .{ .tag = .runtime, .idx = @intCast(@intFromEnum(i)) },
.decl_val => |i| .{ .tag = .decl_val, .idx = @intCast(@intFromEnum(i)) },
.decl_ref => |i| .{ .tag = .decl_ref, .idx = @intCast(@intFromEnum(i)) },
};
}
pub fn unwrap(val: CaptureValue) Unwrapped {
return switch (val.tag) {
.@"comptime" => .{ .@"comptime" = @enumFromInt(val.idx) },
.runtime => .{ .runtime = @enumFromInt(val.idx) },
.decl_val => .{ .decl_val = @enumFromInt(val.idx) },
.decl_ref => .{ .decl_ref = @enumFromInt(val.idx) },
};
}
 
pub const Unwrapped = union(enum) {
/// Index refers to the value.
@"comptime": Index,
/// Index refers to the type.
runtime: Index,
decl_val: DeclIndex,
decl_ref: DeclIndex,
};
 
pub const Slice = struct {
start: u32,
len: u32,
pub fn get(slice: Slice, ip: *const InternPool) []CaptureValue {
return @ptrCast(ip.extra.items[slice.start..][0..slice.len]);
}
};
};
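
A hedged usage sketch (not from the commit; `demoRoundTrip` is a hypothetical helper): the 2-bit tag plus 30-bit index round-trips through `wrap`/`unwrap`, and the `decl_val`/`decl_ref` cases carry a `DeclIndex` so the capture can be analyzed lazily.

fn demoRoundTrip(decl: DeclIndex) void {
    const cv = CaptureValue.wrap(.{ .decl_val = decl });
    switch (cv.unwrap()) {
        .decl_val => |d| std.debug.assert(d == decl), // resolved lazily, by decl
        .@"comptime", .runtime, .decl_ref => unreachable,
    }
}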
 
pub const Key = union(enum) {
int_type: IntType,
ptr_type: PtrType,
@@ -516,14 +561,14 @@ pub const Key = union(enum) {
/// This represents a struct that has been explicitly declared in source code,
/// or was created with `@Type`. It is unique and based on a declaration.
/// It may be a tuple, if declared like this: `struct {A, B, C}`.
struct_type: StructType,
struct_type: NamespaceType,
/// This is an anonymous struct or tuple type which has no corresponding
/// declaration. It is used for types that have no `struct` keyword in the
/// source code, and were not created via `@Type`.
anon_struct_type: AnonStructType,
union_type: Key.UnionType,
opaque_type: OpaqueType,
enum_type: EnumType,
union_type: NamespaceType,
opaque_type: NamespaceType,
enum_type: NamespaceType,
func_type: FuncType,
error_set_type: ErrorSetType,
/// The payload is the function body, either a `func_decl` or `func_instance`.
@@ -645,348 +690,6 @@ pub const Key = union(enum) {
child: Index,
};
 
pub const OpaqueType = extern struct {
/// The Decl that corresponds to the opaque itself.
decl: DeclIndex,
/// Represents the declarations inside this opaque.
namespace: NamespaceIndex,
zir_index: TrackedInst.Index.Optional,
};
 
/// Although packed structs and non-packed structs are encoded differently,
/// this struct is used for both categories since they share some common
/// functionality.
pub const StructType = struct {
extra_index: u32,
/// `none` when the struct is `@TypeOf(.{})`.
decl: OptionalDeclIndex,
/// `none` when the struct has no declarations.
namespace: OptionalNamespaceIndex,
/// Index of the struct_decl ZIR instruction.
zir_index: TrackedInst.Index.Optional,
layout: std.builtin.Type.ContainerLayout,
field_names: NullTerminatedString.Slice,
field_types: Index.Slice,
field_inits: Index.Slice,
field_aligns: Alignment.Slice,
runtime_order: RuntimeOrder.Slice,
comptime_bits: ComptimeBits,
offsets: Offsets,
names_map: OptionalMapIndex,
 
pub const ComptimeBits = struct {
start: u32,
/// This is the number of u32 elements, not the number of struct fields.
len: u32,
 
pub fn get(this: @This(), ip: *const InternPool) []u32 {
return ip.extra.items[this.start..][0..this.len];
}
 
pub fn getBit(this: @This(), ip: *const InternPool, i: usize) bool {
if (this.len == 0) return false;
return @as(u1, @truncate(this.get(ip)[i / 32] >> @intCast(i % 32))) != 0;
}
 
pub fn setBit(this: @This(), ip: *const InternPool, i: usize) void {
this.get(ip)[i / 32] |= @as(u32, 1) << @intCast(i % 32);
}
 
pub fn clearBit(this: @This(), ip: *const InternPool, i: usize) void {
this.get(ip)[i / 32] &= ~(@as(u32, 1) << @intCast(i % 32));
}
};
 
pub const Offsets = struct {
start: u32,
len: u32,
 
pub fn get(this: @This(), ip: *const InternPool) []u32 {
return @ptrCast(ip.extra.items[this.start..][0..this.len]);
}
};
 
pub const RuntimeOrder = enum(u32) {
/// Placeholder until layout is resolved.
unresolved = std.math.maxInt(u32) - 0,
/// Field not present at runtime
omitted = std.math.maxInt(u32) - 1,
_,
 
pub const Slice = struct {
start: u32,
len: u32,
 
pub fn get(slice: RuntimeOrder.Slice, ip: *const InternPool) []RuntimeOrder {
return @ptrCast(ip.extra.items[slice.start..][0..slice.len]);
}
};
 
pub fn toInt(i: @This()) ?u32 {
return switch (i) {
.omitted => null,
.unresolved => unreachable,
else => @intFromEnum(i),
};
}
};
 
/// Look up field index based on field name.
pub fn nameIndex(self: StructType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
const names_map = self.names_map.unwrap() orelse {
const i = name.toUnsigned(ip) orelse return null;
if (i >= self.field_types.len) return null;
return i;
};
const map = &ip.maps.items[@intFromEnum(names_map)];
const adapter: NullTerminatedString.Adapter = .{ .strings = self.field_names.get(ip) };
const field_index = map.getIndexAdapted(name, adapter) orelse return null;
return @intCast(field_index);
}
 
/// Returns the already-existing field with the same name, if any.
pub fn addFieldName(
self: @This(),
ip: *InternPool,
name: NullTerminatedString,
) ?u32 {
return ip.addFieldName(self.names_map.unwrap().?, self.field_names.start, name);
}
 
pub fn fieldAlign(s: @This(), ip: *const InternPool, i: usize) Alignment {
if (s.field_aligns.len == 0) return .none;
return s.field_aligns.get(ip)[i];
}
 
pub fn fieldInit(s: @This(), ip: *const InternPool, i: usize) Index {
if (s.field_inits.len == 0) return .none;
assert(s.haveFieldInits(ip));
return s.field_inits.get(ip)[i];
}
 
/// Returns `none` in the case the struct is a tuple.
pub fn fieldName(s: @This(), ip: *const InternPool, i: usize) OptionalNullTerminatedString {
if (s.field_names.len == 0) return .none;
return s.field_names.get(ip)[i].toOptional();
}
 
pub fn fieldIsComptime(s: @This(), ip: *const InternPool, i: usize) bool {
return s.comptime_bits.getBit(ip, i);
}
 
pub fn setFieldComptime(s: @This(), ip: *InternPool, i: usize) void {
s.comptime_bits.setBit(ip, i);
}
 
/// Reads the non-opv flag calculated during AstGen. Used to short-circuit more
/// complicated logic.
pub fn knownNonOpv(s: @This(), ip: *InternPool) bool {
return switch (s.layout) {
.Packed => false,
.Auto, .Extern => s.flagsPtr(ip).known_non_opv,
};
}
 
/// The returned pointer expires with any addition to the `InternPool`.
/// Asserts the struct is not packed.
pub fn flagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStruct.Flags {
assert(self.layout != .Packed);
const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?;
return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
}
 
/// The returned pointer expires with any addition to the `InternPool`.
/// Asserts that the struct is packed.
pub fn packedFlagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStructPacked.Flags {
assert(self.layout == .Packed);
const flags_field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?;
return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
}
 
pub fn assumeRuntimeBitsIfFieldTypesWip(s: @This(), ip: *InternPool) bool {
if (s.layout == .Packed) return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.field_types_wip) {
flags_ptr.assumed_runtime_bits = true;
return true;
}
return false;
}
 
pub fn setTypesWip(s: @This(), ip: *InternPool) bool {
if (s.layout == .Packed) return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.field_types_wip) return true;
flags_ptr.field_types_wip = true;
return false;
}
 
pub fn clearTypesWip(s: @This(), ip: *InternPool) void {
if (s.layout == .Packed) return;
s.flagsPtr(ip).field_types_wip = false;
}
 
pub fn setLayoutWip(s: @This(), ip: *InternPool) bool {
if (s.layout == .Packed) return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.layout_wip) return true;
flags_ptr.layout_wip = true;
return false;
}
 
pub fn clearLayoutWip(s: @This(), ip: *InternPool) void {
if (s.layout == .Packed) return;
s.flagsPtr(ip).layout_wip = false;
}
 
pub fn setAlignmentWip(s: @This(), ip: *InternPool) bool {
if (s.layout == .Packed) return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.alignment_wip) return true;
flags_ptr.alignment_wip = true;
return false;
}
 
pub fn clearAlignmentWip(s: @This(), ip: *InternPool) void {
if (s.layout == .Packed) return;
s.flagsPtr(ip).alignment_wip = false;
}
 
pub fn setInitsWip(s: @This(), ip: *InternPool) bool {
switch (s.layout) {
.Packed => {
const flag = &s.packedFlagsPtr(ip).field_inits_wip;
if (flag.*) return true;
flag.* = true;
return false;
},
.Auto, .Extern => {
const flag = &s.flagsPtr(ip).field_inits_wip;
if (flag.*) return true;
flag.* = true;
return false;
},
}
}
 
pub fn clearInitsWip(s: @This(), ip: *InternPool) void {
switch (s.layout) {
.Packed => s.packedFlagsPtr(ip).field_inits_wip = false,
.Auto, .Extern => s.flagsPtr(ip).field_inits_wip = false,
}
}
 
pub fn setFullyResolved(s: @This(), ip: *InternPool) bool {
if (s.layout == .Packed) return true;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.fully_resolved) return true;
flags_ptr.fully_resolved = true;
return false;
}
 
pub fn clearFullyResolved(s: @This(), ip: *InternPool) void {
s.flagsPtr(ip).fully_resolved = false;
}
 
/// The returned pointer expires with any addition to the `InternPool`.
/// Asserts the struct is not packed.
pub fn size(self: @This(), ip: *InternPool) *u32 {
assert(self.layout != .Packed);
const size_field_index = std.meta.fieldIndex(Tag.TypeStruct, "size").?;
return @ptrCast(&ip.extra.items[self.extra_index + size_field_index]);
}
 
/// The backing integer type of the packed struct. Whether zig chooses
/// this type or the user specifies it, it is stored here. This will be
/// set to `none` until the layout is resolved.
/// Asserts the struct is packed.
pub fn backingIntType(s: @This(), ip: *const InternPool) *Index {
assert(s.layout == .Packed);
const field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "backing_int_ty").?;
return @ptrCast(&ip.extra.items[s.extra_index + field_index]);
}
 
/// Asserts the struct is not packed.
pub fn setZirIndex(s: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
assert(s.layout != .Packed);
const field_index = std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?;
ip.extra.items[s.extra_index + field_index] = @intFromEnum(new_zir_index);
}
 
pub fn haveFieldTypes(s: @This(), ip: *const InternPool) bool {
const types = s.field_types.get(ip);
return types.len == 0 or types[0] != .none;
}
 
pub fn haveFieldInits(s: @This(), ip: *const InternPool) bool {
return switch (s.layout) {
.Packed => s.packedFlagsPtr(ip).inits_resolved,
.Auto, .Extern => s.flagsPtr(ip).inits_resolved,
};
}
 
pub fn setHaveFieldInits(s: @This(), ip: *InternPool) void {
switch (s.layout) {
.Packed => s.packedFlagsPtr(ip).inits_resolved = true,
.Auto, .Extern => s.flagsPtr(ip).inits_resolved = true,
}
}
 
pub fn haveLayout(s: @This(), ip: *InternPool) bool {
return switch (s.layout) {
.Packed => s.backingIntType(ip).* != .none,
.Auto, .Extern => s.flagsPtr(ip).layout_resolved,
};
}
 
pub fn isTuple(s: @This(), ip: *InternPool) bool {
return s.layout != .Packed and s.flagsPtr(ip).is_tuple;
}
 
pub fn hasReorderedFields(s: @This()) bool {
return s.layout == .Auto;
}
 
pub const RuntimeOrderIterator = struct {
ip: *InternPool,
field_index: u32,
struct_type: InternPool.Key.StructType,
 
pub fn next(it: *@This()) ?u32 {
var i = it.field_index;
 
if (i >= it.struct_type.field_types.len)
return null;
 
if (it.struct_type.hasReorderedFields()) {
it.field_index += 1;
return it.struct_type.runtime_order.get(it.ip)[i].toInt();
}
 
while (it.struct_type.fieldIsComptime(it.ip, i)) {
i += 1;
if (i >= it.struct_type.field_types.len)
return null;
}
 
it.field_index = i + 1;
return i;
}
};
 
/// Iterates over non-comptime fields in the order they are laid out in memory at runtime.
/// May or may not include zero-bit fields.
/// Asserts the struct is not packed.
pub fn iterateRuntimeOrder(s: @This(), ip: *InternPool) RuntimeOrderIterator {
assert(s.layout != .Packed);
return .{
.ip = ip,
.field_index = 0,
.struct_type = s,
};
}
};
 
pub const AnonStructType = struct {
types: Index.Slice,
/// This may be empty, indicating this is a tuple.
@@ -1010,192 +713,41 @@ pub const Key = union(enum) {
}
};
 
/// Serves two purposes:
/// * Being the key in the InternPool hash map, which only requires the `decl` field.
/// * Providing the other fields that do not require chasing the enum type.
pub const UnionType = struct {
/// The Decl that corresponds to the union itself.
decl: DeclIndex,
/// The index of the `Tag.TypeUnion` payload. Ignored by `get`,
/// populated by `indexToKey`.
extra_index: u32,
namespace: NamespaceIndex,
flags: Tag.TypeUnion.Flags,
/// The enum that provides the list of field names and values.
enum_tag_ty: Index,
zir_index: TrackedInst.Index.Optional,
 
/// The returned pointer expires with any addition to the `InternPool`.
pub fn flagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeUnion.Flags {
const flags_field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?;
return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
}
 
/// The returned pointer expires with any addition to the `InternPool`.
pub fn size(self: @This(), ip: *InternPool) *u32 {
const size_field_index = std.meta.fieldIndex(Tag.TypeUnion, "size").?;
return &ip.extra.items[self.extra_index + size_field_index];
}
 
/// The returned pointer expires with any addition to the `InternPool`.
pub fn padding(self: @This(), ip: *InternPool) *u32 {
const padding_field_index = std.meta.fieldIndex(Tag.TypeUnion, "padding").?;
return &ip.extra.items[self.extra_index + padding_field_index];
}
 
pub fn haveFieldTypes(self: @This(), ip: *const InternPool) bool {
return self.flagsPtr(ip).status.haveFieldTypes();
}
 
pub fn hasTag(self: @This(), ip: *const InternPool) bool {
return self.flagsPtr(ip).runtime_tag.hasTag();
}
 
pub fn getLayout(self: @This(), ip: *const InternPool) std.builtin.Type.ContainerLayout {
return self.flagsPtr(ip).layout;
}
 
pub fn haveLayout(self: @This(), ip: *const InternPool) bool {
return self.flagsPtr(ip).status.haveLayout();
}
 
/// Pointer to an enum type which is used for the tag of the union.
/// This type is created even for untagged unions, including when the memory
/// layout does not store the tag.
/// Whether zig chooses this type or the user specifies it, it is stored here.
/// This will be set to the null type until status is `have_field_types`.
/// This accessor is provided so that the tag type can be mutated, and so that
/// when it is mutated, the mutations are observed.
/// The returned pointer is invalidated when something is added to the `InternPool`.
pub fn tagTypePtr(self: @This(), ip: *const InternPool) *Index {
const tag_ty_field_index = std.meta.fieldIndex(Tag.TypeUnion, "tag_ty").?;
return @ptrCast(&ip.extra.items[self.extra_index + tag_ty_field_index]);
}
 
pub fn setFieldTypes(self: @This(), ip: *InternPool, types: []const Index) void {
@memcpy((Index.Slice{
.start = @intCast(self.extra_index + @typeInfo(Tag.TypeUnion).Struct.fields.len),
.len = @intCast(types.len),
}).get(ip), types);
}
 
pub fn setFieldAligns(self: @This(), ip: *InternPool, aligns: []const Alignment) void {
if (aligns.len == 0) return;
assert(self.flagsPtr(ip).any_aligned_fields);
@memcpy((Alignment.Slice{
.start = @intCast(
self.extra_index + @typeInfo(Tag.TypeUnion).Struct.fields.len + aligns.len,
),
.len = @intCast(aligns.len),
}).get(ip), aligns);
}
};
 
pub const EnumType = struct {
/// The Decl that corresponds to the enum itself.
decl: DeclIndex,
/// Represents the declarations inside this enum.
namespace: OptionalNamespaceIndex,
/// An integer type which is used for the numerical value of the enum.
/// This field is present regardless of whether the enum has an
/// explicitly provided tag type or auto-numbered.
tag_ty: Index,
/// Set of field names in declaration order.
names: NullTerminatedString.Slice,
/// Maps integer tag value to field index.
/// Entries are in declaration order, same as `fields`.
/// If this is empty, it means the enum tags are auto-numbered.
values: Index.Slice,
tag_mode: TagMode,
/// This is ignored by `get` but will always be provided by `indexToKey`.
names_map: OptionalMapIndex = .none,
/// This is ignored by `get` but will be provided by `indexToKey` when
/// a value map exists.
values_map: OptionalMapIndex = .none,
zir_index: TrackedInst.Index.Optional,
 
pub const TagMode = enum {
/// The integer tag type was auto-numbered by zig.
auto,
/// The integer tag type was provided by the enum declaration, and the enum
/// is exhaustive.
explicit,
/// The integer tag type was provided by the enum declaration, and the enum
/// is non-exhaustive.
nonexhaustive,
};
 
/// Look up field index based on field name.
pub fn nameIndex(self: EnumType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
const map = &ip.maps.items[@intFromEnum(self.names_map.unwrap().?)];
const adapter: NullTerminatedString.Adapter = .{ .strings = self.names.get(ip) };
const field_index = map.getIndexAdapted(name, adapter) orelse return null;
return @intCast(field_index);
}
 
/// Look up field index based on tag value.
/// Asserts that `values_map` is not `none`.
/// This function returns `null` when `tag_val` does not have the
/// integer tag type of the enum.
pub fn tagValueIndex(self: EnumType, ip: *const InternPool, tag_val: Index) ?u32 {
assert(tag_val != .none);
// TODO: we should probably decide on a single interface for this function, but currently
// it's being called with both tag values and underlying ints. Fix this!
const int_tag_val = switch (ip.indexToKey(tag_val)) {
.enum_tag => |enum_tag| enum_tag.int,
.int => tag_val,
else => unreachable,
};
if (self.values_map.unwrap()) |values_map| {
const map = &ip.maps.items[@intFromEnum(values_map)];
const adapter: Index.Adapter = .{ .indexes = self.values.get(ip) };
const field_index = map.getIndexAdapted(int_tag_val, adapter) orelse return null;
return @intCast(field_index);
}
// Auto-numbered enum. Convert `int_tag_val` to field index.
const field_index = switch (ip.indexToKey(int_tag_val).int.storage) {
inline .u64, .i64 => |x| std.math.cast(u32, x) orelse return null,
.big_int => |x| x.to(u32) catch return null,
.lazy_align, .lazy_size => unreachable,
};
return if (field_index < self.names.len) field_index else null;
}
};
 
pub const IncompleteEnumType = struct {
/// Same as corresponding `EnumType` field.
decl: DeclIndex,
/// Same as corresponding `EnumType` field.
namespace: OptionalNamespaceIndex,
/// The field names and field values are not known yet, but
/// the number of fields must be known ahead of time.
fields_len: u32,
/// This information is needed so that the size does not change
/// later when populating field values.
has_values: bool,
/// Same as corresponding `EnumType` field.
tag_mode: EnumType.TagMode,
/// This may be updated via `setTagType` later.
tag_ty: Index = .none,
zir_index: TrackedInst.Index.Optional,
 
pub fn toEnumType(self: @This()) EnumType {
return .{
.decl = self.decl,
.namespace = self.namespace,
.tag_ty = self.tag_ty,
.tag_mode = self.tag_mode,
.names = .{ .start = 0, .len = 0 },
.values = .{ .start = 0, .len = 0 },
.zir_index = self.zir_index,
};
}
 
/// Only the decl is used for hashing and equality, so we can construct
/// this minimal key for use with `map`.
pub fn toKey(self: @This()) Key {
return .{ .enum_type = self.toEnumType() };
}
/// This is the hashmap key. To fetch other data associated with the type, see:
/// * `loadStructType`
/// * `loadUnionType`
/// * `loadEnumType`
/// * `loadOpaqueType`
pub const NamespaceType = union(enum) {
/// This type corresponds to an actual source declaration, e.g. `struct { ... }`.
/// It is hashed based on its ZIR instruction index and set of captures.
declared: struct {
/// A `struct_decl`, `union_decl`, `enum_decl`, or `opaque_decl` instruction.
zir_index: TrackedInst.Index,
/// The captured values of this type. These values must be fully resolved per the language spec.
captures: union(enum) {
owned: CaptureValue.Slice,
external: []const CaptureValue,
},
},
/// This type is an automatically-generated enum tag type for a union.
/// It is hashed based on the index of the union type it corresponds to.
generated_tag: struct {
/// The union for which this is a tag type.
union_type: Index,
},
/// This type originates from a reification via `@Type`.
/// It is hashed based on its ZIR instruction index and fields, attributes, etc.
/// To avoid making this key overly complex, the type-specific data is hashed by Sema.
reified: struct {
/// A `reify` instruction.
zir_index: TrackedInst.Index,
/// A hash of this type's attributes, fields, etc, generated by Sema.
type_hash: u64,
},
/// This type is `@TypeOf(.{})`.
/// TODO: can we change the language spec to not special-case this type?
empty_struct: void,
};
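// Illustrative sketch (not part of this diff): under this key, repeated
// instantiations of a single AST node unify exactly when their captures match.
// Hypothetical example:
//
//     fn List(comptime T: type) type {
//         return struct { items: []T }; // one `struct_decl` ZIR index
//     }
//
//     comptime {
//         // Same ZIR index + same capture (`T == u32`) => same interned type.
//         std.debug.assert(List(u32) == List(u32));
//         // Same ZIR index, different capture => distinct types.
//         std.debug.assert(List(u32) != List(u8));
//     }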
 
pub const FuncType = struct {
@@ -1546,12 +1098,37 @@ pub const Key = union(enum) {
.payload => |y| Hash.hash(seed + 1, asBytes(&x.ty) ++ asBytes(&y)),
},
 
inline .opaque_type,
.variable => |variable| Hash.hash(seed, asBytes(&variable.decl)),
 
.opaque_type,
.enum_type,
.variable,
.union_type,
.struct_type,
=> |x| Hash.hash(seed, asBytes(&x.decl)),
=> |namespace_type| {
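// Mix in the active tag first so that, for example, a `declared` type and a
// `reified` type referring to the same ZIR instruction can never hash alike.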
var hasher = Hash.init(seed);
std.hash.autoHash(&hasher, std.meta.activeTag(namespace_type));
switch (namespace_type) {
.declared => |declared| {
std.hash.autoHash(&hasher, declared.zir_index);
const captures = switch (declared.captures) {
.owned => |cvs| cvs.get(ip),
.external => |cvs| cvs,
};
for (captures) |cv| {
std.hash.autoHash(&hasher, cv);
}
},
.generated_tag => |generated_tag| {
std.hash.autoHash(&hasher, generated_tag.union_type);
},
.reified => |reified| {
std.hash.autoHash(&hasher, reified.zir_index);
std.hash.autoHash(&hasher, reified.type_hash);
},
.empty_struct => {},
}
return hasher.final();
},
 
.int => |int| {
var hasher = Hash.init(seed);
@@ -1956,21 +1533,31 @@ pub const Key = union(enum) {
}
},
 
.opaque_type => |a_info| {
const b_info = b.opaque_type;
return a_info.decl == b_info.decl;
},
.enum_type => |a_info| {
const b_info = b.enum_type;
return a_info.decl == b_info.decl;
},
.union_type => |a_info| {
const b_info = b.union_type;
return a_info.decl == b_info.decl;
},
.struct_type => |a_info| {
const b_info = b.struct_type;
return a_info.decl == b_info.decl;
inline .opaque_type, .enum_type, .union_type, .struct_type => |a_info, a_tag_ct| {
const b_info = @field(b, @tagName(a_tag_ct));
if (std.meta.activeTag(a_info) != std.meta.activeTag(b_info)) return false;
switch (a_info) {
.declared => |a_d| {
const b_d = b_info.declared;
if (a_d.zir_index != b_d.zir_index) return false;
const a_captures = switch (a_d.captures) {
.owned => |s| s.get(ip),
.external => |cvs| cvs,
};
const b_captures = switch (b_d.captures) {
.owned => |s| s.get(ip),
.external => |cvs| cvs,
};
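// The `@ptrCast`s reinterpret both capture lists as `u32` slices, so this is
// a bitwise comparison of the 32-bit `CaptureValue` encodings.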
return std.mem.eql(u32, @ptrCast(a_captures), @ptrCast(b_captures));
},
.generated_tag => |a_gt| return a_gt.union_type == b_info.generated_tag.union_type,
.reified => |a_r| {
const b_r = b_info.reified;
return a_r.zir_index == b_r.zir_index and
a_r.type_hash == b_r.type_hash;
},
.empty_struct => return true,
}
},
.aggregate => |a_info| {
const b_info = b.aggregate;
@@ -2112,21 +1699,15 @@ pub const RequiresComptime = enum(u2) { no, yes, unknown, wip };
// Unlike `Tag.TypeUnion` which is an encoding, and `Key.UnionType` which is a
// minimal hashmap key, this type is a convenience type that contains info
// needed by semantic analysis.
pub const UnionType = struct {
pub const LoadedUnionType = struct {
/// The index of the `Tag.TypeUnion` payload.
extra_index: u32,
/// The Decl that corresponds to the union itself.
decl: DeclIndex,
/// Represents the declarations inside this union.
namespace: NamespaceIndex,
namespace: OptionalNamespaceIndex,
/// The enum tag type.
enum_tag_ty: Index,
/// The integer tag type of the enum.
int_tag_ty: Index,
/// ABI size of the union, including padding
size: u64,
/// Trailing padding bytes
padding: u32,
/// List of field names in declaration order.
field_names: NullTerminatedString.Slice,
/// List of field types in declaration order.
/// These are `none` until `status` is `have_field_types` or `have_layout`.
field_types: Index.Slice,
@@ -2134,12 +1715,9 @@ pub const UnionType = struct {
/// `none` means the ABI alignment of the type.
/// If this slice has length 0 it means all elements are `none`.
field_aligns: Alignment.Slice,
/// Index of the union_decl ZIR instruction.
zir_index: TrackedInst.Index.Optional,
/// Index into extra array of the `flags` field.
flags_index: u32,
/// Copied from `enum_tag_ty`.
names_map: OptionalMapIndex,
/// Index of the union_decl or reify ZIR instruction.
zir_index: TrackedInst.Index,
captures: CaptureValue.Slice,
 
pub const RuntimeTag = enum(u2) {
none,
@@ -2194,78 +1772,783 @@ pub const UnionType = struct {
}
};
 
pub fn loadTagType(self: LoadedUnionType, ip: *InternPool) LoadedEnumType {
return ip.loadEnumType(self.enum_tag_ty);
}
 
/// Pointer to an enum type which is used for the tag of the union.
/// This type is created even for untagged unions, including when the memory
/// layout does not store the tag.
/// Whether zig chooses this type or the user specifies it, it is stored here.
/// This will be set to the null type until status is `have_field_types`.
/// This accessor is provided so that the tag type can be mutated, and so that
/// when it is mutated, the mutations are observed.
/// The returned pointer expires with any addition to the `InternPool`.
pub fn flagsPtr(self: UnionType, ip: *const InternPool) *Tag.TypeUnion.Flags {
return @ptrCast(&ip.extra.items[self.flags_index]);
pub fn tagTypePtr(self: LoadedUnionType, ip: *const InternPool) *Index {
const field_index = std.meta.fieldIndex(Tag.TypeUnion, "tag_ty").?;
return @ptrCast(&ip.extra.items[self.extra_index + field_index]);
}
 
/// Look up field index based on field name.
pub fn nameIndex(self: UnionType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
const map = &ip.maps.items[@intFromEnum(self.names_map.unwrap().?)];
const adapter: NullTerminatedString.Adapter = .{ .strings = self.field_names.get(ip) };
const field_index = map.getIndexAdapted(name, adapter) orelse return null;
return @intCast(field_index);
/// The returned pointer expires with any addition to the `InternPool`.
pub fn flagsPtr(self: LoadedUnionType, ip: *const InternPool) *Tag.TypeUnion.Flags {
const field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?;
return @ptrCast(&ip.extra.items[self.extra_index + field_index]);
}
 
pub fn hasTag(self: UnionType, ip: *const InternPool) bool {
/// The returned pointer expires with any addition to the `InternPool`.
pub fn size(self: LoadedUnionType, ip: *const InternPool) *u32 {
const field_index = std.meta.fieldIndex(Tag.TypeUnion, "size").?;
return &ip.extra.items[self.extra_index + field_index];
}
 
/// The returned pointer expires with any addition to the `InternPool`.
pub fn padding(self: LoadedUnionType, ip: *const InternPool) *u32 {
const field_index = std.meta.fieldIndex(Tag.TypeUnion, "padding").?;
return &ip.extra.items[self.extra_index + field_index];
}
 
pub fn hasTag(self: LoadedUnionType, ip: *const InternPool) bool {
return self.flagsPtr(ip).runtime_tag.hasTag();
}
 
pub fn haveFieldTypes(self: UnionType, ip: *const InternPool) bool {
pub fn haveFieldTypes(self: LoadedUnionType, ip: *const InternPool) bool {
return self.flagsPtr(ip).status.haveFieldTypes();
}
 
pub fn haveLayout(self: UnionType, ip: *const InternPool) bool {
pub fn haveLayout(self: LoadedUnionType, ip: *const InternPool) bool {
return self.flagsPtr(ip).status.haveLayout();
}
 
pub fn getLayout(self: UnionType, ip: *const InternPool) std.builtin.Type.ContainerLayout {
pub fn getLayout(self: LoadedUnionType, ip: *const InternPool) std.builtin.Type.ContainerLayout {
return self.flagsPtr(ip).layout;
}
 
pub fn fieldAlign(self: UnionType, ip: *const InternPool, field_index: u32) Alignment {
pub fn fieldAlign(self: LoadedUnionType, ip: *const InternPool, field_index: u32) Alignment {
if (self.field_aligns.len == 0) return .none;
return self.field_aligns.get(ip)[field_index];
}
 
/// This does not mutate the field of UnionType.
pub fn setZirIndex(self: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
/// This does not mutate the field of LoadedUnionType.
pub fn setZirIndex(self: LoadedUnionType, ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
const flags_field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?;
const zir_index_field_index = std.meta.fieldIndex(Tag.TypeUnion, "zir_index").?;
const ptr: *TrackedInst.Index.Optional =
@ptrCast(&ip.extra.items[self.flags_index - flags_field_index + zir_index_field_index]);
ptr.* = new_zir_index;
}
 
pub fn setFieldTypes(self: LoadedUnionType, ip: *const InternPool, types: []const Index) void {
@memcpy(self.field_types.get(ip), types);
}
 
pub fn setFieldAligns(self: LoadedUnionType, ip: *const InternPool, aligns: []const Alignment) void {
if (aligns.len == 0) return;
assert(self.flagsPtr(ip).any_aligned_fields);
@memcpy(self.field_aligns.get(ip), aligns);
}
};
 
/// Fetch all the interesting fields of a union type into a convenient data
/// structure.
/// This asserts that the union's enum tag type has been resolved.
pub fn loadUnionType(ip: *InternPool, key: Key.UnionType) UnionType {
const type_union = ip.extraDataTrail(Tag.TypeUnion, key.extra_index);
const enum_ty = type_union.data.tag_ty;
const enum_info = ip.indexToKey(enum_ty).enum_type;
const fields_len: u32 = @intCast(enum_info.names.len);
pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType {
const data = ip.items.items(.data)[@intFromEnum(index)];
const type_union = ip.extraDataTrail(Tag.TypeUnion, data);
const fields_len = type_union.data.fields_len;
 
var extra_index = type_union.end;
const captures_len = if (type_union.data.flags.any_captures) c: {
const len = ip.extra.items[extra_index];
extra_index += 1;
break :c len;
} else 0;
 
const captures: CaptureValue.Slice = .{
.start = extra_index,
.len = captures_len,
};
extra_index += captures_len;
if (type_union.data.flags.is_reified) {
extra_index += 2; // PackedU64
}
 
const field_types: Index.Slice = .{
.start = extra_index,
.len = fields_len,
};
extra_index += fields_len;
 
const field_aligns: Alignment.Slice = if (type_union.data.flags.any_aligned_fields) a: {
const a: Alignment.Slice = .{
.start = extra_index,
.len = fields_len,
};
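// Field alignments are packed four per `u32` word of `extra` (one byte
// each), hence the `divCeil` by 4 below.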
extra_index += std.math.divCeil(u32, fields_len, 4) catch unreachable;
break :a a;
} else .{ .start = 0, .len = 0 };
 
return .{
.extra_index = data,
.decl = type_union.data.decl,
.namespace = type_union.data.namespace,
.enum_tag_ty = enum_ty,
.int_tag_ty = enum_info.tag_ty,
.size = type_union.data.size,
.padding = type_union.data.padding,
.field_names = enum_info.names,
.names_map = enum_info.names_map,
.field_types = .{
.start = type_union.end,
.len = fields_len,
},
.field_aligns = .{
.start = type_union.end + fields_len,
.len = if (type_union.data.flags.any_aligned_fields) fields_len else 0,
},
.enum_tag_ty = type_union.data.tag_ty,
.field_types = field_types,
.field_aligns = field_aligns,
.zir_index = type_union.data.zir_index,
.flags_index = key.extra_index + std.meta.fieldIndex(Tag.TypeUnion, "flags").?,
.captures = captures,
};
}
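// Illustrative usage sketch (hypothetical caller):
//
//     const union_type = ip.loadUnionType(index);
//     if (union_type.hasTag(ip) and union_type.haveFieldTypes(ip)) {
//         const tag_info = ip.loadEnumType(union_type.enum_tag_ty);
//         // ...inspect `tag_info.names` in declaration order...
//     }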
 
pub const LoadedStructType = struct {
/// The index of the `Tag.TypeStruct` or `Tag.TypeStructPacked` payload.
extra_index: u32,
/// The struct's owner Decl. `none` when the struct is `@TypeOf(.{})`.
decl: OptionalDeclIndex,
/// `none` when the struct has no declarations.
namespace: OptionalNamespaceIndex,
/// Index of the `struct_decl` or `reify` ZIR instruction.
/// Only `none` when the struct is `@TypeOf(.{})`.
zir_index: TrackedInst.Index.Optional,
layout: std.builtin.Type.ContainerLayout,
field_names: NullTerminatedString.Slice,
field_types: Index.Slice,
field_inits: Index.Slice,
field_aligns: Alignment.Slice,
runtime_order: RuntimeOrder.Slice,
comptime_bits: ComptimeBits,
offsets: Offsets,
names_map: OptionalMapIndex,
captures: CaptureValue.Slice,
 
pub const ComptimeBits = struct {
start: u32,
/// This is the number of u32 elements, not the number of struct fields.
len: u32,
 
pub fn get(this: ComptimeBits, ip: *const InternPool) []u32 {
return ip.extra.items[this.start..][0..this.len];
}
 
pub fn getBit(this: ComptimeBits, ip: *const InternPool, i: usize) bool {
if (this.len == 0) return false;
return @as(u1, @truncate(this.get(ip)[i / 32] >> @intCast(i % 32))) != 0;
}
 
pub fn setBit(this: ComptimeBits, ip: *const InternPool, i: usize) void {
this.get(ip)[i / 32] |= @as(u32, 1) << @intCast(i % 32);
}
 
pub fn clearBit(this: ComptimeBits, ip: *const InternPool, i: usize) void {
this.get(ip)[i / 32] &= ~(@as(u32, 1) << @intCast(i % 32));
}
};
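// Bit addressing sketch: field `i` maps to word `i / 32`, bit `i % 32`.
// For example, field 37 lives in word 1 at bit 5, so `getBit` shifts
// `get(ip)[1]` right by 5 and truncates to a single bit.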
 
pub const Offsets = struct {
start: u32,
len: u32,
 
pub fn get(this: Offsets, ip: *const InternPool) []u32 {
return @ptrCast(ip.extra.items[this.start..][0..this.len]);
}
};
 
pub const RuntimeOrder = enum(u32) {
/// Placeholder until layout is resolved.
unresolved = std.math.maxInt(u32) - 0,
/// Field not present at runtime.
omitted = std.math.maxInt(u32) - 1,
_,
 
pub const Slice = struct {
start: u32,
len: u32,
 
pub fn get(slice: RuntimeOrder.Slice, ip: *const InternPool) []RuntimeOrder {
return @ptrCast(ip.extra.items[slice.start..][0..slice.len]);
}
};
 
pub fn toInt(i: RuntimeOrder) ?u32 {
return switch (i) {
.omitted => null,
.unresolved => unreachable,
else => @intFromEnum(i),
};
}
};
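// The sentinels count down from `maxInt(u32)`, so any real runtime index
// (anything below `maxInt(u32) - 1`) round-trips through `toInt` unchanged.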
 
/// Look up field index based on field name.
pub fn nameIndex(self: LoadedStructType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
const names_map = self.names_map.unwrap() orelse {
const i = name.toUnsigned(ip) orelse return null;
if (i >= self.field_types.len) return null;
return i;
};
const map = &ip.maps.items[@intFromEnum(names_map)];
const adapter: NullTerminatedString.Adapter = .{ .strings = self.field_names.get(ip) };
const field_index = map.getIndexAdapted(name, adapter) orelse return null;
return @intCast(field_index);
}
 
/// Returns the already-existing field with the same name, if any.
pub fn addFieldName(
self: @This(),
ip: *InternPool,
name: NullTerminatedString,
) ?u32 {
return ip.addFieldName(self.names_map.unwrap().?, self.field_names.start, name);
}
 
pub fn fieldAlign(s: @This(), ip: *const InternPool, i: usize) Alignment {
if (s.field_aligns.len == 0) return .none;
return s.field_aligns.get(ip)[i];
}
 
pub fn fieldInit(s: @This(), ip: *const InternPool, i: usize) Index {
if (s.field_inits.len == 0) return .none;
assert(s.haveFieldInits(ip));
return s.field_inits.get(ip)[i];
}
 
/// Returns `none` in the case the struct is a tuple.
pub fn fieldName(s: @This(), ip: *const InternPool, i: usize) OptionalNullTerminatedString {
if (s.field_names.len == 0) return .none;
return s.field_names.get(ip)[i].toOptional();
}
 
pub fn fieldIsComptime(s: @This(), ip: *const InternPool, i: usize) bool {
return s.comptime_bits.getBit(ip, i);
}
 
pub fn setFieldComptime(s: @This(), ip: *InternPool, i: usize) void {
s.comptime_bits.setBit(ip, i);
}
 
/// Reads the non-opv flag calculated during AstGen. Used to short-circuit more
/// complicated logic.
pub fn knownNonOpv(s: @This(), ip: *InternPool) bool {
return switch (s.layout) {
.Packed => false,
.Auto, .Extern => s.flagsPtr(ip).known_non_opv,
};
}
 
/// The returned pointer expires with any addition to the `InternPool`.
/// Asserts the struct is not packed.
pub fn flagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStruct.Flags {
assert(self.layout != .Packed);
const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?;
return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
}
 
/// The returned pointer expires with any addition to the `InternPool`.
/// Asserts that the struct is packed.
pub fn packedFlagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStructPacked.Flags {
assert(self.layout == .Packed);
const flags_field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?;
return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
}
 
pub fn assumeRuntimeBitsIfFieldTypesWip(s: @This(), ip: *InternPool) bool {
if (s.layout == .Packed) return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.field_types_wip) {
flags_ptr.assumed_runtime_bits = true;
return true;
}
return false;
}
 
pub fn setTypesWip(s: @This(), ip: *InternPool) bool {
if (s.layout == .Packed) return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.field_types_wip) return true;
flags_ptr.field_types_wip = true;
return false;
}
 
pub fn clearTypesWip(s: @This(), ip: *InternPool) void {
if (s.layout == .Packed) return;
s.flagsPtr(ip).field_types_wip = false;
}
 
pub fn setLayoutWip(s: @This(), ip: *InternPool) bool {
if (s.layout == .Packed) return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.layout_wip) return true;
flags_ptr.layout_wip = true;
return false;
}
 
pub fn clearLayoutWip(s: @This(), ip: *InternPool) void {
if (s.layout == .Packed) return;
s.flagsPtr(ip).layout_wip = false;
}
 
pub fn setAlignmentWip(s: @This(), ip: *InternPool) bool {
if (s.layout == .Packed) return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.alignment_wip) return true;
flags_ptr.alignment_wip = true;
return false;
}
 
pub fn clearAlignmentWip(s: @This(), ip: *InternPool) void {
if (s.layout == .Packed) return;
s.flagsPtr(ip).alignment_wip = false;
}
 
pub fn setInitsWip(s: @This(), ip: *InternPool) bool {
switch (s.layout) {
.Packed => {
const flag = &s.packedFlagsPtr(ip).field_inits_wip;
if (flag.*) return true;
flag.* = true;
return false;
},
.Auto, .Extern => {
const flag = &s.flagsPtr(ip).field_inits_wip;
if (flag.*) return true;
flag.* = true;
return false;
},
}
}
 
pub fn clearInitsWip(s: @This(), ip: *InternPool) void {
switch (s.layout) {
.Packed => s.packedFlagsPtr(ip).field_inits_wip = false,
.Auto, .Extern => s.flagsPtr(ip).field_inits_wip = false,
}
}
 
pub fn setFullyResolved(s: @This(), ip: *InternPool) bool {
if (s.layout == .Packed) return true;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.fully_resolved) return true;
flags_ptr.fully_resolved = true;
return false;
}
 
pub fn clearFullyResolved(s: @This(), ip: *InternPool) void {
s.flagsPtr(ip).fully_resolved = false;
}
 
/// The returned pointer expires with any addition to the `InternPool`.
/// Asserts the struct is not packed.
pub fn size(self: @This(), ip: *InternPool) *u32 {
assert(self.layout != .Packed);
const size_field_index = std.meta.fieldIndex(Tag.TypeStruct, "size").?;
return @ptrCast(&ip.extra.items[self.extra_index + size_field_index]);
}
 
/// The backing integer type of the packed struct. Whether zig chooses
/// this type or the user specifies it, it is stored here. This will be
/// set to `none` until the layout is resolved.
/// Asserts the struct is packed.
pub fn backingIntType(s: @This(), ip: *const InternPool) *Index {
assert(s.layout == .Packed);
const field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "backing_int_ty").?;
return @ptrCast(&ip.extra.items[s.extra_index + field_index]);
}
 
/// Asserts the struct is not packed.
pub fn setZirIndex(s: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
assert(s.layout != .Packed);
const field_index = std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?;
ip.extra.items[s.extra_index + field_index] = @intFromEnum(new_zir_index);
}
 
pub fn haveFieldTypes(s: @This(), ip: *const InternPool) bool {
const types = s.field_types.get(ip);
return types.len == 0 or types[0] != .none;
}
 
pub fn haveFieldInits(s: @This(), ip: *const InternPool) bool {
return switch (s.layout) {
.Packed => s.packedFlagsPtr(ip).inits_resolved,
.Auto, .Extern => s.flagsPtr(ip).inits_resolved,
};
}
 
pub fn setHaveFieldInits(s: @This(), ip: *InternPool) void {
switch (s.layout) {
.Packed => s.packedFlagsPtr(ip).inits_resolved = true,
.Auto, .Extern => s.flagsPtr(ip).inits_resolved = true,
}
}
 
pub fn haveLayout(s: @This(), ip: *InternPool) bool {
return switch (s.layout) {
.Packed => s.backingIntType(ip).* != .none,
.Auto, .Extern => s.flagsPtr(ip).layout_resolved,
};
}
 
pub fn isTuple(s: @This(), ip: *InternPool) bool {
return s.layout != .Packed and s.flagsPtr(ip).is_tuple;
}
 
pub fn hasReorderedFields(s: @This()) bool {
return s.layout == .Auto;
}
 
pub const RuntimeOrderIterator = struct {
ip: *InternPool,
field_index: u32,
struct_type: InternPool.LoadedStructType,
 
pub fn next(it: *@This()) ?u32 {
var i = it.field_index;
 
if (i >= it.struct_type.field_types.len)
return null;
 
if (it.struct_type.hasReorderedFields()) {
it.field_index += 1;
return it.struct_type.runtime_order.get(it.ip)[i].toInt();
}
 
while (it.struct_type.fieldIsComptime(it.ip, i)) {
i += 1;
if (i >= it.struct_type.field_types.len)
return null;
}
 
it.field_index = i + 1;
return i;
}
};
 
/// Iterates over non-comptime fields in the order they are laid out in memory at runtime.
/// May or may not include zero-bit fields.
/// Asserts the struct is not packed.
pub fn iterateRuntimeOrder(s: @This(), ip: *InternPool) RuntimeOrderIterator {
assert(s.layout != .Packed);
return .{
.ip = ip,
.field_index = 0,
.struct_type = s,
};
}
};
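// Illustrative usage sketch (hypothetical caller; assumes a non-packed struct
// type whose layout has been resolved):
//
//     const struct_type = ip.loadStructType(index);
//     var it = struct_type.iterateRuntimeOrder(ip);
//     while (it.next()) |field_index| {
//         const field_ty = struct_type.field_types.get(ip)[field_index];
//         _ = field_ty; // ...layout or codegen work per runtime field...
//     }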
 
pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
const item = ip.items.get(@intFromEnum(index));
switch (item.tag) {
.type_struct => {
if (item.data == 0) return .{
.extra_index = 0,
.decl = .none,
.namespace = .none,
.zir_index = .none,
.layout = .Auto,
.field_names = .{ .start = 0, .len = 0 },
.field_types = .{ .start = 0, .len = 0 },
.field_inits = .{ .start = 0, .len = 0 },
.field_aligns = .{ .start = 0, .len = 0 },
.runtime_order = .{ .start = 0, .len = 0 },
.comptime_bits = .{ .start = 0, .len = 0 },
.offsets = .{ .start = 0, .len = 0 },
.names_map = .none,
.captures = .{ .start = 0, .len = 0 },
};
const extra = ip.extraDataTrail(Tag.TypeStruct, item.data);
const fields_len = extra.data.fields_len;
var extra_index = extra.end;
const captures_len = if (extra.data.flags.any_captures) c: {
const len = ip.extra.items[extra_index];
extra_index += 1;
break :c len;
} else 0;
const captures: CaptureValue.Slice = .{
.start = extra_index,
.len = captures_len,
};
extra_index += captures_len;
if (extra.data.flags.is_reified) {
extra_index += 2; // PackedU64
}
const field_types: Index.Slice = .{
.start = extra_index,
.len = fields_len,
};
extra_index += fields_len;
const names_map: OptionalMapIndex, const names: NullTerminatedString.Slice = if (!extra.data.flags.is_tuple) n: {
const names_map: OptionalMapIndex = @enumFromInt(ip.extra.items[extra_index]);
extra_index += 1;
const names: NullTerminatedString.Slice = .{ .start = extra_index, .len = fields_len };
extra_index += fields_len;
break :n .{ names_map, names };
} else .{ .none, .{ .start = 0, .len = 0 } };
const inits: Index.Slice = if (extra.data.flags.any_default_inits) i: {
const inits: Index.Slice = .{ .start = extra_index, .len = fields_len };
extra_index += fields_len;
break :i inits;
} else .{ .start = 0, .len = 0 };
const namespace: OptionalNamespaceIndex = if (extra.data.flags.has_namespace) n: {
const n: NamespaceIndex = @enumFromInt(ip.extra.items[extra_index]);
extra_index += 1;
break :n n.toOptional();
} else .none;
const aligns: Alignment.Slice = if (extra.data.flags.any_aligned_fields) a: {
const a: Alignment.Slice = .{ .start = extra_index, .len = fields_len };
extra_index += std.math.divCeil(u32, fields_len, 4) catch unreachable;
break :a a;
} else .{ .start = 0, .len = 0 };
const comptime_bits: LoadedStructType.ComptimeBits = if (extra.data.flags.any_comptime_fields) c: {
const len = std.math.divCeil(u32, fields_len, 32) catch unreachable;
const c: LoadedStructType.ComptimeBits = .{ .start = extra_index, .len = len };
extra_index += len;
break :c c;
} else .{ .start = 0, .len = 0 };
const runtime_order: LoadedStructType.RuntimeOrder.Slice = if (!extra.data.flags.is_extern) ro: {
const ro: LoadedStructType.RuntimeOrder.Slice = .{ .start = extra_index, .len = fields_len };
extra_index += fields_len;
break :ro ro;
} else .{ .start = 0, .len = 0 };
const offsets: LoadedStructType.Offsets = o: {
const o: LoadedStructType.Offsets = .{ .start = extra_index, .len = fields_len };
extra_index += fields_len;
break :o o;
};
return .{
.extra_index = item.data,
.decl = extra.data.decl.toOptional(),
.namespace = namespace,
.zir_index = extra.data.zir_index.toOptional(),
.layout = if (extra.data.flags.is_extern) .Extern else .Auto,
.field_names = names,
.field_types = field_types,
.field_inits = inits,
.field_aligns = aligns,
.runtime_order = runtime_order,
.comptime_bits = comptime_bits,
.offsets = offsets,
.names_map = names_map,
.captures = captures,
};
},
.type_struct_packed, .type_struct_packed_inits => {
const extra = ip.extraDataTrail(Tag.TypeStructPacked, item.data);
const has_inits = item.tag == .type_struct_packed_inits;
const fields_len = extra.data.fields_len;
var extra_index = extra.end;
const captures_len = if (extra.data.flags.any_captures) c: {
const len = ip.extra.items[extra_index];
extra_index += 1;
break :c len;
} else 0;
const captures: CaptureValue.Slice = .{
.start = extra_index,
.len = captures_len,
};
extra_index += captures_len;
if (extra.data.flags.is_reified) {
extra_index += 2; // PackedU64
}
const field_types: Index.Slice = .{
.start = extra_index,
.len = fields_len,
};
extra_index += fields_len;
const field_names: NullTerminatedString.Slice = .{
.start = extra_index,
.len = fields_len,
};
extra_index += fields_len;
const field_inits: Index.Slice = if (has_inits) inits: {
const i: Index.Slice = .{
.start = extra_index,
.len = fields_len,
};
extra_index += fields_len;
break :inits i;
} else .{ .start = 0, .len = 0 };
return .{
.extra_index = item.data,
.decl = extra.data.decl.toOptional(),
.namespace = extra.data.namespace,
.zir_index = extra.data.zir_index.toOptional(),
.layout = .Packed,
.field_names = field_names,
.field_types = field_types,
.field_inits = field_inits,
.field_aligns = .{ .start = 0, .len = 0 },
.runtime_order = .{ .start = 0, .len = 0 },
.comptime_bits = .{ .start = 0, .len = 0 },
.offsets = .{ .start = 0, .len = 0 },
.names_map = extra.data.names_map.toOptional(),
.captures = captures,
};
},
else => unreachable,
}
}
 
pub const LoadedEnumType = struct {
/// The Decl that corresponds to the enum itself.
decl: DeclIndex,
/// Represents the declarations inside this enum.
namespace: OptionalNamespaceIndex,
/// An integer type which is used for the numerical value of the enum.
/// This field is present regardless of whether the enum has an
/// explicitly provided tag type or auto-numbered.
tag_ty: Index,
/// Set of field names in declaration order.
names: NullTerminatedString.Slice,
/// Maps integer tag value to field index.
/// Entries are in declaration order, same as `fields`.
/// If this is empty, it means the enum tags are auto-numbered.
values: Index.Slice,
tag_mode: TagMode,
names_map: MapIndex,
/// This is guaranteed to not be `.none` if explicit values are provided.
values_map: OptionalMapIndex,
/// This is `none` only if this is a generated tag type.
zir_index: TrackedInst.Index.Optional,
captures: CaptureValue.Slice,
 
pub const TagMode = enum {
/// The integer tag type was auto-numbered by zig.
auto,
/// The integer tag type was provided by the enum declaration, and the enum
/// is exhaustive.
explicit,
/// The integer tag type was provided by the enum declaration, and the enum
/// is non-exhaustive.
nonexhaustive,
};
 
/// Look up field index based on field name.
pub fn nameIndex(self: LoadedEnumType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
const map = &ip.maps.items[@intFromEnum(self.names_map)];
const adapter: NullTerminatedString.Adapter = .{ .strings = self.names.get(ip) };
const field_index = map.getIndexAdapted(name, adapter) orelse return null;
return @intCast(field_index);
}
 
/// Look up field index based on tag value.
/// Asserts that `values_map` is not `none`.
/// This function returns `null` when `tag_val` does not have the
/// integer tag type of the enum.
pub fn tagValueIndex(self: LoadedEnumType, ip: *const InternPool, tag_val: Index) ?u32 {
assert(tag_val != .none);
// TODO: we should probably decide on a single interface for this function, but currently
// it's being called with both tag values and underlying ints. Fix this!
const int_tag_val = switch (ip.indexToKey(tag_val)) {
.enum_tag => |enum_tag| enum_tag.int,
.int => tag_val,
else => unreachable,
};
if (self.values_map.unwrap()) |values_map| {
const map = &ip.maps.items[@intFromEnum(values_map)];
const adapter: Index.Adapter = .{ .indexes = self.values.get(ip) };
const field_index = map.getIndexAdapted(int_tag_val, adapter) orelse return null;
return @intCast(field_index);
}
// Auto-numbered enum. Convert `int_tag_val` to field index.
const field_index = switch (ip.indexToKey(int_tag_val).int.storage) {
inline .u64, .i64 => |x| std.math.cast(u32, x) orelse return null,
.big_int => |x| x.to(u32) catch return null,
.lazy_align, .lazy_size => unreachable,
};
return if (field_index < self.names.len) field_index else null;
}
};
 
pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType {
const item = ip.items.get(@intFromEnum(index));
const tag_mode: LoadedEnumType.TagMode = switch (item.tag) {
.type_enum_auto => {
const extra = ip.extraDataTrail(EnumAuto, item.data);
var extra_index: u32 = @intCast(extra.end);
if (extra.data.zir_index == .none) {
extra_index += 1; // owner_union
}
const captures_len = if (extra.data.captures_len == std.math.maxInt(u32)) c: {
extra_index += 2; // type_hash: PackedU64
break :c 0;
} else extra.data.captures_len;
return .{
.decl = extra.data.decl,
.namespace = extra.data.namespace,
.tag_ty = extra.data.int_tag_type,
.names = .{
.start = extra_index + captures_len,
.len = extra.data.fields_len,
},
.values = .{ .start = 0, .len = 0 },
.tag_mode = .auto,
.names_map = extra.data.names_map,
.values_map = .none,
.zir_index = extra.data.zir_index,
.captures = .{
.start = extra_index,
.len = captures_len,
},
};
},
.type_enum_explicit => .explicit,
.type_enum_nonexhaustive => .nonexhaustive,
else => unreachable,
};
const extra = ip.extraDataTrail(EnumExplicit, item.data);
var extra_index: u32 = @intCast(extra.end);
if (extra.data.zir_index == .none) {
extra_index += 1; // owner_union
}
const captures_len = if (extra.data.captures_len == std.math.maxInt(u32)) c: {
extra_index += 2; // type_hash: PackedU64
break :c 0;
} else extra.data.captures_len;
return .{
.decl = extra.data.decl,
.namespace = extra.data.namespace,
.tag_ty = extra.data.int_tag_type,
.names = .{
.start = extra_index + captures_len,
.len = extra.data.fields_len,
},
.values = .{
.start = extra_index + captures_len + extra.data.fields_len,
.len = if (extra.data.values_map != .none) extra.data.fields_len else 0,
},
.tag_mode = tag_mode,
.names_map = extra.data.names_map,
.values_map = extra.data.values_map,
.zir_index = extra.data.zir_index,
.captures = .{
.start = extra_index,
.len = captures_len,
},
};
}
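// Illustrative usage sketch (hypothetical caller): name and tag-value lookups
// go through the loaded type rather than the hashmap key.
//
//     const enum_type = ip.loadEnumType(index);
//     if (enum_type.nameIndex(ip, field_name)) |field_index| {
//         // `field_index` is the declaration-order index of `field_name`.
//     }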
 
/// Note that this type doubles as the payload for `Tag.type_opaque`.
pub const LoadedOpaqueType = struct {
/// The opaque's owner Decl.
decl: DeclIndex,
/// Contains the declarations inside this opaque.
namespace: OptionalNamespaceIndex,
/// Index of the `opaque_decl` or `reify` instruction.
zir_index: TrackedInst.Index,
captures: CaptureValue.Slice,
};
 
pub fn loadOpaqueType(ip: *const InternPool, index: Index) LoadedOpaqueType {
assert(ip.items.items(.tag)[@intFromEnum(index)] == .type_opaque);
const extra_index = ip.items.items(.data)[@intFromEnum(index)];
const extra = ip.extraDataTrail(Tag.TypeOpaque, extra_index);
const captures_len = if (extra.data.captures_len == std.math.maxInt(u32))
0
else
extra.data.captures_len;
return .{
.decl = extra.data.decl,
.namespace = extra.data.namespace,
.zir_index = extra.data.zir_index,
.captures = .{
.start = extra.end,
.len = captures_len,
},
};
}
 
@@ -2457,6 +2740,7 @@ pub const Index = enum(u32) {
},
};
 
removed: void,
type_int_signed: struct { data: u32 },
type_int_unsigned: struct { data: u32 },
type_array_big: struct { data: *Array },
@@ -2484,9 +2768,8 @@ pub const Index = enum(u32) {
type_enum_explicit: DataIsExtraIndexOfEnumExplicit,
type_enum_nonexhaustive: DataIsExtraIndexOfEnumExplicit,
simple_type: struct { data: SimpleType },
type_opaque: struct { data: *Key.OpaqueType },
type_opaque: struct { data: *Tag.TypeOpaque },
type_struct: struct { data: *Tag.TypeStruct },
type_struct_ns: struct { data: NamespaceIndex },
type_struct_anon: DataIsExtraIndexOfTypeStructAnon,
type_struct_packed: struct { data: *Tag.TypeStructPacked },
type_struct_packed_inits: struct { data: *Tag.TypeStructPacked },
@@ -2865,6 +3148,12 @@ comptime {
}
 
pub const Tag = enum(u8) {
/// This special tag represents a value which was removed from this pool via
/// `InternPool.remove`. The item remains allocated to preserve indices, but
/// lookups will consider it not equal to any other item, and all queries
/// assert not this tag. `data` is unused.
removed,
 
/// An integer type.
/// data is number of bits
type_int_signed,
@@ -2920,15 +3209,12 @@ pub const Tag = enum(u8) {
/// data is SimpleType enum value.
simple_type,
/// An opaque type.
/// data is index of Key.OpaqueType in extra.
/// data is index of Tag.TypeOpaque in extra.
type_opaque,
/// A non-packed struct type.
/// data is 0 or extra index of `TypeStruct`.
/// data == 0 represents `@TypeOf(.{})`.
type_struct,
/// A non-packed struct type that has only a namespace; no fields.
/// data is NamespaceIndex.
type_struct_ns,
/// An AnonStructType which stores types, names, and values for fields.
/// data is extra index of `TypeStructAnon`.
type_struct_anon,
@@ -3126,7 +3412,6 @@ pub const Tag = enum(u8) {
memoized_call,
 
const ErrorUnionType = Key.ErrorUnionType;
const OpaqueType = Key.OpaqueType;
const TypeValue = Key.TypeValue;
const Error = Key.Error;
const EnumTag = Key.EnumTag;
@@ -3136,6 +3421,7 @@ pub const Tag = enum(u8) {
 
fn Payload(comptime tag: Tag) type {
return switch (tag) {
.removed => unreachable,
.type_int_signed => unreachable,
.type_int_unsigned => unreachable,
.type_array_big => Array,
@@ -3153,9 +3439,8 @@ pub const Tag = enum(u8) {
.type_enum_explicit => EnumExplicit,
.type_enum_nonexhaustive => EnumExplicit,
.simple_type => unreachable,
.type_opaque => OpaqueType,
.type_opaque => TypeOpaque,
.type_struct => TypeStruct,
.type_struct_ns => unreachable,
.type_struct_anon => TypeStructAnon,
.type_struct_packed, .type_struct_packed_inits => TypeStructPacked,
.type_tuple_anon => TypeStructAnon,
@@ -3311,43 +3596,54 @@ pub const Tag = enum(u8) {
};
};
 
/// The number of fields is provided by the `tag_ty` field.
/// Trailing:
/// 0. field type: Index for each field; declaration order
/// 1. field align: Alignment for each field; declaration order
/// 0. captures_len: u32 // if `any_captures`
/// 1. capture: CaptureValue // for each `captures_len`
/// 2. type_hash: PackedU64 // if `is_reified`
/// 3. field type: Index for each field; declaration order
/// 4. field align: Alignment for each field; declaration order
pub const TypeUnion = struct {
flags: Flags,
/// This could be provided through the tag type, but it is more convenient
/// to store it directly. This is also necessary for `dumpStatsFallible` to
/// work on unresolved types.
fields_len: u32,
/// Only valid after .have_layout
size: u32,
/// Only valid after .have_layout
padding: u32,
decl: DeclIndex,
namespace: NamespaceIndex,
namespace: OptionalNamespaceIndex,
/// The enum that provides the list of field names and values.
tag_ty: Index,
zir_index: TrackedInst.Index.Optional,
zir_index: TrackedInst.Index,
 
pub const Flags = packed struct(u32) {
runtime_tag: UnionType.RuntimeTag,
any_captures: bool,
runtime_tag: LoadedUnionType.RuntimeTag,
/// If false, the field alignment trailing data is omitted.
any_aligned_fields: bool,
layout: std.builtin.Type.ContainerLayout,
status: UnionType.Status,
status: LoadedUnionType.Status,
requires_comptime: RequiresComptime,
assumed_runtime_bits: bool,
assumed_pointer_aligned: bool,
alignment: Alignment,
_: u14 = 0,
is_reified: bool,
_: u12 = 0,
};
};
 
/// Trailing:
/// 0. type: Index for each fields_len
/// 1. name: NullTerminatedString for each fields_len
/// 2. init: Index for each fields_len // if tag is type_struct_packed_inits
/// 0. captures_len: u32 // if `any_captures`
/// 1. capture: CaptureValue // for each `captures_len`
/// 2. type_hash: PackedU64 // if `is_reified`
/// 3. type: Index for each fields_len
/// 4. name: NullTerminatedString for each fields_len
/// 5. init: Index for each fields_len // if tag is type_struct_packed_inits
pub const TypeStructPacked = struct {
decl: DeclIndex,
zir_index: TrackedInst.Index.Optional,
zir_index: TrackedInst.Index,
fields_len: u32,
namespace: OptionalNamespaceIndex,
backing_int_ty: Index,
@@ -3355,10 +3651,12 @@ pub const Tag = enum(u8) {
flags: Flags,
 
pub const Flags = packed struct(u32) {
any_captures: bool,
/// Dependency loop detection when resolving field inits.
field_inits_wip: bool,
inits_resolved: bool,
_: u30 = 0,
is_reified: bool,
_: u28 = 0,
};
};
 
@@ -3377,29 +3675,33 @@ pub const Tag = enum(u8) {
/// than coming up with some other scheme for the data.
///
/// Trailing:
/// 0. type: Index for each field in declared order
/// 1. if not is_tuple:
/// 0. captures_len: u32 // if `any_captures`
/// 1. capture: CaptureValue // for each `captures_len`
/// 2. type_hash: PackedU64 // if `is_reified`
/// 3. type: Index for each field in declared order
/// 4. if not is_tuple:
/// names_map: MapIndex,
/// name: NullTerminatedString // for each field in declared order
/// 2. if any_default_inits:
/// 5. if any_default_inits:
/// init: Index // for each field in declared order
/// 3. if has_namespace:
/// 6. if has_namespace:
/// namespace: NamespaceIndex
/// 4. if any_aligned_fields:
/// 7. if any_aligned_fields:
/// align: Alignment // for each field in declared order
/// 5. if any_comptime_fields:
/// 8. if any_comptime_fields:
/// field_is_comptime_bits: u32 // minimal number of u32s needed, LSB is field 0
/// 6. if not is_extern:
/// 9. if not is_extern:
/// field_index: RuntimeOrder // for each field in runtime order
/// 7. field_offset: u32 // for each field in declared order, undef until layout_resolved
/// 10. field_offset: u32 // for each field in declared order, undef until layout_resolved
pub const TypeStruct = struct {
decl: DeclIndex,
zir_index: TrackedInst.Index.Optional,
zir_index: TrackedInst.Index,
fields_len: u32,
flags: Flags,
size: u32,
 
pub const Flags = packed struct(u32) {
any_captures: bool,
is_extern: bool,
known_non_opv: bool,
requires_comptime: RequiresComptime,
@@ -3428,10 +3730,23 @@ pub const Tag = enum(u8) {
// The type and all of its fields have had their layout resolved, even through
// pointers, which `layout_resolved` does not ensure.
fully_resolved: bool,
 
_: u8 = 0,
is_reified: bool,
_: u6 = 0,
};
};
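// Worked trailing-data example (illustrative): a declared, non-tuple
// `TypeStruct` with `any_captures = true`, two captures, and three fields
// stores, after the fixed fields, the `extra` words:
//
//     [captures_len=2][capture0][capture1]
//     [field_ty0][field_ty1][field_ty2]
//     [names_map][name0][name1][name2]
//
// followed by the conditional init/namespace/align/comptime/runtime-order
// trailers and the field offsets described above.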
 
/// Trailing:
/// 0. capture: CaptureValue // for each `captures_len`
pub const TypeOpaque = struct {
/// The opaque's owner Decl.
decl: DeclIndex,
/// Contains the declarations inside this opaque.
namespace: OptionalNamespaceIndex,
/// The index of the `opaque_decl` instruction.
zir_index: TrackedInst.Index,
/// `std.math.maxInt(u32)` indicates this type is reified.
captures_len: u32,
};
};
 
/// State that is mutable during semantic analysis. This data is not used for
@@ -3738,11 +4053,16 @@ pub const Array = struct {
};
 
/// Trailing:
/// 0. field name: NullTerminatedString for each fields_len; declaration order
/// 1. tag value: Index for each fields_len; declaration order
/// 0. owner_union: Index // if `zir_index == .none`
/// 1. capture: CaptureValue // for each `captures_len`
/// 2. type_hash: PackedU64 // if reified (`captures_len == std.math.maxInt(u32)`)
/// 3. field name: NullTerminatedString for each fields_len; declaration order
/// 4. tag value: Index for each fields_len; declaration order
pub const EnumExplicit = struct {
/// The Decl that corresponds to the enum itself.
decl: DeclIndex,
/// `std.math.maxInt(u32)` indicates this type is reified.
captures_len: u32,
/// This may be `none` if there are no declarations.
namespace: OptionalNamespaceIndex,
/// An integer type which is used for the numerical value of the enum, which
@@ -3755,14 +4075,21 @@ pub const EnumExplicit = struct {
/// If this is `none`, it means the trailing tag values are absent because
/// they are auto-numbered.
values_map: OptionalMapIndex,
/// `none` means this is a generated tag type.
/// There will be a trailing union type for which this is a tag.
zir_index: TrackedInst.Index.Optional,
};
 
/// Trailing:
/// 0. field name: NullTerminatedString for each fields_len; declaration order
/// 0. owner_union: Index // if `zir_index == .none`
/// 1. capture: CaptureValue // for each `captures_len`
/// 2. type_hash: PackedU64 // if reified (`captures_len == std.math.maxInt(u32)`)
/// 3. field name: NullTerminatedString for each fields_len; declaration order
pub const EnumAuto = struct {
/// The Decl that corresponds to the enum itself.
decl: DeclIndex,
/// `std.math.maxInt(u32)` indicates this type is reified.
captures_len: u32,
/// This may be `none` if there are no declarations.
namespace: OptionalNamespaceIndex,
/// An integer type which is used for the numerical value of the enum, which
@@ -3771,6 +4098,8 @@ pub const EnumAuto = struct {
fields_len: u32,
/// Maps field names to declaration index.
names_map: MapIndex,
/// `none` means this is a generated tag type.
/// There will be a trailing union type for which this is a tag.
zir_index: TrackedInst.Index.Optional,
};
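// Decoding sketch shared by both enum encodings (see `loadEnumType`):
//   zir_index == .none          => generated union tag; the first trailing word is the owner union.
//   captures_len == maxInt(u32) => reified; a trailing `PackedU64` holds the type hash.
//   otherwise                   => declared; `captures_len` trailing `CaptureValue`s follow.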
 
@@ -4011,6 +4340,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
const item = ip.items.get(@intFromEnum(index));
const data = item.data;
return switch (item.tag) {
.removed => unreachable,
.type_int_signed => .{
.int_type = .{
.signedness = .signed,
@@ -4072,68 +4402,124 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
.inferred_error_set_type = @enumFromInt(data),
},
 
.type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) },
.type_opaque => .{ .opaque_type = ns: {
const extra = ip.extraDataTrail(Tag.TypeOpaque, data);
if (extra.data.captures_len == std.math.maxInt(u32)) {
break :ns .{ .reified = .{
.zir_index = extra.data.zir_index,
.type_hash = 0,
} };
}
break :ns .{ .declared = .{
.zir_index = extra.data.zir_index,
.captures = .{ .owned = .{
.start = extra.end,
.len = extra.data.captures_len,
} },
} };
} },
 
.type_struct => .{ .struct_type = if (data == 0) .{
.extra_index = 0,
.namespace = .none,
.decl = .none,
.zir_index = undefined,
.layout = .Auto,
.field_names = .{ .start = 0, .len = 0 },
.field_types = .{ .start = 0, .len = 0 },
.field_inits = .{ .start = 0, .len = 0 },
.field_aligns = .{ .start = 0, .len = 0 },
.runtime_order = .{ .start = 0, .len = 0 },
.comptime_bits = .{ .start = 0, .len = 0 },
.offsets = .{ .start = 0, .len = 0 },
.names_map = undefined,
} else extraStructType(ip, data) },
.type_struct => .{ .struct_type = ns: {
if (data == 0) break :ns .empty_struct;
const extra = ip.extraDataTrail(Tag.TypeStruct, data);
if (extra.data.flags.is_reified) {
assert(!extra.data.flags.any_captures);
break :ns .{ .reified = .{
.zir_index = extra.data.zir_index,
.type_hash = ip.extraData(PackedU64, extra.end).get(),
} };
}
break :ns .{ .declared = .{
.zir_index = extra.data.zir_index,
.captures = .{ .owned = if (extra.data.flags.any_captures) .{
.start = extra.end + 1,
.len = ip.extra.items[extra.end],
} else .{ .start = 0, .len = 0 } },
} };
} },
 
.type_struct_ns => .{ .struct_type = .{
.extra_index = 0,
.namespace = @as(NamespaceIndex, @enumFromInt(data)).toOptional(),
.decl = .none,
.zir_index = undefined,
.layout = .Auto,
.field_names = .{ .start = 0, .len = 0 },
.field_types = .{ .start = 0, .len = 0 },
.field_inits = .{ .start = 0, .len = 0 },
.field_aligns = .{ .start = 0, .len = 0 },
.runtime_order = .{ .start = 0, .len = 0 },
.comptime_bits = .{ .start = 0, .len = 0 },
.offsets = .{ .start = 0, .len = 0 },
.names_map = undefined,
.type_struct_packed, .type_struct_packed_inits => .{ .struct_type = ns: {
const extra = ip.extraDataTrail(Tag.TypeStructPacked, data);
if (extra.data.flags.is_reified) {
assert(!extra.data.flags.any_captures);
break :ns .{ .reified = .{
.zir_index = extra.data.zir_index,
.type_hash = ip.extraData(PackedU64, extra.end).get(),
} };
}
break :ns .{ .declared = .{
.zir_index = extra.data.zir_index,
.captures = .{ .owned = if (extra.data.flags.any_captures) .{
.start = extra.end + 1,
.len = ip.extra.items[extra.end],
} else .{ .start = 0, .len = 0 } },
} };
} },
 
.type_struct_anon => .{ .anon_struct_type = extraTypeStructAnon(ip, data) },
.type_tuple_anon => .{ .anon_struct_type = extraTypeTupleAnon(ip, data) },
.type_struct_packed => .{ .struct_type = extraPackedStructType(ip, data, false) },
.type_struct_packed_inits => .{ .struct_type = extraPackedStructType(ip, data, true) },
.type_union => .{ .union_type = extraUnionType(ip, data) },
 
.type_enum_auto => {
const enum_auto = ip.extraDataTrail(EnumAuto, data);
return .{ .enum_type = .{
.decl = enum_auto.data.decl,
.namespace = enum_auto.data.namespace,
.tag_ty = enum_auto.data.int_tag_type,
.names = .{
.start = @intCast(enum_auto.end),
.len = enum_auto.data.fields_len,
},
.values = .{
.start = 0,
.len = 0,
},
.tag_mode = .auto,
.names_map = enum_auto.data.names_map.toOptional(),
.values_map = .none,
.zir_index = enum_auto.data.zir_index,
.type_union => .{ .union_type = ns: {
const extra = ip.extraDataTrail(Tag.TypeUnion, data);
if (extra.data.flags.is_reified) {
assert(!extra.data.flags.any_captures);
break :ns .{ .reified = .{
.zir_index = extra.data.zir_index,
.type_hash = ip.extraData(PackedU64, extra.end).get(),
} };
}
break :ns .{ .declared = .{
.zir_index = extra.data.zir_index,
.captures = .{ .owned = if (extra.data.flags.any_captures) .{
.start = extra.end + 1,
.len = ip.extra.items[extra.end],
} else .{ .start = 0, .len = 0 } },
} };
},
.type_enum_explicit => ip.indexToKeyEnum(data, .explicit),
.type_enum_nonexhaustive => ip.indexToKeyEnum(data, .nonexhaustive),
} },
 
.type_enum_auto => .{ .enum_type = ns: {
const extra = ip.extraDataTrail(EnumAuto, data);
const zir_index = extra.data.zir_index.unwrap() orelse {
assert(extra.data.captures_len == 0);
break :ns .{ .generated_tag = .{
.union_type = @enumFromInt(ip.extra.items[extra.end]),
} };
};
if (extra.data.captures_len == std.math.maxInt(u32)) {
break :ns .{ .reified = .{
.zir_index = zir_index,
.type_hash = ip.extraData(PackedU64, extra.end).get(),
} };
}
break :ns .{ .declared = .{
.zir_index = zir_index,
.captures = .{ .owned = .{
.start = extra.end,
.len = extra.data.captures_len,
} },
} };
} },
.type_enum_explicit, .type_enum_nonexhaustive => .{ .enum_type = ns: {
const extra = ip.extraDataTrail(EnumExplicit, data);
const zir_index = extra.data.zir_index.unwrap() orelse {
assert(extra.data.captures_len == 0);
break :ns .{ .generated_tag = .{
.union_type = @enumFromInt(ip.extra.items[extra.end]),
} };
};
if (extra.data.captures_len == std.math.maxInt(u32)) {
break :ns .{ .reified = .{
.zir_index = zir_index,
.type_hash = ip.extraData(PackedU64, extra.end).get(),
} };
}
break :ns .{ .declared = .{
.zir_index = zir_index,
.captures = .{ .owned = .{
.start = extra.end,
.len = extra.data.captures_len,
} },
} };
} },
.type_function => .{ .func_type = ip.extraFuncType(data) },
 
.undef => .{ .undef = @as(Index, @enumFromInt(data)) },
@@ -4366,7 +4752,6 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
},
.type_array_small,
.type_vector,
.type_struct_ns,
.type_struct_packed,
=> .{ .aggregate = .{
.ty = ty,
@@ -4375,16 +4760,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
 
// There is only one possible value precisely due to the
// fact that this values slice is fully populated!
.type_struct => {
const info = extraStructType(ip, ty_item.data);
return .{ .aggregate = .{
.ty = ty,
.storage = .{ .elems = @ptrCast(info.field_inits.get(ip)) },
} };
},
 
.type_struct_packed_inits => {
const info = extraPackedStructType(ip, ty_item.data, true);
.type_struct, .type_struct_packed_inits => {
const info = loadStructType(ip, ty);
return .{ .aggregate = .{
.ty = ty,
.storage = .{ .elems = @ptrCast(info.field_inits.get(ip)) },
@@ -4476,18 +4853,6 @@ fn extraErrorSet(ip: *const InternPool, extra_index: u32) Key.ErrorSetType {
};
}
 
fn extraUnionType(ip: *const InternPool, extra_index: u32) Key.UnionType {
const type_union = ip.extraData(Tag.TypeUnion, extra_index);
return .{
.decl = type_union.decl,
.namespace = type_union.namespace,
.flags = type_union.flags,
.enum_tag_ty = type_union.tag_ty,
.zir_index = type_union.zir_index,
.extra_index = extra_index,
};
}
 
fn extraTypeStructAnon(ip: *const InternPool, extra_index: u32) Key.AnonStructType {
const type_struct_anon = ip.extraDataTrail(TypeStructAnon, extra_index);
const fields_len = type_struct_anon.data.fields_len;
@@ -4526,109 +4891,6 @@ fn extraTypeTupleAnon(ip: *const InternPool, extra_index: u32) Key.AnonStructTyp
};
}
 
fn extraStructType(ip: *const InternPool, extra_index: u32) Key.StructType {
const s = ip.extraDataTrail(Tag.TypeStruct, extra_index);
const fields_len = s.data.fields_len;
 
var index = s.end;
 
const field_types = t: {
const types: Index.Slice = .{ .start = index, .len = fields_len };
index += fields_len;
break :t types;
};
const names_map, const field_names: NullTerminatedString.Slice = t: {
if (s.data.flags.is_tuple) break :t .{ .none, .{ .start = 0, .len = 0 } };
const names_map: MapIndex = @enumFromInt(ip.extra.items[index]);
index += 1;
const names: NullTerminatedString.Slice = .{ .start = index, .len = fields_len };
index += fields_len;
break :t .{ names_map.toOptional(), names };
};
const field_inits: Index.Slice = t: {
if (!s.data.flags.any_default_inits) break :t .{ .start = 0, .len = 0 };
const inits: Index.Slice = .{ .start = index, .len = fields_len };
index += fields_len;
break :t inits;
};
const namespace = t: {
if (!s.data.flags.has_namespace) break :t .none;
const namespace: NamespaceIndex = @enumFromInt(ip.extra.items[index]);
index += 1;
break :t namespace.toOptional();
};
const field_aligns: Alignment.Slice = t: {
if (!s.data.flags.any_aligned_fields) break :t .{ .start = 0, .len = 0 };
const aligns: Alignment.Slice = .{ .start = index, .len = fields_len };
index += (fields_len + 3) / 4;
break :t aligns;
};
const comptime_bits: Key.StructType.ComptimeBits = t: {
if (!s.data.flags.any_comptime_fields) break :t .{ .start = 0, .len = 0 };
const comptime_bits: Key.StructType.ComptimeBits = .{ .start = index, .len = fields_len };
index += (fields_len + 31) / 32;
break :t comptime_bits;
};
const runtime_order: Key.StructType.RuntimeOrder.Slice = t: {
if (s.data.flags.is_extern) break :t .{ .start = 0, .len = 0 };
const ro: Key.StructType.RuntimeOrder.Slice = .{ .start = index, .len = fields_len };
index += fields_len;
break :t ro;
};
const offsets = t: {
const offsets: Key.StructType.Offsets = .{ .start = index, .len = fields_len };
index += fields_len;
break :t offsets;
};
return .{
.extra_index = extra_index,
.decl = s.data.decl.toOptional(),
.zir_index = s.data.zir_index,
.layout = if (s.data.flags.is_extern) .Extern else .Auto,
.field_types = field_types,
.names_map = names_map,
.field_names = field_names,
.field_inits = field_inits,
.namespace = namespace,
.field_aligns = field_aligns,
.comptime_bits = comptime_bits,
.runtime_order = runtime_order,
.offsets = offsets,
};
}
 
fn extraPackedStructType(ip: *const InternPool, extra_index: u32, inits: bool) Key.StructType {
const type_struct_packed = ip.extraDataTrail(Tag.TypeStructPacked, extra_index);
const fields_len = type_struct_packed.data.fields_len;
return .{
.extra_index = extra_index,
.decl = type_struct_packed.data.decl.toOptional(),
.namespace = type_struct_packed.data.namespace,
.zir_index = type_struct_packed.data.zir_index,
.layout = .Packed,
.field_types = .{
.start = type_struct_packed.end,
.len = fields_len,
},
.field_names = .{
.start = type_struct_packed.end + fields_len,
.len = fields_len,
},
.field_inits = if (inits) .{
.start = type_struct_packed.end + fields_len * 2,
.len = fields_len,
} else .{
.start = 0,
.len = 0,
},
.field_aligns = .{ .start = 0, .len = 0 },
.runtime_order = .{ .start = 0, .len = 0 },
.comptime_bits = .{ .start = 0, .len = 0 },
.offsets = .{ .start = 0, .len = 0 },
.names_map = type_struct_packed.data.names_map.toOptional(),
};
}
 
fn extraFuncType(ip: *const InternPool, extra_index: u32) Key.FuncType {
const type_function = ip.extraDataTrail(Tag.TypeFunction, extra_index);
var index: usize = type_function.end;
@@ -4720,28 +4982,6 @@ fn extraFuncCoerced(ip: *const InternPool, extra_index: u32) Key.Func {
return func;
}
 
fn indexToKeyEnum(ip: *const InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key {
const enum_explicit = ip.extraDataTrail(EnumExplicit, data);
const fields_len = enum_explicit.data.fields_len;
return .{ .enum_type = .{
.decl = enum_explicit.data.decl,
.namespace = enum_explicit.data.namespace,
.tag_ty = enum_explicit.data.int_tag_type,
.names = .{
.start = @intCast(enum_explicit.end),
.len = fields_len,
},
.values = .{
.start = @intCast(enum_explicit.end + fields_len),
.len = if (enum_explicit.data.values_map != .none) fields_len else 0,
},
.tag_mode = tag_mode,
.names_map = enum_explicit.data.names_map.toOptional(),
.values_map = enum_explicit.data.values_map,
.zir_index = enum_explicit.data.zir_index,
} };
}
 
fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key {
const int_info = ip.limbData(Int, limb_index);
return .{ .int = .{
@@ -4901,15 +5141,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.struct_type => unreachable, // use getStructType() instead
.anon_struct_type => unreachable, // use getAnonStructType() instead
.union_type => unreachable, // use getUnionType() instead
.opaque_type => unreachable, // use getOpaqueType() instead
 
.opaque_type => |opaque_type| {
ip.items.appendAssumeCapacity(.{
.tag = .type_opaque,
.data = try ip.addExtra(gpa, opaque_type),
});
},
 
.enum_type => unreachable, // use getEnum() or getIncompleteEnum() instead
.enum_type => unreachable, // use getEnumType() instead
.func_type => unreachable, // use getFuncType() instead
.extern_func => unreachable, // use getExternFunc() instead
.func => unreachable, // use getFuncInstance() or getFuncDecl() instead
@@ -5027,14 +5261,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
assert(ptr.addr == .field);
assert(base_index.index < anon_struct_type.types.len);
},
.struct_type => |struct_type| {
.struct_type => {
assert(ptr.addr == .field);
assert(base_index.index < struct_type.field_types.len);
assert(base_index.index < ip.loadStructType(base_ptr_type.child).field_types.len);
},
.union_type => |union_key| {
const union_type = ip.loadUnionType(union_key);
.union_type => {
const union_type = ip.loadUnionType(base_ptr_type.child);
assert(ptr.addr == .field);
assert(base_index.index < union_type.field_names.len);
assert(base_index.index < union_type.field_types.len);
},
.ptr_type => |slice_type| {
assert(ptr.addr == .field);
@@ -5305,7 +5539,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
assert(ip.isEnumType(enum_tag.ty));
switch (ip.indexToKey(enum_tag.ty)) {
.simple_type => assert(ip.isIntegerType(ip.typeOf(enum_tag.int))),
.enum_type => |enum_type| assert(ip.typeOf(enum_tag.int) == enum_type.tag_ty),
.enum_type => assert(ip.typeOf(enum_tag.int) == ip.loadEnumType(enum_tag.ty).tag_ty),
else => unreachable,
}
ip.items.appendAssumeCapacity(.{
@@ -5398,8 +5632,8 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
assert(ip.typeOf(elem) == child);
}
},
.struct_type => |t| {
for (aggregate.storage.values(), t.field_types.get(ip)) |elem, field_ty| {
.struct_type => {
for (aggregate.storage.values(), ip.loadStructType(aggregate.ty).field_types.get(ip)) |elem, field_ty| {
assert(ip.typeOf(elem) == field_ty);
}
},
@@ -5572,10 +5806,17 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
}
 
pub const UnionTypeInit = struct {
flags: Tag.TypeUnion.Flags,
decl: DeclIndex,
namespace: NamespaceIndex,
zir_index: TrackedInst.Index.Optional,
flags: packed struct {
runtime_tag: LoadedUnionType.RuntimeTag,
any_aligned_fields: bool,
layout: std.builtin.Type.ContainerLayout,
status: LoadedUnionType.Status,
requires_comptime: RequiresComptime,
assumed_runtime_bits: bool,
assumed_pointer_aligned: bool,
alignment: Alignment,
},
has_namespace: bool,
fields_len: u32,
enum_tag_ty: Index,
/// May have length 0 which leaves the values unset until later.
@@ -5584,27 +5825,84 @@ pub const UnionTypeInit = struct {
/// The logic for `any_aligned_fields` is asserted to have been done before
/// calling this function.
field_aligns: []const Alignment,
key: union(enum) {
declared: struct {
zir_index: TrackedInst.Index,
captures: []const CaptureValue,
},
reified: struct {
zir_index: TrackedInst.Index,
type_hash: u64,
},
},
};
 
pub fn getUnionType(ip: *InternPool, gpa: Allocator, ini: UnionTypeInit) Allocator.Error!Index {
const prev_extra_len = ip.extra.items.len;
pub fn getUnionType(ip: *InternPool, gpa: Allocator, ini: UnionTypeInit) Allocator.Error!WipNamespaceType.Result {
const adapter: KeyAdapter = .{ .intern_pool = ip };
const gop = try ip.map.getOrPutAdapted(gpa, Key{ .union_type = switch (ini.key) {
.declared => |d| .{ .declared = .{
.zir_index = d.zir_index,
.captures = .{ .external = d.captures },
} },
.reified => |r| .{ .reified = .{
.zir_index = r.zir_index,
.type_hash = r.type_hash,
} },
} }, adapter);
if (gop.found_existing) return .{ .existing = @enumFromInt(gop.index) };
errdefer _ = ip.map.pop();
 
const align_elements_len = if (ini.flags.any_aligned_fields) (ini.fields_len + 3) / 4 else 0;
const align_element: u32 = @bitCast([1]u8{@intFromEnum(Alignment.none)} ** 4);
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeUnion).Struct.fields.len +
// TODO: fmt bug
// zig fmt: off
switch (ini.key) {
.declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
.reified => 2, // type_hash: PackedU64
} +
// zig fmt: on
ini.fields_len + // field types
align_elements_len);
try ip.items.ensureUnusedCapacity(gpa, 1);
 
const union_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeUnion{
.flags = ini.flags,
const extra_index = ip.addExtraAssumeCapacity(Tag.TypeUnion{
.flags = .{
.any_captures = ini.key == .declared and ini.key.declared.captures.len != 0,
.runtime_tag = ini.flags.runtime_tag,
.any_aligned_fields = ini.flags.any_aligned_fields,
.layout = ini.flags.layout,
.status = ini.flags.status,
.requires_comptime = ini.flags.requires_comptime,
.assumed_runtime_bits = ini.flags.assumed_runtime_bits,
.assumed_pointer_aligned = ini.flags.assumed_pointer_aligned,
.alignment = ini.flags.alignment,
.is_reified = ini.key == .reified,
},
.fields_len = ini.fields_len,
.size = std.math.maxInt(u32),
.padding = std.math.maxInt(u32),
.decl = ini.decl,
.namespace = ini.namespace,
.decl = undefined, // set by `finish`
.namespace = .none, // set by `finish`
.tag_ty = ini.enum_tag_ty,
.zir_index = ini.zir_index,
.zir_index = switch (ini.key) {
inline else => |x| x.zir_index,
},
});
 
ip.items.appendAssumeCapacity(.{
.tag = .type_union,
.data = extra_index,
});
 
switch (ini.key) {
.declared => |d| if (d.captures.len != 0) {
ip.extra.appendAssumeCapacity(@intCast(d.captures.len));
ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures));
},
.reified => |r| _ = ip.addExtraAssumeCapacity(PackedU64.init(r.type_hash)),
}
 
// field types
if (ini.field_types.len > 0) {
assert(ini.field_types.len == ini.fields_len);
@@ -5627,27 +5925,41 @@ pub fn getUnionType(ip: *InternPool, gpa: Allocator, ini: UnionTypeInit) Allocat
assert(ini.field_aligns.len == 0);
}
 
const adapter: KeyAdapter = .{ .intern_pool = ip };
const gop = try ip.map.getOrPutAdapted(gpa, Key{
.union_type = extraUnionType(ip, union_type_extra_index),
}, adapter);
if (gop.found_existing) {
ip.extra.items.len = prev_extra_len;
return @enumFromInt(gop.index);
}
 
ip.items.appendAssumeCapacity(.{
.tag = .type_union,
.data = union_type_extra_index,
});
return @enumFromInt(ip.items.len - 1);
return .{ .wip = .{
.index = @enumFromInt(ip.items.len - 1),
.decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "decl").?,
.namespace_extra_index = if (ini.has_namespace)
extra_index + std.meta.fieldIndex(Tag.TypeUnion, "namespace").?
else
null,
} };
}
 
pub const WipNamespaceType = struct {
index: Index,
decl_extra_index: u32,
namespace_extra_index: ?u32,
pub fn finish(wip: WipNamespaceType, ip: *InternPool, decl: DeclIndex, namespace: OptionalNamespaceIndex) Index {
ip.extra.items[wip.decl_extra_index] = @intFromEnum(decl);
if (wip.namespace_extra_index) |i| {
ip.extra.items[i] = @intFromEnum(namespace.unwrap().?);
} else {
assert(namespace == .none);
}
return wip.index;
}
pub fn cancel(wip: WipNamespaceType, ip: *InternPool) void {
ip.remove(wip.index);
}
 
pub const Result = union(enum) {
wip: WipNamespaceType,
existing: Index,
};
};
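
// A sketch of the create/finish protocol these `Result`s imply, assuming a
// hypothetical caller with `ip`, `gpa`, `ini`, `decl_index`, and
// `namespace_index` in scope (mirroring `getFileRootStruct` below):
const wip_ty = switch (try ip.getUnionType(gpa, ini)) {
.existing => |ty| return ty, // deduplicated: same AST node + captures
.wip => |wip| wip,
};
errdefer wip_ty.cancel(ip); // drop the reservation if later analysis fails
// ... create the Decl and Namespace for the new type, then:
return wip_ty.finish(ip, decl_index, namespace_index.toOptional());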
 
pub const StructTypeInit = struct {
decl: DeclIndex,
namespace: OptionalNamespaceIndex,
layout: std.builtin.Type.ContainerLayout,
zir_index: TrackedInst.Index.Optional,
fields_len: u32,
known_non_opv: bool,
requires_comptime: RequiresComptime,
@@ -5656,69 +5968,101 @@ pub const StructTypeInit = struct {
any_default_inits: bool,
inits_resolved: bool,
any_aligned_fields: bool,
has_namespace: bool,
key: union(enum) {
declared: struct {
zir_index: TrackedInst.Index,
captures: []const CaptureValue,
},
reified: struct {
zir_index: TrackedInst.Index,
type_hash: u64,
},
},
};
 
pub fn getStructType(
ip: *InternPool,
gpa: Allocator,
ini: StructTypeInit,
) Allocator.Error!Index {
) Allocator.Error!WipNamespaceType.Result {
const adapter: KeyAdapter = .{ .intern_pool = ip };
const key: Key = .{
.struct_type = .{
// Only the decl matters for hashing and equality purposes.
.decl = ini.decl.toOptional(),
 
.extra_index = undefined,
.namespace = undefined,
.zir_index = undefined,
.layout = undefined,
.field_names = undefined,
.field_types = undefined,
.field_inits = undefined,
.field_aligns = undefined,
.runtime_order = undefined,
.comptime_bits = undefined,
.offsets = undefined,
.names_map = undefined,
},
};
const key: Key = .{ .struct_type = switch (ini.key) {
.declared => |d| .{ .declared = .{
.zir_index = d.zir_index,
.captures = .{ .external = d.captures },
} },
.reified => |r| .{ .reified = .{
.zir_index = r.zir_index,
.type_hash = r.type_hash,
} },
} };
const gop = try ip.map.getOrPutAdapted(gpa, key, adapter);
if (gop.found_existing) return @enumFromInt(gop.index);
if (gop.found_existing) return .{ .existing = @enumFromInt(gop.index) };
errdefer _ = ip.map.pop();
 
const names_map = try ip.addMap(gpa, ini.fields_len);
errdefer _ = ip.maps.pop();
 
const zir_index = switch (ini.key) {
inline else => |x| x.zir_index,
};
 
const is_extern = switch (ini.layout) {
.Auto => false,
.Extern => true,
.Packed => {
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeStructPacked).Struct.fields.len +
// TODO: fmt bug
// zig fmt: off
switch (ini.key) {
.declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
.reified => 2, // type_hash: PackedU64
} +
// zig fmt: on
ini.fields_len + // types
ini.fields_len + // names
ini.fields_len); // inits
const extra_index = ip.addExtraAssumeCapacity(Tag.TypeStructPacked{
.decl = undefined, // set by `finish`
.zir_index = zir_index,
.fields_len = ini.fields_len,
.namespace = .none,
.backing_int_ty = .none,
.names_map = names_map,
.flags = .{
.any_captures = ini.key == .declared and ini.key.declared.captures.len != 0,
.field_inits_wip = false,
.inits_resolved = ini.inits_resolved,
.is_reified = ini.key == .reified,
},
});
try ip.items.append(gpa, .{
.tag = if (ini.any_default_inits) .type_struct_packed_inits else .type_struct_packed,
.data = ip.addExtraAssumeCapacity(Tag.TypeStructPacked{
.decl = ini.decl,
.zir_index = ini.zir_index,
.fields_len = ini.fields_len,
.namespace = ini.namespace,
.backing_int_ty = .none,
.names_map = names_map,
.flags = .{
.field_inits_wip = false,
.inits_resolved = ini.inits_resolved,
},
}),
.data = extra_index,
});
switch (ini.key) {
.declared => |d| if (d.captures.len != 0) {
ip.extra.appendAssumeCapacity(@intCast(d.captures.len));
ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures));
},
.reified => |r| {
_ = ip.addExtraAssumeCapacity(PackedU64.init(r.type_hash));
},
}
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len);
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(OptionalNullTerminatedString.none), ini.fields_len);
if (ini.any_default_inits) {
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len);
}
return @enumFromInt(ip.items.len - 1);
return .{ .wip = .{
.index = @enumFromInt(ip.items.len - 1),
.decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "decl").?,
.namespace_extra_index = if (ini.has_namespace)
extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "namespace").?
else
null,
} };
},
};
 
@@ -5727,38 +6071,57 @@ pub fn getStructType(
const comptime_elements_len = if (ini.any_comptime_fields) (ini.fields_len + 31) / 32 else 0;
 
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeStruct).Struct.fields.len +
// TODO: fmt bug
// zig fmt: off
switch (ini.key) {
.declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
.reified => 2, // type_hash: PackedU64
} +
// zig fmt: on
(ini.fields_len * 5) + // types, names, inits, runtime order, offsets
align_elements_len + comptime_elements_len +
2); // names_map + namespace
const extra_index = ip.addExtraAssumeCapacity(Tag.TypeStruct{
.decl = undefined, // set by `finish`
.zir_index = zir_index,
.fields_len = ini.fields_len,
.size = std.math.maxInt(u32),
.flags = .{
.any_captures = ini.key == .declared and ini.key.declared.captures.len != 0,
.is_extern = is_extern,
.known_non_opv = ini.known_non_opv,
.requires_comptime = ini.requires_comptime,
.is_tuple = ini.is_tuple,
.assumed_runtime_bits = false,
.assumed_pointer_aligned = false,
.has_namespace = ini.has_namespace,
.any_comptime_fields = ini.any_comptime_fields,
.any_default_inits = ini.any_default_inits,
.any_aligned_fields = ini.any_aligned_fields,
.alignment = .none,
.alignment_wip = false,
.field_types_wip = false,
.layout_wip = false,
.layout_resolved = false,
.field_inits_wip = false,
.inits_resolved = ini.inits_resolved,
.fully_resolved = false,
.is_reified = ini.key == .reified,
},
});
try ip.items.append(gpa, .{
.tag = .type_struct,
.data = ip.addExtraAssumeCapacity(Tag.TypeStruct{
.decl = ini.decl,
.zir_index = ini.zir_index,
.fields_len = ini.fields_len,
.size = std.math.maxInt(u32),
.flags = .{
.is_extern = is_extern,
.known_non_opv = ini.known_non_opv,
.requires_comptime = ini.requires_comptime,
.is_tuple = ini.is_tuple,
.assumed_runtime_bits = false,
.assumed_pointer_aligned = false,
.has_namespace = ini.namespace != .none,
.any_comptime_fields = ini.any_comptime_fields,
.any_default_inits = ini.any_default_inits,
.any_aligned_fields = ini.any_aligned_fields,
.alignment = .none,
.alignment_wip = false,
.field_types_wip = false,
.layout_wip = false,
.layout_resolved = false,
.field_inits_wip = false,
.inits_resolved = ini.inits_resolved,
.fully_resolved = false,
},
}),
.data = extra_index,
});
switch (ini.key) {
.declared => |d| if (d.captures.len != 0) {
ip.extra.appendAssumeCapacity(@intCast(d.captures.len));
ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures));
},
.reified => |r| {
_ = ip.addExtraAssumeCapacity(PackedU64.init(r.type_hash));
},
}
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len);
if (!ini.is_tuple) {
ip.extra.appendAssumeCapacity(@intFromEnum(names_map));
@@ -5767,9 +6130,10 @@ pub fn getStructType(
if (ini.any_default_inits) {
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len);
}
if (ini.namespace.unwrap()) |namespace| {
ip.extra.appendAssumeCapacity(@intFromEnum(namespace));
}
const namespace_extra_index: ?u32 = if (ini.has_namespace) i: {
ip.extra.appendAssumeCapacity(undefined); // set by `finish`
break :i @intCast(ip.extra.items.len - 1);
} else null;
if (ini.any_aligned_fields) {
ip.extra.appendNTimesAssumeCapacity(align_element, align_elements_len);
}
@@ -5777,10 +6141,14 @@ pub fn getStructType(
ip.extra.appendNTimesAssumeCapacity(0, comptime_elements_len);
}
if (ini.layout == .Auto) {
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Key.StructType.RuntimeOrder.unresolved), ini.fields_len);
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(LoadedStructType.RuntimeOrder.unresolved), ini.fields_len);
}
ip.extra.appendNTimesAssumeCapacity(std.math.maxInt(u32), ini.fields_len);
return @enumFromInt(ip.items.len - 1);
return .{ .wip = .{
.index = @enumFromInt(ip.items.len - 1),
.decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "decl").?,
.namespace_extra_index = namespace_extra_index,
} };
}
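
// For reference, the trailing `extra` data the code above writes for a
// non-packed `Tag.TypeStruct`, in order (derived from the append calls; each
// entry is in u32 words):
// captures_len + captures if flags.any_captures (declared types)
// type_hash: 2 words if flags.is_reified
// field_types: fields_len words
// names_map + field_names: 1 + fields_len words if !flags.is_tuple
// field_inits: fields_len words if flags.any_default_inits
// namespace: 1 word if flags.has_namespace (patched by `finish`)
// field_aligns: (fields_len + 3) / 4 words if flags.any_aligned_fields
// comptime_bits: (fields_len + 31) / 32 words if flags.any_comptime_fields
// runtime_order: fields_len words if layout == .Auto
// offsets: fields_len words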
 
pub const AnonStructTypeInit = struct {
@@ -6395,7 +6763,6 @@ fn finishFuncInstance(
.@"addrspace" = fn_owner_decl.@"addrspace",
.analysis = .complete,
.zir_decl_index = fn_owner_decl.zir_decl_index,
.src_scope = fn_owner_decl.src_scope,
.is_pub = fn_owner_decl.is_pub,
.is_exported = fn_owner_decl.is_exported,
.alive = true,
@@ -6417,257 +6784,386 @@ fn finishFuncInstance(
return func_index;
}
 
/// Provides API for completing an enum type after calling `getIncompleteEnum`.
pub const IncompleteEnumType = struct {
pub const EnumTypeInit = struct {
has_namespace: bool,
has_values: bool,
tag_mode: LoadedEnumType.TagMode,
fields_len: u32,
key: union(enum) {
declared: struct {
zir_index: TrackedInst.Index,
captures: []const CaptureValue,
},
reified: struct {
zir_index: TrackedInst.Index,
type_hash: u64,
},
},
};
 
pub const WipEnumType = struct {
index: Index,
tag_ty_index: u32,
decl_index: u32,
namespace_index: ?u32,
names_map: MapIndex,
names_start: u32,
values_map: OptionalMapIndex,
values_start: u32,
 
pub fn setTagType(self: @This(), ip: *InternPool, tag_ty: Index) void {
assert(tag_ty == .noreturn_type or ip.isIntegerType(tag_ty));
ip.extra.items[self.tag_ty_index] = @intFromEnum(tag_ty);
pub fn prepare(
wip: WipEnumType,
ip: *InternPool,
decl: DeclIndex,
namespace: OptionalNamespaceIndex,
) void {
ip.extra.items[wip.decl_index] = @intFromEnum(decl);
if (wip.namespace_index) |i| {
ip.extra.items[i] = @intFromEnum(namespace.unwrap().?);
} else {
assert(namespace == .none);
}
}
 
/// Returns the already-existing field with the same name, if any.
pub fn addFieldName(
self: @This(),
ip: *InternPool,
name: NullTerminatedString,
) ?u32 {
return ip.addFieldName(self.names_map, self.names_start, name);
pub fn setTagTy(wip: WipEnumType, ip: *InternPool, tag_ty: Index) void {
assert(ip.isIntegerType(tag_ty));
ip.extra.items[wip.tag_ty_index] = @intFromEnum(tag_ty);
}
 
/// Returns the already-existing field with the same value, if any.
/// Make sure the type of the value has the integer tag type of the enum.
pub fn addFieldValue(
self: @This(),
ip: *InternPool,
value: Index,
) ?u32 {
assert(ip.typeOf(value) == @as(Index, @enumFromInt(ip.extra.items[self.tag_ty_index])));
const map = &ip.maps.items[@intFromEnum(self.values_map.unwrap().?)];
pub const FieldConflict = struct {
kind: enum { name, value },
prev_field_idx: u32,
};
 
/// Returns the already-existing field with the same name or value, if any.
/// If the enum is automatically numbered, `value` must be `.none`.
/// Otherwise, the type of `value` must be the integer tag type of the enum.
pub fn nextField(wip: WipEnumType, ip: *InternPool, name: NullTerminatedString, value: Index) ?FieldConflict {
if (ip.addFieldName(wip.names_map, wip.names_start, name)) |conflict| {
return .{ .kind = .name, .prev_field_idx = conflict };
}
if (value == .none) {
assert(wip.values_map == .none);
return null;
}
assert(ip.typeOf(value) == @as(Index, @enumFromInt(ip.extra.items[wip.tag_ty_index])));
const map = &ip.maps.items[@intFromEnum(wip.values_map.unwrap().?)];
const field_index = map.count();
const indexes = ip.extra.items[self.values_start..][0..field_index];
const indexes = ip.extra.items[wip.values_start..][0..field_index];
const adapter: Index.Adapter = .{ .indexes = @ptrCast(indexes) };
const gop = map.getOrPutAssumeCapacityAdapted(value, adapter);
if (gop.found_existing) return @intCast(gop.index);
ip.extra.items[self.values_start + field_index] = @intFromEnum(value);
if (gop.found_existing) {
return .{ .kind = .value, .prev_field_idx = @intCast(gop.index) };
}
ip.extra.items[wip.values_start + field_index] = @intFromEnum(value);
return null;
}
};
 
/// This is used to create an enum type in the `InternPool`, with the ability
/// to update the tag type, field names, and field values later.
pub fn getIncompleteEnum(
ip: *InternPool,
gpa: Allocator,
enum_type: Key.IncompleteEnumType,
) Allocator.Error!IncompleteEnumType {
switch (enum_type.tag_mode) {
.auto => return getIncompleteEnumAuto(ip, gpa, enum_type),
.explicit => return getIncompleteEnumExplicit(ip, gpa, enum_type, .type_enum_explicit),
.nonexhaustive => return getIncompleteEnumExplicit(ip, gpa, enum_type, .type_enum_nonexhaustive),
pub fn cancel(wip: WipEnumType, ip: *InternPool) void {
ip.remove(wip.index);
}
}
 
fn getIncompleteEnumAuto(
ip: *InternPool,
gpa: Allocator,
enum_type: Key.IncompleteEnumType,
) Allocator.Error!IncompleteEnumType {
const int_tag_type = if (enum_type.tag_ty != .none)
enum_type.tag_ty
else
try ip.get(gpa, .{ .int_type = .{
.bits = if (enum_type.fields_len == 0) 0 else std.math.log2_int_ceil(u32, enum_type.fields_len),
.signedness = .unsigned,
} });
 
// We must keep the map in sync with `items`. The hash and equality functions
// for enum types only look at the decl field, which is present even in
// an `IncompleteEnumType`.
const adapter: KeyAdapter = .{ .intern_pool = ip };
const gop = try ip.map.getOrPutAdapted(gpa, enum_type.toKey(), adapter);
assert(!gop.found_existing);
 
const names_map = try ip.addMap(gpa, enum_type.fields_len);
 
const extra_fields_len: u32 = @typeInfo(EnumAuto).Struct.fields.len;
try ip.extra.ensureUnusedCapacity(gpa, extra_fields_len + enum_type.fields_len);
try ip.items.ensureUnusedCapacity(gpa, 1);
 
const extra_index = ip.addExtraAssumeCapacity(EnumAuto{
.decl = enum_type.decl,
.namespace = enum_type.namespace,
.int_tag_type = int_tag_type,
.names_map = names_map,
.fields_len = enum_type.fields_len,
.zir_index = enum_type.zir_index,
});
 
ip.items.appendAssumeCapacity(.{
.tag = .type_enum_auto,
.data = extra_index,
});
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), enum_type.fields_len);
return .{
.index = @enumFromInt(ip.items.len - 1),
.tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?,
.names_map = names_map,
.names_start = extra_index + extra_fields_len,
.values_map = .none,
.values_start = undefined,
pub const Result = union(enum) {
wip: WipEnumType,
existing: Index,
};
}
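
// How a caller might drive `nextField`, as a sketch; `wip`, `ip`,
// `field_name`, and `field_value` are assumed to be in scope (hypothetical):
if (wip.nextField(ip, field_name, field_value)) |conflict| {
switch (conflict.kind) {
.name => {}, // duplicate field name; see conflict.prev_field_idx
.value => {}, // duplicate tag value; see conflict.prev_field_idx
}
}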
 
fn getIncompleteEnumExplicit(
ip: *InternPool,
gpa: Allocator,
enum_type: Key.IncompleteEnumType,
tag: Tag,
) Allocator.Error!IncompleteEnumType {
// We must keep the map in sync with `items`. The hash and equality functions
// for enum types only look at the decl field, which is present even in
// an `IncompleteEnumType`.
const adapter: KeyAdapter = .{ .intern_pool = ip };
const gop = try ip.map.getOrPutAdapted(gpa, enum_type.toKey(), adapter);
assert(!gop.found_existing);
 
const names_map = try ip.addMap(gpa, enum_type.fields_len);
const values_map: OptionalMapIndex = if (!enum_type.has_values) .none else m: {
const values_map = try ip.addMap(gpa, enum_type.fields_len);
break :m values_map.toOptional();
};
 
const reserved_len = enum_type.fields_len +
if (enum_type.has_values) enum_type.fields_len else 0;
 
const extra_fields_len: u32 = @typeInfo(EnumExplicit).Struct.fields.len;
try ip.extra.ensureUnusedCapacity(gpa, extra_fields_len + reserved_len);
try ip.items.ensureUnusedCapacity(gpa, 1);
 
const extra_index = ip.addExtraAssumeCapacity(EnumExplicit{
.decl = enum_type.decl,
.namespace = enum_type.namespace,
.int_tag_type = enum_type.tag_ty,
.fields_len = enum_type.fields_len,
.names_map = names_map,
.values_map = values_map,
.zir_index = enum_type.zir_index,
});
 
ip.items.appendAssumeCapacity(.{
.tag = tag,
.data = extra_index,
});
// This is both fields and values (if present).
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), reserved_len);
return .{
.index = @enumFromInt(ip.items.len - 1),
.tag_ty_index = extra_index + std.meta.fieldIndex(EnumExplicit, "int_tag_type").?,
.names_map = names_map,
.names_start = extra_index + extra_fields_len,
.values_map = values_map,
.values_start = extra_index + extra_fields_len + enum_type.fields_len,
};
}
 
pub const GetEnumInit = struct {
decl: DeclIndex,
namespace: OptionalNamespaceIndex,
tag_ty: Index,
names: []const NullTerminatedString,
values: []const Index,
tag_mode: Key.EnumType.TagMode,
zir_index: TrackedInst.Index.Optional,
};
 
pub fn getEnum(ip: *InternPool, gpa: Allocator, ini: GetEnumInit) Allocator.Error!Index {
pub fn getEnumType(
ip: *InternPool,
gpa: Allocator,
ini: EnumTypeInit,
) Allocator.Error!WipEnumType.Result {
const adapter: KeyAdapter = .{ .intern_pool = ip };
const gop = try ip.map.getOrPutAdapted(gpa, Key{
.enum_type = .{
// Only the decl is used for hashing and equality.
.decl = ini.decl,
 
.namespace = undefined,
.tag_ty = undefined,
.names = undefined,
.values = undefined,
.tag_mode = undefined,
.names_map = undefined,
.values_map = undefined,
.zir_index = undefined,
},
}, adapter);
if (gop.found_existing) return @enumFromInt(gop.index);
const gop = try ip.map.getOrPutAdapted(gpa, Key{ .enum_type = switch (ini.key) {
.declared => |d| .{ .declared = .{
.zir_index = d.zir_index,
.captures = .{ .external = d.captures },
} },
.reified => |r| .{ .reified = .{
.zir_index = r.zir_index,
.type_hash = r.type_hash,
} },
} }, adapter);
if (gop.found_existing) return .{ .existing = @enumFromInt(gop.index) };
assert(gop.index == ip.items.len);
errdefer _ = ip.map.pop();
 
try ip.items.ensureUnusedCapacity(gpa, 1);
 
assert(ini.tag_ty == .noreturn_type or ip.isIntegerType(ini.tag_ty));
for (ini.values) |value| assert(ip.typeOf(value) == ini.tag_ty);
const names_map = try ip.addMap(gpa, ini.fields_len);
errdefer _ = ip.maps.pop();
 
switch (ini.tag_mode) {
.auto => {
const names_map = try ip.addMap(gpa, ini.names.len);
addStringsToMap(ip, names_map, ini.names);
 
const fields_len: u32 = @intCast(ini.names.len);
assert(!ini.has_values);
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len +
fields_len);
// TODO: fmt bug
// zig fmt: off
switch (ini.key) {
.declared => |d| d.captures.len,
.reified => 2, // type_hash: PackedU64
} +
// zig fmt: on
ini.fields_len); // field types
 
const extra_index = ip.addExtraAssumeCapacity(EnumAuto{
.decl = undefined, // set by `prepare`
.captures_len = switch (ini.key) {
.declared => |d| @intCast(d.captures.len),
.reified => std.math.maxInt(u32),
},
.namespace = .none,
.int_tag_type = .none, // set by `prepare`
.fields_len = ini.fields_len,
.names_map = names_map,
.zir_index = switch (ini.key) {
inline else => |x| x.zir_index,
}.toOptional(),
});
ip.items.appendAssumeCapacity(.{
.tag = .type_enum_auto,
.data = extra_index,
});
switch (ini.key) {
.declared => |d| ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures)),
.reified => |r| _ = ip.addExtraAssumeCapacity(PackedU64.init(r.type_hash)),
}
const names_start = ip.extra.items.len;
ip.extra.appendNTimesAssumeCapacity(undefined, ini.fields_len);
return .{ .wip = .{
.index = @enumFromInt(gop.index),
.tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?,
.decl_index = extra_index + std.meta.fieldIndex(EnumAuto, "decl").?,
.namespace_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(EnumAuto, "namespace").? else null,
.names_map = names_map,
.names_start = @intCast(names_start),
.values_map = .none,
.values_start = undefined,
} };
},
.explicit, .nonexhaustive => {
const values_map: OptionalMapIndex = if (!ini.has_values) .none else m: {
const values_map = try ip.addMap(gpa, ini.fields_len);
break :m values_map.toOptional();
};
errdefer if (ini.has_values) {
_ = ip.maps.pop();
};
 
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len +
// TODO: fmt bug
// zig fmt: off
switch (ini.key) {
.declared => |d| d.captures.len,
.reified => 2, // type_hash: PackedU64
} +
// zig fmt: on
ini.fields_len + // field types
ini.fields_len * @intFromBool(ini.has_values)); // field values
 
const extra_index = ip.addExtraAssumeCapacity(EnumExplicit{
.decl = undefined, // set by `prepare`
.captures_len = switch (ini.key) {
.declared => |d| @intCast(d.captures.len),
.reified => std.math.maxInt(u32),
},
.namespace = .none,
.int_tag_type = .none, // set by `prepare`
.fields_len = ini.fields_len,
.names_map = names_map,
.values_map = values_map,
.zir_index = switch (ini.key) {
inline else => |x| x.zir_index,
}.toOptional(),
});
ip.items.appendAssumeCapacity(.{
.tag = switch (ini.tag_mode) {
.auto => unreachable,
.explicit => .type_enum_explicit,
.nonexhaustive => .type_enum_nonexhaustive,
},
.data = extra_index,
});
switch (ini.key) {
.declared => |d| ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures)),
.reified => |r| _ = ip.addExtraAssumeCapacity(PackedU64.init(r.type_hash)),
}
const names_start = ip.extra.items.len;
ip.extra.appendNTimesAssumeCapacity(undefined, ini.fields_len);
const values_start = ip.extra.items.len;
if (ini.has_values) {
ip.extra.appendNTimesAssumeCapacity(undefined, ini.fields_len);
}
return .{ .wip = .{
.index = @enumFromInt(gop.index),
.tag_ty_index = extra_index + std.meta.fieldIndex(EnumExplicit, "int_tag_type").?,
.decl_index = extra_index + std.meta.fieldIndex(EnumExplicit, "decl").?,
.namespace_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(EnumExplicit, "namespace").? else null,
.names_map = names_map,
.names_start = @intCast(names_start),
.values_map = values_map,
.values_start = @intCast(values_start),
} };
},
}
}
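
// The enum path's expected call sequence, sketched under the same assumptions
// as the union/struct paths (hypothetical caller; `tag_ty` assumed resolved):
const wip = switch (try ip.getEnumType(gpa, ini)) {
.existing => |ty| return ty,
.wip => |w| w,
};
errdefer wip.cancel(ip);
wip.prepare(ip, decl_index, namespace);
wip.setTagTy(ip, tag_ty);
// ...then one `wip.nextField(ip, name, value)` per field; `wip.index` is the
// finished `Index` once all fields are in place.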
 
const GeneratedTagEnumTypeInit = struct {
decl: DeclIndex,
owner_union_ty: Index,
tag_ty: Index,
names: []const NullTerminatedString,
values: []const Index,
tag_mode: LoadedEnumType.TagMode,
};
 
/// Creates an enum type which was automatically generated as the tag type of a
/// `union` with no explicit tag type. Since this is only called once per union
/// type, it asserts that no matching type yet exists.
pub fn getGeneratedTagEnumType(ip: *InternPool, gpa: Allocator, ini: GeneratedTagEnumTypeInit) Allocator.Error!Index {
assert(ip.isUnion(ini.owner_union_ty));
assert(ip.isIntegerType(ini.tag_ty));
for (ini.values) |val| assert(ip.typeOf(val) == ini.tag_ty);
 
try ip.map.ensureUnusedCapacity(gpa, 1);
try ip.items.ensureUnusedCapacity(gpa, 1);
 
const names_map = try ip.addMap(gpa, ini.names.len);
errdefer _ = ip.maps.pop();
ip.addStringsToMap(names_map, ini.names);
 
const fields_len: u32 = @intCast(ini.names.len);
 
switch (ini.tag_mode) {
.auto => {
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len +
1 + // owner_union
fields_len); // field names
ip.items.appendAssumeCapacity(.{
.tag = .type_enum_auto,
.data = ip.addExtraAssumeCapacity(EnumAuto{
.decl = ini.decl,
.namespace = ini.namespace,
.captures_len = 0,
.namespace = .none,
.int_tag_type = ini.tag_ty,
.names_map = names_map,
.fields_len = fields_len,
.zir_index = ini.zir_index,
.names_map = names_map,
.zir_index = .none,
}),
});
ip.extra.appendAssumeCapacity(@intFromEnum(ini.owner_union_ty));
ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names));
return @enumFromInt(ip.items.len - 1);
},
.explicit => return finishGetEnum(ip, gpa, ini, .type_enum_explicit),
.nonexhaustive => return finishGetEnum(ip, gpa, ini, .type_enum_nonexhaustive),
.explicit, .nonexhaustive => {
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len +
1 + // owner_union
fields_len + // field names
ini.values.len); // field values
 
const values_map: OptionalMapIndex = if (ini.values.len != 0) m: {
const map = try ip.addMap(gpa, ini.values.len);
addIndexesToMap(ip, map, ini.values);
break :m map.toOptional();
} else .none;
 
ip.items.appendAssumeCapacity(.{
.tag = switch (ini.tag_mode) {
.explicit => .type_enum_explicit,
.nonexhaustive => .type_enum_nonexhaustive,
.auto => unreachable,
},
.data = ip.addExtraAssumeCapacity(EnumExplicit{
.decl = ini.decl,
.captures_len = 0,
.namespace = .none,
.int_tag_type = ini.tag_ty,
.fields_len = fields_len,
.names_map = names_map,
.values_map = values_map,
.zir_index = .none,
}),
});
ip.extra.appendAssumeCapacity(@intFromEnum(ini.owner_union_ty));
ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names));
ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.values));
},
}
 
// Capacity for this was ensured earlier
const adapter: KeyAdapter = .{ .intern_pool = ip };
const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ .enum_type = .{
.generated_tag = .{ .union_type = ini.owner_union_ty },
} }, adapter);
assert(!gop.found_existing);
assert(gop.index == ip.items.len - 1);
return @enumFromInt(gop.index);
}
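
// Since a generated tag enum is keyed purely on its owner union, a later
// lookup needs only the union's `Index`. Hypothetical sketch, assuming
// `some_union_ty` is in scope:
const existing_tag_ty = ip.getIfExists(.{ .enum_type = .{
.generated_tag = .{ .union_type = some_union_ty },
} });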
 
pub fn finishGetEnum(
ip: *InternPool,
gpa: Allocator,
ini: GetEnumInit,
tag: Tag,
) Allocator.Error!Index {
const names_map = try ip.addMap(gpa, ini.names.len);
addStringsToMap(ip, names_map, ini.names);
pub const OpaqueTypeInit = struct {
has_namespace: bool,
key: union(enum) {
declared: struct {
zir_index: TrackedInst.Index,
captures: []const CaptureValue,
},
reified: struct {
zir_index: TrackedInst.Index,
// No type hash since reified opaques have no data other than the `@Type` location.
},
},
};
 
const values_map: OptionalMapIndex = if (ini.values.len == 0) .none else m: {
const values_map = try ip.addMap(gpa, ini.values.len);
addIndexesToMap(ip, values_map, ini.values);
break :m values_map.toOptional();
};
const fields_len: u32 = @intCast(ini.names.len);
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len +
fields_len);
ip.items.appendAssumeCapacity(.{
.tag = tag,
.data = ip.addExtraAssumeCapacity(EnumExplicit{
.decl = ini.decl,
.namespace = ini.namespace,
.int_tag_type = ini.tag_ty,
.fields_len = fields_len,
.names_map = names_map,
.values_map = values_map,
.zir_index = ini.zir_index,
}),
pub fn getOpaqueType(ip: *InternPool, gpa: Allocator, ini: OpaqueTypeInit) Allocator.Error!WipNamespaceType.Result {
const adapter: KeyAdapter = .{ .intern_pool = ip };
const gop = try ip.map.getOrPutAdapted(gpa, Key{ .opaque_type = switch (ini.key) {
.declared => |d| .{ .declared = .{
.zir_index = d.zir_index,
.captures = .{ .external = d.captures },
} },
.reified => |r| .{ .reified = .{
.zir_index = r.zir_index,
.type_hash = 0,
} },
} }, adapter);
if (gop.found_existing) return .{ .existing = @enumFromInt(gop.index) };
errdefer _ = ip.map.pop();
try ip.items.ensureUnusedCapacity(gpa, 1);
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeOpaque).Struct.fields.len + switch (ini.key) {
.declared => |d| d.captures.len,
.reified => 0,
});
ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names));
ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.values));
return @enumFromInt(ip.items.len - 1);
const extra_index = ip.addExtraAssumeCapacity(Tag.TypeOpaque{
.decl = undefined, // set by `finish`
.namespace = .none,
.zir_index = switch (ini.key) {
inline else => |x| x.zir_index,
},
.captures_len = switch (ini.key) {
.declared => |d| @intCast(d.captures.len),
.reified => std.math.maxInt(u32),
},
});
ip.items.appendAssumeCapacity(.{
.tag = .type_opaque,
.data = extra_index,
});
switch (ini.key) {
.declared => |d| ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures)),
.reified => {},
}
return .{ .wip = .{
.index = @enumFromInt(gop.index),
.decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "decl").?,
.namespace_extra_index = if (ini.has_namespace)
extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "namespace").?
else
null,
} };
}
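
// Opaques have no fields, so a reified opaque needs no type hash (stored as 0
// above). A hypothetical reification call site, assuming `tracked_inst`:
const result = try ip.getOpaqueType(gpa, .{
.has_namespace = false,
.key = .{ .reified = .{ .zir_index = tracked_inst } },
});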
 
pub fn getIfExists(ip: *const InternPool, key: Key) ?Index {
@@ -6716,8 +7212,34 @@ fn addMap(ip: *InternPool, gpa: Allocator, cap: usize) Allocator.Error!MapIndex
 
/// This operation only happens under compile error conditions.
/// Leak the index until the next garbage collection.
/// TODO: this is a bit problematic to implement, can we get away without it?
pub const remove = @compileError("InternPool.remove is not currently a supported operation; put a TODO there instead");
/// Invalidates all references to this index.
pub fn remove(ip: *InternPool, index: Index) void {
if (@intFromEnum(index) < static_keys.len) {
// The item being removed replaced a special index via `InternPool.resolveBuiltinType`.
// Restore the original item at this index.
switch (static_keys[@intFromEnum(index)]) {
.simple_type => |s| {
ip.items.set(@intFromEnum(index), .{
.tag = .simple_type,
.data = @intFromEnum(s),
});
},
else => unreachable,
}
return;
}
 
if (@intFromEnum(index) == ip.items.len - 1) {
// Happy case - we can just drop the item without affecting any other indices.
ip.items.len -= 1;
_ = ip.map.pop();
} else {
// We must preserve the item so that indices following it remain valid.
// Thus, we will rewrite the tag to `removed`, leaking the item until
// next GC but causing `KeyAdapter` to ignore it.
ip.items.set(@intFromEnum(index), .{ .tag = .removed, .data = undefined });
}
}
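
// Tombstone semantics, sketched with hypothetical keys: only the newest item
// is truly popped; an interior removal retags the slot so that every later
// `Index` remains valid.
const a = try ip.get(gpa, key_a); // interior once `b` is added
const b = try ip.get(gpa, key_b); // newest item
ip.remove(a); // interior: retagged `.removed`, leaked until the next GC
ip.remove(b); // newest: truly popped, `items.len` shrinks by one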
 
fn addInt(ip: *InternPool, gpa: Allocator, ty: Index, tag: Tag, limbs: []const Limb) !void {
const limbs_len = @as(u32, @intCast(limbs.len));
@@ -7077,9 +7599,9 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
.func => unreachable,
 
.int => |int| switch (ip.indexToKey(new_ty)) {
.enum_type => |enum_type| return ip.get(gpa, .{ .enum_tag = .{
.enum_type => return ip.get(gpa, .{ .enum_tag = .{
.ty = new_ty,
.int = try ip.getCoerced(gpa, val, enum_type.tag_ty),
.int = try ip.getCoerced(gpa, val, ip.loadEnumType(new_ty).tag_ty),
} }),
.ptr_type => return ip.get(gpa, .{ .ptr = .{
.ty = new_ty,
@@ -7108,7 +7630,8 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
.enum_tag => |enum_tag| if (ip.isIntegerType(new_ty))
return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty),
.enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) {
.enum_type => |enum_type| {
.enum_type => {
const enum_type = ip.loadEnumType(new_ty);
const index = enum_type.nameIndex(ip, enum_literal).?;
return ip.get(gpa, .{ .enum_tag = .{
.ty = new_ty,
@@ -7249,7 +7772,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
const new_elem_ty = switch (ip.indexToKey(new_ty)) {
inline .array_type, .vector_type => |seq_type| seq_type.child,
.anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[i],
.struct_type => |struct_type| struct_type.field_types.get(ip)[i],
.struct_type => ip.loadStructType(new_ty).field_types.get(ip)[i],
else => unreachable,
};
elem.* = try ip.getCoerced(gpa, elem.*, new_elem_ty);
@@ -7513,6 +8036,10 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
if (!gop.found_existing) gop.value_ptr.* = .{};
gop.value_ptr.count += 1;
gop.value_ptr.bytes += 1 + 4 + @as(usize, switch (tag) {
// Note that in this case, we have technically leaked some extra data
// bytes which we do not account for here.
.removed => 0,
 
.type_int_signed => 0,
.type_int_unsigned => 0,
.type_array_small => @sizeOf(Vector),
@@ -7529,12 +8056,31 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
break :b @sizeOf(Tag.ErrorSet) + (@sizeOf(u32) * info.names_len);
},
.type_inferred_error_set => 0,
.type_enum_explicit, .type_enum_nonexhaustive => @sizeOf(EnumExplicit),
.type_enum_auto => @sizeOf(EnumAuto),
.type_opaque => @sizeOf(Key.OpaqueType),
.type_enum_explicit, .type_enum_nonexhaustive => b: {
const info = ip.extraData(EnumExplicit, data);
var ints = @typeInfo(EnumExplicit).Struct.fields.len + info.captures_len + info.fields_len;
if (info.values_map != .none) ints += info.fields_len;
break :b @sizeOf(u32) * ints;
},
.type_enum_auto => b: {
const info = ip.extraData(EnumAuto, data);
const ints = @typeInfo(EnumAuto).Struct.fields.len + info.captures_len + info.fields_len;
break :b @sizeOf(u32) * ints;
},
.type_opaque => b: {
const info = ip.extraData(Tag.TypeOpaque, data);
const ints = @typeInfo(Tag.TypeOpaque).Struct.fields.len + info.captures_len;
break :b @sizeOf(u32) * ints;
},
.type_struct => b: {
const info = ip.extraData(Tag.TypeStruct, data);
if (data == 0) break :b 0;
const extra = ip.extraDataTrail(Tag.TypeStruct, data);
const info = extra.data;
var ints: usize = @typeInfo(Tag.TypeStruct).Struct.fields.len;
if (info.flags.any_captures) {
const captures_len = ip.extra.items[extra.end];
ints += 1 + captures_len;
}
ints += info.fields_len; // types
if (!info.flags.is_tuple) {
ints += 1; // names_map
@@ -7552,20 +8098,29 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
ints += info.fields_len; // offsets
break :b @sizeOf(u32) * ints;
},
.type_struct_ns => @sizeOf(Module.Namespace),
.type_struct_anon => b: {
const info = ip.extraData(TypeStructAnon, data);
break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 3 * info.fields_len);
},
.type_struct_packed => b: {
const info = ip.extraData(Tag.TypeStructPacked, data);
const extra = ip.extraDataTrail(Tag.TypeStructPacked, data);
const captures_len = if (extra.data.flags.any_captures)
ip.extra.items[extra.end]
else
0;
break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len +
info.fields_len + info.fields_len);
@intFromBool(extra.data.flags.any_captures) + captures_len +
extra.data.fields_len * 2);
},
.type_struct_packed_inits => b: {
const info = ip.extraData(Tag.TypeStructPacked, data);
const extra = ip.extraDataTrail(Tag.TypeStructPacked, data);
const captures_len = if (extra.data.flags.any_captures)
ip.extra.items[extra.end]
else
0;
break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len +
info.fields_len + info.fields_len + info.fields_len);
@intFromBool(extra.data.flags.any_captures) + captures_len +
extra.data.fields_len * 3);
},
.type_tuple_anon => b: {
const info = ip.extraData(TypeStructAnon, data);
@@ -7573,16 +8128,20 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
},
 
.type_union => b: {
const info = ip.extraData(Tag.TypeUnion, data);
const enum_info = ip.indexToKey(info.tag_ty).enum_type;
const fields_len: u32 = @intCast(enum_info.names.len);
const per_field = @sizeOf(u32); // field type
// 1 byte per field for alignment, rounded up to the nearest 4 bytes
const alignments = if (info.flags.any_aligned_fields)
((fields_len + 3) / 4) * 4
const extra = ip.extraDataTrail(Tag.TypeUnion, data);
const captures_len = if (extra.data.flags.any_captures)
ip.extra.items[extra.end]
else
0;
break :b @sizeOf(Tag.TypeUnion) + (fields_len * per_field) + alignments;
const per_field = @sizeOf(u32); // field type
// 1 byte per field for alignment, rounded up to the nearest 4 bytes
const alignments = if (extra.data.flags.any_aligned_fields)
((extra.data.fields_len + 3) / 4) * 4
else
0;
break :b @sizeOf(Tag.TypeUnion) +
4 * (@intFromBool(extra.data.flags.any_captures) + captures_len) +
(extra.data.fields_len * per_field) + alignments;
},
 
.type_function => b: {
@@ -7698,6 +8257,8 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void {
for (tags, datas, 0..) |tag, data, i| {
try w.print("${d} = {s}(", .{ i, @tagName(tag) });
switch (tag) {
.removed => {},
 
.simple_type => try w.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(data)))}),
.simple_value => try w.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(data)))}),
 
@@ -7718,7 +8279,6 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void {
.type_enum_auto,
.type_opaque,
.type_struct,
.type_struct_ns,
.type_struct_anon,
.type_struct_packed,
.type_struct_packed_inits,
@@ -8105,6 +8665,8 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
// This optimization on tags is needed so that indexToKey can call
// typeOf without being recursive.
_ => switch (ip.items.items(.tag)[@intFromEnum(index)]) {
.removed => unreachable,
 
.type_int_signed,
.type_int_unsigned,
.type_array_big,
@@ -8124,7 +8686,6 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.simple_type,
.type_opaque,
.type_struct,
.type_struct_ns,
.type_struct_anon,
.type_struct_packed,
.type_struct_packed_inits,
@@ -8218,7 +8779,7 @@ pub fn toEnum(ip: *const InternPool, comptime E: type, i: Index) E {
 
pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 {
return switch (ip.indexToKey(ty)) {
.struct_type => |struct_type| struct_type.field_types.len,
.struct_type => ip.loadStructType(ty).field_types.len,
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
.array_type => |array_type| array_type.len,
.vector_type => |vector_type| vector_type.len,
@@ -8228,7 +8789,7 @@ pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 {
 
pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 {
return switch (ip.indexToKey(ty)) {
.struct_type => |struct_type| struct_type.field_types.len,
.struct_type => ip.loadStructType(ty).field_types.len,
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
.array_type => |array_type| array_type.len + @intFromBool(array_type.sentinel != .none),
.vector_type => |vector_type| vector_type.len,
@@ -8423,6 +8984,8 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
.var_args_param_type => unreachable, // special tag
 
_ => switch (ip.items.items(.tag)[@intFromEnum(index)]) {
.removed => unreachable,
 
.type_int_signed,
.type_int_unsigned,
=> .Int,
@@ -8458,7 +9021,6 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
.type_opaque => .Opaque,
 
.type_struct,
.type_struct_ns,
.type_struct_anon,
.type_struct_packed,
.type_struct_packed_inits,
 
src/Liveness.zig added: 4090, removed: 2946, total 1144
@@ -131,7 +131,7 @@ fn LivenessPassData(comptime pass: LivenessPass) type {
};
}
 
pub fn analyze(gpa: Allocator, air: Air, intern_pool: *const InternPool) Allocator.Error!Liveness {
pub fn analyze(gpa: Allocator, air: Air, intern_pool: *InternPool) Allocator.Error!Liveness {
const tracy = trace(@src());
defer tracy.end();
 
@@ -836,7 +836,7 @@ pub const BigTomb = struct {
const Analysis = struct {
gpa: Allocator,
air: Air,
intern_pool: *const InternPool,
intern_pool: *InternPool,
tomb_bits: []usize,
special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32),
extra: std.ArrayListUnmanaged(u32),
 
src/Module.zig added: 4090, removed: 2946, total 1144
@@ -101,17 +101,6 @@ embed_table: std.StringArrayHashMapUnmanaged(*EmbedFile) = .{},
/// is not yet implemented.
intern_pool: InternPool = .{},
 
/// The index type for this array is `CaptureScope.Index` and the elements here are
/// the indexes of the parent capture scopes.
/// Memory is owned by gpa; garbage collected.
capture_scope_parents: std.ArrayListUnmanaged(CaptureScope.Index) = .{},
/// Value is index of type
/// Memory is owned by gpa; garbage collected.
runtime_capture_scopes: std.AutoArrayHashMapUnmanaged(CaptureScope.Key, InternPool.Index) = .{},
/// Value is index of value
/// Memory is owned by gpa; garbage collected.
comptime_capture_scopes: std.AutoArrayHashMapUnmanaged(CaptureScope.Key, InternPool.Index) = .{},
 
/// To be eliminated in a future commit by moving more data into InternPool.
/// Current uses that must be eliminated:
/// * comptime pointer mutation
@@ -305,28 +294,6 @@ pub const Export = struct {
}
};
 
pub const CaptureScope = struct {
pub const Key = extern struct {
zir_index: Zir.Inst.Index,
index: Index,
};
 
/// Index into `capture_scope_parents` which uniquely identifies a capture scope.
pub const Index = enum(u32) {
none = std.math.maxInt(u32),
_,
 
pub fn parent(i: Index, mod: *Module) Index {
return mod.capture_scope_parents.items[@intFromEnum(i)];
}
};
};
 
pub fn createCaptureScope(mod: *Module, parent: CaptureScope.Index) error{OutOfMemory}!CaptureScope.Index {
try mod.capture_scope_parents.append(mod.gpa, parent);
return @enumFromInt(mod.capture_scope_parents.items.len - 1);
}
 
const ValueArena = struct {
state: std.heap.ArenaAllocator.State,
state_acquired: ?*std.heap.ArenaAllocator.State = null,
@@ -386,9 +353,6 @@ pub const Decl = struct {
/// there is no parent.
src_namespace: Namespace.Index,
 
/// The scope which lexically contains this decl.
src_scope: CaptureScope.Index,
 
/// The AST node index of this declaration.
/// Must be recomputed when the corresponding source file is modified.
src_node: Ast.Node.Index,
@@ -563,7 +527,7 @@ pub const Decl = struct {
 
/// If the Decl owns its value and it is a union, return it,
/// otherwise null.
pub fn getOwnedUnion(decl: Decl, zcu: *Zcu) ?InternPool.UnionType {
pub fn getOwnedUnion(decl: Decl, zcu: *Zcu) ?InternPool.LoadedUnionType {
if (!decl.owns_tv) return null;
if (decl.val.ip_index == .none) return null;
return zcu.typeToUnion(decl.val.toType());
@@ -599,14 +563,15 @@ pub const Decl = struct {
/// enum, or opaque.
pub fn getInnerNamespaceIndex(decl: Decl, zcu: *Zcu) Namespace.OptionalIndex {
if (!decl.has_tv) return .none;
const ip = &zcu.intern_pool;
return switch (decl.val.ip_index) {
.empty_struct_type => .none,
.none => .none,
else => switch (zcu.intern_pool.indexToKey(decl.val.toIntern())) {
.opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
.struct_type => |struct_type| struct_type.namespace,
.union_type => |union_type| union_type.namespace.toOptional(),
.enum_type => |enum_type| enum_type.namespace,
else => switch (ip.indexToKey(decl.val.toIntern())) {
.opaque_type => ip.loadOpaqueType(decl.val.toIntern()).namespace,
.struct_type => ip.loadStructType(decl.val.toIntern()).namespace,
.union_type => ip.loadUnionType(decl.val.toIntern()).namespace,
.enum_type => ip.loadEnumType(decl.val.toIntern()).namespace,
else => .none,
},
};
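
Here and throughout this diff, tag-only `indexToKey` switches are paired with `load*Type` calls that fetch the full view on demand. A minimal sketch of the pattern, assuming `ty` is an `InternPool.Index` and using only accessors visible in this diff:

    const ip = &zcu.intern_pool;
    const ns: InternPool.OptionalNamespaceIndex = switch (ip.indexToKey(ty)) {
        // The key now only identifies the kind; the loaded view carries the data.
        .struct_type => ip.loadStructType(ty).namespace,
        .union_type => ip.loadUnionType(ty).namespace,
        else => .none,
    };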
@@ -792,7 +757,6 @@ pub const Namespace = struct {
/// These are only declarations named directly by the AST; anonymous
/// declarations are not stored here.
decls: std.ArrayHashMapUnmanaged(Decl.Index, void, DeclContext, true) = .{},
 
/// Key is usingnamespace Decl itself. To find the namespace being included,
/// the Decl Value has to be resolved as a Type which has a Namespace.
/// Value is whether the usingnamespace decl is marked `pub`.
@@ -2140,10 +2104,6 @@ pub fn deinit(zcu: *Zcu) void {
 
zcu.intern_pool.deinit(gpa);
zcu.tmp_hack_arena.deinit();
 
zcu.capture_scope_parents.deinit(gpa);
zcu.runtime_capture_scopes.deinit(gpa);
zcu.comptime_capture_scopes.deinit(gpa);
}
 
pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void {
@@ -3342,6 +3302,70 @@ pub fn semaPkg(mod: *Module, pkg: *Package.Module) !void {
return mod.semaFile(file);
}
 
fn getFileRootStruct(zcu: *Zcu, decl_index: Decl.Index, namespace_index: Namespace.Index, file: *File) Allocator.Error!InternPool.Index {
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended;
assert(extended.opcode == .struct_decl);
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
assert(!small.has_captures_len);
assert(!small.has_backing_int);
assert(small.layout == .Auto);
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
const fields_len = if (small.has_fields_len) blk: {
const fields_len = file.zir.extra[extra_index];
extra_index += 1;
break :blk fields_len;
} else 0;
const decls_len = if (small.has_decls_len) blk: {
const decls_len = file.zir.extra[extra_index];
extra_index += 1;
break :blk decls_len;
} else 0;
const decls = file.zir.bodySlice(extra_index, decls_len);
extra_index += decls_len;
 
const tracked_inst = try ip.trackZir(gpa, file, .main_struct_inst);
const wip_ty = switch (try ip.getStructType(gpa, .{
.layout = .Auto,
.fields_len = fields_len,
.known_non_opv = small.known_non_opv,
.requires_comptime = if (small.known_comptime_only) .yes else .unknown,
.is_tuple = small.is_tuple,
.any_comptime_fields = small.any_comptime_fields,
.any_default_inits = small.any_default_inits,
.inits_resolved = false,
.any_aligned_fields = small.any_aligned_fields,
.has_namespace = true,
.key = .{ .declared = .{
.zir_index = tracked_inst,
.captures = &.{},
} },
})) {
.existing => unreachable, // we wouldn't be analyzing the file root if this type existed
.wip => |wip| wip,
};
errdefer wip_ty.cancel(ip);
 
if (zcu.comp.debug_incremental) {
try ip.addDependency(
gpa,
InternPool.Depender.wrap(.{ .decl = decl_index }),
.{ .src_hash = tracked_inst },
);
}
 
const decl = zcu.declPtr(decl_index);
decl.val = Value.fromInterned(wip_ty.index);
decl.has_tv = true;
decl.owns_tv = true;
decl.analysis = .complete;
 
try zcu.scanNamespace(namespace_index, decls, decl);
 
return wip_ty.finish(ip, decl_index, namespace_index.toOptional());
}
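
The asserts above encode what AstGen guarantees for `main_struct_inst`: a file root is a plain struct with no captures, no backing integer, and `.Auto` layout. At the language level that is just the rule that a file is a struct; a tiny self-contained illustration:

    const std = @import("std");

    pub const answer: u32 = 42;

    test "a file is a struct with no captures" {
        // `@This()` at file scope names the root struct type interned by
        // `getFileRootStruct`, whose `captures` list is empty.
        const Root = @This();
        try std.testing.expectEqual(@as(u32, 42), Root.answer);
    }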
 
/// Regardless of the file status, will create a `Decl` so that we
/// can track dependencies and re-analyze when the file becomes outdated.
pub fn semaFile(mod: *Module, file: *File) SemaError!void {
@@ -3363,15 +3387,14 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
.decl_index = undefined,
.file_scope = file,
});
const new_namespace = mod.namespacePtr(new_namespace_index);
errdefer mod.destroyNamespace(new_namespace_index);
 
const new_decl_index = try mod.allocateNewDecl(new_namespace_index, 0, .none);
const new_decl_index = try mod.allocateNewDecl(new_namespace_index, 0);
const new_decl = mod.declPtr(new_decl_index);
errdefer @panic("TODO error handling");
 
file.root_decl = new_decl_index.toOptional();
new_namespace.decl_index = new_decl_index;
mod.namespacePtr(new_namespace_index).decl_index = new_decl_index;
 
new_decl.name = try file.fullyQualifiedName(mod);
new_decl.name_fully_qualified = true;
@@ -3390,54 +3413,10 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
}
assert(file.zir_loaded);
 
var sema_arena = std.heap.ArenaAllocator.init(gpa);
defer sema_arena.deinit();
const sema_arena_allocator = sema_arena.allocator();
const struct_ty = try mod.getFileRootStruct(new_decl_index, new_namespace_index, file);
errdefer mod.intern_pool.remove(struct_ty);
 
var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
defer comptime_mutable_decls.deinit();
 
var comptime_err_ret_trace = std.ArrayList(SrcLoc).init(gpa);
defer comptime_err_ret_trace.deinit();
 
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
.arena = sema_arena_allocator,
.code = file.zir,
.owner_decl = new_decl,
.owner_decl_index = new_decl_index,
.func_index = .none,
.func_is_naked = false,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
.comptime_err_ret_trace = &comptime_err_ret_trace,
};
defer sema.deinit();
 
const struct_ty = sema.getStructType(
new_decl_index,
new_namespace_index,
try mod.intern_pool.trackZir(gpa, file, .main_struct_inst),
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
};
// TODO: figure out InternPool removals for incremental compilation
//errdefer ip.remove(struct_ty);
for (comptime_mutable_decls.items) |decl_index| {
const decl = mod.declPtr(decl_index);
_ = try decl.internValue(mod);
}
 
new_decl.val = Value.fromInterned(struct_ty);
new_decl.has_tv = true;
new_decl.owns_tv = true;
new_decl.analysis = .complete;
 
const comp = mod.comp;
switch (comp.cache_use) {
switch (mod.comp.cache_use) {
.whole => |whole| if (whole.cache_manifest) |man| {
const source = file.getSource(gpa) catch |err| {
try reportRetryableFileError(mod, file, "unable to load source: {s}", .{@errorName(err)});
@@ -3573,7 +3552,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
.sema = &sema,
.src_decl = decl_index,
.namespace = decl.src_namespace,
.wip_capture_scope = try mod.createCaptureScope(decl.src_scope),
.instructions = .{},
.inlining = null,
.is_comptime = true,
@@ -4205,7 +4183,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void
);
const comp = zcu.comp;
if (!gop.found_existing) {
const new_decl_index = try zcu.allocateNewDecl(namespace_index, decl_node, iter.parent_decl.src_scope);
const new_decl_index = try zcu.allocateNewDecl(namespace_index, decl_node);
const new_decl = zcu.declPtr(new_decl_index);
new_decl.kind = kind;
new_decl.name = decl_name;
@@ -4438,7 +4416,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
.sema = &sema,
.src_decl = decl_index,
.namespace = decl.src_namespace,
.wip_capture_scope = try mod.createCaptureScope(decl.src_scope),
.instructions = .{},
.inlining = null,
.is_comptime = false,
@@ -4639,7 +4616,6 @@ pub fn allocateNewDecl(
mod: *Module,
namespace: Namespace.Index,
src_node: Ast.Node.Index,
src_scope: CaptureScope.Index,
) !Decl.Index {
const ip = &mod.intern_pool;
const gpa = mod.gpa;
@@ -4657,7 +4633,6 @@ pub fn allocateNewDecl(
.@"addrspace" = .generic,
.analysis = .unreferenced,
.zir_decl_index = .none,
.src_scope = src_scope,
.is_pub = false,
.is_exported = false,
.alive = false,
@@ -4697,17 +4672,16 @@ pub fn errorSetBits(mod: *Module) u16 {
 
pub fn createAnonymousDecl(mod: *Module, block: *Sema.Block, typed_value: TypedValue) !Decl.Index {
const src_decl = mod.declPtr(block.src_decl);
return mod.createAnonymousDeclFromDecl(src_decl, block.namespace, block.wip_capture_scope, typed_value);
return mod.createAnonymousDeclFromDecl(src_decl, block.namespace, typed_value);
}
 
pub fn createAnonymousDeclFromDecl(
mod: *Module,
src_decl: *Decl,
namespace: Namespace.Index,
src_scope: CaptureScope.Index,
tv: TypedValue,
) !Decl.Index {
const new_decl_index = try mod.allocateNewDecl(namespace, src_decl.src_node, src_scope);
const new_decl_index = try mod.allocateNewDecl(namespace, src_decl.src_node);
errdefer mod.destroyDecl(new_decl_index);
const name = try mod.intern_pool.getOrPutStringFmt(mod.gpa, "{}__anon_{d}", .{
src_decl.name.fmt(&mod.intern_pool), @intFromEnum(new_decl_index),
@@ -5276,7 +5250,7 @@ pub fn populateTestFunctions(
.len = test_decl_name.len,
.child = .u8_type,
});
const test_name_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, .none, .{
const test_name_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, .{
.ty = test_name_decl_ty,
.val = Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = test_name_decl_ty.toIntern(),
@@ -5322,7 +5296,7 @@ pub fn populateTestFunctions(
.child = test_fn_ty.toIntern(),
.sentinel = .none,
});
const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, .none, .{
const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, .{
.ty = array_decl_ty,
.val = Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = array_decl_ty.toIntern(),
@@ -5686,7 +5660,7 @@ pub fn enumValue(mod: *Module, ty: Type, tag_int: InternPool.Index) Allocator.Er
pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.Error!Value {
const ip = &mod.intern_pool;
const gpa = mod.gpa;
const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
const enum_type = ip.loadEnumType(ty.toIntern());
 
if (enum_type.values.len == 0) {
// Auto-numbered fields.
@@ -5976,14 +5950,6 @@ pub fn atomicPtrAlignment(
return .none;
}
 
pub fn opaqueSrcLoc(mod: *Module, opaque_type: InternPool.Key.OpaqueType) SrcLoc {
return mod.declPtr(opaque_type.decl).srcLoc(mod);
}
 
pub fn opaqueFullyQualifiedName(mod: *Module, opaque_type: InternPool.Key.OpaqueType) !InternPool.NullTerminatedString {
return mod.declPtr(opaque_type.decl).fullyQualifiedName(mod);
}
 
pub fn declFileScope(mod: *Module, decl_index: Decl.Index) *File {
return mod.declPtr(decl_index).getFileScope(mod);
}
@@ -5992,28 +5958,26 @@ pub fn declFileScope(mod: *Module, decl_index: Decl.Index) *File {
/// * `@TypeOf(.{})`
/// * A struct which has no fields (`struct {}`).
/// * Not a struct.
pub fn typeToStruct(mod: *Module, ty: Type) ?InternPool.Key.StructType {
if (ty.ip_index == .none) return null;
return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.struct_type => |t| t,
else => null,
};
}
 
pub fn typeToPackedStruct(mod: *Module, ty: Type) ?InternPool.Key.StructType {
if (ty.ip_index == .none) return null;
return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.struct_type => |t| if (t.layout == .Packed) t else null,
else => null,
};
}
 
/// This asserts that the union's enum tag type has been resolved.
pub fn typeToUnion(mod: *Module, ty: Type) ?InternPool.UnionType {
pub fn typeToStruct(mod: *Module, ty: Type) ?InternPool.LoadedStructType {
if (ty.ip_index == .none) return null;
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.ip_index)) {
.union_type => |k| ip.loadUnionType(k),
.struct_type => ip.loadStructType(ty.ip_index),
else => null,
};
}
 
pub fn typeToPackedStruct(mod: *Module, ty: Type) ?InternPool.LoadedStructType {
const s = mod.typeToStruct(ty) orelse return null;
if (s.layout != .Packed) return null;
return s;
}
 
pub fn typeToUnion(mod: *Module, ty: Type) ?InternPool.LoadedUnionType {
if (ty.ip_index == .none) return null;
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.ip_index)) {
.union_type => ip.loadUnionType(ty.ip_index),
else => null,
};
}
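
These helpers now return `InternPool.Loaded*Type` views rather than `indexToKey` payloads, so callers read field data through the pool. A usage sketch, assuming the slice accessor `get(ip)` seen elsewhere in this diff (`captures.get(ip)`, `field_names.get(ip)`):

    const ip = &mod.intern_pool;
    if (mod.typeToStruct(ty)) |struct_type| {
        // The loaded view holds slices into the intern pool, not owned arrays.
        for (struct_type.field_types.get(ip)) |field_ty_index| {
            _ = Type.fromInterned(field_ty_index);
        }
    }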
@@ -6115,7 +6079,7 @@ pub const UnionLayout = struct {
padding: u32,
};
 
pub fn getUnionLayout(mod: *Module, u: InternPool.UnionType) UnionLayout {
pub fn getUnionLayout(mod: *Module, u: InternPool.LoadedUnionType) UnionLayout {
const ip = &mod.intern_pool;
assert(u.haveLayout(ip));
var most_aligned_field: u32 = undefined;
@@ -6161,7 +6125,7 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.UnionType) UnionLayout {
const tag_size = Type.fromInterned(u.enum_tag_ty).abiSize(mod);
const tag_align = Type.fromInterned(u.enum_tag_ty).abiAlignment(mod).max(.@"1");
return .{
.abi_size = u.size,
.abi_size = u.size(ip).*,
.abi_align = tag_align.max(payload_align),
.most_aligned_field = most_aligned_field,
.most_aligned_field_size = most_aligned_field_size,
@@ -6170,16 +6134,16 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.UnionType) UnionLayout {
.payload_align = payload_align,
.tag_align = tag_align,
.tag_size = tag_size,
.padding = u.padding,
.padding = u.padding(ip).*,
};
}
 
pub fn unionAbiSize(mod: *Module, u: InternPool.UnionType) u64 {
pub fn unionAbiSize(mod: *Module, u: InternPool.LoadedUnionType) u64 {
return mod.getUnionLayout(u).abi_size;
}
 
/// Returns 0 if the union is represented with 0 bits at runtime.
pub fn unionAbiAlignment(mod: *Module, u: InternPool.UnionType) Alignment {
pub fn unionAbiAlignment(mod: *Module, u: InternPool.LoadedUnionType) Alignment {
const ip = &mod.intern_pool;
const have_tag = u.flagsPtr(ip).runtime_tag.hasTag();
var max_align: Alignment = .none;
@@ -6196,7 +6160,7 @@ pub fn unionAbiAlignment(mod: *Module, u: InternPool.UnionType) Alignment {
/// Returns the field alignment, assuming the union is not packed.
/// Keep implementation in sync with `Sema.unionFieldAlignment`.
/// Prefer to call that function instead of this one during Sema.
pub fn unionFieldNormalAlignment(mod: *Module, u: InternPool.UnionType, field_index: u32) Alignment {
pub fn unionFieldNormalAlignment(mod: *Module, u: InternPool.LoadedUnionType, field_index: u32) Alignment {
const ip = &mod.intern_pool;
const field_align = u.fieldAlign(ip, field_index);
if (field_align != .none) return field_align;
@@ -6205,12 +6169,11 @@ pub fn unionFieldNormalAlignment(mod: *Module, u: InternPool.UnionType, field_in
}
 
/// Returns the index of the active field, given the current tag value
pub fn unionTagFieldIndex(mod: *Module, u: InternPool.UnionType, enum_tag: Value) ?u32 {
pub fn unionTagFieldIndex(mod: *Module, u: InternPool.LoadedUnionType, enum_tag: Value) ?u32 {
const ip = &mod.intern_pool;
if (enum_tag.toIntern() == .none) return null;
assert(ip.typeOf(enum_tag.toIntern()) == u.enum_tag_ty);
const enum_type = ip.indexToKey(u.enum_tag_ty).enum_type;
return enum_type.tagValueIndex(ip, enum_tag.toIntern());
return u.loadTagType(ip).tagValueIndex(ip, enum_tag.toIntern());
}
 
/// Returns the field alignment of a non-packed struct in byte units.
@@ -6257,7 +6220,7 @@ pub fn structFieldAlignmentExtern(mod: *Module, field_ty: Type) Alignment {
/// projects.
pub fn structPackedFieldBitOffset(
mod: *Module,
struct_type: InternPool.Key.StructType,
struct_type: InternPool.LoadedStructType,
field_index: u32,
) u16 {
const ip = &mod.intern_pool;
 
src/Package/Module.zig added: 4090, removed: 2946, total 1144
@@ -63,6 +63,11 @@ pub const CreateOptions = struct {
 
builtin_mod: ?*Package.Module,
 
/// Allocated into the given `arena`. Should be shared across all module creations in a Compilation.
/// Ignored if `builtin_mod` is passed or if `!have_zcu`.
/// Otherwise, may be `null` only if this Compilation consists of a single module.
builtin_modules: ?*std.StringHashMapUnmanaged(*Module),
 
pub const Paths = struct {
root: Package.Path,
/// Relative to `root`. May contain path separators.
@@ -364,11 +369,37 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {
.wasi_exec_model = options.global.wasi_exec_model,
}, arena);
 
const new = if (options.builtin_modules) |builtins| new: {
const gop = try builtins.getOrPut(arena, generated_builtin_source);
if (gop.found_existing) break :b gop.value_ptr.*;
errdefer builtins.removeByPtr(gop.key_ptr);
const new = try arena.create(Module);
gop.value_ptr.* = new;
break :new new;
} else try arena.create(Module);
errdefer if (options.builtin_modules) |builtins| assert(builtins.remove(generated_builtin_source));
 
const new_file = try arena.create(File);
 
const digest = Cache.HashHelper.oneShot(generated_builtin_source);
const builtin_sub_path = try arena.dupe(u8, "b" ++ std.fs.path.sep_str ++ digest);
const new = try arena.create(Module);
const bin_digest, const hex_digest = digest: {
var hasher: Cache.Hasher = Cache.hasher_init;
hasher.update(generated_builtin_source);
 
var bin_digest: Cache.BinDigest = undefined;
hasher.final(&bin_digest);
 
var hex_digest: Cache.HexDigest = undefined;
_ = std.fmt.bufPrint(
&hex_digest,
"{s}",
.{std.fmt.fmtSliceHexLower(&bin_digest)},
) catch unreachable;
 
break :digest .{ bin_digest, hex_digest };
};
 
const builtin_sub_path = try arena.dupe(u8, "b" ++ std.fs.path.sep_str ++ hex_digest);
 
new.* = .{
.root = .{
.root_dir = options.global_cache_directory,
@@ -415,6 +446,9 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {
.status = .never_loaded,
.mod = new,
.root_decl = .none,
// We might as well use this digest for the File `path_digest`, since there's a
// one-to-one correspondence here between distinct paths and distinct contents.
.path_digest = bin_digest,
};
break :b new;
};
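
The replaced `oneShot` call produced only a hex digest; the new code hashes once and keeps both the binary digest (stored above as the File's `path_digest`) and its hex form (used for the cache sub-path). A standalone sketch of the same bin-to-hex step, substituting `std.crypto.hash.Md5` for the internal `Cache.Hasher` purely for illustration:

    const std = @import("std");

    fn hexDigest(source: []const u8) [32]u8 {
        var bin: [16]u8 = undefined;
        std.crypto.hash.Md5.hash(source, &bin, .{});
        // Same formatting trick as the diff: render the binary digest as
        // lowercase hex into a fixed-size buffer.
        var hex: [32]u8 = undefined;
        _ = std.fmt.bufPrint(&hex, "{s}", .{std.fmt.fmtSliceHexLower(&bin)}) catch unreachable;
        return hex;
    }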
 
src/Sema.zig added: 4090, removed: 2946, total 1144
@@ -155,7 +155,6 @@ const Namespace = Module.Namespace;
const CompileError = Module.CompileError;
const SemaError = Module.SemaError;
const Decl = Module.Decl;
const CaptureScope = Module.CaptureScope;
const LazySrcLoc = std.zig.LazySrcLoc;
const RangeSet = @import("RangeSet.zig");
const target_util = @import("target.zig");
@@ -331,8 +330,6 @@ pub const Block = struct {
/// used to add a `func_instance` into the `InternPool`.
params: std.MultiArrayList(Param) = .{},
 
wip_capture_scope: CaptureScope.Index,
 
label: ?*Label = null,
inlining: ?*Inlining,
/// If runtime_index is not 0 then one of these is guaranteed to be non null.
@@ -475,7 +472,6 @@ pub const Block = struct {
.src_decl = parent.src_decl,
.namespace = parent.namespace,
.instructions = .{},
.wip_capture_scope = parent.wip_capture_scope,
.label = null,
.inlining = parent.inlining,
.is_comptime = parent.is_comptime,
@@ -974,12 +970,6 @@ fn analyzeBodyInner(
 
try sema.inst_map.ensureSpaceForInstructions(sema.gpa, body);
 
// Most of the time, we don't need to construct a new capture scope for a
// block. However, successive iterations of comptime loops can capture
// different values for the same Zir.Inst.Index, so in those cases, we will
// have to create nested capture scopes; see the `.repeat` case below.
const parent_capture_scope = block.wip_capture_scope;
 
const mod = sema.mod;
const map = &sema.inst_map;
const tags = sema.code.instructions.items(.tag);
@@ -1028,7 +1018,6 @@ fn analyzeBodyInner(
.c_import => try sema.zirCImport(block, inst),
.call => try sema.zirCall(block, inst, .direct),
.field_call => try sema.zirCall(block, inst, .field),
.closure_get => try sema.zirClosureGet(block, inst),
.cmp_lt => try sema.zirCmp(block, inst, .lt),
.cmp_lte => try sema.zirCmp(block, inst, .lte),
.cmp_eq => try sema.zirCmpEq(block, inst, .eq, Air.Inst.Tag.fromCmpOp(.eq, block.float_mode == .Optimized)),
@@ -1275,6 +1264,7 @@ fn analyzeBodyInner(
.work_group_size => try sema.zirWorkItem( block, extended, extended.opcode),
.work_group_id => try sema.zirWorkItem( block, extended, extended.opcode),
.in_comptime => try sema.zirInComptime( block),
.closure_get => try sema.zirClosureGet( block, extended),
// zig fmt: on
 
.fence => {
@@ -1453,11 +1443,6 @@ fn analyzeBodyInner(
i += 1;
continue;
},
.closure_capture => {
try sema.zirClosureCapture(block, inst);
i += 1;
continue;
},
.memcpy => {
try sema.zirMemcpy(block, inst);
i += 1;
@@ -1534,11 +1519,6 @@ fn analyzeBodyInner(
// Send comptime control flow back to the beginning of this block.
const src = LazySrcLoc.nodeOffset(datas[@intFromEnum(inst)].node);
try sema.emitBackwardBranch(block, src);
 
// We need to construct new capture scopes for the next loop iteration so it
// can capture values without clobbering the earlier iteration's captures.
block.wip_capture_scope = try mod.createCaptureScope(parent_capture_scope);
 
i = 0;
continue;
} else {
@@ -1552,11 +1532,6 @@ fn analyzeBodyInner(
// Send comptime control flow back to the beginning of this block.
const src = LazySrcLoc.nodeOffset(datas[@intFromEnum(inst)].node);
try sema.emitBackwardBranch(block, src);
 
// We need to construct new capture scopes for the next loop iteration so it
// can capture values without clobbering the earlier iteration's captures.
block.wip_capture_scope = try mod.createCaptureScope(parent_capture_scope);
 
i = 0;
continue;
},
@@ -1855,10 +1830,6 @@ fn analyzeBodyInner(
map.putAssumeCapacity(inst, air_inst);
i += 1;
}
 
// We may have overwritten the capture scope due to a `repeat` instruction where
// the body had a capture; restore it now.
block.wip_capture_scope = parent_capture_scope;
}
 
pub fn resolveInstAllowNone(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref {
@@ -2698,21 +2669,61 @@ fn analyzeAsInt(
return (try val.getUnsignedIntAdvanced(mod, sema)).?;
}
 
pub fn getStructType(
/// Given a ZIR extra index which points to a list of `Zir.Inst.Capture`,
/// resolves it into a list of `InternPool.CaptureValue` allocated by `sema.arena`.
fn getCaptures(sema: *Sema, block: *Block, extra_index: usize, captures_len: u32) ![]InternPool.CaptureValue {
const zcu = sema.mod;
const ip = &zcu.intern_pool;
const parent_captures: InternPool.CaptureValue.Slice = zcu.namespacePtr(block.namespace).getType(zcu).getCaptures(zcu);
 
const captures = try sema.arena.alloc(InternPool.CaptureValue, captures_len);
 
for (sema.code.extra[extra_index..][0..captures_len], captures) |raw, *capture| {
const zir_capture: Zir.Inst.Capture = @bitCast(raw);
capture.* = switch (zir_capture.unwrap()) {
.nested => |parent_idx| parent_captures.get(ip)[parent_idx],
.instruction => |inst| InternPool.CaptureValue.wrap(capture: {
const air_ref = try sema.resolveInst(inst.toRef());
if (try sema.resolveValueResolveLazy(air_ref)) |val| {
break :capture .{ .@"comptime" = val.toIntern() };
}
break :capture .{ .runtime = sema.typeOf(air_ref).toIntern() };
}),
.decl_val => |str| capture: {
const decl_name = try ip.getOrPutString(sema.gpa, sema.code.nullTerminatedString(str));
const decl = try sema.lookupIdentifier(block, .unneeded, decl_name); // TODO: could we need this src loc?
break :capture InternPool.CaptureValue.wrap(.{ .decl_val = decl });
},
.decl_ref => |str| capture: {
const decl_name = try ip.getOrPutString(sema.gpa, sema.code.nullTerminatedString(str));
const decl = try sema.lookupIdentifier(block, .unneeded, decl_name); // TODO: could we need this src loc?
break :capture InternPool.CaptureValue.wrap(.{ .decl_ref = decl });
},
};
}
 
return captures;
}
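
`getCaptures` is the bridge between AstGen's per-type capture lists and the `CaptureValue`s that now key type equivalence: `.nested` reuses an entry from the parent type's captures, `.instruction` interns either a comptime value or just a type, and `.decl_val`/`.decl_ref` record a decl lookup. In source terms, a rough illustration of the two `.instruction` cases:

    fn f(runtime_arg: u32) void {
        const comptime_val = 42;
        const S = struct {
            // `comptime_val` resolves to a value: captured as .@"comptime".
            const c = comptime_val;
            // `runtime_arg` has no comptime value: only its type is captured
            // (.runtime); loading its value from here is rejected by zirClosureGet.
            fn ArgType() type {
                return @TypeOf(runtime_arg);
            }
        };
        _ = S;
    }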
 
fn zirStructDecl(
sema: *Sema,
decl: InternPool.DeclIndex,
namespace: InternPool.NamespaceIndex,
tracked_inst: InternPool.TrackedInst.Index,
) !InternPool.Index {
block: *Block,
extended: Zir.Inst.Extended.InstData,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
const zir_index = tracked_inst.resolve(ip);
const extended = sema.code.instructions.items(.data)[@intFromEnum(zir_index)].extended;
assert(extended.opcode == .struct_decl);
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
const extra = sema.code.extraData(Zir.Inst.StructDecl, extended.operand);
const src = extra.data.src();
var extra_index = extra.end;
 
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
const captures_len = if (small.has_captures_len) blk: {
const captures_len = sema.code.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
const fields_len = if (small.has_fields_len) blk: {
const fields_len = sema.code.extra[extra_index];
extra_index += 1;
@@ -2724,6 +2735,9 @@ pub fn getStructType(
break :blk decls_len;
} else 0;
 
const captures = try sema.getCaptures(block, extra_index, captures_len);
extra_index += captures_len;
 
if (small.has_backing_int) {
const backing_int_body_len = sema.code.extra[extra_index];
extra_index += 1; // backing_int_body_len
@@ -2734,49 +2748,38 @@ pub fn getStructType(
}
}
 
const decls = sema.code.bodySlice(extra_index, decls_len);
try mod.scanNamespace(namespace, decls, mod.declPtr(decl));
extra_index += decls_len;
 
const ty = try ip.getStructType(gpa, .{
.decl = decl,
.namespace = namespace.toOptional(),
.zir_index = tracked_inst.toOptional(),
const wip_ty = switch (try ip.getStructType(gpa, .{
.layout = small.layout,
.known_non_opv = small.known_non_opv,
.is_tuple = small.is_tuple,
.fields_len = fields_len,
.known_non_opv = small.known_non_opv,
.requires_comptime = if (small.known_comptime_only) .yes else .unknown,
.any_default_inits = small.any_default_inits,
.is_tuple = small.is_tuple,
.any_comptime_fields = small.any_comptime_fields,
.any_default_inits = small.any_default_inits,
.inits_resolved = false,
.any_aligned_fields = small.any_aligned_fields,
});
 
return ty;
}
 
fn zirStructDecl(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const ip = &mod.intern_pool;
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
const src = sema.code.extraData(Zir.Inst.StructDecl, extended.operand).data.src();
 
// Because these three things each reference each other, `undefined`
// placeholders are used before being set after the struct type gains an
// InternPool index.
.has_namespace = true or decls_len > 0, // TODO: see below
.key = .{ .declared = .{
.zir_index = try ip.trackZir(gpa, block.getFileScope(mod), inst),
.captures = captures,
} },
})) {
.existing => |ty| return Air.internedToRef(ty),
.wip => |wip| wip: {
if (sema.builtin_type_target_index == .none) break :wip wip;
var new = wip;
new.index = sema.builtin_type_target_index;
ip.resolveBuiltinType(new.index, wip.index);
break :wip new;
},
};
errdefer wip_ty.cancel(ip);
 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.noreturn,
.val = Value.@"unreachable",
.ty = Type.type,
.val = Value.fromInterned(wip_ty.index),
}, small.name_strategy, "struct", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
mod.declPtr(new_decl_index).owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
 
if (sema.mod.comp.debug_incremental) {
@@ -2787,31 +2790,21 @@ fn zirStructDecl(
);
}
 
const new_namespace_index = try mod.createNamespace(.{
// TODO: if AstGen tells us `@This` was not used in the fields, we can elide the namespace.
const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try mod.createNamespace(.{
.parent = block.namespace.toOptional(),
.decl_index = new_decl_index,
.file_scope = block.getFileScope(mod),
});
errdefer mod.destroyNamespace(new_namespace_index);
})).toOptional() else .none;
errdefer if (new_namespace_index.unwrap()) |ns| mod.destroyNamespace(ns);
 
const struct_ty = ty: {
const tracked_inst = try ip.trackZir(mod.gpa, block.getFileScope(mod), inst);
const ty = try sema.getStructType(new_decl_index, new_namespace_index, tracked_inst);
if (sema.builtin_type_target_index != .none) {
ip.resolveBuiltinType(sema.builtin_type_target_index, ty);
break :ty sema.builtin_type_target_index;
}
break :ty ty;
};
// TODO: figure out InternPool removals for incremental compilation
//errdefer ip.remove(struct_ty);
if (new_namespace_index.unwrap()) |ns| {
const decls = sema.code.bodySlice(extra_index, decls_len);
try mod.scanNamespace(ns, decls, mod.declPtr(new_decl_index));
}
 
new_decl.ty = Type.type;
new_decl.val = Value.fromInterned(struct_ty);
 
const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
return decl_val;
return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index));
}
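
The `.existing` arm above is where the new equivalence rule becomes observable: re-analyzing the same `struct_decl` instruction with equal captures returns the already-interned type instead of minting a new one. A behavioral sketch at the language level:

    const std = @import("std");

    fn S(comptime n: usize) type {
        return struct { buf: [n]u8 };
    }

    test "type identity follows AST node + captures" {
        // Same ZIR instruction, equal comptime capture: one interned type.
        try std.testing.expect(S(4) == S(4));
        // Different capture: distinct type.
        try std.testing.expect(S(4) != S(8));
    }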
 
fn createAnonymousDeclTypeNamed(
@@ -2827,10 +2820,9 @@ fn createAnonymousDeclTypeNamed(
const ip = &mod.intern_pool;
const gpa = sema.gpa;
const namespace = block.namespace;
const src_scope = block.wip_capture_scope;
const src_decl = mod.declPtr(block.src_decl);
const src_node = src_decl.relativeToNodeIndex(src.node_offset.x);
const new_decl_index = try mod.allocateNewDecl(namespace, src_node, src_scope);
const new_decl_index = try mod.allocateNewDecl(namespace, src_node);
errdefer mod.destroyDecl(new_decl_index);
 
switch (name_strategy) {
@@ -2922,6 +2914,7 @@ fn zirEnumDecl(
 
const mod = sema.mod;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small);
const extra = sema.code.extraData(Zir.Inst.EnumDecl, extended.operand);
var extra_index: usize = extra.end;
@@ -2935,6 +2928,12 @@ fn zirEnumDecl(
break :blk tag_type_ref;
} else .none;
 
const captures_len = if (small.has_captures_len) blk: {
const captures_len = sema.code.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
 
const body_len = if (small.has_body_len) blk: {
const body_len = sema.code.extra[extra_index];
extra_index += 1;
@@ -2953,36 +2952,10 @@ fn zirEnumDecl(
break :blk decls_len;
} else 0;
 
// Because these three things each reference each other, `undefined`
// placeholders are used before being set after the enum type gains an
// InternPool index.
 
var done = false;
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.noreturn,
.val = Value.@"unreachable",
}, small.name_strategy, "enum", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer if (!done) mod.abortAnonDecl(new_decl_index);
 
if (sema.mod.comp.debug_incremental) {
try mod.intern_pool.addDependency(
sema.gpa,
InternPool.Depender.wrap(.{ .decl = new_decl_index }),
.{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) },
);
}
 
const new_namespace_index = try mod.createNamespace(.{
.parent = block.namespace.toOptional(),
.decl_index = new_decl_index,
.file_scope = block.getFileScope(mod),
});
errdefer if (!done) mod.destroyNamespace(new_namespace_index);
const captures = try sema.getCaptures(block, extra_index, captures_len);
extra_index += captures_len;
 
const decls = sema.code.bodySlice(extra_index, decls_len);
try mod.scanNamespace(new_namespace_index, decls, new_decl);
extra_index += decls_len;
 
const body = sema.code.bodySlice(extra_index, body_len);
@@ -2996,34 +2969,68 @@ fn zirEnumDecl(
if (bag != 0) break true;
} else false;
 
const incomplete_enum = incomplete_enum: {
var incomplete_enum = try mod.intern_pool.getIncompleteEnum(gpa, .{
.decl = new_decl_index,
.namespace = new_namespace_index.toOptional(),
.fields_len = fields_len,
.has_values = any_values,
.tag_mode = if (small.nonexhaustive)
.nonexhaustive
else if (tag_type_ref == .none)
.auto
else
.explicit,
.zir_index = (try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst)).toOptional(),
});
if (sema.builtin_type_target_index != .none) {
mod.intern_pool.resolveBuiltinType(sema.builtin_type_target_index, incomplete_enum.index);
incomplete_enum.index = sema.builtin_type_target_index;
}
break :incomplete_enum incomplete_enum;
const wip_ty = switch (try ip.getEnumType(gpa, .{
.has_namespace = true or decls_len > 0, // TODO: see below
.has_values = any_values,
.tag_mode = if (small.nonexhaustive)
.nonexhaustive
else if (tag_type_ref == .none)
.auto
else
.explicit,
.fields_len = fields_len,
.key = .{ .declared = .{
.zir_index = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst),
.captures = captures,
} },
})) {
.wip => |wip| wip: {
if (sema.builtin_type_target_index == .none) break :wip wip;
var new = wip;
new.index = sema.builtin_type_target_index;
ip.resolveBuiltinType(new.index, wip.index);
break :wip new;
},
.existing => |ty| return Air.internedToRef(ty),
};
// TODO: figure out InternPool removals for incremental compilation
//errdefer if (!done) mod.intern_pool.remove(incomplete_enum.index);
 
new_decl.ty = Type.type;
new_decl.val = Value.fromInterned(incomplete_enum.index);
// Once this is `true`, we will not delete the decl or type even upon failure, since we
// have finished constructing the type and are in the process of analyzing it.
var done = false;
 
const decl_val = try sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
errdefer if (!done) wip_ty.cancel(ip);
 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = Value.fromInterned(wip_ty.index),
}, small.name_strategy, "enum", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer if (!done) mod.abortAnonDecl(new_decl_index);
 
if (sema.mod.comp.debug_incremental) {
try mod.intern_pool.addDependency(
sema.gpa,
InternPool.Depender.wrap(.{ .decl = new_decl_index }),
.{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) },
);
}
 
// TODO: if AstGen tells us `@This` was not used in the fields, we can elide the namespace.
const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try mod.createNamespace(.{
.parent = block.namespace.toOptional(),
.decl_index = new_decl_index,
.file_scope = block.getFileScope(mod),
})).toOptional() else .none;
errdefer if (!done) if (new_namespace_index.unwrap()) |ns| mod.destroyNamespace(ns);
 
if (new_namespace_index.unwrap()) |ns| {
try mod.scanNamespace(ns, decls, new_decl);
}
 
// We've finished the initial construction of this type, and are about to perform analysis.
// Set the decl and namespace appropriately, and don't destroy anything on failure.
wip_ty.prepare(ip, new_decl_index, new_namespace_index);
done = true;
 
const int_tag_ty = ty: {
@@ -3053,8 +3060,7 @@ fn zirEnumDecl(
.parent = null,
.sema = sema,
.src_decl = new_decl_index,
.namespace = new_namespace_index,
.wip_capture_scope = try mod.createCaptureScope(new_decl.src_scope),
.namespace = new_namespace_index.unwrap() orelse block.namespace,
.instructions = .{},
.inlining = null,
.is_comptime = true,
@@ -3070,7 +3076,6 @@ fn zirEnumDecl(
if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) {
return sema.fail(block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(sema.mod)});
}
incomplete_enum.setTagType(&mod.intern_pool, ty.toIntern());
break :ty ty;
} else if (fields_len == 0) {
break :ty try mod.intType(.unsigned, 0);
@@ -3080,6 +3085,8 @@ fn zirEnumDecl(
}
};
 
wip_ty.setTagTy(ip, int_tag_ty.toIntern());
 
if (small.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) {
if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(mod)) {
return sema.fail(block, src, "non-exhaustive enum specifies every value", .{});
@@ -3103,7 +3110,6 @@ fn zirEnumDecl(
extra_index += 2; // field name, doc comment
 
const field_name = try mod.intern_pool.getOrPutString(gpa, field_name_zir);
assert(incomplete_enum.addFieldName(&mod.intern_pool, field_name) == null);
 
const tag_overflow = if (has_tag_value) overflow: {
const tag_val_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
@@ -3124,12 +3130,13 @@ fn zirEnumDecl(
};
if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) break :overflow true;
last_tag_val = try mod.getCoerced(last_tag_val.?, int_tag_ty);
if (incomplete_enum.addFieldValue(&mod.intern_pool, last_tag_val.?.toIntern())) |other_index| {
if (wip_ty.nextField(&mod.intern_pool, field_name, last_tag_val.?.toIntern())) |conflict| {
assert(conflict.kind == .value); // AstGen validated names are unique
const value_src = mod.fieldSrcLoc(new_decl_index, .{
.index = field_i,
.range = .value,
}).lazy;
const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy;
const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = conflict.prev_field_idx }).lazy;
const msg = msg: {
const msg = try sema.errMsg(block, value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(int_tag_ty, sema.mod)});
errdefer msg.destroy(gpa);
@@ -3146,9 +3153,10 @@ fn zirEnumDecl(
else
try mod.intValue(int_tag_ty, 0);
if (overflow != null) break :overflow true;
if (incomplete_enum.addFieldValue(&mod.intern_pool, last_tag_val.?.toIntern())) |other_index| {
if (wip_ty.nextField(&mod.intern_pool, field_name, last_tag_val.?.toIntern())) |conflict| {
assert(conflict.kind == .value); // AstGen validated names are unique
const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy;
const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy;
const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = conflict.prev_field_idx }).lazy;
const msg = msg: {
const msg = try sema.errMsg(block, field_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(int_tag_ty, sema.mod)});
errdefer msg.destroy(gpa);
@@ -3159,6 +3167,7 @@ fn zirEnumDecl(
}
break :overflow false;
} else overflow: {
assert(wip_ty.nextField(&mod.intern_pool, field_name, .none) == null);
last_tag_val = try mod.intValue(Type.comptime_int, field_i);
if (!try sema.intFitsInType(last_tag_val.?, int_tag_ty, null)) break :overflow true;
last_tag_val = try mod.getCoerced(last_tag_val.?, int_tag_ty);
@@ -3176,7 +3185,9 @@ fn zirEnumDecl(
return sema.failWithOwnedErrorMsg(block, msg);
}
}
return decl_val;
 
try mod.finalizeAnonDecl(new_decl_index);
return Air.internedToRef(wip_ty.index);
}
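
`wip_ty.nextField` now detects name and value conflicts as fields are added, replacing the separate `addFieldName`/`addFieldValue` calls; `conflict.prev_field_idx` points the error note at the earlier field. The value-conflict path corresponds to source like:

    // Rejected during zirEnumDecl with "enum tag value 1 already taken",
    // with a note on the first field via conflict.prev_field_idx:
    const E = enum(u8) {
        a = 1,
        b = 1,
    };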
 
fn zirUnionDecl(
@@ -3190,6 +3201,7 @@ fn zirUnionDecl(
 
const mod = sema.mod;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);
const extra = sema.code.extraData(Zir.Inst.UnionDecl, extended.operand);
var extra_index: usize = extra.end;
@@ -3197,6 +3209,11 @@ fn zirUnionDecl(
const src = extra.data.src();
 
extra_index += @intFromBool(small.has_tag_type);
const captures_len = if (small.has_captures_len) blk: {
const captures_len = sema.code.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
extra_index += @intFromBool(small.has_body_len);
const fields_len = if (small.has_fields_len) blk: {
const fields_len = sema.code.extra[extra_index];
@@ -3210,16 +3227,53 @@ fn zirUnionDecl(
break :blk decls_len;
} else 0;
 
// Because these three things each reference each other, `undefined`
// placeholders are used before being set after the union type gains an
// InternPool index.
const captures = try sema.getCaptures(block, extra_index, captures_len);
extra_index += captures_len;
 
const wip_ty = switch (try ip.getUnionType(gpa, .{
.flags = .{
.layout = small.layout,
.status = .none,
.runtime_tag = if (small.has_tag_type or small.auto_enum_tag)
.tagged
else if (small.layout != .Auto)
.none
else switch (block.wantSafety()) {
true => .safety,
false => .none,
},
.any_aligned_fields = small.any_aligned_fields,
.requires_comptime = .unknown,
.assumed_runtime_bits = false,
.assumed_pointer_aligned = false,
.alignment = .none,
},
.has_namespace = true or decls_len != 0, // TODO: see below
.fields_len = fields_len,
.enum_tag_ty = .none, // set later
.field_types = &.{}, // set later
.field_aligns = &.{}, // set later
.key = .{ .declared = .{
.zir_index = try ip.trackZir(gpa, block.getFileScope(mod), inst),
.captures = captures,
} },
})) {
.wip => |wip| wip: {
if (sema.builtin_type_target_index == .none) break :wip wip;
var new = wip;
new.index = sema.builtin_type_target_index;
ip.resolveBuiltinType(new.index, wip.index);
break :wip new;
},
.existing => |ty| return Air.internedToRef(ty),
};
errdefer wip_ty.cancel(ip);
 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.noreturn,
.val = Value.@"unreachable",
.ty = Type.type,
.val = Value.fromInterned(wip_ty.index),
}, small.name_strategy, "union", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
mod.declPtr(new_decl_index).owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
 
if (sema.mod.comp.debug_incremental) {
@@ -3230,58 +3284,22 @@ fn zirUnionDecl(
);
}
 
const new_namespace_index = try mod.createNamespace(.{
// TODO: if AstGen tells us `@This` was not used in the fields, we can elide the namespace.
const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try mod.createNamespace(.{
.parent = block.namespace.toOptional(),
.decl_index = new_decl_index,
.file_scope = block.getFileScope(mod),
});
errdefer mod.destroyNamespace(new_namespace_index);
})).toOptional() else .none;
errdefer if (new_namespace_index.unwrap()) |ns| mod.destroyNamespace(ns);
 
const union_ty = ty: {
const ty = try mod.intern_pool.getUnionType(gpa, .{
.flags = .{
.layout = small.layout,
.status = .none,
.runtime_tag = if (small.has_tag_type or small.auto_enum_tag)
.tagged
else if (small.layout != .Auto)
.none
else switch (block.wantSafety()) {
true => .safety,
false => .none,
},
.any_aligned_fields = small.any_aligned_fields,
.requires_comptime = .unknown,
.assumed_runtime_bits = false,
.assumed_pointer_aligned = false,
.alignment = .none,
},
.decl = new_decl_index,
.namespace = new_namespace_index,
.zir_index = (try mod.intern_pool.trackZir(gpa, block.getFileScope(mod), inst)).toOptional(),
.fields_len = fields_len,
.enum_tag_ty = .none,
.field_types = &.{},
.field_aligns = &.{},
});
if (sema.builtin_type_target_index != .none) {
mod.intern_pool.resolveBuiltinType(sema.builtin_type_target_index, ty);
break :ty sema.builtin_type_target_index;
}
break :ty ty;
};
// TODO: figure out InternPool removals for incremental compilation
//errdefer mod.intern_pool.remove(union_ty);
if (new_namespace_index.unwrap()) |ns| {
const decls = sema.code.bodySlice(extra_index, decls_len);
try mod.scanNamespace(ns, decls, mod.declPtr(new_decl_index));
}
 
new_decl.ty = Type.type;
new_decl.val = Value.fromInterned(union_ty);
 
const decls = sema.code.bodySlice(extra_index, decls_len);
try mod.scanNamespace(new_namespace_index, decls, new_decl);
 
const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
return decl_val;
 
return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index));
}
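
The `runtime_tag` computation above maps one-to-one onto union syntax; illustrative cases:

    const Tagged = union(enum) { a: u8, b: u16 }; // auto_enum_tag => .tagged
    const Ext = extern union { a: u8, b: u16 }; // layout != .Auto => .none
    const Bare = union { a: u8, b: u16 }; // .Auto layout: .safety when the
                                          // block wants safety, else .none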
 
fn zirOpaqueDecl(
@@ -3294,62 +3312,72 @@ fn zirOpaqueDecl(
defer tracy.end();
 
const mod = sema.mod;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
 
const small: Zir.Inst.OpaqueDecl.Small = @bitCast(extended.small);
const extra = sema.code.extraData(Zir.Inst.OpaqueDecl, extended.operand);
var extra_index: usize = extra.end;
 
const src = extra.data.src();
 
const captures_len = if (small.has_captures_len) blk: {
const captures_len = sema.code.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
 
const decls_len = if (small.has_decls_len) blk: {
const decls_len = sema.code.extra[extra_index];
extra_index += 1;
break :blk decls_len;
} else 0;
 
// Because these three things each reference each other, `undefined`
// placeholders are used in two places before being set after the opaque
// type gains an InternPool index.
const captures = try sema.getCaptures(block, extra_index, captures_len);
extra_index += captures_len;
 
const wip_ty = switch (try ip.getOpaqueType(gpa, .{
.has_namespace = decls_len != 0,
.key = .{ .declared = .{
.zir_index = try ip.trackZir(gpa, block.getFileScope(mod), inst),
.captures = captures,
} },
})) {
.wip => |wip| wip,
.existing => |ty| return Air.internedToRef(ty),
};
errdefer wip_ty.cancel(ip);
 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.noreturn,
.val = Value.@"unreachable",
.ty = Type.type,
.val = Value.fromInterned(wip_ty.index),
}, small.name_strategy, "opaque", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
mod.declPtr(new_decl_index).owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
 
if (sema.mod.comp.debug_incremental) {
try mod.intern_pool.addDependency(
sema.gpa,
try ip.addDependency(
gpa,
InternPool.Depender.wrap(.{ .decl = new_decl_index }),
.{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) },
.{ .src_hash = try ip.trackZir(gpa, block.getFileScope(mod), inst) },
);
}
 
const new_namespace_index = try mod.createNamespace(.{
const new_namespace_index: InternPool.OptionalNamespaceIndex = if (decls_len > 0) (try mod.createNamespace(.{
.parent = block.namespace.toOptional(),
.decl_index = new_decl_index,
.file_scope = block.getFileScope(mod),
});
errdefer mod.destroyNamespace(new_namespace_index);
})).toOptional() else .none;
errdefer if (new_namespace_index.unwrap()) |ns| mod.destroyNamespace(ns);
 
const opaque_ty = try mod.intern(.{ .opaque_type = .{
.decl = new_decl_index,
.namespace = new_namespace_index,
.zir_index = (try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst)).toOptional(),
} });
// TODO: figure out InternPool removals for incremental compilation
//errdefer mod.intern_pool.remove(opaque_ty);
if (new_namespace_index.unwrap()) |ns| {
const decls = sema.code.bodySlice(extra_index, decls_len);
try mod.scanNamespace(ns, decls, mod.declPtr(new_decl_index));
}
 
new_decl.ty = Type.type;
new_decl.val = Value.fromInterned(opaque_ty);
 
const decls = sema.code.bodySlice(extra_index, decls_len);
try mod.scanNamespace(new_namespace_index, decls, new_decl);
 
const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
return decl_val;
 
return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index));
}
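
Unlike the struct, enum, and union paths, which are still pinned open with `true or decls_len > 0`, the opaque path already elides the namespace when a declaration has no members:

    const Handle = opaque {}; // decls_len == 0: no namespace, no scanNamespace
    const Api = opaque { // decls_len > 0: namespace created and scanned
        pub const version = 1;
    };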
 
fn zirErrorSetDecl(
@@ -5333,7 +5361,7 @@ fn failWithBadMemberAccess(
fn failWithBadStructFieldAccess(
sema: *Sema,
block: *Block,
struct_type: InternPool.Key.StructType,
struct_type: InternPool.LoadedStructType,
field_src: LazySrcLoc,
field_name: InternPool.NullTerminatedString,
) CompileError {
@@ -5359,7 +5387,7 @@ fn failWithBadStructFieldAccess(
fn failWithBadUnionFieldAccess(
sema: *Sema,
block: *Block,
union_obj: InternPool.UnionType,
union_obj: InternPool.LoadedUnionType,
field_src: LazySrcLoc,
field_name: InternPool.NullTerminatedString,
) CompileError {
@@ -5780,7 +5808,6 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
.sema = sema,
.src_decl = parent_block.src_decl,
.namespace = parent_block.namespace,
.wip_capture_scope = parent_block.wip_capture_scope,
.instructions = .{},
.inlining = parent_block.inlining,
.is_comptime = true,
@@ -5831,6 +5858,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
.global = comp.config,
.parent = parent_mod,
.builtin_mod = parent_mod.getBuiltinDependency(),
.builtin_modules = null, // `builtin_mod` is set
}) catch |err| switch (err) {
// None of these are possible because we are creating a package with
// the exact same configuration as the parent package, which already
@@ -5900,7 +5928,6 @@ fn zirBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index, force_compt
.sema = sema,
.src_decl = parent_block.src_decl,
.namespace = parent_block.namespace,
.wip_capture_scope = parent_block.wip_capture_scope,
.instructions = .{},
.label = &label,
.inlining = parent_block.inlining,
@@ -7515,7 +7542,6 @@ fn analyzeCall(
.sema = sema,
.src_decl = module_fn.owner_decl,
.namespace = fn_owner_decl.src_namespace,
.wip_capture_scope = try mod.createCaptureScope(fn_owner_decl.src_scope),
.instructions = .{},
.label = null,
.inlining = &inlining,
@@ -8036,7 +8062,6 @@ fn instantiateGenericCall(
.sema = &child_sema,
.src_decl = generic_owner_func.owner_decl,
.namespace = namespace_index,
.wip_capture_scope = try mod.createCaptureScope(fn_owner_decl.src_scope),
.instructions = .{},
.inlining = null,
.is_comptime = true,
@@ -11409,7 +11434,6 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
.sema = sema,
.src_decl = block.src_decl,
.namespace = block.namespace,
.wip_capture_scope = block.wip_capture_scope,
.instructions = .{},
.label = &label,
.inlining = block.inlining,
@@ -12117,7 +12141,6 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
.sema = sema,
.src_decl = block.src_decl,
.namespace = block.namespace,
.wip_capture_scope = block.wip_capture_scope,
.instructions = .{},
.label = &label,
.inlining = block.inlining,
@@ -12281,7 +12304,6 @@ fn analyzeSwitchRuntimeBlock(
extra_index += info.body_len;
 
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = try mod.createCaptureScope(child_block.wip_capture_scope);
 
const item = case_vals.items[scalar_i];
// `item` is already guaranteed to be constant known.
@@ -12339,7 +12361,6 @@ fn analyzeSwitchRuntimeBlock(
case_val_idx += items_len;
 
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = child_block.wip_capture_scope;
 
// Generate all possible cases as scalar prongs.
if (info.is_inline) {
@@ -12371,7 +12392,6 @@ fn analyzeSwitchRuntimeBlock(
const item_ref = Air.internedToRef(item.toIntern());
 
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = child_block.wip_capture_scope;
 
if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) {
error.NeededSourceLocation => {
@@ -12411,7 +12431,6 @@ fn analyzeSwitchRuntimeBlock(
cases_len += 1;
 
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = child_block.wip_capture_scope;
 
const analyze_body = if (union_originally) blk: {
const item_val = sema.resolveConstDefinedValue(block, .unneeded, item, undefined) catch unreachable;
@@ -12557,7 +12576,6 @@ fn analyzeSwitchRuntimeBlock(
defer gpa.free(cond_body);
 
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = try mod.createCaptureScope(child_block.wip_capture_scope);
 
const body = sema.code.bodySlice(extra_index, info.body_len);
extra_index += info.body_len;
@@ -12618,7 +12636,6 @@ fn analyzeSwitchRuntimeBlock(
const item_ref = Air.internedToRef(item_val.toIntern());
 
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = child_block.wip_capture_scope;
 
const analyze_body = if (union_originally) blk: {
const field_ty = maybe_union_ty.unionFieldType(item_val, mod).?;
@@ -12669,7 +12686,6 @@ fn analyzeSwitchRuntimeBlock(
const item_ref = Air.internedToRef(item_val);
 
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = child_block.wip_capture_scope;
 
if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
emit_bb = true;
@@ -12700,7 +12716,6 @@ fn analyzeSwitchRuntimeBlock(
const item_ref = Air.internedToRef(cur);
 
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = child_block.wip_capture_scope;
 
if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
emit_bb = true;
@@ -12728,7 +12743,6 @@ fn analyzeSwitchRuntimeBlock(
cases_len += 1;
 
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = child_block.wip_capture_scope;
 
if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
emit_bb = true;
@@ -12754,7 +12768,6 @@ fn analyzeSwitchRuntimeBlock(
cases_len += 1;
 
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = child_block.wip_capture_scope;
 
if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
emit_bb = true;
@@ -12783,7 +12796,6 @@ fn analyzeSwitchRuntimeBlock(
};
 
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = try mod.createCaptureScope(child_block.wip_capture_scope);
 
if (mod.backendSupportsFeature(.is_named_enum_value) and
special.body.len != 0 and block.wantSafety() and
@@ -13327,7 +13339,7 @@ fn validateSwitchItemEnum(
const ip = &sema.mod.intern_pool;
const item = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, src_node_offset, switch_prong_src, .none);
const int = ip.indexToKey(item.val).enum_tag.int;
const field_index = ip.indexToKey(ip.typeOf(item.val)).enum_type.tagValueIndex(ip, int) orelse {
const field_index = ip.loadEnumType(ip.typeOf(item.val)).tagValueIndex(ip, int) orelse {
const maybe_prev_src = try range_set.add(int, int, switch_prong_src);
try sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
return item.ref;
@@ -13607,15 +13619,15 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
break :hf field_index < ty.structFieldCount(mod);
}
},
.struct_type => |struct_type| {
break :hf struct_type.nameIndex(ip, field_name) != null;
.struct_type => {
break :hf ip.loadStructType(ty.toIntern()).nameIndex(ip, field_name) != null;
},
.union_type => |union_type| {
const union_obj = ip.loadUnionType(union_type);
break :hf union_obj.nameIndex(ip, field_name) != null;
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
break :hf union_type.loadTagType(ip).nameIndex(ip, field_name) != null;
},
.enum_type => |enum_type| {
break :hf enum_type.nameIndex(ip, field_name) != null;
.enum_type => {
break :hf ip.loadEnumType(ty.toIntern()).nameIndex(ip, field_name) != null;
},
.array_type => break :hf ip.stringEqlSlice(field_name, "len"),
else => {},
@@ -17264,49 +17276,19 @@ fn zirThis(
return sema.analyzeDeclVal(block, src, this_decl_index);
}
 
fn zirClosureCapture(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_tok;
// Closures are not necessarily constant values. For example, the
// code might do something like this:
// fn foo(x: anytype) void { const S = struct {field: @TypeOf(x)}; }
// ...in which case the closure_capture instruction has access to a runtime
// value only. In such case only the type is saved into the scope.
const operand = try sema.resolveInst(inst_data.operand);
const ty = sema.typeOf(operand);
const key: CaptureScope.Key = .{
.zir_index = inst,
.index = block.wip_capture_scope,
};
if (try sema.resolveValue(operand)) |val| {
try mod.comptime_capture_scopes.put(gpa, key, try val.intern(ty, mod));
} else {
try mod.runtime_capture_scopes.put(gpa, key, ty.toIntern());
}
}
const ip = &mod.intern_pool;
const captures = mod.namespacePtr(block.namespace).getType(mod).getCaptures(mod);
 
fn zirClosureGet(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
//const ip = &mod.intern_pool;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].inst_node;
var scope: CaptureScope.Index = mod.declPtr(block.src_decl).src_scope;
assert(scope != .none);
// Note: The target closure must be in this scope list.
// If it's not here, the zir is invalid, or the list is broken.
const capture_ty = while (true) {
// Note: We don't need to add a dependency here, because
// decls always depend on their lexical parents.
const key: CaptureScope.Key = .{
.zir_index = inst_data.inst,
.index = scope,
};
if (mod.comptime_capture_scopes.get(key)) |val|
return Air.internedToRef(val);
if (mod.runtime_capture_scopes.get(key)) |ty|
break ty;
scope = scope.parent(mod);
assert(scope != .none);
const src_node: i32 = @bitCast(extended.operand);
const src = LazySrcLoc.nodeOffset(src_node);
 
const capture_ty = switch (captures.get(ip)[extended.small].unwrap()) {
.@"comptime" => |index| return Air.internedToRef(index),
.runtime => |index| index,
.decl_val => |decl_index| return sema.analyzeDeclVal(block, src, decl_index),
.decl_ref => |decl_index| return sema.analyzeDeclRef(decl_index),
};
 
// The comptime case is already handled above; only the runtime case remains.
@@ -17322,15 +17304,15 @@ fn zirClosureGet(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
});
break :name null;
};
const node = sema.owner_decl.relativeToNodeIndex(inst_data.src_node);
const node = sema.owner_decl.relativeToNodeIndex(src_node);
const token = tree.nodes.items(.main_token)[node];
break :name tree.tokenSlice(token);
};
 
const msg = if (name) |some|
try sema.errMsg(block, inst_data.src(), "'{s}' not accessible outside function scope", .{some})
try sema.errMsg(block, src, "'{s}' not accessible outside function scope", .{some})
else
try sema.errMsg(block, inst_data.src(), "variable not accessible outside function scope", .{});
try sema.errMsg(block, src, "variable not accessible outside function scope", .{});
errdefer msg.destroy(sema.gpa);
 
// TODO add "declared here" note
@@ -17350,15 +17332,15 @@ fn zirClosureGet(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
});
break :name null;
};
const node = sema.owner_decl.relativeToNodeIndex(inst_data.src_node);
const node = sema.owner_decl.relativeToNodeIndex(src_node);
const token = tree.nodes.items(.main_token)[node];
break :name tree.tokenSlice(token);
};
 
const msg = if (name) |some|
try sema.errMsg(block, inst_data.src(), "'{s}' not accessible from inner function", .{some})
try sema.errMsg(block, src, "'{s}' not accessible from inner function", .{some})
else
try sema.errMsg(block, inst_data.src(), "variable not accessible from inner function", .{});
try sema.errMsg(block, src, "variable not accessible from inner function", .{});
errdefer msg.destroy(sema.gpa);
 
try sema.errNote(block, LazySrcLoc.nodeOffset(0), msg, "crossed function definition here", .{});
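// User-level sketch of code that reaches the diagnostic above (hypothetical
// example, kept in comments since it intentionally fails to compile):
//
//   fn outer() void {
//       var x: u32 = 0;
//       x = 1;
//       const S = struct {
//           fn inner() u32 {
//               return x; // error: 'x' not accessible from inner function
//           }
//       };
//       _ = S;
//   }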
@@ -17954,7 +17936,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} })));
},
.Enum => {
const is_exhaustive = Value.makeBool(ip.indexToKey(ty.toIntern()).enum_type.tag_mode != .nonexhaustive);
const is_exhaustive = Value.makeBool(ip.loadEnumType(ty.toIntern()).tag_mode != .nonexhaustive);
 
const enum_field_ty = t: {
const enum_field_ty_decl_index = (try sema.namespaceLookup(
@@ -17968,9 +17950,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
break :t enum_field_ty_decl.val.toType();
};
 
const enum_field_vals = try sema.arena.alloc(InternPool.Index, ip.indexToKey(ty.toIntern()).enum_type.names.len);
const enum_field_vals = try sema.arena.alloc(InternPool.Index, ip.loadEnumType(ty.toIntern()).names.len);
for (enum_field_vals, 0..) |*field_val, i| {
const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
const enum_type = ip.loadEnumType(ty.toIntern());
const value_val = if (enum_type.values.len > 0)
try mod.intern_pool.getCoercedInts(
mod.gpa,
@@ -18045,7 +18027,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} });
};
 
const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ip.indexToKey(ty.toIntern()).enum_type.namespace);
const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ip.loadEnumType(ty.toIntern()).namespace);
 
const type_enum_ty = t: {
const type_enum_ty_decl_index = (try sema.namespaceLookup(
@@ -18061,7 +18043,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
const field_values = .{
// tag_type: type,
ip.indexToKey(ty.toIntern()).enum_type.tag_ty,
ip.loadEnumType(ty.toIntern()).tag_ty,
// fields: []const EnumField,
fields_val,
// decls: []const Declaration,
@@ -18105,14 +18087,15 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
const union_obj = mod.typeToUnion(ty).?;
const tag_type = union_obj.loadTagType(ip);
const layout = union_obj.getLayout(ip);
 
const union_field_vals = try gpa.alloc(InternPool.Index, union_obj.field_names.len);
const union_field_vals = try gpa.alloc(InternPool.Index, tag_type.names.len);
defer gpa.free(union_field_vals);
 
for (union_field_vals, 0..) |*field_val, i| {
// TODO: write something like getCoercedInts to avoid needing to dupe
const name = try sema.arena.dupeZ(u8, ip.stringToSlice(union_obj.field_names.get(ip)[i]));
const name = try sema.arena.dupeZ(u8, ip.stringToSlice(tag_type.names.get(ip)[i]));
const name_val = v: {
const new_decl_ty = try mod.arrayType(.{
.len = name.len,
@@ -18314,7 +18297,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
break :fv;
},
.struct_type => |s| s,
.struct_type => ip.loadStructType(ty.toIntern()),
else => unreachable,
};
struct_field_vals = try gpa.alloc(InternPool.Index, struct_type.field_types.len);
@@ -18631,7 +18614,6 @@ fn zirTypeofBuiltin(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
.sema = sema,
.src_decl = block.src_decl,
.namespace = block.namespace,
.wip_capture_scope = block.wip_capture_scope,
.instructions = .{},
.inlining = block.inlining,
.is_comptime = false,
@@ -18710,7 +18692,6 @@ fn zirTypeofPeer(
.sema = sema,
.src_decl = block.src_decl,
.namespace = block.namespace,
.wip_capture_scope = block.wip_capture_scope,
.instructions = .{},
.inlining = block.inlining,
.is_comptime = false,
@@ -19186,7 +19167,6 @@ fn ensurePostHoc(sema: *Sema, block: *Block, dest_block: Zir.Inst.Index) !*Label
.sema = sema,
.src_decl = block.src_decl,
.namespace = block.namespace,
.wip_capture_scope = block.wip_capture_scope,
.instructions = .{},
.label = &labeled_block.label,
.inlining = block.inlining,
@@ -20082,7 +20062,8 @@ fn finishStructInit(
}
}
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(struct_ty.toIntern());
for (0..struct_type.field_types.len) |i| {
if (field_inits[i] != .none) {
// Coerce the init value to the field type.
@@ -20683,7 +20664,8 @@ fn fieldType(
try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src);
return Air.internedToRef(anon_struct.types.get(ip)[field_index]);
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(cur_ty.toIntern());
const field_index = struct_type.nameIndex(ip, field_name) orelse
return sema.failWithBadStructFieldAccess(block, struct_type, field_src, field_name);
const field_ty = struct_type.field_types.get(ip)[field_index];
@@ -20693,7 +20675,7 @@ fn fieldType(
},
.Union => {
const union_obj = mod.typeToUnion(cur_ty).?;
const field_index = union_obj.nameIndex(ip, field_name) orelse
const field_index = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse
return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
const field_ty = union_obj.field_types.get(ip)[field_index];
return Air.internedToRef(field_ty);
@@ -21022,7 +21004,7 @@ fn zirReify(
.AnyFrame => return sema.failWithUseOfAsync(block, src),
.EnumLiteral => return .enum_literal_type,
.Int => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const signedness_val = try Value.fromInterned(union_val.val).fieldValue(
mod,
struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "signedness")).?,
@@ -21038,7 +21020,7 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.Vector => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const len_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "len"),
@@ -21060,7 +21042,7 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.Float => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const bits_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "bits"),
@@ -21078,7 +21060,7 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.Pointer => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const size_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "size"),
@@ -21190,7 +21172,7 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.Array => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const len_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "len"),
@@ -21219,7 +21201,7 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.Optional => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const child_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "child"),
@@ -21231,7 +21213,7 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.ErrorUnion => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const error_set_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "error_set"),
@@ -21260,7 +21242,7 @@ fn zirReify(
try names.ensureUnusedCapacity(sema.arena, len);
for (0..len) |i| {
const elem_val = try payload_val.elemValue(mod, i);
const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern()));
const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "name"),
@@ -21280,7 +21262,7 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.Struct => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const layout_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "layout"),
@@ -21316,7 +21298,7 @@ fn zirReify(
return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_val, name_strategy, is_tuple_val.toBool());
},
.Enum => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "tag_type"),
@@ -21334,105 +21316,14 @@ fn zirReify(
try ip.getOrPutString(gpa, "is_exhaustive"),
).?);
 
// Decls
if (decls_val.sliceLen(mod) > 0) {
return sema.fail(block, src, "reified enums must have no decls", .{});
}
 
const int_tag_ty = tag_type_val.toType();
if (int_tag_ty.zigTypeTag(mod) != .Int) {
return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{});
}
 
// Because these values reference each other, `undefined` placeholders
// are used, then replaced once the enum type gains an InternPool index.
 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.noreturn,
.val = Value.@"unreachable",
}, name_strategy, "enum", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer {
new_decl.has_tv = false; // namespace and val were destroyed by later errdefers
mod.abortAnonDecl(new_decl_index);
}
 
// Define our empty enum decl
const fields_len: u32 = @intCast(try sema.usizeCast(block, src, fields_val.sliceLen(mod)));
const incomplete_enum = try ip.getIncompleteEnum(gpa, .{
.decl = new_decl_index,
.namespace = .none,
.fields_len = fields_len,
.has_values = true,
.tag_mode = if (!is_exhaustive_val.toBool())
.nonexhaustive
else
.explicit,
.tag_ty = int_tag_ty.toIntern(),
.zir_index = .none,
});
// TODO: figure out InternPool removals for incremental compilation
//errdefer ip.remove(incomplete_enum.index);
 
new_decl.ty = Type.type;
new_decl.val = Value.fromInterned(incomplete_enum.index);
 
for (0..fields_len) |field_i| {
const elem_val = try fields_val.elemValue(mod, field_i);
const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "name"),
).?);
const value_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "value"),
).?);
 
const field_name = try name_val.toIpString(Type.slice_const_u8, mod);
 
if (!try sema.intFitsInType(value_val, int_tag_ty, null)) {
// TODO: better source location
return sema.fail(block, src, "field '{}' with enumeration value '{}' is too large for backing int type '{}'", .{
field_name.fmt(ip),
value_val.fmtValue(Type.comptime_int, mod),
int_tag_ty.fmt(mod),
});
}
 
if (incomplete_enum.addFieldName(ip, field_name)) |other_index| {
const msg = msg: {
const msg = try sema.errMsg(block, src, "duplicate enum field '{}'", .{
field_name.fmt(ip),
});
errdefer msg.destroy(gpa);
_ = other_index; // TODO: this note is incorrect
try sema.errNote(block, src, msg, "other field here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
}
 
if (incomplete_enum.addFieldValue(ip, (try mod.getCoerced(value_val, int_tag_ty)).toIntern())) |other| {
const msg = msg: {
const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{value_val.fmtValue(Type.comptime_int, mod)});
errdefer msg.destroy(gpa);
_ = other; // TODO: this note is incorrect
try sema.errNote(block, src, msg, "other enum tag value here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
}
}
 
const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
return decl_val;
return sema.reifyEnum(block, inst, src, tag_type_val.toType(), is_exhaustive_val.toBool(), fields_val, name_strategy);
},
.Opaque => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const decls_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "decls"),
@@ -21443,45 +21334,30 @@ fn zirReify(
return sema.fail(block, src, "reified opaque must have no decls", .{});
}
 
// Because these three values reference each other, `undefined`
// placeholders are used in two places, then replaced once the opaque
// type gains an InternPool index.
const wip_ty = switch (try ip.getOpaqueType(gpa, .{
.has_namespace = false,
.key = .{ .reified = .{
.zir_index = try ip.trackZir(gpa, block.getFileScope(mod), inst),
} },
})) {
.existing => |ty| return Air.internedToRef(ty),
.wip => |wip| wip,
};
errdefer wip_ty.cancel(ip);
 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.noreturn,
.val = Value.@"unreachable",
.ty = Type.type,
.val = Value.fromInterned(wip_ty.index),
}, name_strategy, "opaque", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer {
new_decl.has_tv = false; // namespace and val were destroyed by later errdefers
mod.abortAnonDecl(new_decl_index);
}
mod.declPtr(new_decl_index).owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
 
const new_namespace_index = try mod.createNamespace(.{
.parent = block.namespace.toOptional(),
.decl_index = new_decl_index,
.file_scope = block.getFileScope(mod),
});
errdefer mod.destroyNamespace(new_namespace_index);
 
const opaque_ty = try mod.intern(.{ .opaque_type = .{
.decl = new_decl_index,
.namespace = new_namespace_index,
.zir_index = .none,
} });
// TODO: figure out InternPool removals for incremental compilation
//errdefer ip.remove(opaque_ty);
 
new_decl.ty = Type.type;
new_decl.val = Value.fromInterned(opaque_ty);
 
const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
return decl_val;
 
return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none));
},
.Union => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const layout_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "layout"),
@@ -21499,216 +21375,15 @@ fn zirReify(
try ip.getOrPutString(gpa, "decls"),
).?);
 
// Decls
if (decls_val.sliceLen(mod) > 0) {
return sema.fail(block, src, "reified unions must have no decls", .{});
}
const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);
const fields_len: u32 = @intCast(try sema.usizeCast(block, src, fields_val.sliceLen(mod)));
 
// Tag type
var explicit_tags_seen: []bool = &.{};
var enum_field_names: []InternPool.NullTerminatedString = &.{};
var enum_tag_ty: InternPool.Index = .none;
if (tag_type_val.optionalValue(mod)) |payload_val| {
enum_tag_ty = payload_val.toType().toIntern();
 
const enum_type = switch (ip.indexToKey(enum_tag_ty)) {
.enum_type => |x| x,
else => return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{}),
};
 
explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len);
@memset(explicit_tags_seen, false);
} else {
enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len);
}
 
// Fields
var any_aligned_fields: bool = false;
var union_fields: std.MultiArrayList(struct {
type: InternPool.Index,
alignment: InternPool.Alignment,
}) = .{};
var field_name_table: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
try field_name_table.ensureTotalCapacity(sema.arena, fields_len);
 
for (0..fields_len) |i| {
const elem_val = try fields_val.elemValue(mod, i);
const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "name"),
).?);
const type_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "type"),
).?);
const alignment_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "alignment"),
).?);
 
const field_name = try name_val.toIpString(Type.slice_const_u8, mod);
 
if (enum_field_names.len != 0) {
enum_field_names[i] = field_name;
}
 
if (enum_tag_ty != .none) {
const tag_info = ip.indexToKey(enum_tag_ty).enum_type;
const enum_index = tag_info.nameIndex(ip, field_name) orelse {
return sema.fail(block, src, "no field named '{}' in enum '{}'", .{
field_name.fmt(ip), Type.fromInterned(enum_tag_ty).fmt(mod),
});
};
assert(explicit_tags_seen.len == tag_info.names.len);
// No duplicate check is needed here, because it already happened when
// the enum type was created in the first place.
assert(!explicit_tags_seen[enum_index]);
explicit_tags_seen[enum_index] = true;
}
 
const gop = field_name_table.getOrPutAssumeCapacity(field_name);
if (gop.found_existing) {
// TODO: better source location
return sema.fail(block, src, "duplicate union field {}", .{field_name.fmt(ip)});
}
 
const field_ty = type_val.toType();
const alignment_val_int = (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?;
if (alignment_val_int > 0 and !math.isPowerOfTwo(alignment_val_int)) {
// TODO: better source location
return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{
alignment_val_int,
});
}
const field_align = Alignment.fromByteUnits(alignment_val_int);
any_aligned_fields = any_aligned_fields or field_align != .none;
 
try union_fields.append(sema.arena, .{
.type = field_ty.toIntern(),
.alignment = field_align,
});
 
if (field_ty.zigTypeTag(mod) == .Opaque) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{});
errdefer msg.destroy(gpa);
 
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
}
if (layout == .Extern and !try sema.validateExternType(field_ty, .union_field)) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
errdefer msg.destroy(gpa);
 
const src_decl = mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsNotExtern(msg, src_decl.toSrcLoc(src, mod), field_ty, .union_field);
 
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
} else if (layout == .Packed and !try sema.validatePackedType(field_ty)) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
errdefer msg.destroy(gpa);
 
const src_decl = mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsNotPacked(msg, src_decl.toSrcLoc(src, mod), field_ty);
 
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
}
}
 
if (enum_tag_ty != .none) {
const tag_info = ip.indexToKey(enum_tag_ty).enum_type;
if (tag_info.names.len > fields_len) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "enum field(s) missing in union", .{});
errdefer msg.destroy(gpa);
 
assert(explicit_tags_seen.len == tag_info.names.len);
for (tag_info.names.get(ip), 0..) |field_name, field_index| {
if (explicit_tags_seen[field_index]) continue;
try sema.addFieldErrNote(Type.fromInterned(enum_tag_ty), field_index, msg, "field '{}' missing, declared here", .{
field_name.fmt(ip),
});
}
try sema.addDeclaredHereNote(msg, Type.fromInterned(enum_tag_ty));
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
}
} else {
enum_tag_ty = try sema.generateUnionTagTypeSimple(block, enum_field_names, .none);
}
 
// Because these three values reference each other, `undefined`
// placeholders are used, then replaced once the union type gains an
// InternPool index.
 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.noreturn,
.val = Value.@"unreachable",
}, name_strategy, "union", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer {
new_decl.has_tv = false; // namespace and val were destroyed by later errdefers
mod.abortAnonDecl(new_decl_index);
}
 
const new_namespace_index = try mod.createNamespace(.{
.parent = block.namespace.toOptional(),
.decl_index = new_decl_index,
.file_scope = block.getFileScope(mod),
});
errdefer mod.destroyNamespace(new_namespace_index);
 
const union_ty = try ip.getUnionType(gpa, .{
.decl = new_decl_index,
.namespace = new_namespace_index,
.enum_tag_ty = enum_tag_ty,
.fields_len = fields_len,
.zir_index = .none,
.flags = .{
.layout = layout,
.status = .have_field_types,
.runtime_tag = if (!tag_type_val.isNull(mod))
.tagged
else if (layout != .Auto)
.none
else switch (block.wantSafety()) {
true => .safety,
false => .none,
},
.any_aligned_fields = any_aligned_fields,
.requires_comptime = .unknown,
.assumed_runtime_bits = false,
.assumed_pointer_aligned = false,
.alignment = .none,
},
.field_types = union_fields.items(.type),
.field_aligns = if (any_aligned_fields) union_fields.items(.alignment) else &.{},
});
 
new_decl.ty = Type.type;
new_decl.val = Value.fromInterned(union_ty);
 
const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
return decl_val;
return sema.reifyUnion(block, inst, src, layout, tag_type_val, fields_val, name_strategy);
},
.Fn => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const calling_convention_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "calling_convention"),
@@ -21759,7 +21434,7 @@ fn zirReify(
var noalias_bits: u32 = 0;
for (param_types, 0..) |*param_type, i| {
const elem_val = try params_val.elemValue(mod, i);
const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern()));
const param_is_generic_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_generic"),
@@ -21804,13 +21479,364 @@ fn zirReify(
}
}
 
fn reifyEnum(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
src: LazySrcLoc,
tag_ty: Type,
is_exhaustive: bool,
fields_val: Value,
name_strategy: Zir.Inst.NameStrategy,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
 
// This logic must stay in sync with the structure of `std.builtin.Type.Enum` - search for `fieldValue`.
 
const fields_len: u32 = @intCast(fields_val.sliceLen(mod));
 
// The validation work here is non-trivial, and it's possible the type already exists.
// So in this first pass, let's just construct a hash to optimize for this case. If the
// inputs turn out to be invalid, we can cancel the WIP type later.
 
// For deduplication purposes, we must create a hash including all details of this type.
// TODO: use a longer hash!
var hasher = std.hash.Wyhash.init(0);
std.hash.autoHash(&hasher, tag_ty.toIntern());
std.hash.autoHash(&hasher, is_exhaustive);
std.hash.autoHash(&hasher, fields_len);
 
for (0..fields_len) |field_idx| {
const field_info = try fields_val.elemValue(mod, field_idx);
 
const field_name_val = try field_info.fieldValue(mod, 0);
const field_value_val = try sema.resolveLazyValue(try field_info.fieldValue(mod, 1));
 
const field_name = try field_name_val.toIpString(Type.slice_const_u8, mod);
 
std.hash.autoHash(&hasher, .{
field_name,
field_value_val.toIntern(),
});
}
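// Note: the hash above covers the tag type, exhaustiveness, field count, and
// every (name, value) pair, so any difference in those details yields a
// distinct `type_hash` (and thus a distinct type), while re-evaluating the
// same instruction with identical inputs is deduplicated below.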
 
const wip_ty = switch (try ip.getEnumType(gpa, .{
.has_namespace = false,
.has_values = true,
.tag_mode = if (is_exhaustive) .explicit else .nonexhaustive,
.fields_len = fields_len,
.key = .{ .reified = .{
.zir_index = try ip.trackZir(gpa, block.getFileScope(mod), inst),
.type_hash = hasher.final(),
} },
})) {
.wip => |wip| wip,
.existing => |ty| return Air.internedToRef(ty),
};
errdefer wip_ty.cancel(ip);
 
if (tag_ty.zigTypeTag(mod) != .Int) {
return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{});
}
 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = Value.fromInterned(wip_ty.index),
}, name_strategy, "enum", inst);
mod.declPtr(new_decl_index).owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
 
wip_ty.prepare(ip, new_decl_index, .none);
wip_ty.setTagTy(ip, tag_ty.toIntern());
 
for (0..fields_len) |field_idx| {
const field_info = try fields_val.elemValue(mod, field_idx);
 
const field_name_val = try field_info.fieldValue(mod, 0);
const field_value_val = try sema.resolveLazyValue(try field_info.fieldValue(mod, 1));
 
const field_name = try field_name_val.toIpString(Type.slice_const_u8, mod);
 
if (!try sema.intFitsInType(field_value_val, tag_ty, null)) {
// TODO: better source location
return sema.fail(block, src, "field '{}' with enumeration value '{}' is too large for backing int type '{}'", .{
field_name.fmt(ip),
field_value_val.fmtValue(Type.comptime_int, mod),
tag_ty.fmt(mod),
});
}
 
const coerced_field_val = try mod.getCoerced(field_value_val, tag_ty);
if (wip_ty.nextField(ip, field_name, coerced_field_val.toIntern())) |conflict| {
return sema.failWithOwnedErrorMsg(block, switch (conflict.kind) {
.name => msg: {
const msg = try sema.errMsg(block, src, "duplicate enum field '{}'", .{field_name.fmt(ip)});
errdefer msg.destroy(gpa);
_ = conflict.prev_field_idx; // TODO: this note is incorrect
try sema.errNote(block, src, msg, "other field here", .{});
break :msg msg;
},
.value => msg: {
const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{field_value_val.fmtValue(Type.comptime_int, mod)});
errdefer msg.destroy(gpa);
_ = conflict.prev_field_idx; // TODO: this note is incorrect
try sema.errNote(block, src, msg, "other enum tag value here", .{});
break :msg msg;
},
});
}
}
 
if (!is_exhaustive and fields_len > 1 and std.math.log2_int(u64, fields_len) == tag_ty.bitSize(mod)) {
return sema.fail(block, src, "non-exhaustive enum specified every value", .{});
}
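// Why the check above works (sketch): every field value is distinct and fits
// in the tag type, so fields_len <= 1 << bitSize(tag_ty). The floored log2
// equals the bit size exactly when fields_len == 1 << bitSize(tag_ty), i.e.
// every tag value is named and the non-exhaustive '_' case can never occur.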
 
try mod.finalizeAnonDecl(new_decl_index);
return Air.internedToRef(wip_ty.index);
}
 
fn reifyUnion(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
src: LazySrcLoc,
layout: std.builtin.Type.ContainerLayout,
opt_tag_type_val: Value,
fields_val: Value,
name_strategy: Zir.Inst.NameStrategy,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
 
// This logic must stay in sync with the structure of `std.builtin.Type.Union` - search for `fieldValue`.
 
const fields_len: u32 = @intCast(fields_val.sliceLen(mod));
 
// The validation work here is non-trivial, and it's possible the type already exists.
// So in this first pass, let's just construct a hash to optimize for this case. If the
// inputs turn out to be invalid, we can cancel the WIP type later.
 
// For deduplication purposes, we must create a hash including all details of this type.
// TODO: use a longer hash!
var hasher = std.hash.Wyhash.init(0);
std.hash.autoHash(&hasher, layout);
std.hash.autoHash(&hasher, opt_tag_type_val.toIntern());
std.hash.autoHash(&hasher, fields_len);
 
var any_aligns = false;
 
for (0..fields_len) |field_idx| {
const field_info = try fields_val.elemValue(mod, field_idx);
 
const field_name_val = try field_info.fieldValue(mod, 0);
const field_type_val = try field_info.fieldValue(mod, 1);
const field_align_val = try sema.resolveLazyValue(try field_info.fieldValue(mod, 2));
 
const field_name = try field_name_val.toIpString(Type.slice_const_u8, mod);
 
std.hash.autoHash(&hasher, .{
field_name,
field_type_val.toIntern(),
field_align_val.toIntern(),
});
 
if (field_align_val.toUnsignedInt(mod) != 0) {
any_aligns = true;
}
}
 
const wip_ty = switch (try ip.getUnionType(gpa, .{
.flags = .{
.layout = layout,
.status = .none,
.runtime_tag = if (opt_tag_type_val.optionalValue(mod) != null)
.tagged
else if (layout != .Auto)
.none
else switch (block.wantSafety()) {
true => .safety,
false => .none,
},
.any_aligned_fields = any_aligns,
.requires_comptime = .unknown,
.assumed_runtime_bits = false,
.assumed_pointer_aligned = false,
.alignment = .none,
},
.has_namespace = false,
.fields_len = fields_len,
.enum_tag_ty = .none, // set later because not yet validated
.field_types = &.{}, // set later
.field_aligns = &.{}, // set later
.key = .{ .reified = .{
.zir_index = try ip.trackZir(gpa, block.getFileScope(mod), inst),
.type_hash = hasher.final(),
} },
})) {
.wip => |wip| wip,
.existing => |ty| return Air.internedToRef(ty),
};
errdefer wip_ty.cancel(ip);
 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = Value.fromInterned(wip_ty.index),
}, name_strategy, "union", inst);
mod.declPtr(new_decl_index).owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
 
const field_types = try sema.arena.alloc(InternPool.Index, fields_len);
const field_aligns = if (any_aligns) try sema.arena.alloc(InternPool.Alignment, fields_len) else undefined;
 
const enum_tag_ty, const has_explicit_tag = if (opt_tag_type_val.optionalValue(mod)) |tag_type_val| tag_ty: {
switch (ip.indexToKey(tag_type_val.toIntern())) {
.enum_type => {},
else => return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{}),
}
const enum_tag_ty = tag_type_val.toType();
 
// We simply track which fields of the tag type have been seen.
const tag_ty_fields_len = enum_tag_ty.enumFieldCount(mod);
var seen_tags = try std.DynamicBitSetUnmanaged.initEmpty(sema.arena, tag_ty_fields_len);
 
for (field_types, 0..) |*field_ty, field_idx| {
const field_info = try fields_val.elemValue(mod, field_idx);
 
const field_name_val = try field_info.fieldValue(mod, 0);
const field_type_val = try field_info.fieldValue(mod, 1);
 
const field_name = try field_name_val.toIpString(Type.slice_const_u8, mod);
 
const enum_index = enum_tag_ty.enumFieldIndex(field_name, mod) orelse {
// TODO: better source location
return sema.fail(block, src, "no field named '{}' in enum '{}'", .{
field_name.fmt(ip), enum_tag_ty.fmt(mod),
});
};
if (seen_tags.isSet(enum_index)) {
// TODO: better source location
return sema.fail(block, src, "duplicate union field {}", .{field_name.fmt(ip)});
}
seen_tags.set(enum_index);
 
field_ty.* = field_type_val.toIntern();
if (any_aligns) {
const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntAdvanced(sema);
if (byte_align > 0 and !math.isPowerOfTwo(byte_align)) {
// TODO: better source location
return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align});
}
field_aligns[field_idx] = Alignment.fromByteUnits(byte_align);
}
}
 
if (tag_ty_fields_len > fields_len) return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(block, src, "enum fields missing in union", .{});
errdefer msg.destroy(gpa);
var it = seen_tags.iterator(.{ .kind = .unset });
while (it.next()) |enum_index| {
const field_name = enum_tag_ty.enumFieldName(enum_index, mod);
try sema.addFieldErrNote(enum_tag_ty, enum_index, msg, "field '{}' missing, declared here", .{
field_name.fmt(ip),
});
}
try sema.addDeclaredHereNote(msg, enum_tag_ty);
break :msg msg;
});
 
break :tag_ty .{ enum_tag_ty.toIntern(), true };
} else tag_ty: {
// We must track field names and set up the tag type ourselves.
var field_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
try field_names.ensureTotalCapacity(sema.arena, fields_len);
 
for (field_types, 0..) |*field_ty, field_idx| {
const field_info = try fields_val.elemValue(mod, field_idx);
 
const field_name_val = try field_info.fieldValue(mod, 0);
const field_type_val = try field_info.fieldValue(mod, 1);
 
const field_name = try field_name_val.toIpString(Type.slice_const_u8, mod);
const gop = field_names.getOrPutAssumeCapacity(field_name);
if (gop.found_existing) {
// TODO: better source location
return sema.fail(block, src, "duplicate union field {}", .{field_name.fmt(ip)});
}
 
field_ty.* = field_type_val.toIntern();
if (any_aligns) {
const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntAdvanced(sema);
if (byte_align > 0 and !math.isPowerOfTwo(byte_align)) {
// TODO: better source location
return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align});
}
field_aligns[field_idx] = Alignment.fromByteUnits(byte_align);
}
}
 
const enum_tag_ty = try sema.generateUnionTagTypeSimple(block, field_names.keys(), mod.declPtr(new_decl_index));
break :tag_ty .{ enum_tag_ty, false };
};
errdefer if (!has_explicit_tag) ip.remove(enum_tag_ty); // remove generated tag type on error
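// Summary of the two paths above: with an explicit tag type, every union
// field must name a distinct enum field, and unmatched enum fields are
// reported as missing; without one, field names are collected so a tag type
// can be generated, and that generated tag is removed again on error.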
 
for (field_types) |field_ty_ip| {
const field_ty = Type.fromInterned(field_ty_ip);
if (field_ty.zigTypeTag(mod) == .Opaque) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{});
errdefer msg.destroy(gpa);
 
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
});
}
if (layout == .Extern and !try sema.validateExternType(field_ty, .union_field)) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(block, src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
errdefer msg.destroy(gpa);
 
const src_decl = mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsNotExtern(msg, src_decl.toSrcLoc(src, mod), field_ty, .union_field);
 
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
});
} else if (layout == .Packed and !try sema.validatePackedType(field_ty)) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
errdefer msg.destroy(gpa);
 
const src_decl = mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsNotPacked(msg, src_decl.toSrcLoc(src, mod), field_ty);
 
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
});
}
}
 
const loaded_union = ip.loadUnionType(wip_ty.index);
loaded_union.setFieldTypes(ip, field_types);
if (any_aligns) {
loaded_union.setFieldAligns(ip, field_aligns);
}
loaded_union.tagTypePtr(ip).* = enum_tag_ty;
loaded_union.flagsPtr(ip).status = .have_field_types;
 
try mod.finalizeAnonDecl(new_decl_index);
return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none));
}
 
fn reifyStruct(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
src: LazySrcLoc,
layout: std.builtin.Type.ContainerLayout,
backing_int_val: Value,
opt_backing_int_val: Value,
fields_val: Value,
name_strategy: Zir.Inst.NameStrategy,
is_tuple: bool,
@@ -21819,111 +21845,126 @@ fn reifyStruct(
const gpa = sema.gpa;
const ip = &mod.intern_pool;
 
// This logic must stay in sync with the structure of `std.builtin.Type.Struct` - search for `fieldValue`.
 
const fields_len: u32 = @intCast(fields_val.sliceLen(mod));
 
// The validation work here is non-trivial, and it's possible the type already exists.
// So in this first pass, let's just construct a hash to optimize for this case. If the
// inputs turn out to be invalid, we can cancel the WIP type later.
 
// For deduplication purposes, we must create a hash including all details of this type.
// TODO: use a longer hash!
var hasher = std.hash.Wyhash.init(0);
std.hash.autoHash(&hasher, layout);
std.hash.autoHash(&hasher, opt_backing_int_val.toIntern());
std.hash.autoHash(&hasher, is_tuple);
std.hash.autoHash(&hasher, fields_len);
 
var any_comptime_fields = false;
var any_default_inits = false;
var any_aligned_fields = false;
 
for (0..fields_len) |field_idx| {
const field_info = try fields_val.elemValue(mod, field_idx);
 
const field_name_val = try field_info.fieldValue(mod, 0);
const field_type_val = try field_info.fieldValue(mod, 1);
const field_default_value_val = try field_info.fieldValue(mod, 2);
const field_is_comptime_val = try field_info.fieldValue(mod, 3);
const field_alignment_val = try sema.resolveLazyValue(try field_info.fieldValue(mod, 4));
 
const field_name = try field_name_val.toIpString(Type.slice_const_u8, mod);
const field_is_comptime = field_is_comptime_val.toBool();
const field_default_value: InternPool.Index = if (field_default_value_val.optionalValue(mod)) |ptr_val| d: {
const ptr_ty = try mod.singleConstPtrType(field_type_val.toType());
// We need to do this deref here, so we won't check for this error case later on.
const val = try sema.pointerDeref(block, src, ptr_val, ptr_ty) orelse return sema.failWithNeededComptime(
block,
src,
.{ .needed_comptime_reason = "struct field default value must be comptime-known" },
);
// Resolve the value so that lazy values do not create distinct types.
break :d (try sema.resolveLazyValue(val)).toIntern();
} else .none;
 
std.hash.autoHash(&hasher, .{
field_name,
field_type_val.toIntern(),
field_default_value,
field_is_comptime,
field_alignment_val.toIntern(),
});
 
if (field_is_comptime) any_comptime_fields = true;
if (field_default_value != .none) any_default_inits = true;
switch (try field_alignment_val.orderAgainstZeroAdvanced(mod, sema)) {
.eq => {},
.gt => any_aligned_fields = true,
.lt => unreachable,
}
}
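// As in reifyEnum and reifyUnion above, default values are resolved before
// hashing so that lazy values (e.g. a not-yet-resolved @sizeOf) cannot make
// otherwise-identical struct types hash differently.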
 
const wip_ty = switch (try ip.getStructType(gpa, .{
.layout = layout,
.fields_len = fields_len,
.known_non_opv = false,
.requires_comptime = .unknown,
.is_tuple = is_tuple,
.any_comptime_fields = any_comptime_fields,
.any_default_inits = any_default_inits,
.any_aligned_fields = any_aligned_fields,
.inits_resolved = true,
.has_namespace = false,
.key = .{ .reified = .{
.zir_index = try ip.trackZir(gpa, block.getFileScope(mod), inst),
.type_hash = hasher.final(),
} },
})) {
.wip => |wip| wip,
.existing => |ty| return Air.internedToRef(ty),
};
errdefer wip_ty.cancel(ip);
 
if (is_tuple) switch (layout) {
.Extern => return sema.fail(block, src, "extern tuples are not supported", .{}),
.Packed => return sema.fail(block, src, "packed tuples are not supported", .{}),
.Auto => {},
};
 
const fields_len: u32 = @intCast(try sema.usizeCast(block, src, fields_val.sliceLen(mod)));
 
// Because these three values reference each other, `undefined`
// placeholders are used, then replaced once the struct type gains an
// InternPool index.
 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.noreturn,
.val = Value.@"unreachable",
.ty = Type.type,
.val = Value.fromInterned(wip_ty.index),
}, name_strategy, "struct", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer {
new_decl.has_tv = false; // namespace and val were destroyed by later errdefers
mod.abortAnonDecl(new_decl_index);
}
mod.declPtr(new_decl_index).owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
 
const ty = try ip.getStructType(gpa, .{
.decl = new_decl_index,
.namespace = .none,
.zir_index = .none,
.layout = layout,
.known_non_opv = false,
.fields_len = fields_len,
.requires_comptime = .unknown,
.is_tuple = is_tuple,
// So that we don't have to scan ahead, we allocate space in the struct
// type for alignments, comptime fields, and default inits. This might
// result in wasted space; however, this is a permitted encoding of
// struct types.
.any_comptime_fields = true,
.any_default_inits = true,
.inits_resolved = true,
.any_aligned_fields = true,
});
// TODO: figure out InternPool removals for incremental compilation
//errdefer ip.remove(ty);
const struct_type = ip.indexToKey(ty).struct_type;
const struct_type = ip.loadStructType(wip_ty.index);
 
new_decl.ty = Type.type;
new_decl.val = Value.fromInterned(ty);
for (0..fields_len) |field_idx| {
const field_info = try fields_val.elemValue(mod, field_idx);
 
// Fields
for (0..fields_len) |i| {
const elem_val = try fields_val.elemValue(mod, i);
const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "name"),
).?);
const type_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "type"),
).?);
const default_value_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "default_value"),
).?);
const is_comptime_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_comptime"),
).?);
const alignment_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "alignment"),
).?);
 
if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
}
const abi_align = (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?;
 
if (layout == .Packed) {
if (abi_align != 0) return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{});
if (is_comptime_val.toBool()) return sema.fail(block, src, "packed struct fields cannot be marked comptime", .{});
} else {
if (abi_align > 0 and !math.isPowerOfTwo(abi_align)) return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{abi_align});
struct_type.field_aligns.get(ip)[i] = Alignment.fromByteUnits(abi_align);
}
if (layout == .Extern and is_comptime_val.toBool()) {
return sema.fail(block, src, "extern struct fields cannot be marked comptime", .{});
}
 
const field_name = try name_val.toIpString(Type.slice_const_u8, mod);
const field_name_val = try field_info.fieldValue(mod, 0);
const field_type_val = try field_info.fieldValue(mod, 1);
const field_default_value_val = try field_info.fieldValue(mod, 2);
const field_is_comptime_val = try field_info.fieldValue(mod, 3);
const field_alignment_val = try field_info.fieldValue(mod, 4);
 
const field_ty = field_type_val.toType();
const field_name = try field_name_val.toIpString(Type.slice_const_u8, mod);
if (is_tuple) {
const field_index = field_name.toUnsigned(ip) orelse return sema.fail(
const field_name_index = field_name.toUnsigned(ip) orelse return sema.fail(
block,
src,
"tuple cannot have non-numeric field '{}'",
.{field_name.fmt(ip)},
);
 
if (field_index >= fields_len) {
if (field_name_index != field_idx) {
return sema.fail(
block,
src,
"tuple field {} exceeds tuple field count",
.{field_index},
"tuple field name '{}' does not match field index {}",
.{ field_name_index, field_idx },
);
}
} else if (struct_type.addFieldName(ip, field_name)) |prev_index| {
@@ -21931,45 +21972,72 @@ fn reifyStruct(
return sema.fail(block, src, "duplicate struct field name {}", .{field_name.fmt(ip)});
}
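// Sketch of the tightened rule above: tuple field names must now be exactly
// "0", "1", ... in index order, so a reified tuple whose fields are named
// "1" then "0" is rejected even though both names are in bounds.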
 
const field_ty = type_val.toType();
const default_val = if (default_value_val.optionalValue(mod)) |opt_val|
(try sema.pointerDeref(block, src, opt_val, try mod.singleConstPtrType(field_ty)) orelse
return sema.failWithNeededComptime(block, src, .{
.needed_comptime_reason = "struct field default value must be comptime-known",
})).toIntern()
else
.none;
if (is_comptime_val.toBool() and default_val == .none) {
if (any_aligned_fields) {
if (!try sema.intFitsInType(field_alignment_val, Type.u32, null)) {
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
}
 
const byte_align = try field_alignment_val.toUnsignedIntAdvanced(sema);
if (byte_align == 0) {
if (layout != .Packed) {
struct_type.field_aligns.get(ip)[field_idx] = .none;
}
} else {
if (layout == .Packed) return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{});
if (!math.isPowerOfTwo(byte_align)) return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align});
struct_type.field_aligns.get(ip)[field_idx] = Alignment.fromNonzeroByteUnits(byte_align);
}
}
 
const field_is_comptime = field_is_comptime_val.toBool();
if (field_is_comptime) {
assert(any_comptime_fields);
switch (layout) {
.Extern => return sema.fail(block, src, "extern struct fields cannot be marked comptime", .{}),
.Packed => return sema.fail(block, src, "packed struct fields cannot be marked comptime", .{}),
.Auto => struct_type.setFieldComptime(ip, field_idx),
}
}
 
const field_default: InternPool.Index = d: {
if (!any_default_inits) break :d .none;
const ptr_val = field_default_value_val.optionalValue(mod) orelse break :d .none;
const ptr_ty = try mod.singleConstPtrType(field_ty);
// Asserted comptime-dereferenceable above.
const val = (try sema.pointerDeref(block, src, ptr_val, ptr_ty)).?;
// We already resolved this for deduplication, so we may as well do it now.
break :d (try sema.resolveLazyValue(val)).toIntern();
};
 
if (field_is_comptime and field_default == .none) {
return sema.fail(block, src, "comptime field without default initialization value", .{});
}
 
struct_type.field_types.get(ip)[i] = field_ty.toIntern();
struct_type.field_inits.get(ip)[i] = default_val;
if (is_comptime_val.toBool())
struct_type.setFieldComptime(ip, i);
struct_type.field_types.get(ip)[field_idx] = field_type_val.toIntern();
if (field_default != .none) {
struct_type.field_inits.get(ip)[field_idx] = field_default;
}
 
if (field_ty.zigTypeTag(mod) == .Opaque) {
const msg = msg: {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(gpa);
 
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
});
}
if (field_ty.zigTypeTag(mod) == .NoReturn) {
const msg = msg: {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(block, src, "struct fields cannot be 'noreturn'", .{});
errdefer msg.destroy(gpa);
 
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
});
}
if (layout == .Extern and !try sema.validateExternType(field_ty, .struct_field)) {
const msg = msg: {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(block, src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
errdefer msg.destroy(gpa);
 
@@ -21978,10 +22046,9 @@ fn reifyStruct(
 
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
});
} else if (layout == .Packed and !try sema.validatePackedType(field_ty)) {
const msg = msg: {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(block, src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
errdefer msg.destroy(gpa);
 
@@ -21990,32 +22057,27 @@ fn reifyStruct(
 
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
});
}
}
 
if (layout == .Packed) {
for (0..struct_type.field_types.len) |index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
var fields_bit_sum: u64 = 0;
for (0..struct_type.field_types.len) |field_idx| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_idx]);
sema.resolveTypeLayout(field_ty) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
try sema.addFieldErrNote(Type.fromInterned(ty), index, msg, "while checking this field", .{});
try sema.errNote(block, src, msg, "while checking a field of this struct", .{});
return err;
},
else => return err,
};
}
 
var fields_bit_sum: u64 = 0;
for (0..struct_type.field_types.len) |i| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
fields_bit_sum += field_ty.bitSize(mod);
}
 
if (backing_int_val.optionalValue(mod)) |backing_int_ty_val| {
const backing_int_ty = backing_int_ty_val.toType();
if (opt_backing_int_val.optionalValue(mod)) |backing_int_val| {
const backing_int_ty = backing_int_val.toType();
try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum);
struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
} else {
@@ -22024,9 +22086,8 @@ fn reifyStruct(
}
}
 
const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
return decl_val;
return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none));
}
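// User-level sketch (standard Zig, not from this diff) of the backing-integer
// rule enforced above: a packed struct's backing integer must be exactly as
// wide as the sum of its fields' bit sizes.
const std = @import("std");

const P = packed struct(u32) {
    a: u8,
    b: u24,
};

comptime {
    std.debug.assert(@bitSizeOf(P) == 32);
}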
 
fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) CompileError!Air.Inst.Ref {
@@ -23241,7 +23302,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
switch (ty.containerLayout(mod)) {
.Packed => {
var bit_sum: u64 = 0;
const struct_type = ip.indexToKey(ty.toIntern()).struct_type;
const struct_type = ip.loadStructType(ty.toIntern());
for (0..struct_type.field_types.len) |i| {
if (i == field_index) {
return bit_sum;
@@ -25919,7 +25980,7 @@ fn zirBuiltinExtern(
 
// TODO check duplicate extern
 
const new_decl_index = try mod.allocateNewDecl(sema.owner_decl.src_namespace, sema.owner_decl.src_node, .none);
const new_decl_index = try mod.allocateNewDecl(sema.owner_decl.src_namespace, sema.owner_decl.src_node);
errdefer mod.destroyDecl(new_decl_index);
const new_decl = mod.declPtr(new_decl_index);
new_decl.name = options.name;
@@ -26515,7 +26576,6 @@ fn addSafetyCheck(
.sema = sema,
.src_decl = parent_block.src_decl,
.namespace = parent_block.namespace,
.wip_capture_scope = parent_block.wip_capture_scope,
.instructions = .{},
.inlining = parent_block.inlining,
.is_comptime = false,
@@ -26624,7 +26684,6 @@ fn panicUnwrapError(
.sema = sema,
.src_decl = parent_block.src_decl,
.namespace = parent_block.namespace,
.wip_capture_scope = parent_block.wip_capture_scope,
.instructions = .{},
.inlining = parent_block.inlining,
.is_comptime = false,
@@ -26741,7 +26800,6 @@ fn safetyCheckFormatted(
.sema = sema,
.src_decl = parent_block.src_decl,
.namespace = parent_block.namespace,
.wip_capture_scope = parent_block.wip_capture_scope,
.instructions = .{},
.inlining = parent_block.inlining,
.is_comptime = false,
@@ -27268,8 +27326,7 @@ fn fieldCallBind(
.Union => {
try sema.resolveTypeFields(concrete_ty);
const union_obj = mod.typeToUnion(concrete_ty).?;
_ = union_obj.nameIndex(ip, field_name) orelse break :find_field;
 
_ = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse break :find_field;
const field_ptr = try unionFieldPtr(sema, block, src, object_ptr, field_name, field_name_src, concrete_ty, false);
return .{ .direct = try sema.analyzeLoad(block, src, field_ptr, src) };
},
@@ -27643,7 +27700,8 @@ fn structFieldVal(
try sema.resolveTypeFields(struct_ty);
 
switch (ip.indexToKey(struct_ty.toIntern())) {
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(struct_ty.toIntern());
if (struct_type.isTuple(ip))
return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty);
 
@@ -27849,7 +27907,7 @@ fn unionFieldPtr(
 
try sema.requireRuntimeBlock(block, src, null);
if (!initializing and union_obj.getLayout(ip) == .Auto and block.wantSafety() and
union_ty.unionTagTypeSafety(mod) != null and union_obj.field_names.len > 1)
union_ty.unionTagTypeSafety(mod) != null and union_obj.field_types.len > 1)
{
const wanted_tag_val = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern());
@@ -27927,7 +27985,7 @@ fn unionFieldVal(
 
try sema.requireRuntimeBlock(block, src, null);
if (union_obj.getLayout(ip) == .Auto and block.wantSafety() and
union_ty.unionTagTypeSafety(mod) != null and union_obj.field_names.len > 1)
union_ty.unionTagTypeSafety(mod) != null and union_obj.field_types.len > 1)
{
const wanted_tag_val = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern());
@@ -31686,7 +31744,7 @@ fn coerceEnumToUnion(
const msg = try sema.errMsg(block, inst_src, "cannot initialize 'noreturn' field of union", .{});
errdefer msg.destroy(sema.gpa);
 
const field_name = union_obj.field_names.get(ip)[field_index];
const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' declared here", .{
field_name.fmt(ip),
});
@@ -31697,7 +31755,7 @@ fn coerceEnumToUnion(
}
const opv = (try sema.typeHasOnePossibleValue(field_ty)) orelse {
const msg = msg: {
const field_name = union_obj.field_names.get(ip)[field_index];
const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
const msg = try sema.errMsg(block, inst_src, "coercion from enum '{}' to union '{}' must initialize '{}' field '{}'", .{
inst_ty.fmt(sema.mod), union_ty.fmt(sema.mod),
field_ty.fmt(sema.mod), field_name.fmt(ip),
@@ -31769,8 +31827,8 @@ fn coerceEnumToUnion(
);
errdefer msg.destroy(sema.gpa);
 
for (0..union_obj.field_names.len) |field_index| {
const field_name = union_obj.field_names.get(ip)[field_index];
for (0..union_obj.field_types.len) |field_index| {
const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
if (!(try sema.typeHasRuntimeBits(field_ty))) continue;
try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' has type '{}'", .{
@@ -31803,8 +31861,8 @@ fn coerceAnonStructToUnion(
.{ .name = anon_struct_type.names.get(ip)[0] }
else
.{ .count = anon_struct_type.names.len },
.struct_type => |struct_type| name: {
const field_names = struct_type.field_names.get(ip);
.struct_type => name: {
const field_names = ip.loadStructType(inst_ty.toIntern()).field_names.get(ip);
break :name if (field_names.len == 1)
.{ .name = field_names[0] }
else
@@ -32113,7 +32171,7 @@ fn coerceTupleToStruct(
var runtime_src: ?LazySrcLoc = null;
const field_count = switch (ip.indexToKey(inst_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
.struct_type => |s| s.field_types.len,
.struct_type => ip.loadStructType(inst_ty.toIntern()).field_types.len,
else => unreachable,
};
for (0..field_count) |field_index_usize| {
@@ -32125,7 +32183,7 @@ fn coerceTupleToStruct(
anon_struct_type.names.get(ip)[field_i]
else
try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}),
.struct_type => |s| s.field_names.get(ip)[field_i],
.struct_type => ip.loadStructType(inst_ty.toIntern()).field_names.get(ip)[field_i],
else => unreachable,
};
const field_index = try sema.structFieldIndex(block, struct_ty, field_name, field_src);
@@ -32213,7 +32271,7 @@ fn coerceTupleToTuple(
const ip = &mod.intern_pool;
const dest_field_count = switch (ip.indexToKey(tuple_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
.struct_type => |struct_type| struct_type.field_types.len,
.struct_type => ip.loadStructType(tuple_ty.toIntern()).field_types.len,
else => unreachable,
};
const field_vals = try sema.arena.alloc(InternPool.Index, dest_field_count);
@@ -32223,7 +32281,7 @@ fn coerceTupleToTuple(
const inst_ty = sema.typeOf(inst);
const src_field_count = switch (ip.indexToKey(inst_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
.struct_type => |struct_type| struct_type.field_types.len,
.struct_type => ip.loadStructType(inst_ty.toIntern()).field_types.len,
else => unreachable,
};
if (src_field_count > dest_field_count) return error.NotCoercible;
@@ -32238,10 +32296,14 @@ fn coerceTupleToTuple(
anon_struct_type.names.get(ip)[field_i]
else
try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}),
.struct_type => |struct_type| if (struct_type.field_names.len > 0)
struct_type.field_names.get(ip)[field_i]
else
try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}),
.struct_type => s: {
const struct_type = ip.loadStructType(inst_ty.toIntern());
if (struct_type.field_names.len > 0) {
break :s struct_type.field_names.get(ip)[field_i];
} else {
break :s try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i});
}
},
else => unreachable,
};
 
@@ -32250,12 +32312,12 @@ fn coerceTupleToTuple(
 
const field_ty = switch (ip.indexToKey(tuple_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[field_index_usize],
.struct_type => |struct_type| struct_type.field_types.get(ip)[field_index_usize],
.struct_type => ip.loadStructType(tuple_ty.toIntern()).field_types.get(ip)[field_index_usize],
else => unreachable,
};
const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.values.get(ip)[field_index_usize],
.struct_type => |struct_type| struct_type.fieldInit(ip, field_index_usize),
.struct_type => ip.loadStructType(tuple_ty.toIntern()).fieldInit(ip, field_index_usize),
else => unreachable,
};
 
@@ -32294,7 +32356,7 @@ fn coerceTupleToTuple(
 
const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.values.get(ip)[i],
.struct_type => |struct_type| struct_type.fieldInit(ip, i),
.struct_type => ip.loadStructType(tuple_ty.toIntern()).fieldInit(ip, i),
else => unreachable,
};
 
@@ -35534,7 +35596,7 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void {
pub fn resolveStructAlignment(
sema: *Sema,
ty: InternPool.Index,
struct_type: InternPool.Key.StructType,
struct_type: InternPool.LoadedStructType,
) CompileError!Alignment {
const mod = sema.mod;
const ip = &mod.intern_pool;
@@ -35674,7 +35736,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
}
}
 
const RuntimeOrder = InternPool.Key.StructType.RuntimeOrder;
const RuntimeOrder = InternPool.LoadedStructType.RuntimeOrder;
 
const AlignSortContext = struct {
aligns: []const Alignment,
@@ -35726,7 +35788,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
_ = try sema.typeRequiresComptime(ty);
}
 
fn semaBackingIntType(mod: *Module, struct_type: InternPool.Key.StructType) CompileError!void {
fn semaBackingIntType(mod: *Module, struct_type: InternPool.LoadedStructType) CompileError!void {
const gpa = mod.gpa;
const ip = &mod.intern_pool;
 
@@ -35766,7 +35828,6 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.Key.StructType) Comp
.sema = &sema,
.src_decl = decl_index,
.namespace = struct_type.namespace.unwrap() orelse decl.src_namespace,
.wip_capture_scope = try mod.createCaptureScope(decl.src_scope),
.instructions = .{},
.inlining = null,
.is_comptime = true,
@@ -35789,9 +35850,16 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.Key.StructType) Comp
 
if (small.has_backing_int) {
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
const captures_len = if (small.has_captures_len) blk: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
extra_index += @intFromBool(small.has_fields_len);
extra_index += @intFromBool(small.has_decls_len);
 
extra_index += captures_len;
 
const backing_int_body_len = zir.extra[extra_index];
extra_index += 1;
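// A worked example of the decode above (values illustrative): with all the
// `small.has_*` flags set and
//     extra = { captures_len = 2, fields_len, decls_len, cap0, cap1, body_len }
// `extra_index` advances +1 (captures_len), +1 (fields_len), +1 (decls_len),
// then +captures_len to skip the capture words, landing on
// `backing_int_body_len` exactly as read here.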
 
@@ -35879,7 +35947,7 @@ fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void
pub fn resolveUnionAlignment(
sema: *Sema,
ty: Type,
union_type: InternPool.Key.UnionType,
union_type: InternPool.LoadedUnionType,
) CompileError!Alignment {
const mod = sema.mod;
const ip = &mod.intern_pool;
@@ -35899,13 +35967,12 @@ pub fn resolveUnionAlignment(
 
try sema.resolveTypeFieldsUnion(ty, union_type);
 
const union_obj = ip.loadUnionType(union_type);
var max_align: Alignment = .@"1";
for (0..union_obj.field_names.len) |field_index| {
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
for (0..union_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
if (!(try sema.typeHasRuntimeBits(field_ty))) continue;
 
const explicit_align = union_obj.fieldAlign(ip, @intCast(field_index));
const explicit_align = union_type.fieldAlign(ip, @intCast(field_index));
const field_align = if (explicit_align != .none)
explicit_align
else
@@ -35923,16 +35990,17 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
const mod = sema.mod;
const ip = &mod.intern_pool;
 
const union_type = ip.indexToKey(ty.ip_index).union_type;
try sema.resolveTypeFieldsUnion(ty, union_type);
try sema.resolveTypeFieldsUnion(ty, ip.loadUnionType(ty.ip_index));
 
const union_obj = ip.loadUnionType(union_type);
switch (union_obj.flagsPtr(ip).status) {
// Load again, since the tag type might have changed due to resolution.
const union_type = ip.loadUnionType(ty.ip_index);
 
switch (union_type.flagsPtr(ip).status) {
.none, .have_field_types => {},
.field_types_wip, .layout_wip => {
const msg = try Module.ErrorMsg.create(
sema.gpa,
mod.declPtr(union_obj.decl).srcLoc(mod),
mod.declPtr(union_type.decl).srcLoc(mod),
"union '{}' depends on itself",
.{ty.fmt(mod)},
);
@@ -35941,17 +36009,17 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
.have_layout, .fully_resolved_wip, .fully_resolved => return,
}
 
const prev_status = union_obj.flagsPtr(ip).status;
errdefer if (union_obj.flagsPtr(ip).status == .layout_wip) {
union_obj.flagsPtr(ip).status = prev_status;
const prev_status = union_type.flagsPtr(ip).status;
errdefer if (union_type.flagsPtr(ip).status == .layout_wip) {
union_type.flagsPtr(ip).status = prev_status;
};
 
union_obj.flagsPtr(ip).status = .layout_wip;
union_type.flagsPtr(ip).status = .layout_wip;
 
var max_size: u64 = 0;
var max_align: Alignment = .@"1";
for (0..union_obj.field_names.len) |field_index| {
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
for (0..union_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
if (!(try sema.typeHasRuntimeBits(field_ty))) continue;
 
max_size = @max(max_size, sema.typeAbiSize(field_ty) catch |err| switch (err) {
@@ -35963,7 +36031,7 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
else => return err,
});
 
const explicit_align = union_obj.fieldAlign(ip, @intCast(field_index));
const explicit_align = union_type.fieldAlign(ip, @intCast(field_index));
const field_align = if (explicit_align != .none)
explicit_align
else
@@ -35972,10 +36040,10 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
max_align = max_align.max(field_align);
}
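// Zero-bit fields are invisible to layout: the loop above skips any field
// whose type has no runtime bits, so e.g. `union { a: u32, b: void }` lays
// out as if only `a` existed (4 bytes, align 4 on common targets). Below,
// the tag is folded in only when the union carries a runtime tag whose
// enum type itself has runtime bits.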
 
const flags = union_obj.flagsPtr(ip);
const has_runtime_tag = flags.runtime_tag.hasTag() and try sema.typeHasRuntimeBits(Type.fromInterned(union_obj.enum_tag_ty));
const flags = union_type.flagsPtr(ip);
const has_runtime_tag = flags.runtime_tag.hasTag() and try sema.typeHasRuntimeBits(Type.fromInterned(union_type.enum_tag_ty));
const size, const alignment, const padding = if (has_runtime_tag) layout: {
const enum_tag_type = Type.fromInterned(union_obj.enum_tag_ty);
const enum_tag_type = Type.fromInterned(union_type.enum_tag_ty);
const tag_align = try sema.typeAbiAlignment(enum_tag_type);
const tag_size = try sema.typeAbiSize(enum_tag_type);
 
@@ -36009,22 +36077,22 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
flags.alignment = alignment;
flags.status = .have_layout;
 
if (union_obj.flagsPtr(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
if (union_type.flagsPtr(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
const msg = try Module.ErrorMsg.create(
sema.gpa,
mod.declPtr(union_obj.decl).srcLoc(mod),
mod.declPtr(union_type.decl).srcLoc(mod),
"union layout depends on it having runtime bits",
.{},
);
return sema.failWithOwnedErrorMsg(null, msg);
}
 
if (union_obj.flagsPtr(ip).assumed_pointer_aligned and
if (union_type.flagsPtr(ip).assumed_pointer_aligned and
alignment.compareStrict(.neq, Alignment.fromByteUnits(@divExact(mod.getTarget().ptrBitWidth(), 8))))
{
const msg = try Module.ErrorMsg.create(
sema.gpa,
mod.declPtr(union_obj.decl).srcLoc(mod),
mod.declPtr(union_type.decl).srcLoc(mod),
"union layout depends on being pointer aligned",
.{},
);
@@ -36212,12 +36280,11 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!void {
 
else => switch (ip.items.items(.tag)[@intFromEnum(ty_ip)]) {
.type_struct,
.type_struct_ns,
.type_struct_packed,
.type_struct_packed_inits,
=> try sema.resolveTypeFieldsStruct(ty_ip, ip.indexToKey(ty_ip).struct_type),
=> try sema.resolveTypeFieldsStruct(ty_ip, ip.loadStructType(ty_ip)),
 
.type_union => try sema.resolveTypeFieldsUnion(Type.fromInterned(ty_ip), ip.indexToKey(ty_ip).union_type),
.type_union => try sema.resolveTypeFieldsUnion(Type.fromInterned(ty_ip), ip.loadUnionType(ty_ip)),
.simple_type => try sema.resolveSimpleType(ip.indexToKey(ty_ip).simple_type),
else => {},
},
@@ -36249,7 +36316,7 @@ fn resolveSimpleType(sema: *Sema, simple_type: InternPool.SimpleType) CompileErr
pub fn resolveTypeFieldsStruct(
sema: *Sema,
ty: InternPool.Index,
struct_type: InternPool.Key.StructType,
struct_type: InternPool.LoadedStructType,
) CompileError!void {
const mod = sema.mod;
const ip = &mod.intern_pool;
@@ -36309,7 +36376,7 @@ pub fn resolveStructFieldInits(sema: *Sema, ty: Type) CompileError!void {
struct_type.setHaveFieldInits(ip);
}
 
pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Key.UnionType) CompileError!void {
pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType) CompileError!void {
const mod = sema.mod;
const ip = &mod.intern_pool;
const owner_decl = mod.declPtr(union_type.decl);
@@ -36500,6 +36567,12 @@ fn structZirInfo(zir: Zir, zir_index: Zir.Inst.Index) struct {
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
 
const captures_len = if (small.has_captures_len) blk: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
 
const fields_len = if (small.has_fields_len) blk: {
const fields_len = zir.extra[extra_index];
extra_index += 1;
@@ -36512,6 +36585,8 @@ fn structZirInfo(zir: Zir, zir_index: Zir.Inst.Index) struct {
break :decls_len decls_len;
} else 0;
 
extra_index += captures_len;
 
// The backing integer cannot be handled until `resolveStructLayout()`.
if (small.has_backing_int) {
const backing_int_body_len = zir.extra[extra_index];
@@ -36532,7 +36607,7 @@ fn structZirInfo(zir: Zir, zir_index: Zir.Inst.Index) struct {
fn semaStructFields(
mod: *Module,
arena: Allocator,
struct_type: InternPool.Key.StructType,
struct_type: InternPool.LoadedStructType,
) CompileError!void {
const gpa = mod.gpa;
const ip = &mod.intern_pool;
@@ -36584,7 +36659,6 @@ fn semaStructFields(
.sema = &sema,
.src_decl = decl_index,
.namespace = namespace_index,
.wip_capture_scope = try mod.createCaptureScope(decl.src_scope),
.instructions = .{},
.inlining = null,
.is_comptime = true,
@@ -36800,7 +36874,7 @@ fn semaStructFields(
fn semaStructFieldInits(
mod: *Module,
arena: Allocator,
struct_type: InternPool.Key.StructType,
struct_type: InternPool.LoadedStructType,
) CompileError!void {
const gpa = mod.gpa;
const ip = &mod.intern_pool;
@@ -36842,7 +36916,6 @@ fn semaStructFieldInits(
.sema = &sema,
.src_decl = decl_index,
.namespace = namespace_index,
.wip_capture_scope = try mod.createCaptureScope(decl.src_scope),
.instructions = .{},
.inlining = null,
.is_comptime = true,
@@ -36952,15 +37025,15 @@ fn semaStructFieldInits(
}
}
 
fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.UnionType) CompileError!void {
fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.LoadedUnionType) CompileError!void {
const tracy = trace(@src());
defer tracy.end();
 
const gpa = mod.gpa;
const ip = &mod.intern_pool;
const decl_index = union_type.decl;
const zir = mod.namespacePtr(union_type.namespace).file_scope.zir;
const zir_index = union_type.zir_index.unwrap().?.resolve(ip);
const zir = mod.namespacePtr(union_type.namespace.unwrap().?).file_scope.zir;
const zir_index = union_type.zir_index.resolve(ip);
const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended;
assert(extended.opcode == .union_decl);
const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);
@@ -36974,6 +37047,12 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.Un
break :blk ty_ref;
} else .none;
 
const captures_len = if (small.has_captures_len) blk: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
 
const body_len = if (small.has_body_len) blk: {
const body_len = zir.extra[extra_index];
extra_index += 1;
@@ -36992,8 +37071,8 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.Un
break :decls_len decls_len;
} else 0;
 
// Skip over decls.
extra_index += decls_len;
// Skip over captures and decls.
extra_index += captures_len + decls_len;
 
const body = zir.bodySlice(extra_index, body_len);
extra_index += body.len;
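// Trailing data of `union_decl` as decoded above, each piece present only
// when the corresponding `small` flag is set:
//     [tag_type: Ref] [captures_len: u32] [body_len: u32] [fields_len: u32]
//     [decls_len: u32] [capture: u32] * captures_len [decl: u32] * decls_len
//     [body: Index] * body_len, followed by the field data.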
@@ -37027,8 +37106,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.Un
.parent = null,
.sema = &sema,
.src_decl = decl_index,
.namespace = union_type.namespace,
.wip_capture_scope = try mod.createCaptureScope(decl.src_scope),
.namespace = union_type.namespace.unwrap().?,
.instructions = .{},
.inlining = null,
.is_comptime = true,
@@ -37079,7 +37157,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.Un
// The provided type is the enum tag type.
union_type.tagTypePtr(ip).* = provided_ty.toIntern();
const enum_type = switch (ip.indexToKey(provided_ty.toIntern())) {
.enum_type => |x| x,
.enum_type => ip.loadEnumType(provided_ty.toIntern()),
else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{provided_ty.fmt(mod)}),
};
// The fields of the union must match the enum exactly.
@@ -37216,7 +37294,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.Un
}
 
if (explicit_tags_seen.len > 0) {
const tag_info = ip.indexToKey(union_type.tagTypePtr(ip).*).enum_type;
const tag_info = ip.loadEnumType(union_type.tagTypePtr(ip).*);
const enum_index = tag_info.nameIndex(ip, field_name) orelse {
const ty_src = mod.fieldSrcLoc(union_type.decl, .{
.index = field_i,
@@ -37327,7 +37405,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.Un
union_type.setFieldAligns(ip, field_aligns.items);
 
if (explicit_tags_seen.len > 0) {
const tag_info = ip.indexToKey(union_type.tagTypePtr(ip).*).enum_type;
const tag_info = ip.loadEnumType(union_type.tagTypePtr(ip).*);
if (tag_info.names.len > fields_len) {
const msg = msg: {
const msg = try sema.errMsg(&block_scope, src, "enum field(s) missing in union", .{});
@@ -37348,7 +37426,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.Un
const enum_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals.keys(), mod.declPtr(union_type.decl));
union_type.tagTypePtr(ip).* = enum_ty;
} else {
const enum_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, union_type.decl.toOptional());
const enum_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, mod.declPtr(union_type.decl));
union_type.tagTypePtr(ip).* = enum_ty;
}
}
@@ -37365,16 +37443,16 @@ fn generateUnionTagTypeNumbered(
block: *Block,
enum_field_names: []const InternPool.NullTerminatedString,
enum_field_vals: []const InternPool.Index,
decl: *Module.Decl,
union_owner_decl: *Module.Decl,
) !InternPool.Index {
const mod = sema.mod;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
 
const src_decl = mod.declPtr(block.src_decl);
const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node, block.wip_capture_scope);
const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node);
errdefer mod.destroyDecl(new_decl_index);
const fqn = try decl.fullyQualifiedName(mod);
const fqn = try union_owner_decl.fullyQualifiedName(mod);
const name = try ip.getOrPutStringFmt(gpa, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(ip)});
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, .{
.ty = Type.noreturn,
@@ -37386,9 +37464,9 @@ fn generateUnionTagTypeNumbered(
new_decl.owns_tv = true;
new_decl.name_fully_qualified = true;
 
const enum_ty = try ip.getEnum(gpa, .{
const enum_ty = try ip.getGeneratedTagEnumType(gpa, .{
.decl = new_decl_index,
.namespace = .none,
.owner_union_ty = union_owner_decl.val.toIntern(),
.tag_ty = if (enum_field_vals.len == 0)
(try mod.intType(.unsigned, 0)).toIntern()
else
@@ -37396,7 +37474,6 @@ fn generateUnionTagTypeNumbered(
.names = enum_field_names,
.values = enum_field_vals,
.tag_mode = .explicit,
.zir_index = .none,
});
 
new_decl.ty = Type.type;
@@ -37410,22 +37487,16 @@ fn generateUnionTagTypeSimple(
sema: *Sema,
block: *Block,
enum_field_names: []const InternPool.NullTerminatedString,
maybe_decl_index: InternPool.OptionalDeclIndex,
union_owner_decl: *Module.Decl,
) !InternPool.Index {
const mod = sema.mod;
const ip = &mod.intern_pool;
const gpa = sema.gpa;
 
const new_decl_index = new_decl_index: {
const decl_index = maybe_decl_index.unwrap() orelse {
break :new_decl_index try mod.createAnonymousDecl(block, .{
.ty = Type.noreturn,
.val = Value.@"unreachable",
});
};
const fqn = try mod.declPtr(decl_index).fullyQualifiedName(mod);
const fqn = try union_owner_decl.fullyQualifiedName(mod);
const src_decl = mod.declPtr(block.src_decl);
const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node, block.wip_capture_scope);
const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node);
errdefer mod.destroyDecl(new_decl_index);
const name = try ip.getOrPutStringFmt(gpa, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(ip)});
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, .{
@@ -37437,9 +37508,9 @@ fn generateUnionTagTypeSimple(
};
errdefer mod.abortAnonDecl(new_decl_index);
 
const enum_ty = try ip.getEnum(gpa, .{
const enum_ty = try ip.getGeneratedTagEnumType(gpa, .{
.decl = new_decl_index,
.namespace = .none,
.owner_union_ty = union_owner_decl.val.toIntern(),
.tag_ty = if (enum_field_names.len == 0)
(try mod.intType(.unsigned, 0)).toIntern()
else
@@ -37447,7 +37518,6 @@ fn generateUnionTagTypeSimple(
.names = enum_field_names,
.values = &.{},
.tag_mode = .auto,
.zir_index = .none,
});
 
const new_decl = mod.declPtr(new_decl_index);
@@ -37460,7 +37530,6 @@ fn generateUnionTagTypeSimple(
}
 
fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
const src = LazySrcLoc.nodeOffset(0);
 
@@ -37469,7 +37538,6 @@ fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref {
.sema = sema,
.src_decl = sema.owner_decl_index,
.namespace = sema.owner_decl.src_namespace,
.wip_capture_scope = try mod.createCaptureScope(sema.owner_decl.src_scope),
.instructions = .{},
.inlining = null,
.is_comptime = true,
@@ -37510,7 +37578,6 @@ fn getBuiltinDecl(sema: *Sema, block: *Block, name: []const u8) CompileError!Int
}
 
fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type {
const mod = sema.mod;
const ty_inst = try sema.getBuiltin(name);
 
var block: Block = .{
@@ -37518,7 +37585,6 @@ fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type {
.sema = sema,
.src_decl = sema.owner_decl_index,
.namespace = sema.owner_decl.src_namespace,
.wip_capture_scope = try mod.createCaptureScope(sema.owner_decl.src_scope),
.instructions = .{},
.inlining = null,
.is_comptime = true,
@@ -37636,6 +37702,8 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
=> unreachable,
 
_ => switch (ip.items.items(.tag)[@intFromEnum(ty.toIntern())]) {
.removed => unreachable,
 
.type_int_signed, // i0 handled above
.type_int_unsigned, // u0 handled above
.type_pointer,
@@ -37713,7 +37781,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.type_enum_explicit,
.type_enum_nonexhaustive,
.type_struct,
.type_struct_ns,
.type_struct_anon,
.type_struct_packed,
.type_struct_packed_inits,
@@ -37736,8 +37803,9 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
return null;
},
 
.struct_type => |struct_type| {
try sema.resolveTypeFields(ty);
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type);
 
if (struct_type.field_types.len == 0) {
// In this case the struct has no fields at all and
@@ -37795,10 +37863,10 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
} })));
},
 
.union_type => |union_type| {
try sema.resolveTypeFields(ty);
const union_obj = ip.loadUnionType(union_type);
const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.enum_tag_ty))) orelse
.union_type => {
const union_obj = ip.loadUnionType(ty.toIntern());
try sema.resolveTypeFieldsUnion(ty, union_obj);
const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.tagTypePtr(ip).*))) orelse
return null;
if (union_obj.field_types.len == 0) {
const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
@@ -37825,39 +37893,42 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
return Value.fromInterned(only);
},
 
.enum_type => |enum_type| switch (enum_type.tag_mode) {
.nonexhaustive => {
if (enum_type.tag_ty == .comptime_int_type) return null;
.enum_type => {
const enum_type = ip.loadEnumType(ty.toIntern());
switch (enum_type.tag_mode) {
.nonexhaustive => {
if (enum_type.tag_ty == .comptime_int_type) return null;
 
if (try sema.typeHasOnePossibleValue(Type.fromInterned(enum_type.tag_ty))) |int_opv| {
const only = try mod.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = int_opv.toIntern(),
} });
return Value.fromInterned(only);
}
if (try sema.typeHasOnePossibleValue(Type.fromInterned(enum_type.tag_ty))) |int_opv| {
const only = try mod.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = int_opv.toIntern(),
} });
return Value.fromInterned(only);
}
 
return null;
},
.auto, .explicit => {
if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null;
return null;
},
.auto, .explicit => {
if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null;
 
return Value.fromInterned(switch (enum_type.names.len) {
0 => try mod.intern(.{ .empty_enum_value = ty.toIntern() }),
1 => try mod.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = if (enum_type.values.len == 0)
(try mod.intValue(Type.fromInterned(enum_type.tag_ty), 0)).toIntern()
else
try mod.intern_pool.getCoercedInts(
mod.gpa,
mod.intern_pool.indexToKey(enum_type.values.get(ip)[0]).int,
enum_type.tag_ty,
),
} }),
else => return null,
});
},
return Value.fromInterned(switch (enum_type.names.len) {
0 => try mod.intern(.{ .empty_enum_value = ty.toIntern() }),
1 => try mod.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = if (enum_type.values.len == 0)
(try mod.intValue(Type.fromInterned(enum_type.tag_ty), 0)).toIntern()
else
try mod.intern_pool.getCoercedInts(
mod.gpa,
mod.intern_pool.indexToKey(enum_type.values.get(ip)[0]).int,
enum_type.tag_ty,
),
} }),
else => return null,
});
},
}
},
 
else => unreachable,
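The rewritten `enum_type` case makes the one-possible-value rule explicit: a non-exhaustive enum defers to its tag type, while an exhaustive enum has a single value only when its tag type has no runtime bits and it declares at most one name. A quick illustration of the rule in ordinary Zig (names here are illustrative, not from this diff):

const OnlyTag = enum { only }; // one name, u0 tag: single possible value
const Unit = struct {}; // no fields: single possible value (see the struct case above)
const Two = enum { a, b }; // u1 tag has runtime bits: no single value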
@@ -38189,7 +38260,7 @@ fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!Alignment {
 
/// Not valid to call for packed unions.
/// Keep implementation in sync with `Module.unionFieldNormalAlignment`.
fn unionFieldAlignment(sema: *Sema, u: InternPool.UnionType, field_index: u32) !Alignment {
fn unionFieldAlignment(sema: *Sema, u: InternPool.LoadedUnionType, field_index: u32) !Alignment {
const mod = sema.mod;
const ip = &mod.intern_pool;
const field_align = u.fieldAlign(ip, field_index);
@@ -38237,7 +38308,7 @@ fn unionFieldIndex(
const ip = &mod.intern_pool;
try sema.resolveTypeFields(union_ty);
const union_obj = mod.typeToUnion(union_ty).?;
const field_index = union_obj.nameIndex(ip, field_name) orelse
const field_index = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse
return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
return @intCast(field_index);
}
@@ -38274,7 +38345,7 @@ fn anonStructFieldIndex(
.anon_struct_type => |anon_struct_type| for (anon_struct_type.names.get(ip), 0..) |name, i| {
if (name == field_name) return @intCast(i);
},
.struct_type => |struct_type| if (struct_type.nameIndex(ip, field_name)) |i| return i,
.struct_type => if (ip.loadStructType(struct_ty.toIntern()).nameIndex(ip, field_name)) |i| return i,
else => unreachable,
}
return sema.fail(block, field_src, "no field named '{}' in anonymous struct '{}'", .{
@@ -38710,7 +38781,7 @@ fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool {
/// Asserts the type is an enum.
fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool {
const mod = sema.mod;
const enum_type = mod.intern_pool.indexToKey(ty.toIntern()).enum_type;
const enum_type = mod.intern_pool.loadEnumType(ty.toIntern());
assert(enum_type.tag_mode != .nonexhaustive);
// The `tagValueIndex` function call below relies on the type being the integer tag type.
// `getCoerced` assumes the value will fit the new type.
 
src/TypedValue.zig added: 4090, removed: 2946, total 1144
@@ -89,7 +89,7 @@ pub fn print(
 
if (payload.tag) |tag| {
try print(.{
.ty = Type.fromInterned(ip.indexToKey(ty.toIntern()).union_type.enum_tag_ty),
.ty = Type.fromInterned(ip.loadUnionType(ty.toIntern()).enum_tag_ty),
.val = tag,
}, writer, level - 1, mod);
try writer.writeAll(" = ");
@@ -247,7 +247,7 @@ pub fn print(
if (level == 0) {
return writer.writeAll("(enum)");
}
const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
const enum_type = ip.loadEnumType(ty.toIntern());
if (enum_type.tagValueIndex(ip, val.toIntern())) |tag_index| {
try writer.print(".{i}", .{enum_type.names.get(ip)[tag_index].fmt(ip)});
return;
@@ -398,7 +398,7 @@ pub fn print(
}
},
.Union => {
const field_name = mod.typeToUnion(container_ty).?.field_names.get(ip)[@intCast(field.index)];
const field_name = mod.typeToUnion(container_ty).?.loadTagType(ip).names.get(ip)[@intCast(field.index)];
try writer.print(".{i}", .{field_name.fmt(ip)});
},
.Pointer => {
@@ -482,11 +482,7 @@ fn printAggregate(
for (0..max_len) |i| {
if (i != 0) try writer.writeAll(", ");
 
const field_name = switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |x| x.fieldName(ip, i),
.anon_struct_type => |x| if (x.isTuple()) .none else x.names.get(ip)[i].toOptional(),
else => unreachable,
};
const field_name = ty.structFieldName(@intCast(i), mod);
 
if (field_name.unwrap()) |name| try writer.print(".{} = ", .{name.fmt(ip)});
try print(.{
 
src/Value.zig added: 4090, removed: 2946, total 1144
@@ -424,22 +424,28 @@ pub fn toType(self: Value) Type {
 
pub fn intFromEnum(val: Value, ty: Type, mod: *Module) Allocator.Error!Value {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ip.typeOf(val.toIntern()))) {
const enum_ty = ip.typeOf(val.toIntern());
return switch (ip.indexToKey(enum_ty)) {
// Assume it is already an integer and return it directly.
.simple_type, .int_type => val,
.enum_literal => |enum_literal| {
const field_index = ty.enumFieldIndex(enum_literal, mod).?;
return switch (ip.indexToKey(ty.toIntern())) {
switch (ip.indexToKey(ty.toIntern())) {
// Assume it is already an integer and return it directly.
.simple_type, .int_type => val,
.enum_type => |enum_type| if (enum_type.values.len != 0)
Value.fromInterned(enum_type.values.get(ip)[field_index])
else // Field index and integer values are the same.
mod.intValue(Type.fromInterned(enum_type.tag_ty), field_index),
.simple_type, .int_type => return val,
.enum_type => {
const enum_type = ip.loadEnumType(ty.toIntern());
if (enum_type.values.len != 0) {
return Value.fromInterned(enum_type.values.get(ip)[field_index]);
} else {
// Field index and integer values are the same.
return mod.intValue(Type.fromInterned(enum_type.tag_ty), field_index);
}
},
else => unreachable,
};
}
},
.enum_type => |enum_type| try mod.getCoerced(val, Type.fromInterned(enum_type.tag_ty)),
.enum_type => try mod.getCoerced(val, Type.fromInterned(ip.loadEnumType(enum_ty).tag_ty)),
else => unreachable,
};
}
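For enum literals, the branch above resolves the literal to a field index and then maps it to an integer: through the enum's explicit `values` array when one was declared, otherwise the field index itself is the tag value. The same rule, observable in ordinary Zig (a minimal sketch, not code from this commit):

const std = @import("std");

const Auto = enum { a, b, c }; // no explicit values: tag value == field index
const Explicit = enum(u8) { a = 3, b = 7 };

test "tag values follow declared values, else the field index" {
    try std.testing.expectEqual(@as(u2, 1), @intFromEnum(Auto.b));
    try std.testing.expectEqual(@as(u8, 7), @intFromEnum(Explicit.b));
}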
@@ -832,7 +838,7 @@ pub fn writeToPackedMemory(
}
},
.Struct => {
const struct_type = ip.indexToKey(ty.toIntern()).struct_type;
const struct_type = ip.loadStructType(ty.toIntern());
// Sema is supposed to have emitted a compile error already in the case of Auto,
// and Extern is handled in non-packed writeToMemory.
assert(struct_type.layout == .Packed);
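`writeToPackedMemory` can assume a packed layout here because a packed struct is defined to be its backing integer, with fields packed LSB-first in declaration order. A self-contained check of that layout (standard Zig semantics, not code from this commit):

const std = @import("std");

const P = packed struct { lo: u3, hi: u5 }; // backing integer u8; `lo` is bits 0..2

test "packed struct fields pack LSB-first" {
    const p = P{ .lo = 0b101, .hi = 0b10010 };
    try std.testing.expectEqual(@as(u8, 0b10010_101), @as(u8, @bitCast(p)));
}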
 
src/arch/wasm/CodeGen.zig added: 4090, removed: 2946, total 1144
@@ -3354,7 +3354,8 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
val.writeToMemory(ty, mod, &buf) catch unreachable;
return func.storeSimdImmd(buf);
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
// non-packed structs are not handled in this function because they
// are by-ref types.
assert(struct_type.layout == .Packed);
@@ -5411,7 +5412,7 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const layout = union_ty.unionGetLayout(mod);
const union_obj = mod.typeToUnion(union_ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
const field_name = union_obj.field_names.get(ip)[extra.field_index];
const field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
 
const tag_int = blk: {
const tag_ty = union_ty.unionTagTypeHypothetical(mod);
 
src/arch/wasm/abi.zig added: 4090, removed: 2946, total 1144
@@ -76,7 +76,7 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class {
}
const layout = ty.unionGetLayout(mod);
assert(layout.tag_size == 0);
if (union_obj.field_names.len > 1) return memory;
if (union_obj.field_types.len > 1) return memory;
const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
return classifyType(first_field_ty, mod);
},
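// Recurring migration in this commit: `LoadedUnionType` no longer carries
// `field_names`, so counts come from `field_types.len` (as here) and names
// are fetched through the generated tag type:
//     union_obj.field_names.get(ip)[i]  =>  union_obj.loadTagType(ip).names.get(ip)[i]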
 
src/arch/x86_64/CodeGen.zig added: 4090, removed: 2946, total 1144
@@ -18183,7 +18183,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
const dst_mcv = try self.allocRegOrMem(inst, false);
 
const union_obj = mod.typeToUnion(union_ty).?;
const field_name = union_obj.field_names.get(ip)[extra.field_index];
const field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
const tag_ty = Type.fromInterned(union_obj.enum_tag_ty);
const field_index = tag_ty.enumFieldIndex(field_name, mod).?;
const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
 
src/codegen.zig added: 4090, removed: 2946, total 1144
@@ -510,88 +510,91 @@ pub fn generateSymbol(
}
}
},
.struct_type => |struct_type| switch (struct_type.layout) {
.Packed => {
const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse
return error.Overflow;
const current_pos = code.items.len;
try code.resize(current_pos + abi_size);
var bits: u16 = 0;
.struct_type => {
const struct_type = ip.loadStructType(typed_value.ty.toIntern());
switch (struct_type.layout) {
.Packed => {
const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse
return error.Overflow;
const current_pos = code.items.len;
try code.resize(current_pos + abi_size);
var bits: u16 = 0;
 
for (struct_type.field_types.get(ip), 0..) |field_ty, index| {
const field_val = switch (aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty,
.storage = .{ .u64 = bytes[index] },
} }),
.elems => |elems| elems[index],
.repeated_elem => |elem| elem,
};
for (struct_type.field_types.get(ip), 0..) |field_ty, index| {
const field_val = switch (aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty,
.storage = .{ .u64 = bytes[index] },
} }),
.elems => |elems| elems[index],
.repeated_elem => |elem| elem,
};
 
// pointer may point to a decl which must be marked used
// but can also result in a relocation. Therefore we handle those separately.
if (Type.fromInterned(field_ty).zigTypeTag(mod) == .Pointer) {
const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(mod)) orelse
return error.Overflow;
var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
defer tmp_list.deinit();
switch (try generateSymbol(bin_file, src_loc, .{
.ty = Type.fromInterned(field_ty),
.val = Value.fromInterned(field_val),
}, &tmp_list, debug_output, reloc_info)) {
.ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items),
.fail => |em| return Result{ .fail = em },
}
} else {
Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), mod, code.items[current_pos..], bits) catch unreachable;
}
bits += @as(u16, @intCast(Type.fromInterned(field_ty).bitSize(mod)));
}
},
.Auto, .Extern => {
const struct_begin = code.items.len;
const field_types = struct_type.field_types.get(ip);
const offsets = struct_type.offsets.get(ip);
 
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
const field_ty = field_types[field_index];
if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
 
const field_val = switch (ip.indexToKey(typed_value.val.toIntern()).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty,
.storage = .{ .u64 = bytes[field_index] },
} }),
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
};
 
const padding = math.cast(
usize,
offsets[field_index] - (code.items.len - struct_begin),
) orelse return error.Overflow;
if (padding > 0) try code.appendNTimes(0, padding);
 
// pointer may point to a decl which must be marked used
// but can also result in a relocation. Therefore we handle those separately.
if (Type.fromInterned(field_ty).zigTypeTag(mod) == .Pointer) {
const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(mod)) orelse
return error.Overflow;
var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
defer tmp_list.deinit();
switch (try generateSymbol(bin_file, src_loc, .{
.ty = Type.fromInterned(field_ty),
.val = Value.fromInterned(field_val),
}, &tmp_list, debug_output, reloc_info)) {
.ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items),
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
} else {
Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), mod, code.items[current_pos..], bits) catch unreachable;
}
bits += @as(u16, @intCast(Type.fromInterned(field_ty).bitSize(mod)));
}
},
.Auto, .Extern => {
const struct_begin = code.items.len;
const field_types = struct_type.field_types.get(ip);
const offsets = struct_type.offsets.get(ip);
 
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
const field_ty = field_types[field_index];
if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
 
const field_val = switch (ip.indexToKey(typed_value.val.toIntern()).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty,
.storage = .{ .u64 = bytes[field_index] },
} }),
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
};
const size = struct_type.size(ip).*;
const alignment = struct_type.flagsPtr(ip).alignment.toByteUnitsOptional().?;
 
const padding = math.cast(
usize,
offsets[field_index] - (code.items.len - struct_begin),
std.mem.alignForward(u64, size, @max(alignment, 1)) -
(code.items.len - struct_begin),
) orelse return error.Overflow;
if (padding > 0) try code.appendNTimes(0, padding);
 
switch (try generateSymbol(bin_file, src_loc, .{
.ty = Type.fromInterned(field_ty),
.val = Value.fromInterned(field_val),
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
}
 
const size = struct_type.size(ip).*;
const alignment = struct_type.flagsPtr(ip).alignment.toByteUnitsOptional().?;
 
const padding = math.cast(
usize,
std.mem.alignForward(u64, size, @max(alignment, 1)) -
(code.items.len - struct_begin),
) orelse return error.Overflow;
if (padding > 0) try code.appendNTimes(0, padding);
},
},
}
},
else => unreachable,
},
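Two paddings are in play in the `.Auto`/`.Extern` branch above: padding up to each field's offset while emitting, then tail padding out to the ABI-aligned size via `std.mem.alignForward`. The tail computation, isolated (illustrative numbers):

const std = @import("std");

test "tail padding rounds the emitted size up to the struct's alignment" {
    const size: u64 = 13; // bytes emitted for all runtime fields
    const alignment: u64 = 8;
    const padded = std.mem.alignForward(u64, size, @max(alignment, 1));
    try std.testing.expectEqual(@as(u64, 16), padded);
    try std.testing.expectEqual(@as(u64, 3), padded - size); // zero bytes appended
}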
 
src/codegen/c.zig added: 4090, removed: 2946, total 1144
@@ -1376,112 +1376,24 @@ pub const DeclGen = struct {
}
try writer.writeByte('}');
},
.struct_type => |struct_type| switch (struct_type.layout) {
.Auto, .Extern => {
if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderType(writer, ty);
try writer.writeByte(')');
}
 
try writer.writeByte('{');
var empty = true;
for (0..struct_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
if (struct_type.fieldIsComptime(ip, field_index)) continue;
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
if (!empty) try writer.writeByte(',');
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes[field_index] },
} }),
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
};
try dg.renderValue(writer, field_ty, Value.fromInterned(field_val), initializer_type);
 
empty = false;
}
try writer.writeByte('}');
},
.Packed => {
const int_info = ty.intInfo(mod);
 
const bits = Type.smallestUnsignedBits(int_info.bits - 1);
const bit_offset_ty = try mod.intType(.unsigned, bits);
 
var bit_offset: u64 = 0;
var eff_num_fields: usize = 0;
 
for (0..struct_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
eff_num_fields += 1;
}
 
if (eff_num_fields == 0) {
try writer.writeByte('(');
try dg.renderValue(writer, ty, Value.undef, initializer_type);
try writer.writeByte(')');
} else if (ty.bitSize(mod) > 64) {
// zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
var num_or = eff_num_fields - 1;
while (num_or > 0) : (num_or -= 1) {
try writer.writeAll("zig_or_");
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
}
 
var eff_index: usize = 0;
var needs_closing_paren = false;
for (0..struct_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes[field_index] },
} }),
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
};
const cast_context = IntCastContext{ .value = .{ .value = Value.fromInterned(field_val) } };
if (bit_offset != 0) {
try writer.writeAll("zig_shl_");
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument);
try writer.writeAll(", ");
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
try writer.writeByte(')');
} else {
try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument);
}
 
if (needs_closing_paren) try writer.writeByte(')');
if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
 
bit_offset += field_ty.bitSize(mod);
needs_closing_paren = true;
eff_index += 1;
}
} else {
try writer.writeByte('(');
// a << a_off | b << b_off | c << c_off
var empty = true;
for (0..struct_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
if (!empty) try writer.writeAll(" | ");
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
switch (struct_type.layout) {
.Auto, .Extern => {
if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderType(writer, ty);
try writer.writeByte(')');
}
 
try writer.writeByte('{');
var empty = true;
for (0..struct_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
if (struct_type.fieldIsComptime(ip, field_index)) continue;
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
if (!empty) try writer.writeByte(',');
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
@@ -1490,22 +1402,113 @@ pub const DeclGen = struct {
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
};
try dg.renderValue(writer, field_ty, Value.fromInterned(field_val), initializer_type);
 
if (bit_offset != 0) {
try dg.renderValue(writer, field_ty, Value.fromInterned(field_val), .Other);
try writer.writeAll(" << ");
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
} else {
try dg.renderValue(writer, field_ty, Value.fromInterned(field_val), .Other);
}
 
bit_offset += field_ty.bitSize(mod);
empty = false;
}
try writer.writeByte(')');
}
},
try writer.writeByte('}');
},
.Packed => {
const int_info = ty.intInfo(mod);
 
const bits = Type.smallestUnsignedBits(int_info.bits - 1);
const bit_offset_ty = try mod.intType(.unsigned, bits);
 
var bit_offset: u64 = 0;
var eff_num_fields: usize = 0;
 
for (0..struct_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
eff_num_fields += 1;
}
 
if (eff_num_fields == 0) {
try writer.writeByte('(');
try dg.renderValue(writer, ty, Value.undef, initializer_type);
try writer.writeByte(')');
} else if (ty.bitSize(mod) > 64) {
// zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
var num_or = eff_num_fields - 1;
while (num_or > 0) : (num_or -= 1) {
try writer.writeAll("zig_or_");
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
}
 
var eff_index: usize = 0;
var needs_closing_paren = false;
for (0..struct_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes[field_index] },
} }),
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
};
const cast_context = IntCastContext{ .value = .{ .value = Value.fromInterned(field_val) } };
if (bit_offset != 0) {
try writer.writeAll("zig_shl_");
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument);
try writer.writeAll(", ");
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
try writer.writeByte(')');
} else {
try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument);
}
 
if (needs_closing_paren) try writer.writeByte(')');
if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
 
bit_offset += field_ty.bitSize(mod);
needs_closing_paren = true;
eff_index += 1;
}
} else {
try writer.writeByte('(');
// a << a_off | b << b_off | c << c_off
var empty = true;
for (0..struct_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
if (!empty) try writer.writeAll(" | ");
try writer.writeByte('(');
try dg.renderType(writer, ty);
try writer.writeByte(')');
 
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes[field_index] },
} }),
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
};
 
if (bit_offset != 0) {
try dg.renderValue(writer, field_ty, Value.fromInterned(field_val), .Other);
try writer.writeAll(" << ");
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
} else {
try dg.renderValue(writer, field_ty, Value.fromInterned(field_val), .Other);
}
 
bit_offset += field_ty.bitSize(mod);
empty = false;
}
try writer.writeByte(')');
}
},
}
},
else => unreachable,
},
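For packed structs the C backend renders the value as its backing integer: up to 64 bits it emits a plain shifted-or expression, and wider values nest the `zig_or_*`/`zig_shl_*` helpers seen above, since standard C lacks 128-bit literals. The narrow form corresponds to the following sketch (hypothetical field widths):

// (T)a | ((T)b << b_off) | ((T)c << c_off), offsets being running bit sums;
// zero-bit fields are skipped and contribute no term.
fn packedAsInt(a: u3, b: u5, c: u8) u16 {
    return @as(u16, a) | (@as(u16, b) << 3) | (@as(u16, c) << 8);
}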
@@ -1547,7 +1550,7 @@ pub const DeclGen = struct {
 
const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
const field_name = union_obj.field_names.get(ip)[field_index];
const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
if (union_obj.getLayout(ip) == .Packed) {
if (field_ty.hasRuntimeBits(mod)) {
if (field_ty.isPtrAtRuntime(mod)) {
@@ -5502,7 +5505,7 @@ fn fieldLocation(
.{ .field = .{ .identifier = "payload" } }
else
.begin;
const field_name = union_obj.field_names.get(ip)[field_index];
const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
return .{ .field = if (container_ty.unionTagTypeSafety(mod)) |_|
.{ .payload_identifier = ip.stringToSlice(field_name) }
else
@@ -5735,8 +5738,8 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
else
.{ .identifier = ip.stringToSlice(struct_ty.legacyStructFieldName(extra.field_index, mod)) },
 
.union_type => |union_type| field_name: {
const union_obj = ip.loadUnionType(union_type);
.union_type => field_name: {
const union_obj = ip.loadUnionType(struct_ty.toIntern());
if (union_obj.flagsPtr(ip).layout == .Packed) {
const operand_lval = if (struct_byval == .constant) blk: {
const operand_local = try f.allocLocal(inst, struct_ty);
@@ -5762,8 +5765,8 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
 
return local;
} else {
const name = union_obj.field_names.get(ip)[extra.field_index];
break :field_name if (union_type.hasTag(ip)) .{
const name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
break :field_name if (union_obj.hasTag(ip)) .{
.payload_identifier = ip.stringToSlice(name),
} else .{
.identifier = ip.stringToSlice(name),
@@ -7171,7 +7174,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
 
const union_ty = f.typeOfIndex(inst);
const union_obj = mod.typeToUnion(union_ty).?;
const field_name = union_obj.field_names.get(ip)[extra.field_index];
const field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
const payload_ty = f.typeOf(extra.init);
const payload = try f.resolveInst(extra.init);
try reap(f, inst, &.{extra.init});
 
src/codegen/c/type.zig added: 4090, removed: 2946, total 1144
@@ -1507,7 +1507,7 @@ pub const CType = extern union {
if (lookup.isMutable()) {
for (0..switch (zig_ty_tag) {
.Struct => ty.structFieldCount(mod),
.Union => mod.typeToUnion(ty).?.field_names.len,
.Union => mod.typeToUnion(ty).?.field_types.len,
else => unreachable,
}) |field_i| {
const field_ty = ty.structFieldType(field_i, mod);
@@ -1589,7 +1589,7 @@ pub const CType = extern union {
var is_packed = false;
for (0..switch (zig_ty_tag) {
.Struct => ty.structFieldCount(mod),
.Union => mod.typeToUnion(ty).?.field_names.len,
.Union => mod.typeToUnion(ty).?.field_types.len,
else => unreachable,
}) |field_i| {
const field_ty = ty.structFieldType(field_i, mod);
@@ -1940,7 +1940,7 @@ pub const CType = extern union {
const zig_ty_tag = ty.zigTypeTag(mod);
const fields_len = switch (zig_ty_tag) {
.Struct => ty.structFieldCount(mod),
.Union => mod.typeToUnion(ty).?.field_names.len,
.Union => mod.typeToUnion(ty).?.field_types.len,
else => unreachable,
};
 
@@ -1967,7 +1967,7 @@ pub const CType = extern union {
else
arena.dupeZ(u8, ip.stringToSlice(switch (zig_ty_tag) {
.Struct => ty.legacyStructFieldName(field_i, mod),
.Union => mod.typeToUnion(ty).?.field_names.get(ip)[field_i],
.Union => ip.loadUnionType(ty.toIntern()).loadTagType(ip).names.get(ip)[field_i],
else => unreachable,
})),
.type = store.set.typeToIndex(field_ty, mod, switch (kind) {
@@ -2097,7 +2097,7 @@ pub const CType = extern union {
var c_field_i: usize = 0;
for (0..switch (zig_ty_tag) {
.Struct => ty.structFieldCount(mod),
.Union => mod.typeToUnion(ty).?.field_names.len,
.Union => mod.typeToUnion(ty).?.field_types.len,
else => unreachable,
}) |field_i_usize| {
const field_i: u32 = @intCast(field_i_usize);
@@ -2120,7 +2120,7 @@ pub const CType = extern union {
else
ip.stringToSlice(switch (zig_ty_tag) {
.Struct => ty.legacyStructFieldName(field_i, mod),
.Union => mod.typeToUnion(ty).?.field_names.get(ip)[field_i],
.Union => ip.loadUnionType(ty.toIntern()).loadTagType(ip).names.get(ip)[field_i],
else => unreachable,
}),
mem.span(c_field.name),
@@ -2226,7 +2226,7 @@ pub const CType = extern union {
const zig_ty_tag = ty.zigTypeTag(mod);
for (0..switch (ty.zigTypeTag(mod)) {
.Struct => ty.structFieldCount(mod),
.Union => mod.typeToUnion(ty).?.field_names.len,
.Union => mod.typeToUnion(ty).?.field_types.len,
else => unreachable,
}) |field_i_usize| {
const field_i: u32 = @intCast(field_i_usize);
@@ -2245,7 +2245,7 @@ pub const CType = extern union {
else
mod.intern_pool.stringToSlice(switch (zig_ty_tag) {
.Struct => ty.legacyStructFieldName(field_i, mod),
.Union => mod.typeToUnion(ty).?.field_names.get(ip)[field_i],
.Union => ip.loadUnionType(ty.toIntern()).loadTagType(ip).names.get(ip)[field_i],
else => unreachable,
}));
autoHash(hasher, AlignAs.fieldAlign(ty, field_i, mod).@"align");
 
src/codegen/llvm.zig added: 4090, removed: 2946, total 1144
@@ -1997,7 +1997,7 @@ pub const Object = struct {
return debug_enum_type;
}
 
const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
const enum_type = ip.loadEnumType(ty.toIntern());
 
const enumerators = try gpa.alloc(Builder.Metadata, enum_type.names.len);
defer gpa.free(enumerators);
@@ -2507,8 +2507,8 @@ pub const Object = struct {
try o.debug_type_map.put(gpa, ty, debug_struct_type);
return debug_struct_type;
},
.struct_type => |struct_type| {
if (!struct_type.haveFieldTypes(ip)) {
.struct_type => {
if (!ip.loadStructType(ty.toIntern()).haveFieldTypes(ip)) {
// This can happen if a struct type makes it all the way to
// flush() without ever being instantiated or referenced (even
// via pointer). The only reason we are hearing about it now is
@@ -2597,15 +2597,14 @@ pub const Object = struct {
const name = try o.allocTypeName(ty);
defer gpa.free(name);
 
const union_type = ip.indexToKey(ty.toIntern()).union_type;
const union_type = ip.loadUnionType(ty.toIntern());
if (!union_type.haveFieldTypes(ip) or !ty.hasRuntimeBitsIgnoreComptime(mod)) {
const debug_union_type = try o.makeEmptyNamespaceDebugType(owner_decl_index);
try o.debug_type_map.put(gpa, ty, debug_union_type);
return debug_union_type;
}
 
const union_obj = ip.loadUnionType(union_type);
const layout = mod.getUnionLayout(union_obj);
const layout = mod.getUnionLayout(union_type);
 
const debug_fwd_ref = try o.builder.debugForwardReference();
 
@@ -2622,7 +2621,7 @@ pub const Object = struct {
ty.abiSize(mod) * 8,
ty.abiAlignment(mod).toByteUnits(0) * 8,
try o.builder.debugTuple(
&.{try o.lowerDebugType(Type.fromInterned(union_obj.enum_tag_ty))},
&.{try o.lowerDebugType(Type.fromInterned(union_type.enum_tag_ty))},
),
);
 
@@ -2636,21 +2635,23 @@ pub const Object = struct {
var fields: std.ArrayListUnmanaged(Builder.Metadata) = .{};
defer fields.deinit(gpa);
 
try fields.ensureUnusedCapacity(gpa, union_obj.field_names.len);
try fields.ensureUnusedCapacity(gpa, union_type.loadTagType(ip).names.len);
 
const debug_union_fwd_ref = if (layout.tag_size == 0)
debug_fwd_ref
else
try o.builder.debugForwardReference();
 
for (0..union_obj.field_names.len) |field_index| {
const field_ty = union_obj.field_types.get(ip)[field_index];
const tag_type = union_type.loadTagType(ip);
 
for (0..tag_type.names.len) |field_index| {
const field_ty = union_type.field_types.get(ip)[field_index];
if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;
 
const field_size = Type.fromInterned(field_ty).abiSize(mod);
const field_align = mod.unionFieldNormalAlignment(union_obj, @intCast(field_index));
const field_align = mod.unionFieldNormalAlignment(union_type, @intCast(field_index));
 
const field_name = union_obj.field_names.get(ip)[field_index];
const field_name = tag_type.names.get(ip)[field_index];
fields.appendAssumeCapacity(try o.builder.debugMemberType(
try o.builder.metadataString(ip.stringToSlice(field_name)),
.none, // File
@@ -2706,7 +2707,7 @@ pub const Object = struct {
.none, // File
debug_fwd_ref,
0, // Line
try o.lowerDebugType(Type.fromInterned(union_obj.enum_tag_ty)),
try o.lowerDebugType(Type.fromInterned(union_type.enum_tag_ty)),
layout.tag_size * 8,
layout.tag_align.toByteUnits(0) * 8,
tag_offset * 8,
@@ -3321,9 +3322,11 @@ pub const Object = struct {
return o.builder.structType(.normal, fields[0..fields_len]);
},
.simple_type => unreachable,
.struct_type => |struct_type| {
.struct_type => {
if (o.type_map.get(t.toIntern())) |value| return value;
 
const struct_type = ip.loadStructType(t.toIntern());
 
if (struct_type.layout == .Packed) {
const int_ty = try o.lowerType(Type.fromInterned(struct_type.backingIntType(ip).*));
try o.type_map.put(o.gpa, t.toIntern(), int_ty);
@@ -3468,10 +3471,10 @@ pub const Object = struct {
}
return o.builder.structType(.normal, llvm_field_types.items);
},
.union_type => |union_type| {
.union_type => {
if (o.type_map.get(t.toIntern())) |value| return value;
 
const union_obj = ip.loadUnionType(union_type);
const union_obj = ip.loadUnionType(t.toIntern());
const layout = mod.getUnionLayout(union_obj);
 
if (union_obj.flagsPtr(ip).layout == .Packed) {
@@ -3545,17 +3548,16 @@ pub const Object = struct {
);
return ty;
},
.opaque_type => |opaque_type| {
.opaque_type => {
const gop = try o.type_map.getOrPut(o.gpa, t.toIntern());
if (!gop.found_existing) {
const name = try o.builder.string(ip.stringToSlice(
try mod.opaqueFullyQualifiedName(opaque_type),
));
const decl = mod.declPtr(ip.loadOpaqueType(t.toIntern()).decl);
const name = try o.builder.string(ip.stringToSlice(try decl.fullyQualifiedName(mod)));
gop.value_ptr.* = try o.builder.opaqueType(name);
}
return gop.value_ptr.*;
},
.enum_type => |enum_type| try o.lowerType(Type.fromInterned(enum_type.tag_ty)),
.enum_type => try o.lowerType(Type.fromInterned(ip.loadEnumType(t.toIntern()).tag_ty)),
.func_type => |func_type| try o.lowerTypeFn(func_type),
.error_set_type, .inferred_error_set_type => try o.errorIntType(),
// values, not types
@@ -4032,7 +4034,8 @@ pub const Object = struct {
else
struct_ty, vals);
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.haveLayout(ip));
const struct_ty = try o.lowerType(ty);
if (struct_type.layout == .Packed) {
@@ -4596,7 +4599,7 @@ pub const Object = struct {
fn getEnumTagNameFunction(o: *Object, enum_ty: Type) !Builder.Function.Index {
const zcu = o.module;
const ip = &zcu.intern_pool;
const enum_type = ip.indexToKey(enum_ty.toIntern()).enum_type;
const enum_type = ip.loadEnumType(enum_ty.toIntern());
 
// TODO: detect when the type changes and re-emit this function.
const gop = try o.decl_map.getOrPut(o.gpa, enum_type.decl);
@@ -9620,7 +9623,7 @@ pub const FuncGen = struct {
fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !Builder.Function.Index {
const o = self.dg.object;
const zcu = o.module;
const enum_type = zcu.intern_pool.indexToKey(enum_ty.toIntern()).enum_type;
const enum_type = zcu.intern_pool.loadEnumType(enum_ty.toIntern());
 
// TODO: detect when the type changes and re-emit this function.
const gop = try o.named_enum_map.getOrPut(o.gpa, enum_type.decl);
@@ -10092,7 +10095,7 @@ pub const FuncGen = struct {
 
const tag_int = blk: {
const tag_ty = union_ty.unionTagTypeHypothetical(mod);
const union_field_name = union_obj.field_names.get(ip)[extra.field_index];
const union_field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?;
const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
const tag_int_val = try tag_val.intFromEnum(tag_ty, mod);
@@ -11154,7 +11157,8 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.E
if (first_non_integer == null or classes[first_non_integer.?] == .none) {
assert(first_non_integer orelse classes.len == types_index);
switch (ip.indexToKey(return_type.toIntern())) {
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(return_type.toIntern());
assert(struct_type.haveLayout(ip));
const size: u64 = struct_type.size(ip).*;
assert((std.math.divCeil(u64, size, 8) catch unreachable) == types_index);
@@ -11446,7 +11450,8 @@ const ParamTypeIterator = struct {
return .byref;
}
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.haveLayout(ip));
const size: u64 = struct_type.size(ip).*;
assert((std.math.divCeil(u64, size, 8) catch unreachable) == types_index);
@@ -11562,7 +11567,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
}
return false;
},
.struct_type => |s| s,
.struct_type => ip.loadStructType(ty.toIntern()),
else => unreachable,
};
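// Note the `.enum_type` case above: enums have no LLVM representation of
// their own and lower straight to their integer tag type, so e.g.
// `enum(u8) { ... }` becomes the LLVM type `i8`.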
 
 
src/codegen/spirv.zig added: 4090, removed: 2946, total 1144
@@ -1528,7 +1528,7 @@ const DeclGen = struct {
try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
return ty_ref;
},
.struct_type => |struct_type| struct_type,
.struct_type => ip.loadStructType(ty.toIntern()),
else => unreachable,
};
 
@@ -3633,7 +3633,8 @@ const DeclGen = struct {
index += 1;
}
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(result_ty.toIntern());
var it = struct_type.iterateRuntimeOrder(ip);
for (elements, 0..) |element, i| {
const field_index = it.next().?;
@@ -3901,36 +3902,33 @@ const DeclGen = struct {
const mod = self.module;
const ip = &mod.intern_pool;
const union_ty = mod.typeToUnion(ty).?;
const tag_ty = Type.fromInterned(union_ty.enum_tag_ty);
 
if (union_ty.getLayout(ip) == .Packed) {
unreachable; // TODO
}
 
const maybe_tag_ty = ty.unionTagTypeSafety(mod);
const layout = self.unionLayout(ty);
 
const tag_int = if (layout.tag_size != 0) blk: {
const tag_ty = maybe_tag_ty.?;
const union_field_name = union_ty.field_names.get(ip)[active_field];
const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?;
const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
const tag_val = try mod.enumValueFieldIndex(tag_ty, active_field);
const tag_int_val = try tag_val.intFromEnum(tag_ty, mod);
break :blk tag_int_val.toUnsignedInt(mod);
} else 0;
 
if (!layout.has_payload) {
const tag_ty_ref = try self.resolveType(maybe_tag_ty.?, .direct);
const tag_ty_ref = try self.resolveType(tag_ty, .direct);
return try self.constInt(tag_ty_ref, tag_int);
}
 
const tmp_id = try self.alloc(ty, .{ .storage_class = .Function });
 
if (layout.tag_size != 0) {
const tag_ty_ref = try self.resolveType(maybe_tag_ty.?, .direct);
const tag_ptr_ty_ref = try self.ptrType(maybe_tag_ty.?, .Function);
const tag_ty_ref = try self.resolveType(tag_ty, .direct);
const tag_ptr_ty_ref = try self.ptrType(tag_ty, .Function);
const ptr_id = try self.accessChain(tag_ptr_ty_ref, tmp_id, &.{@as(u32, @intCast(layout.tag_index))});
const tag_id = try self.constInt(tag_ty_ref, tag_int);
try self.store(maybe_tag_ty.?, ptr_id, tag_id, .{});
try self.store(tag_ty, ptr_id, tag_id, .{});
}
 
const payload_ty = Type.fromInterned(union_ty.field_types.get(ip)[active_field]);
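The tag computation above also loses a name-based round trip: rather than fetching the union field's name and resolving it back to an enum field index, the new code passes `active_field` straight to `mod.enumValueFieldIndex`, apparently relying on the union's field order matching its tag enum's field order. Sketch of the simplification, using the names from the hunk:

// Before: union field index -> field name -> enum field index.
// const union_field_name = union_ty.field_names.get(ip)[active_field];
// const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?;
// const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
// After: the union field index is used as the enum field index directly.
// const tag_val = try mod.enumValueFieldIndex(tag_ty, active_field);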
 
src/glibc.zig added: 4090, removed: 2946, total 1144
@@ -1118,6 +1118,7 @@ fn buildSharedLib(
.cc_argv = &.{},
.parent = null,
.builtin_mod = null,
.builtin_modules = null, // there is only one module in this compilation
});
 
const c_source_files = [1]Compilation.CSourceFile{
 
src/libcxx.zig added: 4090, removed: 2946, total 1144
@@ -181,6 +181,7 @@ pub fn buildLibCXX(comp: *Compilation, prog_node: *std.Progress.Node) !void {
.cc_argv = &.{},
.parent = null,
.builtin_mod = null,
.builtin_modules = null, // there is only one module in this compilation
});
 
var c_source_files = try std.ArrayList(Compilation.CSourceFile).initCapacity(arena, libcxx_files.len);
@@ -395,6 +396,7 @@ pub fn buildLibCXXABI(comp: *Compilation, prog_node: *std.Progress.Node) !void {
.cc_argv = &.{},
.parent = null,
.builtin_mod = null,
.builtin_modules = null, // there is only one module in this compilation
});
 
var c_source_files = try std.ArrayList(Compilation.CSourceFile).initCapacity(arena, libcxxabi_files.len);
 
src/libtsan.zig added: 4090, removed: 2946, total 1144
@@ -92,6 +92,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: *std.Progress.Node) BuildError!v
.cc_argv = &common_flags,
.parent = null,
.builtin_mod = null,
.builtin_modules = null, // there is only one module in this compilation
}) catch |err| {
comp.setMiscFailure(
.libtsan,
 
src/libunwind.zig added: 4090, removed: 2946, total 1144
@@ -58,6 +58,7 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: *std.Progress.Node) !void {
.cc_argv = &.{},
.parent = null,
.builtin_mod = null,
.builtin_modules = null, // there is only one module in this compilation
});
 
const root_name = "unwind";
 
src/link/Dwarf.zig added: 4090, removed: 2946, total 1144
@@ -311,7 +311,8 @@ pub const DeclState = struct {
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
}
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
// DW.AT.name, DW.FORM.string
try ty.print(dbg_info_buffer.writer(), mod);
try dbg_info_buffer.append(0);
@@ -374,7 +375,7 @@ pub const DeclState = struct {
try ty.print(dbg_info_buffer.writer(), mod);
try dbg_info_buffer.append(0);
 
const enum_type = ip.indexToKey(ty.ip_index).enum_type;
const enum_type = ip.loadEnumType(ty.ip_index);
for (enum_type.names.get(ip), 0..) |field_name_index, field_i| {
const field_name = ip.stringToSlice(field_name_index);
// DW.AT.enumerator
@@ -442,7 +443,7 @@ pub const DeclState = struct {
try dbg_info_buffer.append(0);
}
 
for (union_obj.field_types.get(ip), union_obj.field_names.get(ip)) |field_ty, field_name| {
for (union_obj.field_types.get(ip), union_obj.loadTagType(ip).names.get(ip)) |field_ty, field_name| {
if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
// DW.AT.member
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_member));
 
src/main.zig added: 4090, removed: 2946, total 1144
@@ -2708,7 +2708,9 @@ fn buildOutputType(
create_module.opts.emit_bin = emit_bin != .no;
create_module.opts.any_c_source_files = create_module.c_source_files.items.len != 0;
 
const main_mod = try createModule(gpa, arena, &create_module, 0, null, zig_lib_directory);
var builtin_modules: std.StringHashMapUnmanaged(*Package.Module) = .{};
// `builtin_modules` allocated into `arena`, so no deinit
const main_mod = try createModule(gpa, arena, &create_module, 0, null, zig_lib_directory, &builtin_modules);
for (create_module.modules.keys(), create_module.modules.values()) |key, cli_mod| {
if (cli_mod.resolved == null)
fatal("module '{s}' declared but not used", .{key});
@@ -2753,6 +2755,7 @@ fn buildOutputType(
.global = create_module.resolved_options,
.parent = main_mod,
.builtin_mod = main_mod.getBuiltinDependency(),
.builtin_modules = null, // `builtin_mod` is specified
});
test_mod.deps = try main_mod.deps.clone(arena);
break :test_mod test_mod;
@@ -2771,6 +2774,7 @@ fn buildOutputType(
.global = create_module.resolved_options,
.parent = main_mod,
.builtin_mod = main_mod.getBuiltinDependency(),
.builtin_modules = null, // `builtin_mod` is specified
});
 
break :root_mod test_mod;
@@ -3479,6 +3483,7 @@ fn createModule(
index: usize,
parent: ?*Package.Module,
zig_lib_directory: Cache.Directory,
builtin_modules: *std.StringHashMapUnmanaged(*Package.Module),
) Allocator.Error!*Package.Module {
const cli_mod = &create_module.modules.values()[index];
if (cli_mod.resolved) |m| return m;
@@ -3931,6 +3936,7 @@ fn createModule(
.global = create_module.resolved_options,
.parent = parent,
.builtin_mod = null,
.builtin_modules = builtin_modules,
}) catch |err| switch (err) {
error.ValgrindUnsupportedOnTarget => fatal("unable to create module '{s}': valgrind does not support the selected target CPU architecture", .{name}),
error.TargetRequiresSingleThreaded => fatal("unable to create module '{s}': the selected target does not support multithreading", .{name}),
@@ -3953,7 +3959,7 @@ fn createModule(
for (cli_mod.deps) |dep| {
const dep_index = create_module.modules.getIndex(dep.value) orelse
fatal("module '{s}' depends on non-existent module '{s}'", .{ name, dep.key });
const dep_mod = try createModule(gpa, arena, create_module, dep_index, mod, zig_lib_directory);
const dep_mod = try createModule(gpa, arena, create_module, dep_index, mod, zig_lib_directory, builtin_modules);
try mod.deps.put(arena, dep.key, dep_mod);
}
 
@@ -5249,6 +5255,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
.global = config,
.parent = null,
.builtin_mod = null,
.builtin_modules = null, // all modules will inherit this one's builtin
});
 
const builtin_mod = root_mod.getBuiltinDependency();
@@ -5265,6 +5272,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
.global = config,
.parent = root_mod,
.builtin_mod = builtin_mod,
.builtin_modules = null, // `builtin_mod` is specified
});
 
var cleanup_build_dir: ?fs.Dir = null;
@@ -5399,6 +5407,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
.global = config,
.parent = root_mod,
.builtin_mod = builtin_mod,
.builtin_modules = null, // `builtin_mod` is specified
});
const hash_cloned = try arena.dupe(u8, &hash);
deps_mod.deps.putAssumeCapacityNoClobber(hash_cloned, m);
@@ -5648,6 +5657,7 @@ fn jitCmd(
.global = config,
.parent = null,
.builtin_mod = null,
.builtin_modules = null, // all modules will inherit this one's builtin
});
 
if (options.depend_on_aro) {
@@ -5670,6 +5680,7 @@ fn jitCmd(
.global = config,
.parent = null,
.builtin_mod = root_mod.getBuiltinDependency(),
.builtin_modules = null, // `builtin_mod` is specified
});
try root_mod.deps.put(arena, "aro", aro_mod);
}
@@ -7216,10 +7227,11 @@ fn createDependenciesModule(
},
.fully_qualified_name = "root.@dependencies",
.parent = main_mod,
.builtin_mod = builtin_mod,
.cc_argv = &.{},
.inherited = .{},
.global = global_options,
.builtin_mod = builtin_mod,
.builtin_modules = null, // `builtin_mod` is specified
});
try main_mod.deps.put(arena, "@dependencies", deps_mod);
return deps_mod;
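Read together, the comments added at these `Package.Module.create` call sites describe three configurations for the new `builtin_modules` parameter (inferred from the annotations in this diff rather than from the `Package.Module` API itself):

// 1. Single-module sub-compilations (glibc, musl, libcxx, libunwind, tsan):
//    .builtin_mod = null, .builtin_modules = null
// 2. Modules that reuse an already-created builtin module:
//    .builtin_mod = main_mod.getBuiltinDependency(), .builtin_modules = null
// 3. CLI-declared module graphs, where builtin modules are deduplicated
//    through one shared, arena-allocated map:
//    .builtin_mod = null, .builtin_modules = &builtin_modules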
 
src/musl.zig added: 4090, removed: 2946, total 1144
@@ -250,6 +250,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progr
.cc_argv = cc_argv,
.parent = null,
.builtin_mod = null,
.builtin_modules = null, // there is only one module in this compilation
});
 
const sub_compilation = try Compilation.create(comp.gpa, arena, .{
 
src/print_zir.zig added: 4090, removed: 2946, total 1144
@@ -282,7 +282,6 @@ const Writer = struct {
 
.ref,
.ret_implicit,
.closure_capture,
.validate_ref_ty,
=> try self.writeUnTok(stream, inst),
 
@@ -510,8 +509,6 @@ const Writer = struct {
 
.dbg_stmt => try self.writeDbgStmt(stream, inst),
 
.closure_get => try self.writeInstNode(stream, inst),
 
.@"defer" => try self.writeDefer(stream, inst),
.defer_err_code => try self.writeDeferErrCode(stream, inst),
 
@@ -611,6 +608,7 @@ const Writer = struct {
.ptr_cast_no_dest => try self.writePtrCastNoDest(stream, extended),
 
.restore_err_ret_index => try self.writeRestoreErrRetIndex(stream, extended),
.closure_get => try self.writeClosureGet(stream, extended),
}
}
 
@@ -1401,6 +1399,12 @@ const Writer = struct {
 
var extra_index: usize = extra.end;
 
const captures_len = if (small.has_captures_len) blk: {
const captures_len = self.code.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
 
const fields_len = if (small.has_fields_len) blk: {
const fields_len = self.code.extra[extra_index];
extra_index += 1;
@@ -1419,12 +1423,26 @@ const Writer = struct {
 
try stream.print("{s}, ", .{@tagName(small.name_strategy)});
 
if (small.layout == .Packed and small.has_backing_int) {
if (captures_len == 0) {
try stream.writeAll("{}, ");
} else {
try stream.writeAll("{ ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
for (1..captures_len) |_| {
try stream.writeAll(", ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
}
try stream.writeAll(" }, ");
}
 
if (small.has_backing_int) {
const backing_int_body_len = self.code.extra[extra_index];
extra_index += 1;
try stream.writeAll("Packed(");
if (backing_int_body_len == 0) {
const backing_int_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
const backing_int_ref: Zir.Inst.Ref = @enumFromInt(self.code.extra[extra_index]);
extra_index += 1;
try self.writeInstRef(stream, backing_int_ref);
} else {
@@ -1601,6 +1619,12 @@ const Writer = struct {
break :blk tag_type_ref;
} else .none;
 
const captures_len = if (small.has_captures_len) blk: {
const captures_len = self.code.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
 
const body_len = if (small.has_body_len) blk: {
const body_len = self.code.extra[extra_index];
extra_index += 1;
@@ -1624,6 +1648,20 @@ const Writer = struct {
});
try self.writeFlag(stream, "autoenum, ", small.auto_enum_tag);
 
if (captures_len == 0) {
try stream.writeAll("{}, ");
} else {
try stream.writeAll("{ ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
for (1..captures_len) |_| {
try stream.writeAll(", ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
}
try stream.writeAll(" }, ");
}
 
if (decls_len == 0) {
try stream.writeAll("{}");
} else {
@@ -1748,6 +1786,12 @@ const Writer = struct {
break :blk tag_type_ref;
} else .none;
 
const captures_len = if (small.has_captures_len) blk: {
const captures_len = self.code.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
 
const body_len = if (small.has_body_len) blk: {
const body_len = self.code.extra[extra_index];
extra_index += 1;
@@ -1769,6 +1813,20 @@ const Writer = struct {
try stream.print("{s}, ", .{@tagName(small.name_strategy)});
try self.writeFlag(stream, "nonexhaustive, ", small.nonexhaustive);
 
if (captures_len == 0) {
try stream.writeAll("{}, ");
} else {
try stream.writeAll("{ ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
for (1..captures_len) |_| {
try stream.writeAll(", ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
}
try stream.writeAll(" }, ");
}
 
if (decls_len == 0) {
try stream.writeAll("{}, ");
} else {
@@ -1854,6 +1912,12 @@ const Writer = struct {
const extra = self.code.extraData(Zir.Inst.OpaqueDecl, extended.operand);
var extra_index: usize = extra.end;
 
const captures_len = if (small.has_captures_len) blk: {
const captures_len = self.code.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
 
const decls_len = if (small.has_decls_len) blk: {
const decls_len = self.code.extra[extra_index];
extra_index += 1;
@@ -1862,6 +1926,20 @@ const Writer = struct {
 
try stream.print("{s}, ", .{@tagName(small.name_strategy)});
 
if (captures_len == 0) {
try stream.writeAll("{}, ");
} else {
try stream.writeAll("{ ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
for (1..captures_len) |_| {
try stream.writeAll(", ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
}
try stream.writeAll(" }, ");
}
 
if (decls_len == 0) {
try stream.writeAll("{})");
} else {
@@ -2706,6 +2784,12 @@ const Writer = struct {
try self.writeSrc(stream, inst_data.src());
}
 
fn writeClosureGet(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
const src = LazySrcLoc.nodeOffset(@bitCast(extended.operand));
try stream.print("{d})) ", .{extended.small});
try self.writeSrc(stream, src);
}
 
fn writeInstRef(self: *Writer, stream: anytype, ref: Zir.Inst.Ref) !void {
if (ref == .none) {
return stream.writeAll(".none");
@@ -2722,6 +2806,19 @@ const Writer = struct {
return stream.print("%{d}", .{@intFromEnum(inst)});
}
 
fn writeCapture(self: *Writer, stream: anytype, capture: Zir.Inst.Capture) !void {
switch (capture.unwrap()) {
.nested => |i| return stream.print("[{d}]", .{i}),
.instruction => |inst| return self.writeInstIndex(stream, inst),
.decl_val => |str| try stream.print("decl_val \"{}\"", .{
std.zig.fmtEscapes(self.code.nullTerminatedString(str)),
}),
.decl_ref => |str| try stream.print("decl_ref \"{}\"", .{
std.zig.fmtEscapes(self.code.nullTerminatedString(str)),
}),
}
}
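For reference, the four capture forms handled by `writeCapture` render roughly as follows inside a container decl's capture list (illustrative output assembled from the format strings above; `%N` comes from `writeInstIndex`):

// struct_decl(..., { %12, [0], decl_val "T", decl_ref "x" }, ...)
//   %12            .instruction: the captured ZIR instruction
//   [0]            .nested: index into the enclosing type's own captures
//   decl_val "T"   declaration `T` captured by value
//   decl_ref "x"   declaration `x` captured by reference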
 
fn writeOptionalInstRef(
self: *Writer,
stream: anytype,
 
src/type.zig added: 4090, removed: 2946, total 1144
@@ -320,11 +320,12 @@ pub const Type = struct {
 
.generic_poison => unreachable,
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.decl.unwrap()) |decl_index| {
const decl = mod.declPtr(decl_index);
try decl.renderFullyQualifiedName(mod, writer);
} else if (struct_type.namespace.unwrap()) |namespace_index| {
} else if (ip.loadStructType(ty.toIntern()).namespace.unwrap()) |namespace_index| {
const namespace = mod.namespacePtr(namespace_index);
try namespace.renderFullyQualifiedName(mod, .empty, writer);
} else {
@@ -354,16 +355,16 @@ pub const Type = struct {
try writer.writeAll("}");
},
 
.union_type => |union_type| {
const decl = mod.declPtr(union_type.decl);
.union_type => {
const decl = mod.declPtr(ip.loadUnionType(ty.toIntern()).decl);
try decl.renderFullyQualifiedName(mod, writer);
},
.opaque_type => |opaque_type| {
const decl = mod.declPtr(opaque_type.decl);
.opaque_type => {
const decl = mod.declPtr(ip.loadOpaqueType(ty.toIntern()).decl);
try decl.renderFullyQualifiedName(mod, writer);
},
.enum_type => |enum_type| {
const decl = mod.declPtr(enum_type.decl);
.enum_type => {
const decl = mod.declPtr(ip.loadEnumType(ty.toIntern()).decl);
try decl.renderFullyQualifiedName(mod, writer);
},
.func_type => |fn_info| {
@@ -573,7 +574,8 @@ pub const Type = struct {
 
.generic_poison => unreachable,
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.assumeRuntimeBitsIfFieldTypesWip(ip)) {
// In this case, we guess that hasRuntimeBits() for this type is true,
// and then later if our guess was incorrect, we emit a compile error.
@@ -601,7 +603,8 @@ pub const Type = struct {
return false;
},
 
.union_type => |union_type| {
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
switch (union_type.flagsPtr(ip).runtime_tag) {
.none => {
if (union_type.flagsPtr(ip).status == .field_types_wip) {
@@ -628,9 +631,8 @@ pub const Type = struct {
.lazy => if (!union_type.flagsPtr(ip).status.haveFieldTypes())
return error.NeedLazy,
}
const union_obj = ip.loadUnionType(union_type);
for (0..union_obj.field_types.len) |field_index| {
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
for (0..union_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
return true;
} else {
@@ -639,7 +641,7 @@ pub const Type = struct {
},
 
.opaque_type => true,
.enum_type => |enum_type| Type.fromInterned(enum_type.tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
.enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
 
// values, not types
.undef,
@@ -736,15 +738,19 @@ pub const Type = struct {
.generic_poison,
=> false,
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
// Structs with no fields have a well-defined layout of no bits.
return struct_type.layout != .Auto or struct_type.field_types.len == 0;
},
.union_type => |union_type| switch (union_type.flagsPtr(ip).runtime_tag) {
.none, .safety => union_type.flagsPtr(ip).layout != .Auto,
.tagged => false,
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
return switch (union_type.flagsPtr(ip).runtime_tag) {
.none, .safety => union_type.flagsPtr(ip).layout != .Auto,
.tagged => false,
};
},
.enum_type => |enum_type| switch (enum_type.tag_mode) {
.enum_type => switch (ip.loadEnumType(ty.toIntern()).tag_mode) {
.auto => false,
.explicit, .nonexhaustive => true,
},
@@ -1019,7 +1025,8 @@ pub const Type = struct {
.noreturn => unreachable,
.generic_poison => unreachable,
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.layout == .Packed) {
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
@@ -1066,7 +1073,8 @@ pub const Type = struct {
}
return .{ .scalar = big_align };
},
.union_type => |union_type| {
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
const flags = union_type.flagsPtr(ip).*;
if (flags.alignment != .none) return .{ .scalar = flags.alignment };
 
@@ -1082,8 +1090,8 @@ pub const Type = struct {
return .{ .scalar = union_type.flagsPtr(ip).alignment };
},
.opaque_type => return .{ .scalar = .@"1" },
.enum_type => |enum_type| return .{
.scalar = Type.fromInterned(enum_type.tag_ty).abiAlignment(mod),
.enum_type => return .{
.scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiAlignment(mod),
},
 
// values, not types
@@ -1394,7 +1402,8 @@ pub const Type = struct {
.noreturn => unreachable,
.generic_poison => unreachable,
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
.lazy => switch (struct_type.layout) {
@@ -1439,7 +1448,8 @@ pub const Type = struct {
return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) };
},
 
.union_type => |union_type| {
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
.lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{
@@ -1455,7 +1465,7 @@ pub const Type = struct {
return .{ .scalar = union_type.size(ip).* };
},
.opaque_type => unreachable, // no size available
.enum_type => |enum_type| return AbiSizeAdvanced{ .scalar = Type.fromInterned(enum_type.tag_ty).abiSize(mod) },
.enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(mod) },
 
// values, not types
.undef,
@@ -1644,7 +1654,8 @@ pub const Type = struct {
.extern_options => unreachable,
.type_info => unreachable,
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
const is_packed = struct_type.layout == .Packed;
if (opt_sema) |sema| {
try sema.resolveTypeFields(ty);
@@ -1661,7 +1672,8 @@ pub const Type = struct {
return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
},
 
.union_type => |union_type| {
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
const is_packed = ty.containerLayout(mod) == .Packed;
if (opt_sema) |sema| {
try sema.resolveTypeFields(ty);
@@ -1670,19 +1682,18 @@ pub const Type = struct {
if (!is_packed) {
return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
}
const union_obj = ip.loadUnionType(union_type);
assert(union_obj.flagsPtr(ip).status.haveFieldTypes());
assert(union_type.flagsPtr(ip).status.haveFieldTypes());
 
var size: u64 = 0;
for (0..union_obj.field_types.len) |field_index| {
const field_ty = union_obj.field_types.get(ip)[field_index];
for (0..union_type.field_types.len) |field_index| {
const field_ty = union_type.field_types.get(ip)[field_index];
size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, opt_sema));
}
 
return size;
},
.opaque_type => unreachable,
.enum_type => |enum_type| return bitSizeAdvanced(Type.fromInterned(enum_type.tag_ty), mod, opt_sema),
.enum_type => return bitSizeAdvanced(Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), mod, opt_sema),
 
// values, not types
.undef,
@@ -1713,8 +1724,8 @@ pub const Type = struct {
pub fn layoutIsResolved(ty: Type, mod: *Module) bool {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| struct_type.haveLayout(ip),
.union_type => |union_type| union_type.haveLayout(ip),
.struct_type => ip.loadStructType(ty.toIntern()).haveLayout(ip),
.union_type => ip.loadUnionType(ty.toIntern()).haveLayout(ip),
.array_type => |array_type| {
if ((array_type.len + @intFromBool(array_type.sentinel != .none)) == 0) return true;
return Type.fromInterned(array_type.child).layoutIsResolved(mod);
@@ -1914,16 +1925,18 @@ pub const Type = struct {
/// Otherwise, returns `null`.
pub fn unionTagType(ty: Type, mod: *Module) ?Type {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.union_type => |union_type| switch (union_type.flagsPtr(ip).runtime_tag) {
.tagged => {
assert(union_type.flagsPtr(ip).status.haveFieldTypes());
return Type.fromInterned(union_type.enum_tag_ty);
},
else => null,
},
else => null,
};
switch (ip.indexToKey(ty.toIntern())) {
.union_type => {},
else => return null,
}
const union_type = ip.loadUnionType(ty.toIntern());
switch (union_type.flagsPtr(ip).runtime_tag) {
.tagged => {
assert(union_type.flagsPtr(ip).status.haveFieldTypes());
return Type.fromInterned(union_type.enum_tag_ty);
},
else => return null,
}
}
 
/// Same as `unionTagType` but includes safety tag.
@@ -1931,7 +1944,8 @@ pub const Type = struct {
pub fn unionTagTypeSafety(ty: Type, mod: *Module) ?Type {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.union_type => |union_type| {
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
if (!union_type.hasTag(ip)) return null;
assert(union_type.haveFieldTypes(ip));
return Type.fromInterned(union_type.enum_tag_ty);
@@ -1981,17 +1995,16 @@ pub const Type = struct {
 
pub fn unionGetLayout(ty: Type, mod: *Module) Module.UnionLayout {
const ip = &mod.intern_pool;
const union_type = ip.indexToKey(ty.toIntern()).union_type;
const union_obj = ip.loadUnionType(union_type);
const union_obj = ip.loadUnionType(ty.toIntern());
return mod.getUnionLayout(union_obj);
}
 
pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| struct_type.layout,
.struct_type => ip.loadStructType(ty.toIntern()).layout,
.anon_struct_type => .Auto,
.union_type => |union_type| union_type.flagsPtr(ip).layout,
.union_type => ip.loadUnionType(ty.toIntern()).flagsPtr(ip).layout,
else => unreachable,
};
}
@@ -2095,22 +2108,15 @@ pub const Type = struct {
 
/// Asserts the type is an array or vector or struct.
pub fn arrayLen(ty: Type, mod: *const Module) u64 {
return arrayLenIp(ty, &mod.intern_pool);
return ty.arrayLenIp(&mod.intern_pool);
}
 
pub fn arrayLenIp(ty: Type, ip: *const InternPool) u64 {
return switch (ip.indexToKey(ty.toIntern())) {
.vector_type => |vector_type| vector_type.len,
.array_type => |array_type| array_type.len,
.struct_type => |struct_type| struct_type.field_types.len,
.anon_struct_type => |tuple| tuple.types.len,
 
else => unreachable,
};
return ip.aggregateTypeLen(ty.toIntern());
}
 
pub fn arrayLenIncludingSentinel(ty: Type, mod: *const Module) u64 {
return ty.arrayLen(mod) + @intFromBool(ty.sentinel(mod) != null);
return mod.intern_pool.aggregateTypeLenIncludingSentinel(ty.toIntern());
}
 
pub fn vectorLen(ty: Type, mod: *const Module) u32 {
@@ -2199,8 +2205,8 @@ pub const Type = struct {
.c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) },
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| return int_type,
.struct_type => |t| ty = Type.fromInterned(t.backingIntType(ip).*),
.enum_type => |enum_type| ty = Type.fromInterned(enum_type.tag_ty),
.struct_type => ty = Type.fromInterned(ip.loadStructType(ty.toIntern()).backingIntType(ip).*),
.enum_type => ty = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty),
.vector_type => |vector_type| ty = Type.fromInterned(vector_type.child),
 
.error_set_type, .inferred_error_set_type => {
@@ -2463,7 +2469,8 @@ pub const Type = struct {
 
.generic_poison => unreachable,
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.haveFieldTypes(ip));
if (struct_type.knownNonOpv(ip))
return null;
@@ -2505,11 +2512,11 @@ pub const Type = struct {
} })));
},
 
.union_type => |union_type| {
const union_obj = ip.loadUnionType(union_type);
.union_type => {
const union_obj = ip.loadUnionType(ty.toIntern());
const tag_val = (try Type.fromInterned(union_obj.enum_tag_ty).onePossibleValue(mod)) orelse
return null;
if (union_obj.field_names.len == 0) {
if (union_obj.field_types.len == 0) {
const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
return Value.fromInterned(only);
}
@@ -2524,45 +2531,48 @@ pub const Type = struct {
return Value.fromInterned(only);
},
.opaque_type => return null,
.enum_type => |enum_type| switch (enum_type.tag_mode) {
.nonexhaustive => {
if (enum_type.tag_ty == .comptime_int_type) return null;

if (try Type.fromInterned(enum_type.tag_ty).onePossibleValue(mod)) |int_opv| {
const only = try mod.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = int_opv.toIntern(),
} });
return Value.fromInterned(only);
}

return null;
},
.auto, .explicit => {
if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null;

switch (enum_type.names.len) {
0 => {
const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
return Value.fromInterned(only);
},
1 => {
if (enum_type.values.len == 0) {
const only = try mod.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = try mod.intern(.{ .int = .{
.ty = enum_type.tag_ty,
.storage = .{ .u64 = 0 },
} }),
} });
return Value.fromInterned(only);
} else {
return Value.fromInterned(enum_type.values.get(ip)[0]);
}
},
else => return null,
}
},
},
.enum_type => {
const enum_type = ip.loadEnumType(ty.toIntern());
switch (enum_type.tag_mode) {
.nonexhaustive => {
if (enum_type.tag_ty == .comptime_int_type) return null;

if (try Type.fromInterned(enum_type.tag_ty).onePossibleValue(mod)) |int_opv| {
const only = try mod.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = int_opv.toIntern(),
} });
return Value.fromInterned(only);
}

return null;
},
.auto, .explicit => {
if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null;

switch (enum_type.names.len) {
0 => {
const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
return Value.fromInterned(only);
},
1 => {
if (enum_type.values.len == 0) {
const only = try mod.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = try mod.intern(.{ .int = .{
.ty = enum_type.tag_ty,
.storage = .{ .u64 = 0 },
} }),
} });
return Value.fromInterned(only);
} else {
return Value.fromInterned(enum_type.values.get(ip)[0]);
}
},
else => return null,
}
},
}
},
 
// values, not types
@@ -2676,7 +2686,8 @@ pub const Type = struct {
.type_info,
=> true,
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
// packed structs cannot be comptime-only because they have a well-defined
// memory layout and every field has a well-defined bit pattern.
if (struct_type.layout == .Packed)
@@ -2726,38 +2737,40 @@ pub const Type = struct {
return false;
},
 
.union_type => |union_type| switch (union_type.flagsPtr(ip).requires_comptime) {
.no, .wip => false,
.yes => true,
.unknown => {
// The type is not resolved; assert that we have a Sema.
const sema = opt_sema.?;
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
switch (union_type.flagsPtr(ip).requires_comptime) {
.no, .wip => return false,
.yes => return true,
.unknown => {
// The type is not resolved; assert that we have a Sema.
const sema = opt_sema.?;
 
if (union_type.flagsPtr(ip).status == .field_types_wip)
return false;
if (union_type.flagsPtr(ip).status == .field_types_wip)
return false;
 
union_type.flagsPtr(ip).requires_comptime = .wip;
errdefer union_type.flagsPtr(ip).requires_comptime = .unknown;
union_type.flagsPtr(ip).requires_comptime = .wip;
errdefer union_type.flagsPtr(ip).requires_comptime = .unknown;
 
try sema.resolveTypeFieldsUnion(ty, union_type);
try sema.resolveTypeFieldsUnion(ty, union_type);
 
const union_obj = ip.loadUnionType(union_type);
for (0..union_obj.field_types.len) |field_idx| {
const field_ty = union_obj.field_types.get(ip)[field_idx];
if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) {
union_obj.flagsPtr(ip).requires_comptime = .yes;
return true;
for (0..union_type.field_types.len) |field_idx| {
const field_ty = union_type.field_types.get(ip)[field_idx];
if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) {
union_type.flagsPtr(ip).requires_comptime = .yes;
return true;
}
}
}
 
union_obj.flagsPtr(ip).requires_comptime = .no;
return false;
},
union_type.flagsPtr(ip).requires_comptime = .no;
return false;
},
}
},
 
.opaque_type => false,
 
.enum_type => |enum_type| return Type.fromInterned(enum_type.tag_ty).comptimeOnlyAdvanced(mod, opt_sema),
.enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(mod, opt_sema),
 
// values, not types
.undef,
@@ -2830,11 +2843,12 @@ pub const Type = struct {
 
/// Returns null if the type has no namespace.
pub fn getNamespaceIndex(ty: Type, mod: *Module) InternPool.OptionalNamespaceIndex {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
.struct_type => |struct_type| struct_type.namespace,
.union_type => |union_type| union_type.namespace.toOptional(),
.enum_type => |enum_type| enum_type.namespace,
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.opaque_type => ip.loadOpaqueType(ty.toIntern()).namespace,
.struct_type => ip.loadStructType(ty.toIntern()).namespace,
.union_type => ip.loadUnionType(ty.toIntern()).namespace,
.enum_type => ip.loadEnumType(ty.toIntern()).namespace,
 
else => .none,
};
@@ -2920,16 +2934,18 @@ pub const Type = struct {
 
/// Asserts the type is an enum or a union.
pub fn intTagType(ty: Type, mod: *Module) Type {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.union_type => |union_type| Type.fromInterned(union_type.enum_tag_ty).intTagType(mod),
.enum_type => |enum_type| Type.fromInterned(enum_type.tag_ty),
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.union_type => Type.fromInterned(ip.loadUnionType(ty.toIntern()).enum_tag_ty).intTagType(mod),
.enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty),
else => unreachable,
};
}
 
pub fn isNonexhaustiveEnum(ty: Type, mod: *Module) bool {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.enum_type => |enum_type| switch (enum_type.tag_mode) {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.enum_type => switch (ip.loadEnumType(ty.toIntern()).tag_mode) {
.nonexhaustive => true,
.auto, .explicit => false,
},
@@ -2953,21 +2969,21 @@ pub const Type = struct {
}
 
pub fn enumFields(ty: Type, mod: *Module) InternPool.NullTerminatedString.Slice {
return mod.intern_pool.indexToKey(ty.toIntern()).enum_type.names;
return mod.intern_pool.loadEnumType(ty.toIntern()).names;
}
 
pub fn enumFieldCount(ty: Type, mod: *Module) usize {
return mod.intern_pool.indexToKey(ty.toIntern()).enum_type.names.len;
return mod.intern_pool.loadEnumType(ty.toIntern()).names.len;
}
 
pub fn enumFieldName(ty: Type, field_index: usize, mod: *Module) InternPool.NullTerminatedString {
const ip = &mod.intern_pool;
return ip.indexToKey(ty.toIntern()).enum_type.names.get(ip)[field_index];
return ip.loadEnumType(ty.toIntern()).names.get(ip)[field_index];
}
 
pub fn enumFieldIndex(ty: Type, field_name: InternPool.NullTerminatedString, mod: *Module) ?u32 {
const ip = &mod.intern_pool;
const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
const enum_type = ip.loadEnumType(ty.toIntern());
return enum_type.nameIndex(ip, field_name);
}
 
@@ -2976,7 +2992,7 @@ pub const Type = struct {
/// declaration order, or `null` if `enum_tag` does not match any field.
pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 {
const ip = &mod.intern_pool;
const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
const enum_type = ip.loadEnumType(ty.toIntern());
const int_tag = switch (ip.indexToKey(enum_tag.toIntern())) {
.int => enum_tag.toIntern(),
.enum_tag => |info| info.int,
@@ -2990,7 +3006,7 @@ pub const Type = struct {
pub fn structFieldName(ty: Type, field_index: u32, mod: *Module) InternPool.OptionalNullTerminatedString {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| struct_type.fieldName(ip, field_index),
.struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, field_index),
.anon_struct_type => |anon_struct| anon_struct.fieldName(ip, field_index),
else => unreachable,
};
@@ -3010,7 +3026,7 @@ pub const Type = struct {
pub fn structFieldCount(ty: Type, mod: *Module) u32 {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| struct_type.field_types.len,
.struct_type => ip.loadStructType(ty.toIntern()).field_types.len,
.anon_struct_type => |anon_struct| anon_struct.types.len,
else => unreachable,
};
@@ -3020,9 +3036,9 @@ pub const Type = struct {
pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| Type.fromInterned(struct_type.field_types.get(ip)[index]),
.union_type => |union_type| {
const union_obj = ip.loadUnionType(union_type);
.struct_type => Type.fromInterned(ip.loadStructType(ty.toIntern()).field_types.get(ip)[index]),
.union_type => {
const union_obj = ip.loadUnionType(ty.toIntern());
return Type.fromInterned(union_obj.field_types.get(ip)[index]);
},
.anon_struct_type => |anon_struct| Type.fromInterned(anon_struct.types.get(ip)[index]),
@@ -3033,7 +3049,8 @@ pub const Type = struct {
pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) Alignment {
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.layout != .Packed);
const explicit_align = struct_type.fieldAlign(ip, index);
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
@@ -3042,8 +3059,8 @@ pub const Type = struct {
.anon_struct_type => |anon_struct| {
return Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignment(mod);
},
.union_type => |union_type| {
const union_obj = ip.loadUnionType(union_type);
.union_type => {
const union_obj = ip.loadUnionType(ty.toIntern());
return mod.unionFieldNormalAlignment(union_obj, @intCast(index));
},
else => unreachable,
@@ -3053,7 +3070,8 @@ pub const Type = struct {
pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value {
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
const val = struct_type.fieldInit(ip, index);
// TODO: avoid using `unreachable` to indicate this.
if (val == .none) return Value.@"unreachable";
@@ -3072,7 +3090,8 @@ pub const Type = struct {
pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value {
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.fieldIsComptime(ip, index)) {
assert(struct_type.haveFieldInits(ip));
return Value.fromInterned(struct_type.field_inits.get(ip)[index]);
@@ -3095,7 +3114,7 @@ pub const Type = struct {
pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| struct_type.fieldIsComptime(ip, index),
.struct_type => ip.loadStructType(ty.toIntern()).fieldIsComptime(ip, index),
.anon_struct_type => |anon_struct| anon_struct.values.get(ip)[index] != .none,
else => unreachable,
};
@@ -3110,7 +3129,8 @@ pub const Type = struct {
pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 {
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.haveLayout(ip));
assert(struct_type.layout != .Packed);
return struct_type.offsets.get(ip)[index];
@@ -3137,11 +3157,11 @@ pub const Type = struct {
return offset;
},
 
.union_type => |union_type| {
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
if (!union_type.hasTag(ip))
return 0;
const union_obj = ip.loadUnionType(union_type);
const layout = mod.getUnionLayout(union_obj);
const layout = mod.getUnionLayout(union_type);
if (layout.tag_align.compare(.gte, layout.payload_align)) {
// {Tag, Payload}
return layout.payload_align.forward(layout.tag_size);
@@ -3160,17 +3180,8 @@ pub const Type = struct {
}
 
pub fn declSrcLocOrNull(ty: Type, mod: *Module) ?Module.SrcLoc {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
return mod.declPtr(struct_type.decl.unwrap() orelse return null).srcLoc(mod);
},
.union_type => |union_type| {
return mod.declPtr(union_type.decl).srcLoc(mod);
},
.opaque_type => |opaque_type| mod.opaqueSrcLoc(opaque_type),
.enum_type => |enum_type| mod.declPtr(enum_type.decl).srcLoc(mod),
else => null,
};
const decl = ty.getOwnerDeclOrNull(mod) orelse return null;
return mod.declPtr(decl).srcLoc(mod);
}
 
pub fn getOwnerDecl(ty: Type, mod: *Module) InternPool.DeclIndex {
@@ -3178,11 +3189,12 @@ pub const Type = struct {
}
 
pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?InternPool.DeclIndex {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| struct_type.decl.unwrap(),
.union_type => |union_type| union_type.decl,
.opaque_type => |opaque_type| opaque_type.decl,
.enum_type => |enum_type| enum_type.decl,
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).decl.unwrap(),
.union_type => ip.loadUnionType(ty.toIntern()).decl,
.opaque_type => ip.loadOpaqueType(ty.toIntern()).decl,
.enum_type => ip.loadEnumType(ty.toIntern()).decl,
else => null,
};
}
@@ -3194,7 +3206,8 @@ pub const Type = struct {
pub fn isTuple(ty: Type, mod: *Module) bool {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.layout == .Packed) return false;
if (struct_type.decl == .none) return false;
return struct_type.flagsPtr(ip).is_tuple;
@@ -3215,7 +3228,8 @@ pub const Type = struct {
pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.layout == .Packed) return false;
if (struct_type.decl == .none) return false;
return struct_type.flagsPtr(ip).is_tuple;
@@ -3262,16 +3276,28 @@ pub const Type = struct {
}
 
pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
inline .struct_type,
.union_type,
.enum_type,
.opaque_type,
=> |info| info.zir_index.unwrap(),
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).zir_index.unwrap(),
.union_type => ip.loadUnionType(ty.toIntern()).zir_index,
.enum_type => ip.loadEnumType(ty.toIntern()).zir_index.unwrap(),
.opaque_type => ip.loadOpaqueType(ty.toIntern()).zir_index,
else => null,
};
}
 
/// Given a namespace type, returns its list of captured values.
pub fn getCaptures(ty: Type, zcu: *const Zcu) InternPool.CaptureValue.Slice {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).captures,
.union_type => ip.loadUnionType(ty.toIntern()).captures,
.enum_type => ip.loadEnumType(ty.toIntern()).captures,
.opaque_type => ip.loadOpaqueType(ty.toIntern()).captures,
else => unreachable,
};
}
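A minimal sketch of walking a namespace type's captures via `getCaptures`, assuming `CaptureValue.Slice` exposes the same `get(ip)` accessor as the other InternPool slices in this diff (hypothetical `zcu` and `ty` values):

const ip = &zcu.intern_pool;
for (ty.getCaptures(zcu).get(ip)) |capture| {
// Each CaptureValue records one value the container closed over; two
// instantiations from the same AST node with equal captures are one type.
_ = capture;
}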
 
pub const @"u1": Type = .{ .ip_index = .u1_type };
pub const @"u8": Type = .{ .ip_index = .u8_type };
pub const @"u16": Type = .{ .ip_index = .u16_type };
 
test/behavior/enum.zig added: 4090, removed: 2946, total 1144
@@ -1242,3 +1242,24 @@ test "Non-exhaustive enum backed by comptime_int" {
e = @as(E, @enumFromInt(378089457309184723749));
try expect(@intFromEnum(e) == 378089457309184723749);
}
 
test "matching captures causes enum equivalence" {
const S = struct {
fn Nonexhaustive(comptime I: type) type {
const UTag = @Type(.{ .Int = .{
.signedness = .unsigned,
.bits = @typeInfo(I).Int.bits,
} });
return enum(UTag) { _ };
}
};
 
comptime assert(S.Nonexhaustive(u8) == S.Nonexhaustive(i8));
comptime assert(S.Nonexhaustive(u16) == S.Nonexhaustive(i16));
comptime assert(S.Nonexhaustive(u8) != S.Nonexhaustive(u16));
 
const a: S.Nonexhaustive(u8) = @enumFromInt(123);
const b: S.Nonexhaustive(i8) = @enumFromInt(123);
comptime assert(@TypeOf(a) == @TypeOf(b));
try expect(@intFromEnum(a) == @intFromEnum(b));
}
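These assertions hold because, after this change, a container type's identity is its AST node plus its captured values: every `Nonexhaustive(I)` instantiation runs the same `enum(UTag) { _ }` node, and the body's only capture is `UTag`, which is `u8` for both the `u8` and `i8` arguments. A conceptual sketch of the key (illustration only, not a compiler API):

// identity ~ (ast_node(enum decl), captures)
// Nonexhaustive(u8)  -> (node, .{ u8 })    same key as below
// Nonexhaustive(i8)  -> (node, .{ u8 })    => same type
// Nonexhaustive(u16) -> (node, .{ u16 })   => distinct type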
 
test/behavior/generics.zig added: 4090, removed: 2946, total 1144
@@ -371,8 +371,12 @@ test "extern function used as generic parameter" {
const S = struct {
extern fn usedAsGenericParameterFoo() void;
extern fn usedAsGenericParameterBar() void;
inline fn usedAsGenericParameterBaz(comptime _: anytype) type {
return struct {};
inline fn usedAsGenericParameterBaz(comptime token: anytype) type {
return struct {
comptime {
_ = token;
}
};
}
};
try expect(S.usedAsGenericParameterBaz(S.usedAsGenericParameterFoo) !=
 
test/behavior/src.zig added: 4090, removed: 2946, total 1144
@@ -23,8 +23,12 @@ test "@src" {
 
test "@src used as a comptime parameter" {
const S = struct {
fn Foo(comptime _: std.builtin.SourceLocation) type {
return struct {};
fn Foo(comptime src: std.builtin.SourceLocation) type {
return struct {
comptime {
_ = src;
}
};
}
};
const T1 = S.Foo(@src());
 
test/behavior/struct.zig added: 4090, removed: 2946, total 1144
@@ -2127,3 +2127,26 @@ test "struct containing optional pointer to array of @This()" {
_ = &s;
try expect(s.x.?[0].x == null);
}
 
test "matching captures causes struct equivalence" {
const S = struct {
fn UnsignedWrapper(comptime I: type) type {
const bits = @typeInfo(I).Int.bits;
return struct {
x: @Type(.{ .Int = .{
.signedness = .unsigned,
.bits = bits,
} }),
};
}
};
 
comptime assert(S.UnsignedWrapper(u8) == S.UnsignedWrapper(i8));
comptime assert(S.UnsignedWrapper(u16) == S.UnsignedWrapper(i16));
comptime assert(S.UnsignedWrapper(u8) != S.UnsignedWrapper(u16));
 
const a: S.UnsignedWrapper(u8) = .{ .x = 10 };
const b: S.UnsignedWrapper(i8) = .{ .x = 10 };
comptime assert(@TypeOf(a) == @TypeOf(b));
try expect(a.x == b.x);
}
 
test/behavior/type.zig added: 4090, removed: 2946, total 1144
@@ -2,6 +2,7 @@ const std = @import("std");
const builtin = @import("builtin");
const Type = std.builtin.Type;
const testing = std.testing;
const assert = std.debug.assert;
 
fn testTypes(comptime types: []const type) !void {
inline for (types) |testType| {
@@ -734,3 +735,28 @@ test "struct field names sliced at comptime from larger string" {
try testing.expectEqualStrings("f3", gen_fields[2].name);
}
}
 
test "matching captures causes opaque equivalence" {
const S = struct {
fn UnsignedId(comptime I: type) type {
const U = @Type(.{ .Int = .{
.signedness = .unsigned,
.bits = @typeInfo(I).Int.bits,
} });
return opaque {
fn id(x: U) U {
return x;
}
};
}
};
 
comptime assert(S.UnsignedId(u8) == S.UnsignedId(i8));
comptime assert(S.UnsignedId(u16) == S.UnsignedId(i16));
comptime assert(S.UnsignedId(u8) != S.UnsignedId(u16));
 
const a = S.UnsignedId(u8).id(123);
const b = S.UnsignedId(i8).id(123);
comptime assert(@TypeOf(a) == @TypeOf(b));
try testing.expect(a == b);
}
 
test/behavior/typename.zig added: 4090, removed: 2946, total 1144
@@ -164,21 +164,30 @@ test "fn param" {
}
 
fn TypeFromFn(comptime T: type) type {
_ = T;
return struct {};
return struct {
comptime {
_ = T;
}
};
}
 
fn TypeFromFn2(comptime T1: type, comptime T2: type) type {
_ = T1;
_ = T2;
return struct {};
return struct {
comptime {
_ = T1;
_ = T2;
}
};
}
 
fn TypeFromFnB(comptime T1: type, comptime T2: type, comptime T3: type) type {
_ = T1;
_ = T2;
_ = T3;
return struct {};
return struct {
comptime {
_ = T1;
_ = T2;
_ = T3;
}
};
}
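This pattern (also applied in `generics.zig` and `src.zig` above) forces a capture: an empty `struct {}` that never references the function's parameters has no captures, so under the new equivalence rules every call would yield one identical type. Referencing the parameter inside the container body keeps instantiations distinct:

// return struct {};                       // no captures: same type for every T
// return struct { comptime { _ = T; } };  // captures T: distinct type per T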
 
/// Replaces integers in `actual` with '0' before doing the test.
 
test/behavior/union.zig added: 4090, removed: 2946, total 1144
@@ -2273,3 +2273,30 @@ test "create union(enum) from other union(enum)" {
else => {},
}
}
 
test "matching captures causes union equivalence" {
const S = struct {
fn SignedUnsigned(comptime I: type) type {
const bits = @typeInfo(I).Int.bits;
return union {
u: @Type(.{ .Int = .{
.signedness = .unsigned,
.bits = bits,
} }),
i: @Type(.{ .Int = .{
.signedness = .signed,
.bits = bits,
} }),
};
}
};
 
comptime assert(S.SignedUnsigned(u8) == S.SignedUnsigned(i8));
comptime assert(S.SignedUnsigned(u16) == S.SignedUnsigned(i16));
comptime assert(S.SignedUnsigned(u8) != S.SignedUnsigned(u16));
 
const a: S.SignedUnsigned(u8) = .{ .u = 10 };
const b: S.SignedUnsigned(i8) = .{ .u = 10 };
comptime assert(@TypeOf(a) == @TypeOf(b));
try expect(a.u == b.u);
}
 
test/cases/compile_errors/reify_struct.zig added: 4090, removed: 2946, total 1144
@@ -74,7 +74,7 @@ comptime {
// target=native
//
// :2:5: error: tuple cannot have non-numeric field 'foo'
// :16:5: error: tuple field 3 exceeds tuple field count
// :16:5: error: tuple field name '3' does not match field index 0
// :30:5: error: comptime field without default initialization value
// :44:5: error: extern struct fields cannot be marked comptime
// :58:5: error: alignment in a packed struct field must be set to 0
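The reworded error reflects that a reified tuple's field names must spell their own indices. A hedged sketch of code that would now trigger `tuple field name '3' does not match field index 0` (modeled on the shape of this test case, which the diff itself does not show):

comptime {
_ = @Type(.{ .Struct = .{
.layout = .Auto,
.fields = &.{.{
.name = "3", // the first tuple field must be named "0"
.type = u32,
.default_value = null,
.is_comptime = false,
.alignment = 0,
}},
.decls = &.{},
.is_tuple = true,
} });
}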
 
test/cases/compile_errors/reify_type_for_tagged_union_with_extra_enum_field.zig added: 4090, removed: 2946, total 1144
@@ -30,6 +30,6 @@ export fn entry() void {
// backend=stage2
// target=native
//
// :13:16: error: enum field(s) missing in union
// :13:16: error: enum fields missing in union
// :1:13: note: field 'arst' missing, declared here
// :1:13: note: enum declared here
 
test/cases/compile_errors/reify_type_for_tagged_union_with_no_union_fields.zig added: 4090, removed: 2946, total 1144
@@ -26,7 +26,7 @@ export fn entry() void {
// backend=stage2
// target=native
//
// :12:16: error: enum field(s) missing in union
// :12:16: error: enum fields missing in union
// :1:13: note: field 'signed' missing, declared here
// :1:13: note: field 'unsigned' missing, declared here
// :1:13: note: enum declared here