srctree

Andrew Kelley parent 341857e5 51325495 5140f272
Merge pull request #19437 from mlugg/value-cleanups

Follow-up to #19414

inlinesplit
CMakeLists.txt added: 2056, removed: 2746, total 0
@@ -526,7 +526,6 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/Package/Fetch.zig"
"${CMAKE_SOURCE_DIR}/src/RangeSet.zig"
"${CMAKE_SOURCE_DIR}/src/Sema.zig"
"${CMAKE_SOURCE_DIR}/src/TypedValue.zig"
"${CMAKE_SOURCE_DIR}/src/Value.zig"
"${CMAKE_SOURCE_DIR}/src/arch/aarch64/CodeGen.zig"
"${CMAKE_SOURCE_DIR}/src/arch/aarch64/Emit.zig"
@@ -634,9 +633,11 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/main.zig"
"${CMAKE_SOURCE_DIR}/src/mingw.zig"
"${CMAKE_SOURCE_DIR}/src/musl.zig"
"${CMAKE_SOURCE_DIR}/src/mutable_value.zig"
"${CMAKE_SOURCE_DIR}/src/print_air.zig"
"${CMAKE_SOURCE_DIR}/src/print_env.zig"
"${CMAKE_SOURCE_DIR}/src/print_targets.zig"
"${CMAKE_SOURCE_DIR}/src/print_value.zig"
"${CMAKE_SOURCE_DIR}/src/print_zir.zig"
"${CMAKE_SOURCE_DIR}/src/register_manager.zig"
"${CMAKE_SOURCE_DIR}/src/target.zig"
 
src/Compilation.zig added: 2056, removed: 2746, total 0
@@ -102,7 +102,6 @@ link_errors: std.ArrayListUnmanaged(link.File.ErrorMsg) = .{},
lld_errors: std.ArrayListUnmanaged(LldError) = .{},
 
work_queue: std.fifo.LinearFifo(Job, .Dynamic),
anon_work_queue: std.fifo.LinearFifo(Job, .Dynamic),
 
/// These jobs are to invoke the Clang compiler to create an object file, which
/// gets linked with the Compilation.
@@ -1417,7 +1416,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.emit_llvm_ir = options.emit_llvm_ir,
.emit_llvm_bc = options.emit_llvm_bc,
.work_queue = std.fifo.LinearFifo(Job, .Dynamic).init(gpa),
.anon_work_queue = std.fifo.LinearFifo(Job, .Dynamic).init(gpa),
.c_object_work_queue = std.fifo.LinearFifo(*CObject, .Dynamic).init(gpa),
.win32_resource_work_queue = if (build_options.only_core_functionality) {} else std.fifo.LinearFifo(*Win32Resource, .Dynamic).init(gpa),
.astgen_work_queue = std.fifo.LinearFifo(*Module.File, .Dynamic).init(gpa),
@@ -1840,7 +1838,6 @@ pub fn destroy(comp: *Compilation) void {
if (comp.module) |zcu| zcu.deinit();
comp.cache_use.deinit();
comp.work_queue.deinit();
comp.anon_work_queue.deinit();
comp.c_object_work_queue.deinit();
if (!build_options.only_core_functionality) {
comp.win32_resource_work_queue.deinit();
@@ -3354,18 +3351,11 @@ pub fn performAllTheWork(
mod.sema_prog_node = undefined;
};
 
// In this main loop we give priority to non-anonymous Decls in the work queue, so
// that they can establish references to anonymous Decls, setting alive=true in the
// backend, preventing anonymous Decls from being prematurely destroyed.
while (true) {
if (comp.work_queue.readItem()) |work_item| {
try processOneJob(comp, work_item, main_progress_node);
continue;
}
if (comp.anon_work_queue.readItem()) |work_item| {
try processOneJob(comp, work_item, main_progress_node);
continue;
}
if (comp.module) |zcu| {
// If there's no work queued, check if there's anything outdated
// which we need to work on, and queue it if so.
@@ -3413,14 +3403,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v
 
assert(decl.has_tv);
 
if (decl.alive) {
try module.linkerUpdateDecl(decl_index);
return;
}
 
// Instead of sending this decl to the linker, we actually will delete it
// because we found out that it in fact was never referenced.
module.deleteUnusedDecl(decl_index);
try module.linkerUpdateDecl(decl_index);
return;
},
}
 
src/InternPool.zig added: 2056, removed: 2746, total 0
@@ -6581,7 +6581,6 @@ pub fn getFuncInstance(ip: *InternPool, gpa: Allocator, arg: GetFuncInstanceKey)
generic_owner,
func_index,
func_extra_index,
func_ty,
arg.alignment,
arg.section,
);
@@ -6711,7 +6710,6 @@ pub fn getFuncInstanceIes(
generic_owner,
func_index,
func_extra_index,
func_ty,
arg.alignment,
arg.section,
);
@@ -6723,7 +6721,6 @@ fn finishFuncInstance(
generic_owner: Index,
func_index: Index,
func_extra_index: u32,
func_ty: Index,
alignment: Alignment,
section: OptionalNullTerminatedString,
) Allocator.Error!Index {
@@ -6735,7 +6732,6 @@ fn finishFuncInstance(
.src_line = fn_owner_decl.src_line,
.has_tv = true,
.owns_tv = true,
.ty = @import("type.zig").Type.fromInterned(func_ty),
.val = @import("Value.zig").fromInterned(func_index),
.alignment = alignment,
.@"linksection" = section,
@@ -6744,7 +6740,6 @@ fn finishFuncInstance(
.zir_decl_index = fn_owner_decl.zir_decl_index,
.is_pub = fn_owner_decl.is_pub,
.is_exported = fn_owner_decl.is_exported,
.alive = true,
.kind = .anon,
});
errdefer ip.destroyDecl(gpa, decl_index);
 
src/Module.zig added: 2056, removed: 2746, total 0
@@ -22,7 +22,6 @@ const Compilation = @import("Compilation.zig");
const Cache = std.Build.Cache;
const Value = @import("Value.zig");
const Type = @import("type.zig").Type;
const TypedValue = @import("TypedValue.zig");
const Package = @import("Package.zig");
const link = @import("link.zig");
const Air = @import("Air.zig");
@@ -330,9 +329,6 @@ const ValueArena = struct {
 
pub const Decl = struct {
name: InternPool.NullTerminatedString,
/// The most recent Type of the Decl after a successful semantic analysis.
/// Populated when `has_tv`.
ty: Type,
/// The most recent Value of the Decl after a successful semantic analysis.
/// Populated when `has_tv`.
val: Value,
@@ -397,15 +393,6 @@ pub const Decl = struct {
is_pub: bool,
/// Whether the corresponding AST decl has a `export` keyword.
is_exported: bool,
/// Flag used by garbage collection to mark and sweep.
/// Decls which correspond to an AST node always have this field set to `true`.
/// Anonymous Decls are initialized with this field set to `false` and then it
/// is the responsibility of machine code backends to mark it `true` whenever
/// a `decl_ref` Value is encountered that points to this Decl.
/// When the `codegen_decl` job is encountered in the main work queue, if the
/// Decl is marked alive, then it sends the Decl to the linker. Otherwise it
/// deletes the Decl on the spot.
alive: bool,
/// If true `name` is already fully qualified.
name_fully_qualified: bool = false,
/// What kind of a declaration is this.
@@ -438,14 +425,6 @@ pub const Decl = struct {
return @as(i32, @bitCast(node_index)) - @as(i32, @bitCast(decl.src_node));
}
 
pub fn tokSrcLoc(decl: Decl, token_index: Ast.TokenIndex) LazySrcLoc {
return .{ .token_offset = token_index - decl.srcToken() };
}
 
pub fn nodeSrcLoc(decl: Decl, node_index: Ast.Node.Index) LazySrcLoc {
return LazySrcLoc.nodeOffset(decl.nodeIndexToRelative(node_index));
}
 
pub fn srcLoc(decl: Decl, zcu: *Zcu) SrcLoc {
return decl.nodeOffsetSrcLoc(0, zcu);
}
@@ -458,16 +437,6 @@ pub const Decl = struct {
};
}
 
pub fn srcToken(decl: Decl, zcu: *Zcu) Ast.TokenIndex {
const tree = &decl.getFileScope(zcu).tree;
return tree.firstToken(decl.src_node);
}
 
pub fn srcByteOffset(decl: Decl, zcu: *Zcu) u32 {
const tree = &decl.getFileScope(zcu).tree;
return tree.tokens.items(.start)[decl.srcToken()];
}
 
pub fn renderFullyQualifiedName(decl: Decl, zcu: *Zcu, writer: anytype) !void {
if (decl.name_fully_qualified) {
try writer.print("{}", .{decl.name.fmt(&zcu.intern_pool)});
@@ -487,37 +456,16 @@ pub const Decl = struct {
zcu.namespacePtr(decl.src_namespace).fullyQualifiedName(zcu, decl.name);
}
 
pub fn typedValue(decl: Decl) error{AnalysisFail}!TypedValue {
if (!decl.has_tv) return error.AnalysisFail;
return TypedValue{ .ty = decl.ty, .val = decl.val };
}
 
pub fn internValue(decl: *Decl, zcu: *Zcu) Allocator.Error!InternPool.Index {
pub fn typeOf(decl: Decl, zcu: *const Zcu) Type {
assert(decl.has_tv);
const ip_index = try decl.val.intern(decl.ty, zcu);
decl.val = Value.fromInterned(ip_index);
return ip_index;
return decl.val.typeOf(zcu);
}
 
pub fn isFunction(decl: Decl, zcu: *const Zcu) !bool {
const tv = try decl.typedValue();
return tv.ty.zigTypeTag(zcu) == .Fn;
}
 
/// If the Decl owns its value and it is a struct, return it,
/// otherwise null.
pub fn getOwnedStruct(decl: Decl, zcu: *Zcu) ?InternPool.Key.StructType {
if (!decl.owns_tv) return null;
if (decl.val.ip_index == .none) return null;
return zcu.typeToStruct(decl.val.toType());
}
 
/// If the Decl owns its value and it is a union, return it,
/// otherwise null.
pub fn getOwnedUnion(decl: Decl, zcu: *Zcu) ?InternPool.LoadedUnionType {
if (!decl.owns_tv) return null;
if (decl.val.ip_index == .none) return null;
return zcu.typeToUnion(decl.val.toType());
/// Small wrapper for Sema to use over direct access to the `val` field.
/// If the value is not populated, instead returns `error.AnalysisFail`.
pub fn valueOrFail(decl: Decl) error{AnalysisFail}!Value {
if (!decl.has_tv) return error.AnalysisFail;
return decl.val;
}
 
pub fn getOwnedFunction(decl: Decl, zcu: *Zcu) ?InternPool.Key.Func {
@@ -590,7 +538,7 @@ pub const Decl = struct {
@tagName(decl.analysis),
});
if (decl.has_tv) {
std.debug.print(" ty={} val={}", .{ decl.ty, decl.val });
std.debug.print(" val={}", .{decl.val});
}
std.debug.print("\n", .{});
}
@@ -615,7 +563,7 @@ pub const Decl = struct {
pub fn getAlignment(decl: Decl, zcu: *Zcu) Alignment {
assert(decl.has_tv);
if (decl.alignment != .none) return decl.alignment;
return decl.ty.abiAlignment(zcu);
return decl.typeOf(zcu).abiAlignment(zcu);
}
 
/// Upgrade a `LazySrcLoc` to a `SrcLoc` based on the `Decl` provided.
@@ -3525,10 +3473,8 @@ fn semaFile(mod: *Module, file: *File) SemaError!void {
new_decl.src_line = 0;
new_decl.is_pub = true;
new_decl.is_exported = false;
new_decl.ty = Type.type;
new_decl.alignment = .none;
new_decl.@"linksection" = .none;
new_decl.alive = true; // This Decl corresponds to a File and is therefore always alive.
new_decl.analysis = .in_progress;
 
if (file.status != .success_zir) {
@@ -3594,7 +3540,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
 
const old_has_tv = decl.has_tv;
// The following values are ignored if `!old_has_tv`
const old_ty = decl.ty;
const old_ty = if (old_has_tv) decl.typeOf(mod) else undefined;
const old_val = decl.val;
const old_align = decl.alignment;
const old_linksection = decl.@"linksection";
@@ -3698,25 +3644,25 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
const address_space_src: LazySrcLoc = .{ .node_offset_var_decl_addrspace = 0 };
const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = 0 };
const init_src: LazySrcLoc = .{ .node_offset_var_decl_init = 0 };
const decl_tv = try sema.resolveFinalDeclValue(&block_scope, init_src, result_ref);
const decl_val = try sema.resolveFinalDeclValue(&block_scope, init_src, result_ref);
const decl_ty = decl_val.typeOf(mod);
 
// Note this resolves the type of the Decl, not the value; if this Decl
// is a struct, for example, this resolves `type` (which needs no resolution),
// not the struct itself.
try sema.resolveTypeLayout(decl_tv.ty);
try sema.resolveTypeLayout(decl_ty);
 
if (decl.kind == .@"usingnamespace") {
if (!decl_tv.ty.eql(Type.type, mod)) {
if (!decl_ty.eql(Type.type, mod)) {
return sema.fail(&block_scope, ty_src, "expected type, found {}", .{
decl_tv.ty.fmt(mod),
decl_ty.fmt(mod),
});
}
const ty = decl_tv.val.toType();
const ty = decl_val.toType();
if (ty.getNamespace(mod) == null) {
return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(mod)});
}
 
decl.ty = Type.fromInterned(InternPool.Index.type_type);
decl.val = ty.toValue();
decl.alignment = .none;
decl.@"linksection" = .none;
@@ -3734,10 +3680,10 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
var queue_linker_work = true;
var is_func = false;
var is_inline = false;
switch (decl_tv.val.toIntern()) {
switch (decl_val.toIntern()) {
.generic_poison => unreachable,
.unreachable_value => unreachable,
else => switch (ip.indexToKey(decl_tv.val.toIntern())) {
else => switch (ip.indexToKey(decl_val.toIntern())) {
.variable => |variable| {
decl.owns_tv = variable.decl == decl_index;
queue_linker_work = decl.owns_tv;
@@ -3752,7 +3698,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
.func => |func| {
decl.owns_tv = func.owner_decl == decl_index;
queue_linker_work = false;
is_inline = decl.owns_tv and decl_tv.ty.fnCallingConvention(mod) == .Inline;
is_inline = decl.owns_tv and decl_ty.fnCallingConvention(mod) == .Inline;
is_func = decl.owns_tv;
},
 
@@ -3760,8 +3706,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
},
}
 
decl.ty = decl_tv.ty;
decl.val = Value.fromInterned((try decl_tv.val.intern(decl_tv.ty, mod)));
decl.val = decl_val;
// Function linksection, align, and addrspace were already set by Sema
if (!is_func) {
decl.alignment = blk: {
@@ -3784,7 +3729,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
break :blk section.toOptional();
};
decl.@"addrspace" = blk: {
const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(decl_tv.val.toIntern())) {
const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(decl_val.toIntern())) {
.variable => .variable,
.extern_func, .func => .function,
else => .constant,
@@ -3806,10 +3751,10 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
decl.analysis = .complete;
 
const result: SemaDeclResult = if (old_has_tv) .{
.invalidate_decl_val = !decl.ty.eql(old_ty, mod) or
!decl.val.eql(old_val, decl.ty, mod) or
.invalidate_decl_val = !decl_ty.eql(old_ty, mod) or
!decl.val.eql(old_val, decl_ty, mod) or
is_inline != old_is_inline,
.invalidate_decl_ref = !decl.ty.eql(old_ty, mod) or
.invalidate_decl_ref = !decl_ty.eql(old_ty, mod) or
decl.alignment != old_align or
decl.@"linksection" != old_linksection or
decl.@"addrspace" != old_addrspace or
@@ -3819,11 +3764,11 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
.invalidate_decl_ref = true,
};
 
const has_runtime_bits = queue_linker_work and (is_func or try sema.typeHasRuntimeBits(decl.ty));
const has_runtime_bits = queue_linker_work and (is_func or try sema.typeHasRuntimeBits(decl_ty));
if (has_runtime_bits) {
// Needed for codegen_decl which will call updateDecl and then the
// codegen backend wants full access to the Decl Type.
try sema.resolveTypeFully(decl.ty);
try sema.resolveTypeFully(decl_ty);
 
try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index });
 
@@ -3850,7 +3795,7 @@ fn semaAnonOwnerDecl(zcu: *Zcu, decl_index: Decl.Index) !SemaDeclResult {
 
log.debug("semaAnonOwnerDecl '{d}'", .{@intFromEnum(decl_index)});
 
switch (decl.ty.zigTypeTag(zcu)) {
switch (decl.typeOf(zcu).zigTypeTag(zcu)) {
.Fn => @panic("TODO: update fn instance"),
.Type => {},
else => unreachable,
@@ -4380,7 +4325,6 @@ fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void
const decl = zcu.declPtr(decl_index);
const was_exported = decl.is_exported;
assert(decl.kind == kind); // ZIR tracking should preserve this
assert(decl.alive);
decl.name = decl_name;
decl.src_node = decl_node;
decl.src_line = line;
@@ -4397,7 +4341,6 @@ fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void
new_decl.is_pub = declaration.flags.is_pub;
new_decl.is_exported = declaration.flags.is_export;
new_decl.zir_decl_index = tracked_inst.toOptional();
new_decl.alive = true; // This Decl corresponds to an AST node and is therefore always alive.
break :decl_index .{ false, new_decl_index };
};
 
@@ -4450,22 +4393,6 @@ fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void
}
}
 
/// This function is exclusively called for anonymous decls.
/// All resources referenced by anonymous decls are owned by InternPool
/// so there is no cleanup to do here.
pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void {
const gpa = mod.gpa;
const ip = &mod.intern_pool;
 
ip.destroyDecl(gpa, decl_index);
 
if (mod.emit_h) |mod_emit_h| {
const decl_emit_h = mod_emit_h.declPtr(decl_index);
decl_emit_h.fwd_decl.deinit(gpa);
decl_emit_h.* = undefined;
}
}
 
/// Cancel the creation of an anon decl and delete any references to it.
/// If other decls depend on this decl, they must be aborted first.
pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void {
@@ -4475,12 +4402,8 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void {
 
/// Finalize the creation of an anon decl.
pub fn finalizeAnonDecl(mod: *Module, decl_index: Decl.Index) Allocator.Error!void {
// The Decl starts off with alive=false and the codegen backend will set alive=true
// if the Decl is referenced by an instruction or another constant. Otherwise,
// the Decl will be garbage collected by the `codegen_decl` task instead of sent
// to the linker.
if (mod.declPtr(decl_index).ty.isFnOrHasRuntimeBits(mod)) {
try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = decl_index });
if (mod.declPtr(decl_index).typeOf(mod).isFnOrHasRuntimeBits(mod)) {
try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index });
}
}
 
@@ -4563,7 +4486,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
// the runtime-known parameters only, not to be confused with the
// generic_owner function type, which potentially has more parameters,
// including comptime parameters.
const fn_ty = decl.ty;
const fn_ty = decl.typeOf(mod);
const fn_ty_info = mod.typeToFunc(fn_ty).?;
 
var sema: Sema = .{
@@ -4812,7 +4735,6 @@ pub fn allocateNewDecl(
.src_line = undefined,
.has_tv = false,
.owns_tv = false,
.ty = undefined,
.val = undefined,
.alignment = undefined,
.@"linksection" = .none,
@@ -4821,7 +4743,6 @@ pub fn allocateNewDecl(
.zir_decl_index = .none,
.is_pub = false,
.is_exported = false,
.alive = false,
.kind = .anon,
});
 
@@ -4856,41 +4777,18 @@ pub fn errorSetBits(mod: *Module) u16 {
return std.math.log2_int_ceil(ErrorInt, mod.error_limit + 1); // +1 for no error
}
 
pub fn createAnonymousDecl(mod: *Module, block: *Sema.Block, typed_value: TypedValue) !Decl.Index {
const src_decl = mod.declPtr(block.src_decl);
return mod.createAnonymousDeclFromDecl(src_decl, block.namespace, typed_value);
}
 
pub fn createAnonymousDeclFromDecl(
mod: *Module,
src_decl: *Decl,
namespace: Namespace.Index,
tv: TypedValue,
) !Decl.Index {
const new_decl_index = try mod.allocateNewDecl(namespace, src_decl.src_node);
errdefer mod.destroyDecl(new_decl_index);
const name = try mod.intern_pool.getOrPutStringFmt(mod.gpa, "{}__anon_{d}", .{
src_decl.name.fmt(&mod.intern_pool), @intFromEnum(new_decl_index),
});
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, tv, name);
return new_decl_index;
}
 
pub fn initNewAnonDecl(
mod: *Module,
new_decl_index: Decl.Index,
src_line: u32,
typed_value: TypedValue,
val: Value,
name: InternPool.NullTerminatedString,
) Allocator.Error!void {
assert(typed_value.ty.toIntern() == mod.intern_pool.typeOf(typed_value.val.toIntern()));
 
const new_decl = mod.declPtr(new_decl_index);
 
new_decl.name = name;
new_decl.src_line = src_line;
new_decl.ty = typed_value.ty;
new_decl.val = typed_value.val;
new_decl.val = val;
new_decl.alignment = .none;
new_decl.@"linksection" = .none;
new_decl.has_tv = true;
@@ -5419,9 +5317,9 @@ pub fn populateTestFunctions(
try mod.ensureDeclAnalyzed(decl_index);
}
const decl = mod.declPtr(decl_index);
const test_fn_ty = decl.ty.slicePtrFieldType(mod).childType(mod);
const test_fn_ty = decl.typeOf(mod).slicePtrFieldType(mod).childType(mod);
 
const array_decl_index = d: {
const array_anon_decl: InternPool.Key.Ptr.Addr.AnonDecl = array: {
// Add mod.test_functions to an array decl then make the test_functions
// decl reference it as a slice.
const test_fn_vals = try gpa.alloc(InternPool.Index, mod.test_functions.count());
@@ -5431,21 +5329,20 @@ pub fn populateTestFunctions(
const test_decl = mod.declPtr(test_decl_index);
const test_decl_name = try gpa.dupe(u8, ip.stringToSlice(try test_decl.fullyQualifiedName(mod)));
defer gpa.free(test_decl_name);
const test_name_decl_index = n: {
const test_name_decl_ty = try mod.arrayType(.{
const test_name_anon_decl: InternPool.Key.Ptr.Addr.AnonDecl = n: {
const test_name_ty = try mod.arrayType(.{
.len = test_decl_name.len,
.child = .u8_type,
});
const test_name_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, .{
.ty = test_name_decl_ty,
.val = Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = test_name_decl_ty.toIntern(),
.storage = .{ .bytes = test_decl_name },
} }))),
});
break :n test_name_decl_index;
const test_name_val = try mod.intern(.{ .aggregate = .{
.ty = test_name_ty.toIntern(),
.storage = .{ .bytes = test_decl_name },
} });
break :n .{
.orig_ty = (try mod.singleConstPtrType(test_name_ty)).toIntern(),
.val = test_name_val,
};
};
try mod.linkerUpdateDecl(test_name_decl_index);
 
const test_fn_fields = .{
// name
@@ -5453,7 +5350,7 @@ pub fn populateTestFunctions(
.ty = .slice_const_u8_type,
.ptr = try mod.intern(.{ .ptr = .{
.ty = .manyptr_const_u8_type,
.addr = .{ .decl = test_name_decl_index },
.addr = .{ .anon_decl = test_name_anon_decl },
} }),
.len = try mod.intern(.{ .int = .{
.ty = .usize_type,
@@ -5463,7 +5360,7 @@ pub fn populateTestFunctions(
// func
try mod.intern(.{ .ptr = .{
.ty = try mod.intern(.{ .ptr_type = .{
.child = test_decl.ty.toIntern(),
.child = test_decl.typeOf(mod).toIntern(),
.flags = .{
.is_const = true,
},
@@ -5477,22 +5374,20 @@ pub fn populateTestFunctions(
} });
}
 
const array_decl_ty = try mod.arrayType(.{
const array_ty = try mod.arrayType(.{
.len = test_fn_vals.len,
.child = test_fn_ty.toIntern(),
.sentinel = .none,
});
const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, .{
.ty = array_decl_ty,
.val = Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = array_decl_ty.toIntern(),
.storage = .{ .elems = test_fn_vals },
} }))),
});
 
break :d array_decl_index;
const array_val = try mod.intern(.{ .aggregate = .{
.ty = array_ty.toIntern(),
.storage = .{ .elems = test_fn_vals },
} });
break :array .{
.orig_ty = (try mod.singleConstPtrType(array_ty)).toIntern(),
.val = array_val,
};
};
try mod.linkerUpdateDecl(array_decl_index);
 
{
const new_ty = try mod.ptrType(.{
@@ -5507,7 +5402,7 @@ pub fn populateTestFunctions(
.ty = new_ty.toIntern(),
.ptr = try mod.intern(.{ .ptr = .{
.ty = new_ty.slicePtrFieldType(mod).toIntern(),
.addr = .{ .decl = array_decl_index },
.addr = .{ .anon_decl = array_anon_decl },
} }),
.len = (try mod.intValue(Type.usize, mod.test_functions.count())).toIntern(),
} });
@@ -5515,7 +5410,6 @@ pub fn populateTestFunctions(
 
// Since we are replacing the Decl's value we must perform cleanup on the
// previous value.
decl.ty = new_ty;
decl.val = new_val;
decl.has_tv = true;
}
@@ -5590,53 +5484,6 @@ fn reportRetryableFileError(
gop.value_ptr.* = err_msg;
}
 
pub fn markReferencedDeclsAlive(mod: *Module, val: Value) Allocator.Error!void {
switch (mod.intern_pool.indexToKey(val.toIntern())) {
.variable => |variable| try mod.markDeclIndexAlive(variable.decl),
.extern_func => |extern_func| try mod.markDeclIndexAlive(extern_func.decl),
.func => |func| try mod.markDeclIndexAlive(func.owner_decl),
.error_union => |error_union| switch (error_union.val) {
.err_name => {},
.payload => |payload| try mod.markReferencedDeclsAlive(Value.fromInterned(payload)),
},
.slice => |slice| {
try mod.markReferencedDeclsAlive(Value.fromInterned(slice.ptr));
try mod.markReferencedDeclsAlive(Value.fromInterned(slice.len));
},
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl| try mod.markDeclIndexAlive(decl),
.anon_decl => {},
.int, .comptime_field, .comptime_alloc => {},
.eu_payload, .opt_payload => |parent| try mod.markReferencedDeclsAlive(Value.fromInterned(parent)),
.elem, .field => |base_index| try mod.markReferencedDeclsAlive(Value.fromInterned(base_index.base)),
},
.opt => |opt| if (opt.val != .none) try mod.markReferencedDeclsAlive(Value.fromInterned(opt.val)),
.aggregate => |aggregate| for (aggregate.storage.values()) |elem|
try mod.markReferencedDeclsAlive(Value.fromInterned(elem)),
.un => |un| {
if (un.tag != .none) try mod.markReferencedDeclsAlive(Value.fromInterned(un.tag));
try mod.markReferencedDeclsAlive(Value.fromInterned(un.val));
},
else => {},
}
}
 
pub fn markDeclAlive(mod: *Module, decl: *Decl) Allocator.Error!void {
if (decl.alive) return;
decl.alive = true;
 
_ = try decl.internValue(mod);
 
// This is the first time we are marking this Decl alive. We must
// therefore recurse into its value and mark any Decl it references
// as also alive, so that any Decl referenced does not get garbage collected.
try mod.markReferencedDeclsAlive(decl.val);
}
 
fn markDeclIndexAlive(mod: *Module, decl_index: Decl.Index) Allocator.Error!void {
return mod.markDeclAlive(mod.declPtr(decl_index));
}
 
pub fn addGlobalAssembly(mod: *Module, decl_index: Decl.Index, source: []const u8) !void {
const gop = try mod.global_assembly.getOrPut(mod.gpa, decl_index);
if (gop.found_existing) {
 
src/Sema.zig added: 2056, removed: 2746, total 0
@@ -139,8 +139,7 @@ const MaybeComptimeAlloc = struct {
};
 
const ComptimeAlloc = struct {
ty: Type,
val: Value,
val: MutableValue,
is_const: bool,
/// `.none` indicates that the alignment is the natural alignment of `val`.
alignment: Alignment,
@@ -153,8 +152,7 @@ const ComptimeAlloc = struct {
fn newComptimeAlloc(sema: *Sema, block: *Block, ty: Type, alignment: Alignment) !ComptimeAllocIndex {
const idx = sema.comptime_allocs.items.len;
try sema.comptime_allocs.append(sema.gpa, .{
.ty = ty,
.val = Value.fromInterned(try sema.mod.intern(.{ .undef = ty.toIntern() })),
.val = .{ .interned = try sema.mod.intern(.{ .undef = ty.toIntern() }) },
.is_const = false,
.alignment = alignment,
.runtime_index = block.runtime_index,
@@ -175,8 +173,8 @@ const log = std.log.scoped(.sema);
 
const Sema = @This();
const Value = @import("Value.zig");
const MutableValue = @import("mutable_value.zig").MutableValue;
const Type = @import("type.zig").Type;
const TypedValue = @import("TypedValue.zig");
const Air = @import("Air.zig");
const Zir = std.zig.Zir;
const Module = @import("Module.zig");
@@ -1709,7 +1707,7 @@ fn analyzeBodyInner(
.needed_comptime_reason = "condition in comptime branch must be comptime-known",
.block_comptime_reason = block.comptime_reason,
});
const inline_body = if (cond.val.toBool()) then_body else else_body;
const inline_body = if (cond.toBool()) then_body else else_body;
 
try sema.maybeErrorUnwrapCondbr(block, inline_body, extra.data.condition, cond_src);
 
@@ -1729,7 +1727,7 @@ fn analyzeBodyInner(
.needed_comptime_reason = "condition in comptime branch must be comptime-known",
.block_comptime_reason = block.comptime_reason,
});
const inline_body = if (cond.val.toBool()) then_body else else_body;
const inline_body = if (cond.toBool()) then_body else else_body;
 
try sema.maybeErrorUnwrapCondbr(block, inline_body, extra.data.condition, cond_src);
const old_runtime_index = block.runtime_index;
@@ -1882,10 +1880,10 @@ pub fn toConstString(
air_inst: Air.Inst.Ref,
reason: NeededComptimeReason,
) ![]u8 {
const wanted_type = Type.slice_const_u8;
const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
const val = try sema.resolveConstDefinedValue(block, src, coerced_inst, reason);
return val.toAllocatedBytes(wanted_type, sema.arena, sema.mod);
const coerced_inst = try sema.coerce(block, Type.slice_const_u8, air_inst, src);
const slice_val = try sema.resolveConstDefinedValue(block, src, coerced_inst, reason);
const arr_val = try sema.derefSliceAsArray(block, src, slice_val, reason);
return arr_val.toAllocatedBytes(arr_val.typeOf(sema.mod), sema.arena, sema.mod);
}
 
pub fn resolveConstStringIntern(
@@ -2180,13 +2178,9 @@ fn resolveInstConst(
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
reason: NeededComptimeReason,
) CompileError!TypedValue {
) CompileError!Value {
const air_ref = try sema.resolveInst(zir_ref);
const val = try sema.resolveConstDefinedValue(block, src, air_ref, reason);
return .{
.ty = sema.typeOf(air_ref),
.val = val,
};
return sema.resolveConstDefinedValue(block, src, air_ref, reason);
}
 
/// Value Tag may be `undef` or `variable`.
@@ -2195,7 +2189,7 @@ pub fn resolveFinalDeclValue(
block: *Block,
src: LazySrcLoc,
air_ref: Air.Inst.Ref,
) CompileError!TypedValue {
) CompileError!Value {
const val = try sema.resolveValueAllowVariables(air_ref) orelse {
return sema.failWithNeededComptime(block, src, .{
.needed_comptime_reason = "global variable initializer must be comptime-known",
@@ -2205,10 +2199,7 @@ pub fn resolveFinalDeclValue(
if (val.canMutateComptimeVarState(sema.mod)) {
return sema.fail(block, src, "global variable contains reference to comptime var", .{});
}
return .{
.ty = sema.typeOf(air_ref),
.val = val,
};
return val;
}
 
fn failWithNeededComptime(sema: *Sema, block: *Block, src: LazySrcLoc, reason: NeededComptimeReason) CompileError {
@@ -2281,7 +2272,7 @@ fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty:
if (int_ty.zigTypeTag(mod) == .Vector) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "overflow of vector type '{}' with value '{}'", .{
int_ty.fmt(sema.mod), val.fmtValue(int_ty, sema.mod),
int_ty.fmt(sema.mod), val.fmtValue(sema.mod),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "when computing vector element at index '{d}'", .{vector_index});
@@ -2290,7 +2281,7 @@ fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty:
return sema.failWithOwnedErrorMsg(block, msg);
}
return sema.fail(block, src, "overflow of integer type '{}' with value '{}'", .{
int_ty.fmt(sema.mod), val.fmtValue(int_ty, sema.mod),
int_ty.fmt(sema.mod), val.fmtValue(sema.mod),
});
}
 
@@ -2826,10 +2817,14 @@ fn zirStructDecl(
});
errdefer wip_ty.cancel(ip);
 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = Value.fromInterned(wip_ty.index),
}, small.name_strategy, "struct", inst);
const new_decl_index = try sema.createAnonymousDeclTypeNamed(
block,
src,
Value.fromInterned(wip_ty.index),
small.name_strategy,
"struct",
inst,
);
mod.declPtr(new_decl_index).owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
 
@@ -2862,7 +2857,7 @@ fn createAnonymousDeclTypeNamed(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
typed_value: TypedValue,
val: Value,
name_strategy: Zir.Inst.NameStrategy,
anon_prefix: []const u8,
inst: ?Zir.Inst.Index,
@@ -2888,12 +2883,12 @@ fn createAnonymousDeclTypeNamed(
const name = mod.intern_pool.getOrPutStringFmt(gpa, "{}__{s}_{d}", .{
src_decl.name.fmt(&mod.intern_pool), anon_prefix, @intFromEnum(new_decl_index),
}) catch unreachable;
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name);
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, val, name);
return new_decl_index;
},
.parent => {
const name = mod.declPtr(block.src_decl).name;
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name);
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, val, name);
return new_decl_index;
},
.func => {
@@ -2916,10 +2911,10 @@ fn createAnonymousDeclTypeNamed(
// function and the name doesn't matter since it will later
// result in a compile error.
const arg_val = sema.resolveConstValue(block, .unneeded, arg, undefined) catch
return sema.createAnonymousDeclTypeNamed(block, src, typed_value, .anon, anon_prefix, null);
return sema.createAnonymousDeclTypeNamed(block, src, val, .anon, anon_prefix, null);
 
if (arg_i != 0) try writer.writeByte(',');
try writer.print("{}", .{arg_val.fmtValue(sema.typeOf(arg), sema.mod)});
try writer.print("{}", .{arg_val.fmtValue(sema.mod)});
 
arg_i += 1;
continue;
@@ -2929,7 +2924,7 @@ fn createAnonymousDeclTypeNamed(
 
try writer.writeByte(')');
const name = try mod.intern_pool.getOrPutString(gpa, buf.items);
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name);
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, val, name);
return new_decl_index;
},
.dbg_var => {
@@ -2944,12 +2939,12 @@ fn createAnonymousDeclTypeNamed(
src_decl.name.fmt(&mod.intern_pool), zir_data[i].str_op.getStr(sema.code),
});
 
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name);
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, val, name);
return new_decl_index;
},
else => {},
};
return sema.createAnonymousDeclTypeNamed(block, src, typed_value, .anon, anon_prefix, null);
return sema.createAnonymousDeclTypeNamed(block, src, val, .anon, anon_prefix, null);
},
}
}
@@ -3049,10 +3044,14 @@ fn zirEnumDecl(
 
errdefer if (!done) wip_ty.cancel(ip);
 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = Value.fromInterned(wip_ty.index),
}, small.name_strategy, "enum", inst);
const new_decl_index = try sema.createAnonymousDeclTypeNamed(
block,
src,
Value.fromInterned(wip_ty.index),
small.name_strategy,
"enum",
inst,
);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer if (!done) mod.abortAnonDecl(new_decl_index);
@@ -3187,7 +3186,7 @@ fn zirEnumDecl(
}).lazy;
const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = conflict.prev_field_idx }).lazy;
const msg = msg: {
const msg = try sema.errMsg(block, value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(int_tag_ty, sema.mod)});
const msg = try sema.errMsg(block, value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(sema.mod)});
errdefer msg.destroy(gpa);
try sema.errNote(block, other_field_src, msg, "other occurrence here", .{});
break :msg msg;
@@ -3207,7 +3206,7 @@ fn zirEnumDecl(
const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy;
const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = conflict.prev_field_idx }).lazy;
const msg = msg: {
const msg = try sema.errMsg(block, field_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(int_tag_ty, sema.mod)});
const msg = try sema.errMsg(block, field_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(sema.mod)});
errdefer msg.destroy(gpa);
try sema.errNote(block, other_field_src, msg, "other occurrence here", .{});
break :msg msg;
@@ -3229,7 +3228,7 @@ fn zirEnumDecl(
.range = if (has_tag_value) .value else .name,
}).lazy;
const msg = try sema.errMsg(block, value_src, "enumeration value '{}' too large for type '{}'", .{
last_tag_val.?.fmtValue(int_tag_ty, mod), int_tag_ty.fmt(mod),
last_tag_val.?.fmtValue(mod), int_tag_ty.fmt(mod),
});
return sema.failWithOwnedErrorMsg(block, msg);
}
@@ -3316,10 +3315,14 @@ fn zirUnionDecl(
});
errdefer wip_ty.cancel(ip);
 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = Value.fromInterned(wip_ty.index),
}, small.name_strategy, "union", inst);
const new_decl_index = try sema.createAnonymousDeclTypeNamed(
block,
src,
Value.fromInterned(wip_ty.index),
small.name_strategy,
"union",
inst,
);
mod.declPtr(new_decl_index).owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
 
@@ -3400,10 +3403,14 @@ fn zirOpaqueDecl(
};
errdefer wip_ty.cancel(ip);
 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = Value.fromInterned(wip_ty.index),
}, small.name_strategy, "opaque", inst);
const new_decl_index = try sema.createAnonymousDeclTypeNamed(
block,
src,
Value.fromInterned(wip_ty.index),
small.name_strategy,
"opaque",
inst,
);
mod.declPtr(new_decl_index).owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
 
@@ -3463,10 +3470,14 @@ fn zirErrorSetDecl(
 
const error_set_ty = try mod.errorSetFromUnsortedNames(names.keys());
 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = error_set_ty.toValue(),
}, name_strategy, "error", inst);
const new_decl_index = try sema.createAnonymousDeclTypeNamed(
block,
src,
error_set_ty.toValue(),
name_strategy,
"error",
inst,
);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
@@ -3762,10 +3773,10 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
if (!sema.isComptimeMutablePtr(ptr_val)) break :already_ct;
const alloc_index = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr.comptime_alloc;
const ct_alloc = sema.getComptimeAlloc(alloc_index);
const interned = try ct_alloc.val.intern(ct_alloc.ty, mod);
const interned = try ct_alloc.val.intern(mod, sema.arena);
if (Value.fromInterned(interned).canMutateComptimeVarState(mod)) {
// Preserve the comptime alloc, just make the pointer const.
ct_alloc.val = Value.fromInterned(interned);
ct_alloc.val = .{ .interned = interned };
ct_alloc.is_const = true;
return sema.makePtrConst(block, alloc);
} else {
@@ -4030,7 +4041,7 @@ fn finishResolveComptimeKnownAllocPtr(
const alloc_index = existing_comptime_alloc orelse a: {
const idx = try sema.newComptimeAlloc(block, alloc_ty.childType(zcu), alloc_ty.ptrAlignment(zcu));
const alloc = sema.getComptimeAlloc(idx);
alloc.val = Value.fromInterned(result_val);
alloc.val = .{ .interned = result_val };
break :a idx;
};
sema.getComptimeAlloc(alloc_index).is_const = true;
@@ -4193,7 +4204,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
.anon_decl => |a| a.val,
.comptime_alloc => |i| val: {
const alloc = sema.getComptimeAlloc(i);
break :val try alloc.val.intern(alloc.ty, mod);
break :val try alloc.val.intern(mod, sema.arena);
},
else => unreachable,
};
@@ -4370,10 +4381,10 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.input_index = len_idx,
} };
try sema.errNote(block, a_src, msg, "length {} here", .{
v.fmtValue(Type.usize, sema.mod),
v.fmtValue(sema.mod),
});
try sema.errNote(block, arg_src, msg, "length {} here", .{
arg_val.fmtValue(Type.usize, sema.mod),
arg_val.fmtValue(sema.mod),
});
break :msg msg;
};
@@ -5597,7 +5608,7 @@ fn storeToInferredAllocComptime(
} });
} else {
const alloc_index = try sema.newComptimeAlloc(block, operand_ty, iac.alignment);
sema.getComptimeAlloc(alloc_index).val = operand_val;
sema.getComptimeAlloc(alloc_index).val = .{ .interned = operand_val.toIntern() };
iac.ptr = try zcu.intern(.{ .ptr = .{
.ty = alloc_ty.toIntern(),
.addr = .{ .comptime_alloc = alloc_index },
@@ -5783,7 +5794,7 @@ fn zirCompileLog(
const arg_ty = sema.typeOf(arg);
if (try sema.resolveValueResolveLazy(arg)) |val| {
try writer.print("@as({}, {})", .{
arg_ty.fmt(mod), val.fmtValue(arg_ty, mod),
arg_ty.fmt(mod), val.fmtValue(mod),
});
} else {
try writer.print("@as({}, [runtime value])", .{arg_ty.fmt(mod)});
@@ -6395,7 +6406,7 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const options = try sema.resolveExportOptions(block, options_src, extra.options);
if (options.linkage == .internal)
return;
if (operand.val.getFunction(mod)) |function| {
if (operand.getFunction(mod)) |function| {
const decl_index = function.owner_decl;
return sema.analyzeExport(block, src, options, decl_index);
}
@@ -6405,7 +6416,7 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
.src = src,
.owner_decl = sema.owner_decl_index,
.src_decl = block.src_decl,
.exported = .{ .value = operand.val.toIntern() },
.exported = .{ .value = operand.toIntern() },
.status = .in_progress,
});
}
@@ -6425,16 +6436,17 @@ pub fn analyzeExport(
 
try mod.ensureDeclAnalyzed(exported_decl_index);
const exported_decl = mod.declPtr(exported_decl_index);
const export_ty = exported_decl.typeOf(mod);
 
if (!try sema.validateExternType(exported_decl.ty, .other)) {
if (!try sema.validateExternType(export_ty, .other)) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "unable to export type '{}'", .{exported_decl.ty.fmt(mod)});
const msg = try sema.errMsg(block, src, "unable to export type '{}'", .{export_ty.fmt(mod)});
errdefer msg.destroy(gpa);
 
const src_decl = mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsNotExtern(msg, src_decl.toSrcLoc(src, mod), exported_decl.ty, .other);
try sema.explainWhyTypeIsNotExtern(msg, src_decl.toSrcLoc(src, mod), export_ty, .other);
 
try sema.addDeclaredHereNote(msg, exported_decl.ty);
try sema.addDeclaredHereNote(msg, export_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
@@ -6445,8 +6457,6 @@ pub fn analyzeExport(
return sema.fail(block, src, "export target cannot be extern", .{});
}
 
// This decl is alive no matter what, since it's being exported
try mod.markDeclAlive(exported_decl);
try sema.maybeQueueFuncBodyAnalysis(exported_decl_index);
 
try addExport(mod, .{
@@ -6503,7 +6513,7 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
}
 
const fn_owner_decl = mod.funcOwnerDeclPtr(sema.func_index);
switch (fn_owner_decl.ty.fnCallingConvention(mod)) {
switch (fn_owner_decl.typeOf(mod).fnCallingConvention(mod)) {
.Naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}),
.Inline => return sema.fail(block, src, "@setAlignStack in inline function", .{}),
else => if (block.inlining != null) {
@@ -7692,7 +7702,7 @@ fn analyzeCall(
// comptime memory is mutated.
const memoized_arg_values = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len);
 
const owner_info = mod.typeToFunc(fn_owner_decl.ty).?;
const owner_info = mod.typeToFunc(fn_owner_decl.typeOf(mod)).?;
const new_param_types = try sema.arena.alloc(InternPool.Index, owner_info.param_types.len);
var new_fn_info: InternPool.GetFuncTypeKey = .{
.param_types = new_param_types,
@@ -7835,7 +7845,7 @@ fn analyzeCall(
 
if (is_comptime_call) {
const result_val = try sema.resolveConstValue(block, .unneeded, result, undefined);
const result_interned = try result_val.intern2(sema.fn_ret_ty, mod);
const result_interned = result_val.toIntern();
 
// Transform ad-hoc inferred error set types into concrete error sets.
const result_transformed = try sema.resolveAdHocInferredErrorSet(block, call_src, result_interned);
@@ -7856,8 +7866,7 @@ fn analyzeCall(
}
 
if (try sema.resolveValue(result)) |result_val| {
const result_interned = try result_val.intern2(sema.fn_ret_ty, mod);
const result_transformed = try sema.resolveAdHocInferredErrorSet(block, call_src, result_interned);
const result_transformed = try sema.resolveAdHocInferredErrorSet(block, call_src, result_val.toIntern());
break :res2 Air.internedToRef(result_transformed);
}
 
@@ -7960,9 +7969,9 @@ fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Typ
});
}
const func_decl = mod.funcOwnerDeclPtr(sema.owner_func_index);
if (!func_ty.eql(func_decl.ty, mod)) {
if (!func_ty.eql(func_decl.typeOf(mod), mod)) {
return sema.fail(block, call_src, "unable to perform tail call: type of function being called '{}' does not match type of calling function '{}'", .{
func_ty.fmt(mod), func_decl.ty.fmt(mod),
func_ty.fmt(mod), func_decl.typeOf(mod).fmt(mod),
});
}
_ = try block.addUnOp(.ret, result);
@@ -8042,7 +8051,7 @@ fn analyzeInlineCallArg(
// when the hash function is called.
const resolved_arg_val = try ics.caller().resolveLazyValue(arg_val);
should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(mod);
memoized_arg_values[arg_i.*] = try resolved_arg_val.intern(Type.fromInterned(param_ty), mod);
memoized_arg_values[arg_i.*] = resolved_arg_val.toIntern();
} else {
ics.callee().inst_map.putAssumeCapacityNoClobber(inst, casted_arg);
}
@@ -8081,7 +8090,7 @@ fn analyzeInlineCallArg(
// when the hash function is called.
const resolved_arg_val = try ics.caller().resolveLazyValue(arg_val);
should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(mod);
memoized_arg_values[arg_i.*] = try resolved_arg_val.intern(ics.caller().typeOf(uncasted_arg), mod);
memoized_arg_values[arg_i.*] = resolved_arg_val.toIntern();
} else {
if (zir_tags[@intFromEnum(inst)] == .param_anytype_comptime) {
_ = try ics.caller().resolveConstValue(arg_block, arg_src, uncasted_arg, .{
@@ -8871,7 +8880,7 @@ fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
return Air.internedToRef((try mod.getCoerced(int_val, dest_ty)).toIntern());
}
return sema.fail(block, src, "int value '{}' out of range of non-exhaustive enum '{}'", .{
int_val.fmtValue(sema.typeOf(operand), mod), dest_ty.fmt(mod),
int_val.fmtValue(mod), dest_ty.fmt(mod),
});
}
if (int_val.isUndef(mod)) {
@@ -8879,7 +8888,7 @@ fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
}
if (!(try sema.enumHasInt(dest_ty, int_val))) {
return sema.fail(block, src, "enum '{}' has no tag with value '{}'", .{
dest_ty.fmt(mod), int_val.fmtValue(sema.typeOf(operand), mod),
dest_ty.fmt(mod), int_val.fmtValue(mod),
});
}
return Air.internedToRef((try mod.getCoerced(int_val, dest_ty)).toIntern());
@@ -13925,7 +13934,7 @@ fn zirShl(
const rhs_elem = try rhs_val.elemValue(mod, i);
if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
rhs_elem.fmtValue(scalar_ty, mod),
rhs_elem.fmtValue(mod),
i,
scalar_ty.fmt(mod),
});
@@ -13933,7 +13942,7 @@ fn zirShl(
}
} else if (rhs_val.compareHetero(.gte, bit_value, mod)) {
return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
rhs_val.fmtValue(scalar_ty, mod),
rhs_val.fmtValue(mod),
scalar_ty.fmt(mod),
});
}
@@ -13944,14 +13953,14 @@ fn zirShl(
const rhs_elem = try rhs_val.elemValue(mod, i);
if (rhs_elem.compareHetero(.lt, try mod.intValue(scalar_rhs_ty, 0), mod)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
rhs_elem.fmtValue(scalar_ty, mod),
rhs_elem.fmtValue(mod),
i,
});
}
}
} else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
rhs_val.fmtValue(scalar_ty, mod),
rhs_val.fmtValue(mod),
});
}
}
@@ -14090,7 +14099,7 @@ fn zirShr(
const rhs_elem = try rhs_val.elemValue(mod, i);
if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
rhs_elem.fmtValue(scalar_ty, mod),
rhs_elem.fmtValue(mod),
i,
scalar_ty.fmt(mod),
});
@@ -14098,7 +14107,7 @@ fn zirShr(
}
} else if (rhs_val.compareHetero(.gte, bit_value, mod)) {
return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
rhs_val.fmtValue(scalar_ty, mod),
rhs_val.fmtValue(mod),
scalar_ty.fmt(mod),
});
}
@@ -14109,14 +14118,14 @@ fn zirShr(
const rhs_elem = try rhs_val.elemValue(mod, i);
if (rhs_elem.compareHetero(.lt, try mod.intValue(rhs_ty.childType(mod), 0), mod)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
rhs_elem.fmtValue(scalar_ty, mod),
rhs_elem.fmtValue(mod),
i,
});
}
}
} else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
rhs_val.fmtValue(scalar_ty, mod),
rhs_val.fmtValue(mod),
});
}
if (maybe_lhs_val) |lhs_val| {
@@ -14270,7 +14279,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const elems = try sema.arena.alloc(InternPool.Index, vec_len);
for (elems, 0..) |*elem, i| {
const elem_val = try val.elemValue(mod, i);
elem.* = try (try elem_val.bitwiseNot(scalar_type, sema.arena, mod)).intern(scalar_type, mod);
elem.* = (try elem_val.bitwiseNot(scalar_type, sema.arena, mod)).toIntern();
}
return Air.internedToRef((try mod.intern(.{ .aggregate = .{
.ty = operand_type.toIntern(),
@@ -14499,12 +14508,16 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
else => unreachable,
}) |rhs_val| {
const lhs_sub_val = if (lhs_ty.isSinglePointer(mod))
(try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).?
try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty) orelse break :rs lhs_src
else if (lhs_ty.isSlice(mod))
try sema.maybeDerefSliceAsArray(block, lhs_src, lhs_val) orelse break :rs lhs_src
else
lhs_val;
 
const rhs_sub_val = if (rhs_ty.isSinglePointer(mod))
(try sema.pointerDeref(block, rhs_src, rhs_val, rhs_ty)).?
try sema.pointerDeref(block, rhs_src, rhs_val, rhs_ty) orelse break :rs rhs_src
else if (rhs_ty.isSlice(mod))
try sema.maybeDerefSliceAsArray(block, rhs_src, rhs_val) orelse break :rs rhs_src
else
rhs_val;
 
@@ -14521,7 +14534,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} };
const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, operand_src);
const coerced_elem_val = try sema.resolveConstValue(block, operand_src, coerced_elem_val_inst, undefined);
element_vals[elem_i] = try coerced_elem_val.intern(resolved_elem_ty, mod);
element_vals[elem_i] = coerced_elem_val.toIntern();
}
while (elem_i < result_len) : (elem_i += 1) {
const rhs_elem_i = elem_i - lhs_len;
@@ -14534,7 +14547,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} };
const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, operand_src);
const coerced_elem_val = try sema.resolveConstValue(block, operand_src, coerced_elem_val_inst, undefined);
element_vals[elem_i] = try coerced_elem_val.intern(resolved_elem_ty, mod);
element_vals[elem_i] = coerced_elem_val.toIntern();
}
return sema.addConstantMaybeRef(try mod.intern(.{ .aggregate = .{
.ty = result_ty.toIntern(),
@@ -14624,10 +14637,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
.Pointer => {
const ptr_info = operand_ty.ptrInfo(mod);
switch (ptr_info.flags.size) {
// TODO: in the Many case here this should only work if the type
// has a sentinel, and this code should compute the length based
// on the sentinel value.
.Slice, .Many => {
.Slice => {
const val = try sema.resolveConstDefinedValue(block, src, operand, .{
.needed_comptime_reason = "slice value being concatenated must be comptime-known",
});
@@ -14637,7 +14647,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
.none => null,
else => Value.fromInterned(ptr_info.sentinel),
},
.len = val.sliceLen(mod),
.len = try val.sliceLen(sema),
};
},
.One => {
@@ -14645,7 +14655,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
return Type.fromInterned(ptr_info.child).arrayInfo(mod);
}
},
.C => {},
.C, .Many => {},
}
},
.Struct => {
@@ -14831,9 +14841,11 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace(mod) else null;
const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len);
 
if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| ct: {
const lhs_sub_val = if (lhs_ty.isSinglePointer(mod))
(try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).?
try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty) orelse break :ct
else if (lhs_ty.isSlice(mod))
try sema.maybeDerefSliceAsArray(block, lhs_src, lhs_val) orelse break :ct
else
lhs_val;
 
@@ -14841,7 +14853,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// Optimization for the common pattern of a single element repeated N times, such
// as zero-filling a byte array.
if (lhs_len == 1 and lhs_info.sentinel == null) {
const elem_val = (try lhs_sub_val.maybeElemValueFull(sema, mod, 0)).?;
const elem_val = try lhs_sub_val.elemValue(mod, 0);
break :v try mod.intern(.{ .aggregate = .{
.ty = result_ty.toIntern(),
.storage = .{ .repeated_elem = elem_val.toIntern() },
@@ -14853,7 +14865,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
while (elem_i < result_len) {
var lhs_i: usize = 0;
while (lhs_i < lhs_len) : (lhs_i += 1) {
const elem_val = (try lhs_sub_val.maybeElemValueFull(sema, mod, lhs_i)).?;
const elem_val = try lhs_sub_val.elemValue(mod, lhs_i);
element_vals[elem_i] = elem_val.toIntern();
elem_i += 1;
}
@@ -15034,7 +15046,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
block,
src,
"ambiguous coercion of division operands '{}' and '{}'; non-zero remainder '{}'",
.{ lhs_ty.fmt(mod), rhs_ty.fmt(mod), rem.fmtValue(resolved_type, mod) },
.{ lhs_ty.fmt(mod), rhs_ty.fmt(mod), rem.fmtValue(mod) },
);
}
}
@@ -15813,7 +15825,7 @@ fn intRem(
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty)).intern(scalar_ty, mod);
scalar.* = (try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -17753,7 +17765,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const info = ty.intInfo(mod);
const field_values = .{
// signedness: Signedness,
try (try mod.enumValueFieldIndex(signedness_ty, @intFromEnum(info.signedness))).intern(signedness_ty, mod),
(try mod.enumValueFieldIndex(signedness_ty, @intFromEnum(info.signedness))).toIntern(),
// bits: u16,
(try mod.intValue(Type.u16, info.bits)).toIntern(),
};
@@ -17823,7 +17835,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
const field_values = .{
// size: Size,
try (try mod.enumValueFieldIndex(ptr_size_ty, @intFromEnum(info.flags.size))).intern(ptr_size_ty, mod),
(try mod.enumValueFieldIndex(ptr_size_ty, @intFromEnum(info.flags.size))).toIntern(),
// is_const: bool,
Value.makeBool(info.flags.is_const).toIntern(),
// is_volatile: bool,
@@ -17831,7 +17843,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// alignment: comptime_int,
alignment.toIntern(),
// address_space: AddressSpace
try (try mod.enumValueFieldIndex(addrspace_ty, @intFromEnum(info.flags.address_space))).intern(addrspace_ty, mod),
(try mod.enumValueFieldIndex(addrspace_ty, @intFromEnum(info.flags.address_space))).toIntern(),
// child: type,
info.child,
// is_allowzero: bool,
@@ -19975,8 +19987,8 @@ fn unionInit(
const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
return Air.internedToRef((try mod.intern(.{ .un = .{
.ty = union_ty.toIntern(),
.tag = try tag_val.intern(tag_ty, mod),
.val = try init_val.intern(field_ty, mod),
.tag = tag_val.toIntern(),
.val = init_val.toIntern(),
} })));
}
 
@@ -20099,8 +20111,8 @@ fn zirStructInit(
if (try sema.resolveValue(init_inst)) |val| {
const struct_val = Value.fromInterned((try mod.intern(.{ .un = .{
.ty = resolved_ty.toIntern(),
.tag = try tag_val.intern(tag_ty, mod),
.val = try val.intern(field_ty, mod),
.tag = tag_val.toIntern(),
.val = val.toIntern(),
} })));
const final_val_inst = try sema.coerce(block, result_ty, Air.internedToRef(struct_val.toIntern()), src);
const final_val = (try sema.resolveValue(final_val_inst)).?;
@@ -20400,7 +20412,7 @@ fn structInitAnon(
return sema.failWithOwnedErrorMsg(block, msg);
}
if (try sema.resolveValue(init)) |init_val| {
field_val.* = try init_val.intern(Type.fromInterned(field_ty.*), mod);
field_val.* = init_val.toIntern();
} else {
field_val.* = .none;
runtime_index = @intCast(i_usize);
@@ -20577,13 +20589,9 @@ fn zirArrayInit(
 
const runtime_index = opt_runtime_index orelse {
const elem_vals = try sema.arena.alloc(InternPool.Index, resolved_args.len);
for (elem_vals, resolved_args, 0..) |*val, arg, i| {
const elem_ty = if (is_tuple)
array_ty.structFieldType(i, mod)
else
array_ty.elemType2(mod);
for (elem_vals, resolved_args) |*val, arg| {
// We checked that all args are comptime above.
val.* = try ((sema.resolveValue(arg) catch unreachable).?).intern(elem_ty, mod);
val.* = (sema.resolveValue(arg) catch unreachable).?.toIntern();
}
const arr_val = try mod.intern(.{ .aggregate = .{
.ty = array_ty.toIntern(),
@@ -20998,7 +21006,7 @@ fn maybeConstantUnaryMath(
const elems = try sema.arena.alloc(InternPool.Index, vec_len);
for (elems, 0..) |*elem, i| {
const elem_val = try val.elemValue(sema.mod, i);
elem.* = try (try eval(elem_val, scalar_ty, sema.arena, sema.mod)).intern(scalar_ty, mod);
elem.* = (try eval(elem_val, scalar_ty, sema.arena, sema.mod)).toIntern();
}
return Air.internedToRef((try mod.intern(.{ .aggregate = .{
.ty = result_ty.toIntern(),
@@ -21086,7 +21094,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const enum_decl = mod.declPtr(enum_decl_index);
const msg = msg: {
const msg = try sema.errMsg(block, src, "no field with value '{}' in enum '{}'", .{
val.fmtValue(enum_ty, sema.mod), enum_decl.name.fmt(ip),
val.fmtValue(sema.mod), enum_decl.name.fmt(ip),
});
errdefer msg.destroy(sema.gpa);
try mod.errNoteNonLazy(enum_decl.srcLoc(mod), msg, "declared here", .{});
@@ -21129,7 +21137,9 @@ fn zirReify(
.needed_comptime_reason = "operand to @Type must be comptime-known",
});
const union_val = ip.indexToKey(val.toIntern()).un;
if (try sema.anyUndef(Value.fromInterned(union_val.val))) return sema.failWithUseOfUndef(block, src);
if (try sema.anyUndef(block, operand_src, Value.fromInterned(union_val.val))) {
return sema.failWithUseOfUndef(block, operand_src);
}
const tag_index = type_info_ty.unionTagFieldIndex(Value.fromInterned(union_val.tag), mod).?;
switch (@as(std.builtin.TypeId, @enumFromInt(tag_index))) {
.Type => return .type_type,
@@ -21370,11 +21380,15 @@ fn zirReify(
const payload_val = Value.fromInterned(union_val.val).optionalValue(mod) orelse
return Air.internedToRef(Type.anyerror.toIntern());
 
const len = try sema.usizeCast(block, src, payload_val.sliceLen(mod));
const names_val = try sema.derefSliceAsArray(block, src, payload_val, .{
.needed_comptime_reason = "error set contents must be comptime-known",
});
 
const len = try sema.usizeCast(block, src, names_val.typeOf(mod).arrayLen(mod));
var names: InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, len);
for (0..len) |i| {
const elem_val = (try payload_val.maybeElemValueFull(sema, mod, i)).?;
const elem_val = try names_val.elemValue(mod, i);
const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern()));
const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
@@ -21422,7 +21436,7 @@ fn zirReify(
const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);
 
// Decls
if (decls_val.sliceLen(mod) > 0) {
if (try decls_val.sliceLen(sema) > 0) {
return sema.fail(block, src, "reified structs must have no decls", .{});
}
 
@@ -21430,7 +21444,11 @@ fn zirReify(
return sema.fail(block, src, "non-packed struct does not support backing integer type", .{});
}
 
return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_val, name_strategy, is_tuple_val.toBool());
const fields_arr = try sema.derefSliceAsArray(block, operand_src, fields_val, .{
.needed_comptime_reason = "struct fields must be comptime-known",
});
 
return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_arr, name_strategy, is_tuple_val.toBool());
},
.Enum => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
@@ -21451,11 +21469,15 @@ fn zirReify(
try ip.getOrPutString(gpa, "is_exhaustive"),
).?);
 
if (decls_val.sliceLen(mod) > 0) {
if (try decls_val.sliceLen(sema) > 0) {
return sema.fail(block, src, "reified enums must have no decls", .{});
}
 
return sema.reifyEnum(block, inst, src, tag_type_val.toType(), is_exhaustive_val.toBool(), fields_val, name_strategy);
const fields_arr = try sema.derefSliceAsArray(block, operand_src, fields_val, .{
.needed_comptime_reason = "enum fields must be comptime-known",
});
 
return sema.reifyEnum(block, inst, src, tag_type_val.toType(), is_exhaustive_val.toBool(), fields_arr, name_strategy);
},
.Opaque => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
@@ -21465,7 +21487,7 @@ fn zirReify(
).?);
 
// Decls
if (decls_val.sliceLen(mod) > 0) {
if (try decls_val.sliceLen(sema) > 0) {
return sema.fail(block, src, "reified opaque must have no decls", .{});
}
 
@@ -21480,10 +21502,14 @@ fn zirReify(
};
errdefer wip_ty.cancel(ip);
 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = Value.fromInterned(wip_ty.index),
}, name_strategy, "opaque", inst);
const new_decl_index = try sema.createAnonymousDeclTypeNamed(
block,
src,
Value.fromInterned(wip_ty.index),
name_strategy,
"opaque",
inst,
);
mod.declPtr(new_decl_index).owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
 
@@ -21510,12 +21536,16 @@ fn zirReify(
try ip.getOrPutString(gpa, "decls"),
).?);
 
if (decls_val.sliceLen(mod) > 0) {
if (try decls_val.sliceLen(sema) > 0) {
return sema.fail(block, src, "reified unions must have no decls", .{});
}
const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);
 
return sema.reifyUnion(block, inst, src, layout, tag_type_val, fields_val, name_strategy);
const fields_arr = try sema.derefSliceAsArray(block, operand_src, fields_val, .{
.needed_comptime_reason = "union fields must be comptime-known",
});
 
return sema.reifyUnion(block, inst, src, layout, tag_type_val, fields_arr, name_strategy);
},
.Fn => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
@@ -21535,7 +21565,7 @@ fn zirReify(
ip,
try ip.getOrPutString(gpa, "return_type"),
).?);
const params_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
const params_slice_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "params"),
).?);
@@ -21554,12 +21584,16 @@ fn zirReify(
const return_type = return_type_val.optionalValue(mod) orelse
return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{});
 
const args_len = try sema.usizeCast(block, src, params_val.sliceLen(mod));
const params_val = try sema.derefSliceAsArray(block, operand_src, params_slice_val, .{
.needed_comptime_reason = "function parameters must be comptime-known",
});
 
const args_len = try sema.usizeCast(block, src, params_val.typeOf(mod).arrayLen(mod));
const param_types = try sema.arena.alloc(InternPool.Index, args_len);
 
var noalias_bits: u32 = 0;
for (param_types, 0..) |*param_type, i| {
const elem_val = (try params_val.maybeElemValueFull(sema, mod, i)).?;
const elem_val = try params_val.elemValue(mod, i);
const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern()));
const param_is_generic_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
@@ -21620,7 +21654,7 @@ fn reifyEnum(
 
// This logic must stay in sync with the structure of `std.builtin.Type.Enum` - search for `fieldValue`.
 
const fields_len: u32 = @intCast(fields_val.sliceLen(mod));
const fields_len: u32 = @intCast(fields_val.typeOf(mod).arrayLen(mod));
 
// The validation work here is non-trivial, and it's possible the type already exists.
// So in this first pass, let's just construct a hash to optimize for this case. If the
@@ -21634,7 +21668,7 @@ fn reifyEnum(
std.hash.autoHash(&hasher, fields_len);
 
for (0..fields_len) |field_idx| {
const field_info = (try fields_val.maybeElemValueFull(sema, mod, field_idx)).?;
const field_info = try fields_val.elemValue(mod, field_idx);
 
const field_name_val = try field_info.fieldValue(mod, 0);
const field_value_val = try sema.resolveLazyValue(try field_info.fieldValue(mod, 1));
@@ -21668,10 +21702,14 @@ fn reifyEnum(
return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{});
}
 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = Value.fromInterned(wip_ty.index),
}, name_strategy, "enum", inst);
const new_decl_index = try sema.createAnonymousDeclTypeNamed(
block,
src,
Value.fromInterned(wip_ty.index),
name_strategy,
"enum",
inst,
);
mod.declPtr(new_decl_index).owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
 
@@ -21679,7 +21717,7 @@ fn reifyEnum(
wip_ty.setTagTy(ip, tag_ty.toIntern());
 
for (0..fields_len) |field_idx| {
const field_info = (try fields_val.maybeElemValueFull(sema, mod, field_idx)).?;
const field_info = try fields_val.elemValue(mod, field_idx);
 
const field_name_val = try field_info.fieldValue(mod, 0);
const field_value_val = try sema.resolveLazyValue(try field_info.fieldValue(mod, 1));
@@ -21691,7 +21729,7 @@ fn reifyEnum(
// TODO: better source location
return sema.fail(block, src, "field '{}' with enumeration value '{}' is too large for backing int type '{}'", .{
field_name.fmt(ip),
field_value_val.fmtValue(Type.comptime_int, mod),
field_value_val.fmtValue(mod),
tag_ty.fmt(mod),
});
}
@@ -21707,7 +21745,7 @@ fn reifyEnum(
break :msg msg;
},
.value => msg: {
const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{field_value_val.fmtValue(Type.comptime_int, mod)});
const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{field_value_val.fmtValue(mod)});
errdefer msg.destroy(gpa);
_ = conflict.prev_field_idx; // TODO: this note is incorrect
try sema.errNote(block, src, msg, "other enum tag value here", .{});
@@ -21741,7 +21779,7 @@ fn reifyUnion(
 
// This logic must stay in sync with the structure of `std.builtin.Type.Union` - search for `fieldValue`.
 
const fields_len: u32 = @intCast(fields_val.sliceLen(mod));
const fields_len: u32 = @intCast(fields_val.typeOf(mod).arrayLen(mod));
 
// The validation work here is non-trivial, and it's possible the type already exists.
// So in this first pass, let's just construct a hash to optimize for this case. If the
@@ -21757,7 +21795,7 @@ fn reifyUnion(
var any_aligns = false;
 
for (0..fields_len) |field_idx| {
const field_info = (try fields_val.maybeElemValueFull(sema, mod, field_idx)).?;
const field_info = try fields_val.elemValue(mod, field_idx);
 
const field_name_val = try field_info.fieldValue(mod, 0);
const field_type_val = try field_info.fieldValue(mod, 1);
@@ -21811,10 +21849,14 @@ fn reifyUnion(
};
errdefer wip_ty.cancel(ip);
 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = Value.fromInterned(wip_ty.index),
}, name_strategy, "union", inst);
const new_decl_index = try sema.createAnonymousDeclTypeNamed(
block,
src,
Value.fromInterned(wip_ty.index),
name_strategy,
"union",
inst,
);
mod.declPtr(new_decl_index).owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
 
@@ -21833,7 +21875,7 @@ fn reifyUnion(
var seen_tags = try std.DynamicBitSetUnmanaged.initEmpty(sema.arena, tag_ty_fields_len);
 
for (field_types, 0..) |*field_ty, field_idx| {
const field_info = (try fields_val.maybeElemValueFull(sema, mod, field_idx)).?;
const field_info = try fields_val.elemValue(mod, field_idx);
 
const field_name_val = try field_info.fieldValue(mod, 0);
const field_type_val = try field_info.fieldValue(mod, 1);
@@ -21885,7 +21927,7 @@ fn reifyUnion(
try field_names.ensureTotalCapacity(sema.arena, fields_len);
 
for (field_types, 0..) |*field_ty, field_idx| {
const field_info = (try fields_val.maybeElemValueFull(sema, mod, field_idx)).?;
const field_info = try fields_val.elemValue(mod, field_idx);
 
const field_name_val = try field_info.fieldValue(mod, 0);
const field_type_val = try field_info.fieldValue(mod, 1);
@@ -21979,7 +22021,7 @@ fn reifyStruct(
 
// This logic must stay in sync with the structure of `std.builtin.Type.Struct` - search for `fieldValue`.
 
const fields_len: u32 = @intCast(fields_val.sliceLen(mod));
const fields_len: u32 = @intCast(fields_val.typeOf(mod).arrayLen(mod));
 
// The validation work here is non-trivial, and it's possible the type already exists.
// So in this first pass, let's just construct a hash to optimize for this case. If the
@@ -21998,7 +22040,7 @@ fn reifyStruct(
var any_aligned_fields = false;
 
for (0..fields_len) |field_idx| {
const field_info = (try fields_val.maybeElemValueFull(sema, mod, field_idx)).?;
const field_info = try fields_val.elemValue(mod, field_idx);
 
const field_name_val = try field_info.fieldValue(mod, 0);
const field_type_val = try field_info.fieldValue(mod, 1);
@@ -22066,17 +22108,21 @@ fn reifyStruct(
.auto => {},
};
 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = Value.fromInterned(wip_ty.index),
}, name_strategy, "struct", inst);
const new_decl_index = try sema.createAnonymousDeclTypeNamed(
block,
src,
Value.fromInterned(wip_ty.index),
name_strategy,
"struct",
inst,
);
mod.declPtr(new_decl_index).owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
 
const struct_type = ip.loadStructType(wip_ty.index);
 
for (0..fields_len) |field_idx| {
const field_info = (try fields_val.maybeElemValueFull(sema, mod, field_idx)).?;
const field_info = try fields_val.elemValue(mod, field_idx);
 
const field_name_val = try field_info.fieldValue(mod, 0);
const field_type_val = try field_info.fieldValue(mod, 1);
@@ -22837,12 +22883,12 @@ fn ptrCastFull(
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = if (src_info.sentinel == .none) blk: {
break :blk try sema.errMsg(block, src, "destination pointer requires '{}' sentinel", .{
Value.fromInterned(dest_info.sentinel).fmtValue(Type.fromInterned(dest_info.child), mod),
Value.fromInterned(dest_info.sentinel).fmtValue(mod),
});
} else blk: {
break :blk try sema.errMsg(block, src, "pointer sentinel '{}' cannot coerce into pointer sentinel '{}'", .{
Value.fromInterned(src_info.sentinel).fmtValue(Type.fromInterned(src_info.child), mod),
Value.fromInterned(dest_info.sentinel).fmtValue(Type.fromInterned(dest_info.child), mod),
Value.fromInterned(src_info.sentinel).fmtValue(mod),
Value.fromInterned(dest_info.sentinel).fmtValue(mod),
});
};
errdefer msg.destroy(sema.gpa);
@@ -23216,7 +23262,8 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const elems = try sema.arena.alloc(InternPool.Index, operand_ty.vectorLen(mod));
for (elems, 0..) |*elem, i| {
const elem_val = try val.elemValue(mod, i);
elem.* = try (try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, mod)).intern(dest_scalar_ty, mod);
const uncoerced_elem = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, mod);
elem.* = (try mod.getCoerced(uncoerced_elem, dest_scalar_ty)).toIntern();
}
return Air.internedToRef((try mod.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
@@ -23330,7 +23377,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const elems = try sema.arena.alloc(InternPool.Index, vec_len);
for (elems, 0..) |*elem, i| {
const elem_val = try val.elemValue(mod, i);
elem.* = try (try elem_val.byteSwap(scalar_ty, mod, sema.arena)).intern(scalar_ty, mod);
elem.* = (try elem_val.byteSwap(scalar_ty, mod, sema.arena)).toIntern();
}
return Air.internedToRef((try mod.intern(.{ .aggregate = .{
.ty = operand_ty.toIntern(),
@@ -23378,7 +23425,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const elems = try sema.arena.alloc(InternPool.Index, vec_len);
for (elems, 0..) |*elem, i| {
const elem_val = try val.elemValue(mod, i);
elem.* = try (try elem_val.bitReverse(scalar_ty, mod, sema.arena)).intern(scalar_ty, mod);
elem.* = (try elem_val.bitReverse(scalar_ty, mod, sema.arena)).toIntern();
}
return Air.internedToRef((try mod.intern(.{ .aggregate = .{
.ty = operand_ty.toIntern(),
@@ -23896,11 +23943,9 @@ fn resolveExportOptions(
const visibility_src = sema.maybeOptionsSrc(block, src, "visibility");
 
const name_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "name"), name_src);
const name_val = try sema.resolveConstDefinedValue(block, name_src, name_operand, .{
const name = try sema.toConstString(block, name_src, name_operand, .{
.needed_comptime_reason = "name of exported value must be comptime-known",
});
const name_ty = Type.slice_const_u8;
const name = try name_val.toAllocatedBytes(name_ty, sema.arena, mod);
 
const linkage_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "linkage"), linkage_src);
const linkage_val = try sema.resolveConstDefinedValue(block, linkage_src, linkage_operand, .{
@@ -23912,9 +23957,10 @@ fn resolveExportOptions(
const section_opt_val = try sema.resolveConstDefinedValue(block, section_src, section_operand, .{
.needed_comptime_reason = "linksection of exported value must be comptime-known",
});
const section_ty = Type.slice_const_u8;
const section = if (section_opt_val.optionalValue(mod)) |section_val|
try section_val.toAllocatedBytes(section_ty, sema.arena, mod)
try sema.toConstString(block, section_src, Air.internedToRef(section_val.toIntern()), .{
.needed_comptime_reason = "linksection of exported value must be comptime-known",
})
else
null;
 
@@ -24311,7 +24357,7 @@ fn analyzeShuffle(
}
const int = mask_elem_val.toSignedInt(mod);
const unsigned: u32 = @intCast(if (int >= 0) int else ~int);
values[i] = try (try (if (int >= 0) a_val else b_val).elemValue(mod, unsigned)).intern(elem_ty, mod);
values[i] = (try (if (int >= 0) a_val else b_val).elemValue(mod, unsigned)).toIntern();
}
return Air.internedToRef((try mod.intern(.{ .aggregate = .{
.ty = res_ty.toIntern(),
@@ -24417,7 +24463,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
for (elems, 0..) |*elem, i| {
const pred_elem_val = try pred_val.elemValue(mod, i);
const should_choose_a = pred_elem_val.toBool();
elem.* = try (try (if (should_choose_a) a_val else b_val).elemValue(mod, i)).intern(elem_ty, mod);
elem.* = (try (if (should_choose_a) a_val else b_val).elemValue(mod, i)).toIntern();
}
 
return Air.internedToRef((try mod.intern(.{ .aggregate = .{
@@ -25239,10 +25285,10 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
const msg = try sema.errMsg(block, src, "non-matching @memcpy lengths", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, dest_src, msg, "length {} here", .{
dest_len_val.fmtValue(Type.usize, sema.mod),
dest_len_val.fmtValue(sema.mod),
});
try sema.errNote(block, src_src, msg, "length {} here", .{
src_len_val.fmtValue(Type.usize, sema.mod),
src_len_val.fmtValue(sema.mod),
});
break :msg msg;
};
@@ -25777,7 +25823,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
} else if (extra.data.bits.has_ret_ty_ref) blk: {
const ret_ty_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
const ret_ty_tv = sema.resolveInstConst(block, ret_src, ret_ty_ref, .{
const ret_ty_val = sema.resolveInstConst(block, ret_src, ret_ty_ref, .{
.needed_comptime_reason = "return type must be comptime-known",
}) catch |err| switch (err) {
error.GenericPoison => {
@@ -25785,8 +25831,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
},
else => |e| return e,
};
const ty = ret_ty_tv.val.toType();
break :blk ty;
break :blk ret_ty_val.toType();
} else Type.void;
 
const noalias_bits: u32 = if (extra.data.bits.has_any_noalias) blk: {
@@ -26032,10 +26077,9 @@ fn resolveExternOptions(
const thread_local_src = sema.maybeOptionsSrc(block, src, "thread_local");
 
const name_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "name"), name_src);
const name_val = try sema.resolveConstDefinedValue(block, name_src, name_ref, .{
const name = try sema.toConstString(block, name_src, name_ref, .{
.needed_comptime_reason = "name of the extern symbol must be comptime-known",
});
const name = try name_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod);
 
const library_name_inst = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "library_name"), library_src);
const library_name_val = try sema.resolveConstDefinedValue(block, library_src, library_name_inst, .{
@@ -26054,7 +26098,9 @@ fn resolveExternOptions(
});
 
const library_name = if (library_name_val.optionalValue(mod)) |library_name_payload| library_name: {
const library_name = try library_name_payload.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod);
const library_name = try sema.toConstString(block, library_src, Air.internedToRef(library_name_payload.toIntern()), .{
.needed_comptime_reason = "library in which extern symbol is must be comptime-known",
});
if (library_name.len == 0) {
return sema.fail(block, library_src, "library name cannot be empty", .{});
}
@@ -26120,9 +26166,10 @@ fn zirBuiltinExtern(
const new_decl_index = try mod.allocateNewDecl(sema.owner_decl.src_namespace, sema.owner_decl.src_node);
errdefer mod.destroyDecl(new_decl_index);
const new_decl = mod.declPtr(new_decl_index);
try mod.initNewAnonDecl(new_decl_index, sema.owner_decl.src_line, .{
.ty = Type.fromInterned(ptr_info.child),
.val = Value.fromInterned(
try mod.initNewAnonDecl(
new_decl_index,
sema.owner_decl.src_line,
Value.fromInterned(
if (Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Fn)
try ip.getExternFunc(sema.gpa, .{
.ty = ptr_info.child,
@@ -26141,7 +26188,8 @@ fn zirBuiltinExtern(
.is_weak_linkage = options.linkage == .weak,
} }),
),
}, options.name);
options.name,
);
new_decl.owns_tv = true;
// Note that this will queue the anon decl for codegen, so that the backend can
// correctly handle the extern, including duplicate detection.
@@ -26641,13 +26689,12 @@ fn prepareSimplePanic(sema: *Sema, block: *Block) !void {
// decl_index may be an alias; we must find the decl that actually
// owns the function.
try sema.ensureDeclAnalyzed(decl_index);
const tv = try mod.declPtr(decl_index).typedValue();
const fn_val = try mod.declPtr(decl_index).valueOrFail();
try sema.declareDependency(.{ .decl_val = decl_index });
assert(tv.ty.zigTypeTag(mod) == .Fn);
assert(try sema.fnHasRuntimeBits(tv.ty));
const func_index = tv.val.toIntern();
try mod.ensureFuncBodyAnalysisQueued(func_index);
mod.panic_func_index = func_index;
assert(fn_val.typeOf(mod).zigTypeTag(mod) == .Fn);
assert(try sema.fnHasRuntimeBits(fn_val.typeOf(mod)));
try mod.ensureFuncBodyAnalysisQueued(fn_val.toIntern());
mod.panic_func_index = fn_val.toIntern();
}
 
if (mod.null_stack_trace == .none) {
@@ -27789,7 +27836,7 @@ fn structFieldPtrByIndex(
const val = try mod.intern(.{ .ptr = .{
.ty = ptr_field_ty.toIntern(),
.addr = .{ .field = .{
.base = try struct_ptr_val.intern(struct_ptr_ty, mod),
.base = struct_ptr_val.toIntern(),
.index = field_index,
} },
} });
@@ -28568,7 +28615,7 @@ fn elemValSlice(
 
if (maybe_slice_val) |slice_val| {
runtime_src = elem_index_src;
const slice_len = slice_val.sliceLen(mod);
const slice_len = try slice_val.sliceLen(sema);
const slice_len_s = slice_len + @intFromBool(slice_sent);
if (slice_len_s == 0) {
return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{});
@@ -28593,7 +28640,7 @@ fn elemValSlice(
try sema.requireRuntimeBlock(block, src, runtime_src);
if (oob_safety and block.wantSafety()) {
const len_inst = if (maybe_slice_val) |slice_val|
try mod.intRef(Type.usize, slice_val.sliceLen(mod))
try mod.intRef(Type.usize, try slice_val.sliceLen(sema))
else
try block.addTyOp(.slice_len, Type.usize, slice);
const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt;
@@ -28630,7 +28677,7 @@ fn elemPtrSlice(
if (slice_val.isUndef(mod)) {
return mod.undefRef(elem_ptr_ty);
}
const slice_len = slice_val.sliceLen(mod);
const slice_len = try slice_val.sliceLen(sema);
const slice_len_s = slice_len + @intFromBool(slice_sent);
if (slice_len_s == 0) {
return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{});
@@ -28653,7 +28700,7 @@ fn elemPtrSlice(
const len_inst = len: {
if (maybe_undef_slice_val) |slice_val|
if (!slice_val.isUndef(mod))
break :len try mod.intRef(Type.usize, slice_val.sliceLen(mod));
break :len try mod.intRef(Type.usize, try slice_val.sliceLen(sema));
break :len try block.addTyOp(.slice_len, Type.usize, slice);
};
const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt;
@@ -29115,7 +29162,7 @@ fn coerceExtra(
// comptime-known integer to other number
if (!(try sema.intFitsInType(val, dest_ty, null))) {
if (!opts.report_err) return error.NotCoercible;
return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(mod), val.fmtValue(inst_ty, mod) });
return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(mod), val.fmtValue(mod) });
}
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.undef => try mod.undefRef(dest_ty),
@@ -29160,7 +29207,7 @@ fn coerceExtra(
block,
inst_src,
"type '{}' cannot represent float value '{}'",
.{ dest_ty.fmt(mod), val.fmtValue(inst_ty, mod) },
.{ dest_ty.fmt(mod), val.fmtValue(mod) },
);
}
return Air.internedToRef(result_val.toIntern());
@@ -29531,11 +29578,11 @@ const InMemoryCoercionResult = union(enum) {
.array_sentinel => |sentinel| {
if (sentinel.actual.toIntern() != .unreachable_value) {
try sema.errNote(block, src, msg, "array sentinel '{}' cannot cast into array sentinel '{}'", .{
sentinel.actual.fmtValue(sentinel.ty, mod), sentinel.wanted.fmtValue(sentinel.ty, mod),
sentinel.actual.fmtValue(mod), sentinel.wanted.fmtValue(mod),
});
} else {
try sema.errNote(block, src, msg, "destination array requires '{}' sentinel", .{
sentinel.wanted.fmtValue(sentinel.ty, mod),
sentinel.wanted.fmtValue(mod),
});
}
break;
@@ -29657,11 +29704,11 @@ const InMemoryCoercionResult = union(enum) {
.ptr_sentinel => |sentinel| {
if (sentinel.actual.toIntern() != .unreachable_value) {
try sema.errNote(block, src, msg, "pointer sentinel '{}' cannot cast into pointer sentinel '{}'", .{
sentinel.actual.fmtValue(sentinel.ty, mod), sentinel.wanted.fmtValue(sentinel.ty, mod),
sentinel.actual.fmtValue(mod), sentinel.wanted.fmtValue(mod),
});
} else {
try sema.errNote(block, src, msg, "destination pointer requires '{}' sentinel", .{
sentinel.wanted.fmtValue(sentinel.ty, mod),
sentinel.wanted.fmtValue(mod),
});
}
break;
@@ -30654,21 +30701,22 @@ fn storePtrVal(
.opv => {},
.direct => |val_ptr| {
if (mut_kit.root == .comptime_field) {
val_ptr.* = Value.fromInterned((try val_ptr.intern(operand_ty, mod)));
if (!operand_val.eql(val_ptr.*, operand_ty, mod)) {
val_ptr.* = .{ .interned = try val_ptr.intern(mod, sema.arena) };
if (operand_val.toIntern() != val_ptr.interned) {
// TODO use failWithInvalidComptimeFieldStore
return sema.fail(block, src, "value stored in comptime field does not match the default value of the field", .{});
}
return;
}
val_ptr.* = Value.fromInterned((try operand_val.intern(operand_ty, mod)));
val_ptr.* = .{ .interned = operand_val.toIntern() };
},
.reinterpret => |reinterpret| {
try sema.resolveTypeLayout(mut_kit.ty);
const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(mod));
const buffer = try sema.gpa.alloc(u8, abi_size);
defer sema.gpa.free(buffer);
reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, mod, buffer) catch |err| switch (err) {
const interned_old = Value.fromInterned(try reinterpret.val_ptr.intern(mod, sema.arena));
interned_old.writeToMemory(mut_kit.ty, mod, buffer) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.ReinterpretDeclRef => unreachable,
error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
@@ -30692,7 +30740,7 @@ fn storePtrVal(
error.IllDefinedMemoryLayout => unreachable,
error.Unimplemented => return sema.fail(block, src, "TODO: implement readFromMemory for type '{}'", .{mut_kit.ty.fmt(mod)}),
};
reinterpret.val_ptr.* = Value.fromInterned((try val.intern(mut_kit.ty, mod)));
reinterpret.val_ptr.* = .{ .interned = val.toIntern() };
},
.bad_decl_ty, .bad_ptr_ty => {
// TODO show the decl declaration site in a note and explain whether the decl
@@ -30717,11 +30765,11 @@ const ComptimePtrMutationKit = struct {
opv,
/// The pointer type matches the actual comptime Value so a direct
/// modification is possible.
direct: *Value,
direct: *MutableValue,
/// The largest parent Value containing pointee and having a well-defined memory layout.
/// This is used for bitcasting, if direct dereferencing failed.
reinterpret: struct {
val_ptr: *Value,
val_ptr: *MutableValue,
byte_offset: usize,
/// If set, write the operand to packed memory
write_packed: bool = false,
@@ -30753,15 +30801,15 @@ fn beginComptimePtrMutation(
.decl, .anon_decl, .int => unreachable, // isComptimeMutablePtr has been checked already
.comptime_alloc => |alloc_index| {
const alloc = sema.getComptimeAlloc(alloc_index);
return sema.beginComptimePtrMutationInner(block, src, alloc.ty, &alloc.val, ptr_elem_ty, .{ .alloc = alloc_index });
return sema.beginComptimePtrMutationInner(block, src, alloc.val.typeOf(mod), &alloc.val, ptr_elem_ty, .{ .alloc = alloc_index });
},
.comptime_field => |comptime_field| {
const duped = try sema.arena.create(Value);
duped.* = Value.fromInterned(comptime_field);
const duped = try sema.arena.create(MutableValue);
duped.* = .{ .interned = comptime_field };
return sema.beginComptimePtrMutationInner(
block,
src,
Type.fromInterned(mod.intern_pool.typeOf(comptime_field)),
duped.typeOf(mod),
duped,
ptr_elem_ty,
.comptime_field,
@@ -30774,36 +30822,28 @@ fn beginComptimePtrMutation(
.opv => unreachable,
.direct => |val_ptr| {
const payload_ty = parent.ty.errorUnionPayload(mod);
if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) {
return ComptimePtrMutationKit{
.root = parent.root,
.pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data },
.ty = payload_ty,
};
} else {
try val_ptr.unintern(mod, sema.arena, false, false);
if (val_ptr.* == .interned) {
// An error union has been initialized to undefined at comptime and now we
// are for the first time setting the payload. We must change the
// representation of the error union from `undef` to `opt_payload`.
 
const payload = try sema.arena.create(Value.Payload.SubValue);
payload.* = .{
.base = .{ .tag = .eu_payload },
.data = Value.fromInterned((try mod.intern(.{ .undef = payload_ty.toIntern() }))),
};
 
val_ptr.* = Value.initPayload(&payload.base);
 
return ComptimePtrMutationKit{
.root = parent.root,
.pointee = .{ .direct = &payload.data },
.ty = payload_ty,
};
// representation of the error union to `eu_payload`.
const child = try sema.arena.create(MutableValue);
child.* = .{ .interned = try mod.intern(.{ .undef = payload_ty.toIntern() }) };
val_ptr.* = .{ .eu_payload = .{
.ty = parent.ty.toIntern(),
.child = child,
} };
}
return .{
.root = parent.root,
.pointee = .{ .direct = val_ptr.eu_payload.child },
.ty = payload_ty,
};
},
.bad_decl_ty, .bad_ptr_ty => return parent,
// Even though the parent value type has well-defined memory layout, our
// pointer type does not.
.reinterpret => return ComptimePtrMutationKit{
.reinterpret => return .{
.root = parent.root,
.pointee = .bad_ptr_ty,
.ty = eu_ty,
@@ -30817,46 +30857,28 @@ fn beginComptimePtrMutation(
.opv => unreachable,
.direct => |val_ptr| {
const payload_ty = parent.ty.optionalChild(mod);
switch (val_ptr.ip_index) {
.none => return ComptimePtrMutationKit{
.root = parent.root,
.pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data },
.ty = payload_ty,
},
else => {
const payload_val = switch (mod.intern_pool.indexToKey(val_ptr.ip_index)) {
.undef => try mod.intern(.{ .undef = payload_ty.toIntern() }),
.opt => |opt| switch (opt.val) {
.none => try mod.intern(.{ .undef = payload_ty.toIntern() }),
else => |payload| payload,
},
else => unreachable,
};
 
// An optional has been initialized to undefined at comptime and now we
// are for the first time setting the payload. We must change the
// representation of the optional from `undef` to `opt_payload`.
 
const payload = try sema.arena.create(Value.Payload.SubValue);
payload.* = .{
.base = .{ .tag = .opt_payload },
.data = Value.fromInterned(payload_val),
};
 
val_ptr.* = Value.initPayload(&payload.base);
 
return ComptimePtrMutationKit{
.root = parent.root,
.pointee = .{ .direct = &payload.data },
.ty = payload_ty,
};
},
try val_ptr.unintern(mod, sema.arena, false, false);
if (val_ptr.* == .interned) {
// An optional has been initialized to undefined at comptime and now we
// are for the first time setting the payload. We must change the
// representation of the optional to `opt_payload`.
const child = try sema.arena.create(MutableValue);
child.* = .{ .interned = try mod.intern(.{ .undef = payload_ty.toIntern() }) };
val_ptr.* = .{ .opt_payload = .{
.ty = parent.ty.toIntern(),
.child = child,
} };
}
return .{
.root = parent.root,
.pointee = .{ .direct = val_ptr.opt_payload.child },
.ty = payload_ty,
};
},
.bad_decl_ty, .bad_ptr_ty => return parent,
// Even though the parent value type has well-defined memory layout, our
// pointer type does not.
.reinterpret => return ComptimePtrMutationKit{
.reinterpret => return .{
.root = parent.root,
.pointee = .bad_ptr_ty,
.ty = opt_ty,
@@ -30915,106 +30937,28 @@ fn beginComptimePtrMutation(
};
}
 
switch (val_ptr.ip_index) {
.none => switch (val_ptr.tag()) {
.bytes => {
// An array is memory-optimized to store a slice of bytes, but we are about
// to modify an individual field and the representation has to change.
// If we wanted to avoid this, there would need to be special detection
// elsewhere to identify when writing a value to an array element that is stored
// using the `bytes` tag, and handle it without making a call to this function.
const arena = sema.arena;
try val_ptr.unintern(mod, sema.arena, false, false);
 
const bytes = val_ptr.castTag(.bytes).?.data;
const dest_len = parent.ty.arrayLenIncludingSentinel(mod);
// bytes.len may be one greater than dest_len because of the case when
// assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted.
assert(bytes.len >= dest_len);
const elems = try arena.alloc(Value, @intCast(dest_len));
for (elems, 0..) |*elem, i| {
elem.* = try mod.intValue(elem_ty, bytes[i]);
}
const aggregate = switch (val_ptr.*) {
.interned,
.bytes,
.repeated,
.eu_payload,
.opt_payload,
.slice,
.un,
=> unreachable,
.aggregate => |*a| a,
};
 
val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
 
return beginComptimePtrMutationInner(
sema,
block,
src,
elem_ty,
&elems[@intCast(elem_ptr.index)],
ptr_elem_ty,
parent.root,
);
},
.repeated => {
// An array is memory-optimized to store only a single element value, and
// that value is understood to be the same for the entire length of the array.
// However, now we want to modify an individual field and so the
// representation has to change. If we wanted to avoid this, there would
// need to be special detection elsewhere to identify when writing a value to an
// array element that is stored using the `repeated` tag, and handle it
// without making a call to this function.
const arena = sema.arena;
 
const repeated_val = try val_ptr.castTag(.repeated).?.data.intern(parent.ty.childType(mod), mod);
const array_len_including_sentinel =
try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
const elems = try arena.alloc(Value, array_len_including_sentinel);
@memset(elems, Value.fromInterned(repeated_val));
 
val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
 
return beginComptimePtrMutationInner(
sema,
block,
src,
elem_ty,
&elems[@intCast(elem_ptr.index)],
ptr_elem_ty,
parent.root,
);
},
 
.aggregate => return beginComptimePtrMutationInner(
sema,
block,
src,
elem_ty,
&val_ptr.castTag(.aggregate).?.data[@intCast(elem_ptr.index)],
ptr_elem_ty,
parent.root,
),
 
else => unreachable,
},
else => switch (mod.intern_pool.indexToKey(val_ptr.toIntern())) {
.undef => {
// An array has been initialized to undefined at comptime and now we
// are for the first time setting an element. We must change the representation
// of the array from `undef` to `array`.
const arena = sema.arena;
 
const array_len_including_sentinel =
try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
const elems = try arena.alloc(Value, array_len_including_sentinel);
@memset(elems, Value.fromInterned((try mod.intern(.{ .undef = elem_ty.toIntern() }))));
 
val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
 
return beginComptimePtrMutationInner(
sema,
block,
src,
elem_ty,
&elems[@intCast(elem_ptr.index)],
ptr_elem_ty,
parent.root,
);
},
else => unreachable,
},
}
return sema.beginComptimePtrMutationInner(
block,
src,
elem_ty,
&aggregate.elems[@intCast(elem_ptr.index)],
ptr_elem_ty,
parent.root,
);
},
else => {
if (elem_ptr.index != 0) {
@@ -31038,7 +30982,7 @@ fn beginComptimePtrMutation(
if (!base_elem_ty.hasWellDefinedLayout(mod)) {
// Even though the parent value type has well-defined memory layout, our
// pointer type does not.
return ComptimePtrMutationKit{
return .{
.root = parent.root,
.pointee = .bad_ptr_ty,
.ty = base_elem_ty,
@@ -31048,7 +30992,7 @@ fn beginComptimePtrMutation(
const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty);
const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64);
const elem_idx = try sema.usizeCast(block, src, elem_ptr.index);
return ComptimePtrMutationKit{
return .{
.root = parent.root,
.pointee = .{ .reinterpret = .{
.val_ptr = reinterpret.val_ptr,
@@ -31067,56 +31011,68 @@ fn beginComptimePtrMutation(
var parent = try sema.beginComptimePtrMutation(block, src, Value.fromInterned(field_ptr.base), base_child_ty);
switch (parent.pointee) {
.opv => unreachable,
.direct => |val_ptr| switch (val_ptr.ip_index) {
.empty_struct => {
const duped = try sema.arena.create(Value);
duped.* = val_ptr.*;
return beginComptimePtrMutationInner(
sema,
.direct => |val_ptr| {
try val_ptr.unintern(mod, sema.arena, false, false);
switch (val_ptr.*) {
.interned,
.eu_payload,
.opt_payload,
.repeated,
.bytes,
=> unreachable,
.aggregate => |*a| return sema.beginComptimePtrMutationInner(
block,
src,
parent.ty.structFieldType(field_index, mod),
duped,
ptr_elem_ty,
parent.root,
);
},
.none => switch (val_ptr.tag()) {
.aggregate => return beginComptimePtrMutationInner(
sema,
block,
src,
parent.ty.structFieldType(field_index, mod),
&val_ptr.castTag(.aggregate).?.data[field_index],
&a.elems[field_index],
ptr_elem_ty,
parent.root,
),
.repeated => {
const arena = sema.arena;
 
const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod));
@memset(elems, val_ptr.castTag(.repeated).?.data);
val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
 
return beginComptimePtrMutationInner(
sema,
.slice => |*s| switch (field_index) {
Value.slice_ptr_index => return sema.beginComptimePtrMutationInner(
block,
src,
parent.ty.structFieldType(field_index, mod),
&elems[field_index],
parent.ty.slicePtrFieldType(mod),
s.ptr,
ptr_elem_ty,
parent.root,
);
),
Value.slice_len_index => return sema.beginComptimePtrMutationInner(
block,
src,
Type.usize,
s.len,
ptr_elem_ty,
parent.root,
),
else => unreachable,
},
.@"union" => {
const payload = &val_ptr.castTag(.@"union").?.data;
.un => |*un| {
const layout = base_child_ty.containerLayout(mod);
 
const tag_type = base_child_ty.unionTagTypeHypothetical(mod);
const hypothetical_tag = try mod.enumValueFieldIndex(tag_type, field_index);
if (layout == .auto or (payload.tag != null and hypothetical_tag.eql(payload.tag.?, tag_type, mod))) {
if (un.tag == .none and un.payload.* == .interned and un.payload.interned == .undef) {
// A union has been initialized to undefined at comptime and now we
// are for the first time setting the payload. We must change the
// tag implicitly.
const payload_ty = parent.ty.structFieldType(field_index, mod);
un.tag = hypothetical_tag.toIntern();
un.payload.* = .{ .interned = try mod.intern(.{ .undef = payload_ty.toIntern() }) };
return beginComptimePtrMutationInner(
sema,
block,
src,
payload_ty,
un.payload,
ptr_elem_ty,
parent.root,
);
}
 
if (layout == .auto or hypothetical_tag.toIntern() == un.tag) {
// We need to set the active field of the union.
payload.tag = hypothetical_tag;
un.tag = hypothetical_tag.toIntern();
 
const field_ty = parent.ty.structFieldType(field_index, mod);
return beginComptimePtrMutationInner(
@@ -31124,7 +31080,7 @@ fn beginComptimePtrMutation(
block,
src,
field_ty,
&payload.val,
un.payload,
ptr_elem_ty,
parent.root,
);
@@ -31132,11 +31088,10 @@ fn beginComptimePtrMutation(
// Writing to a different field (a different or unknown tag is active) requires reinterpreting
// memory of the entire union, which requires knowing its abiSize.
try sema.resolveTypeLayout(parent.ty);
 
// This union value no longer has a well-defined tag type.
// The reinterpretation will read it back out as .none.
payload.val = try payload.val.unintern(sema.arena, mod);
return ComptimePtrMutationKit{
try un.payload.unintern(mod, sema.arena, false, false);
return .{
.root = parent.root,
.pointee = .{ .reinterpret = .{
.val_ptr = val_ptr,
@@ -31147,119 +31102,12 @@ fn beginComptimePtrMutation(
};
}
},
.slice => switch (field_index) {
Value.slice_ptr_index => return beginComptimePtrMutationInner(
sema,
block,
src,
parent.ty.slicePtrFieldType(mod),
&val_ptr.castTag(.slice).?.data.ptr,
ptr_elem_ty,
parent.root,
),
 
Value.slice_len_index => return beginComptimePtrMutationInner(
sema,
block,
src,
Type.usize,
&val_ptr.castTag(.slice).?.data.len,
ptr_elem_ty,
parent.root,
),
 
else => unreachable,
},
else => unreachable,
},
else => switch (mod.intern_pool.indexToKey(val_ptr.toIntern())) {
.undef => {
// A struct or union has been initialized to undefined at comptime and now we
// are for the first time setting a field. We must change the representation
// of the struct/union from `undef` to `struct`/`union`.
const arena = sema.arena;
 
switch (parent.ty.zigTypeTag(mod)) {
.Struct => {
const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod));
for (fields, 0..) |*field, i| field.* = Value.fromInterned((try mod.intern(.{
.undef = parent.ty.structFieldType(i, mod).toIntern(),
})));
 
val_ptr.* = try Value.Tag.aggregate.create(arena, fields);
 
return beginComptimePtrMutationInner(
sema,
block,
src,
parent.ty.structFieldType(field_index, mod),
&fields[field_index],
ptr_elem_ty,
parent.root,
);
},
.Union => {
const payload = try arena.create(Value.Payload.Union);
const tag_ty = parent.ty.unionTagTypeHypothetical(mod);
const payload_ty = parent.ty.structFieldType(field_index, mod);
payload.* = .{ .data = .{
.tag = try mod.enumValueFieldIndex(tag_ty, field_index),
.val = Value.fromInterned((try mod.intern(.{ .undef = payload_ty.toIntern() }))),
} };
 
val_ptr.* = Value.initPayload(&payload.base);
 
return beginComptimePtrMutationInner(
sema,
block,
src,
payload_ty,
&payload.data.val,
ptr_elem_ty,
parent.root,
);
},
.Pointer => {
assert(parent.ty.isSlice(mod));
const ptr_ty = parent.ty.slicePtrFieldType(mod);
val_ptr.* = try Value.Tag.slice.create(arena, .{
.ptr = Value.fromInterned((try mod.intern(.{ .undef = ptr_ty.toIntern() }))),
.len = Value.fromInterned((try mod.intern(.{ .undef = .usize_type }))),
});
 
switch (field_index) {
Value.slice_ptr_index => return beginComptimePtrMutationInner(
sema,
block,
src,
ptr_ty,
&val_ptr.castTag(.slice).?.data.ptr,
ptr_elem_ty,
parent.root,
),
Value.slice_len_index => return beginComptimePtrMutationInner(
sema,
block,
src,
Type.usize,
&val_ptr.castTag(.slice).?.data.len,
ptr_elem_ty,
parent.root,
),
 
else => unreachable,
}
},
else => unreachable,
}
},
else => unreachable,
},
}
},
.reinterpret => |reinterpret| {
const field_offset_u64 = base_child_ty.structFieldOffset(field_index, mod);
const field_offset = try sema.usizeCast(block, src, field_offset_u64);
return ComptimePtrMutationKit{
return .{
.root = parent.root,
.pointee = .{ .reinterpret = .{
.val_ptr = reinterpret.val_ptr,
@@ -31279,7 +31127,7 @@ fn beginComptimePtrMutationInner(
block: *Block,
src: LazySrcLoc,
decl_ty: Type,
decl_val: *Value,
decl_val: *MutableValue,
ptr_elem_ty: Type,
root: ComptimePtrMutationKit.Root,
) CompileError!ComptimePtrMutationKit {
@@ -31287,7 +31135,13 @@ fn beginComptimePtrMutationInner(
const target = mod.getTarget();
const coerce_ok = (try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_ty, true, target, src, src)) == .ok;
 
decl_val.* = try decl_val.unintern(sema.arena, mod);
const old_decl_val = decl_val.*;
try decl_val.unintern(mod, sema.arena, false, false);
if (decl_val.* == .un and decl_val.un.tag == .none and decl_val.un.payload.* == .interned and decl_val.un.payload.interned == .undef) {
// HACKHACK: undefined union - re-intern it for now
// `unintern` probably should just leave these as is, but I'm leaving it until I rewrite comptime pointer access.
decl_val.* = old_decl_val;
}
 
if (coerce_ok) {
return ComptimePtrMutationKit{
@@ -31333,21 +31187,16 @@ fn beginComptimePtrMutationInner(
};
}
 
const TypedValueAndOffset = struct {
tv: TypedValue,
byte_offset: usize,
};
 
const ComptimePtrLoadKit = struct {
/// The Value and Type corresponding to the pointee of the provided pointer.
/// If a direct dereference is not possible, this is null.
pointee: ?TypedValue,
pointee: ?MutableValue,
/// The largest parent Value containing `pointee` and having a well-defined memory layout.
/// This is used for bitcasting, if direct dereferencing failed (i.e. `pointee` is null).
parent: ?TypedValueAndOffset,
/// Whether the `pointee` could be mutated by further
/// semantic analysis and a copy must be performed.
is_mutable: bool,
parent: ?struct {
val: MutableValue,
byte_offset: usize,
},
/// If the root decl could not be used as `parent`, this is the type that
/// caused that by not having a well-defined layout
ty_without_well_defined_layout: ?Type,
@@ -31374,53 +31223,41 @@ fn beginComptimePtrLoad(
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl_index| blk: {
const decl = mod.declPtr(decl_index);
const decl_tv = try decl.typedValue();
try sema.declareDependency(.{ .decl_val = decl_index });
if (decl.val.getVariable(mod) != null) return error.RuntimeLoad;
 
const layout_defined = decl.ty.hasWellDefinedLayout(mod);
const decl_val: MutableValue = .{ .interned = decl.val.toIntern() };
const layout_defined = decl.typeOf(mod).hasWellDefinedLayout(mod);
break :blk ComptimePtrLoadKit{
.parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null,
.pointee = decl_tv,
.is_mutable = false,
.ty_without_well_defined_layout = if (!layout_defined) decl.ty else null,
.parent = if (layout_defined) .{ .val = decl_val, .byte_offset = 0 } else null,
.pointee = decl_val,
.ty_without_well_defined_layout = if (!layout_defined) decl.typeOf(mod) else null,
};
},
.comptime_alloc => |alloc_index| kit: {
const alloc = sema.getComptimeAlloc(alloc_index);
const alloc_tv: TypedValue = .{
.ty = alloc.ty,
.val = alloc.val,
};
const layout_defined = alloc.ty.hasWellDefinedLayout(mod);
const alloc_ty = alloc.val.typeOf(mod);
const layout_defined = alloc_ty.hasWellDefinedLayout(mod);
break :kit .{
.parent = if (layout_defined) .{ .tv = alloc_tv, .byte_offset = 0 } else null,
.pointee = alloc_tv,
.is_mutable = true,
.ty_without_well_defined_layout = if (!layout_defined) alloc.ty else null,
.parent = if (layout_defined) .{ .val = alloc.val, .byte_offset = 0 } else null,
.pointee = alloc.val,
.ty_without_well_defined_layout = if (!layout_defined) alloc_ty else null,
};
},
.anon_decl => |anon_decl| blk: {
const decl_val = anon_decl.val;
if (Value.fromInterned(decl_val).getVariable(mod) != null) return error.RuntimeLoad;
const decl_ty = Type.fromInterned(ip.typeOf(decl_val));
const decl_tv: TypedValue = .{ .ty = decl_ty, .val = Value.fromInterned(decl_val) };
const decl_mv: MutableValue = .{ .interned = decl_val };
const layout_defined = decl_ty.hasWellDefinedLayout(mod);
break :blk ComptimePtrLoadKit{
.parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null,
.pointee = decl_tv,
.is_mutable = false,
.parent = if (layout_defined) .{ .val = decl_mv, .byte_offset = 0 } else null,
.pointee = decl_mv,
.ty_without_well_defined_layout = if (!layout_defined) decl_ty else null,
};
},
.int => return error.RuntimeLoad,
.eu_payload, .opt_payload => |container_ptr| blk: {
const container_ty = Type.fromInterned(ip.typeOf(container_ptr)).childType(mod);
const payload_ty = switch (ptr.addr) {
.eu_payload => container_ty.errorUnionPayload(mod),
.opt_payload => container_ty.optionalChild(mod),
else => unreachable,
};
var deref = try sema.beginComptimePtrLoad(block, src, Value.fromInterned(container_ptr), container_ty);
 
// eu_payload and opt_payload never have a well-defined layout
@@ -31429,15 +31266,14 @@ fn beginComptimePtrLoad(
deref.ty_without_well_defined_layout = container_ty;
}
 
if (deref.pointee) |*tv| {
if (deref.pointee) |pointee| {
const pointee_ty = pointee.typeOf(mod);
const coerce_in_mem_ok =
(try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or
(try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok;
(try sema.coerceInMemoryAllowed(block, container_ty, pointee_ty, false, target, src, src)) == .ok or
(try sema.coerceInMemoryAllowed(block, pointee_ty, container_ty, false, target, src, src)) == .ok;
if (coerce_in_mem_ok) {
const payload_val = switch (tv.val.ip_index) {
.none => tv.val.cast(Value.Payload.SubValue).?.data,
.null_value => return sema.fail(block, src, "attempt to use null value", .{}),
else => Value.fromInterned(switch (ip.indexToKey(tv.val.toIntern())) {
deref.pointee = switch (pointee) {
.interned => |ip_index| .{ .interned = switch (ip.indexToKey(ip_index)) {
.error_union => |error_union| switch (error_union.val) {
.err_name => |err_name| return sema.fail(
block,
@@ -31452,23 +31288,20 @@ fn beginComptimePtrLoad(
else => |payload| payload,
},
else => unreachable,
}),
} },
.eu_payload, .opt_payload => |p| p.child.*,
else => unreachable,
};
tv.* = TypedValue{ .ty = payload_ty, .val = payload_val };
break :blk deref;
}
}
deref.pointee = null;
break :blk deref;
},
.comptime_field => |comptime_field| blk: {
const field_ty = Type.fromInterned(ip.typeOf(comptime_field));
break :blk ComptimePtrLoadKit{
.parent = null,
.pointee = .{ .ty = field_ty, .val = Value.fromInterned(comptime_field) },
.is_mutable = false,
.ty_without_well_defined_layout = field_ty,
};
.comptime_field => |field_val| .{
.parent = null,
.pointee = .{ .interned = field_val },
.ty_without_well_defined_layout = Type.fromInterned(ip.typeOf(field_val)),
},
.elem => |elem_ptr| blk: {
const elem_ty = Type.fromInterned(ip.typeOf(elem_ptr.base)).elemType2(mod);
@@ -31501,30 +31334,37 @@ fn beginComptimePtrLoad(
 
// If we're loading an elem that was derived from a different type
// than the true type of the underlying decl, we cannot deref directly
const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: {
const deref_elem_ty = deref.pointee.?.ty.childType(mod);
break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or
(try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok;
const ty_matches = if (deref.pointee) |pointee| match: {
const ty = pointee.typeOf(mod);
if (!ty.isArrayOrVector(mod)) break :match false;
const deref_elem_ty = ty.childType(mod);
if ((try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok) break :match true;
if ((try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok) break :match true;
break :match false;
} else false;
if (!ty_matches) {
deref.pointee = null;
break :blk deref;
}
 
var array_tv = deref.pointee.?;
const check_len = array_tv.ty.arrayLenIncludingSentinel(mod);
var array_val = deref.pointee.?;
const check_len = array_val.typeOf(mod).arrayLenIncludingSentinel(mod);
if (maybe_array_ty) |load_ty| {
// It's possible that we're loading a [N]T, in which case we'd like to slice
// the pointee array directly from our parent array.
if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, mod)) {
const len = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod));
const elem_idx = try sema.usizeCast(block, src, elem_ptr.index);
deref.pointee = if (elem_ptr.index + len <= check_len) TypedValue{
.ty = try mod.arrayType(.{
.len = len,
.child = elem_ty.toIntern(),
}),
.val = try array_tv.val.sliceArray(sema, elem_idx, elem_idx + len),
deref.pointee = if (elem_ptr.index + len <= check_len) switch (array_val) {
.aggregate => |a| .{ .aggregate = .{
.ty = (try mod.arrayType(.{ .len = len, .child = elem_ty.toIntern() })).toIntern(),
.elems = a.elems[elem_idx..][0..len],
} },
else => .{
.interned = (try (Value.fromInterned(
try array_val.intern(mod, sema.arena),
).sliceArray(sema, elem_idx, elem_idx + len))).toIntern(),
},
} else null;
break :blk deref;
}
@@ -31535,18 +31375,12 @@ fn beginComptimePtrLoad(
break :blk deref;
}
if (elem_ptr.index == check_len - 1) {
if (array_tv.ty.sentinel(mod)) |sent| {
deref.pointee = TypedValue{
.ty = elem_ty,
.val = sent,
};
if (array_val.typeOf(mod).sentinel(mod)) |sent| {
deref.pointee = .{ .interned = sent.toIntern() };
break :blk deref;
}
}
deref.pointee = TypedValue{
.ty = elem_ty,
.val = try array_tv.val.elemValue(mod, @intCast(elem_ptr.index)),
};
deref.pointee = try array_val.getElem(mod, @intCast(elem_ptr.index));
break :blk deref;
},
.field => |field_ptr| blk: {
@@ -31570,37 +31404,17 @@ fn beginComptimePtrLoad(
deref.ty_without_well_defined_layout = container_ty;
}
 
const tv = deref.pointee orelse {
deref.pointee = null;
break :blk deref;
};
const pointee = deref.pointee orelse break :blk deref;
const pointee_ty = pointee.typeOf(mod);
const coerce_in_mem_ok =
(try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or
(try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok;
(try sema.coerceInMemoryAllowed(block, container_ty, pointee_ty, false, target, src, src)) == .ok or
(try sema.coerceInMemoryAllowed(block, pointee_ty, container_ty, false, target, src, src)) == .ok;
if (!coerce_in_mem_ok) {
deref.pointee = null;
break :blk deref;
}
 
if (container_ty.isSlice(mod)) {
deref.pointee = switch (field_index) {
Value.slice_ptr_index => TypedValue{
.ty = container_ty.slicePtrFieldType(mod),
.val = tv.val.slicePtr(mod),
},
Value.slice_len_index => TypedValue{
.ty = Type.usize,
.val = Value.fromInterned(ip.indexToKey(try tv.val.intern(tv.ty, mod)).slice.len),
},
else => unreachable,
};
} else {
const field_ty = container_ty.structFieldType(field_index, mod);
deref.pointee = TypedValue{
.ty = field_ty,
.val = try tv.val.fieldValue(mod, field_index),
};
}
deref.pointee = try pointee.getElem(mod, field_index);
break :blk deref;
},
},
@@ -31611,9 +31425,9 @@ fn beginComptimePtrLoad(
else => unreachable,
};
 
if (deref.pointee) |tv| {
if (deref.parent == null and tv.ty.hasWellDefinedLayout(mod)) {
deref.parent = .{ .tv = tv, .byte_offset = 0 };
if (deref.pointee) |val| {
if (deref.parent == null and val.typeOf(mod).hasWellDefinedLayout(mod)) {
deref.parent = .{ .val = val, .byte_offset = 0 };
}
}
return deref;
@@ -31760,16 +31574,11 @@ fn coerceArrayPtrToSlice(
if (try sema.resolveValue(inst)) |val| {
const ptr_array_ty = sema.typeOf(inst);
const array_ty = ptr_array_ty.childType(mod);
const slice_ptr_ty = dest_ty.slicePtrFieldType(mod);
const slice_ptr = try mod.getCoerced(val, slice_ptr_ty);
const slice_val = try mod.intern(.{ .slice = .{
.ty = dest_ty.toIntern(),
.ptr = try mod.intern(.{ .ptr = .{
.ty = dest_ty.slicePtrFieldType(mod).toIntern(),
.addr = switch (mod.intern_pool.indexToKey(val.toIntern())) {
.undef => .{ .int = try mod.intern(.{ .undef = .usize_type }) },
.ptr => |ptr| ptr.addr,
else => unreachable,
},
} }),
.ptr = slice_ptr.toIntern(),
.len = (try mod.intValue(Type.usize, array_ty.arrayLen(mod))).toIntern(),
} });
return Air.internedToRef(slice_val);
@@ -31844,7 +31653,7 @@ fn coerceCompatiblePtrs(
}
// The comptime Value representation is compatible with both types.
return Air.internedToRef(
(try mod.getCoerced(Value.fromInterned((try val.intern(inst_ty, mod))), dest_ty)).toIntern(),
(try mod.getCoerced(val, dest_ty)).toIntern(),
);
}
try sema.requireRuntimeBlock(block, inst_src, null);
@@ -31899,7 +31708,7 @@ fn coerceEnumToUnion(
if (try sema.resolveDefinedValue(block, inst_src, enum_tag)) |val| {
const field_index = union_ty.unionTagFieldIndex(val, sema.mod) orelse {
return sema.fail(block, inst_src, "union '{}' has no tag with value '{}'", .{
union_ty.fmt(sema.mod), val.fmtValue(tag_ty, sema.mod),
union_ty.fmt(sema.mod), val.fmtValue(sema.mod),
});
};
 
@@ -32181,7 +31990,7 @@ fn coerceArrayLike(
ref.* = coerced;
if (runtime_src == null) {
if (try sema.resolveValue(coerced)) |elem_val| {
val.* = try elem_val.intern(dest_elem_ty, mod);
val.* = elem_val.toIntern();
} else {
runtime_src = elem_src;
}
@@ -32246,7 +32055,7 @@ fn coerceTupleToArray(
ref.* = coerced;
if (runtime_src == null) {
if (try sema.resolveValue(coerced)) |elem_val| {
val.* = try elem_val.intern(dest_elem_ty, mod);
val.* = elem_val.toIntern();
} else {
runtime_src = elem_src;
}
@@ -32650,7 +32459,7 @@ fn optRefValue(sema: *Sema, opt_val: ?Value) !Value {
return Value.fromInterned((try mod.intern(.{ .opt = .{
.ty = (try mod.optionalType(ptr_anyopaque_ty.toIntern())).toIntern(),
.val = if (opt_val) |val| (try mod.getCoerced(
Value.fromInterned((try sema.refValue(val.toIntern()))),
Value.fromInterned(try sema.refValue(val.toIntern())),
ptr_anyopaque_ty,
)).toIntern() else .none,
} })));
@@ -32668,8 +32477,8 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: InternPool.DeclIndex, analyze_fn
const mod = sema.mod;
try sema.ensureDeclAnalyzed(decl_index);
 
const decl_tv = try mod.declPtr(decl_index).typedValue();
const owner_decl = mod.declPtr(switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) {
const decl_val = try mod.declPtr(decl_index).valueOrFail();
const owner_decl = mod.declPtr(switch (mod.intern_pool.indexToKey(decl_val.toIntern())) {
.variable => |variable| variable.decl,
.extern_func => |extern_func| extern_func.decl,
.func => |func| func.owner_decl,
@@ -32678,10 +32487,10 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: InternPool.DeclIndex, analyze_fn
// TODO: if this is a `decl_ref` of a non-variable decl, only depend on decl type
try sema.declareDependency(.{ .decl_val = decl_index });
const ptr_ty = try sema.ptrType(.{
.child = decl_tv.ty.toIntern(),
.child = decl_val.typeOf(mod).toIntern(),
.flags = .{
.alignment = owner_decl.alignment,
.is_const = if (decl_tv.val.getVariable(mod)) |variable| variable.is_const else true,
.is_const = if (decl_val.getVariable(mod)) |variable| variable.is_const else true,
.address_space = owner_decl.@"addrspace",
},
});
@@ -32697,12 +32506,10 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: InternPool.DeclIndex, analyze_fn
fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: InternPool.DeclIndex) !void {
const mod = sema.mod;
const decl = mod.declPtr(decl_index);
const tv = try decl.typedValue();
if (tv.ty.zigTypeTag(mod) != .Fn) return;
if (!try sema.fnHasRuntimeBits(tv.ty)) return;
const func_index = tv.val.toIntern();
if (!mod.intern_pool.isFuncBody(func_index)) return; // undef or extern function
try mod.ensureFuncBodyAnalysisQueued(func_index);
const decl_val = try decl.valueOrFail();
if (!mod.intern_pool.isFuncBody(decl_val.toIntern())) return;
if (!try sema.fnHasRuntimeBits(decl_val.typeOf(mod))) return;
try mod.ensureFuncBodyAnalysisQueued(decl_val.toIntern());
}
 
fn analyzeRef(
@@ -32839,7 +32646,7 @@ fn analyzeSliceLen(
if (slice_val.isUndef(mod)) {
return mod.undefRef(Type.usize);
}
return mod.intRef(Type.usize, slice_val.sliceLen(sema.mod));
return mod.intRef(Type.usize, try slice_val.sliceLen(sema));
}
try sema.requireRuntimeBlock(block, src, null);
return block.addTyOp(.slice_len, Type.usize, slice_inst);
@@ -33121,8 +32928,8 @@ fn analyzeSlice(
msg,
"expected '{}', found '{}'",
.{
Value.zero_comptime_int.fmtValue(Type.comptime_int, mod),
start_value.fmtValue(Type.comptime_int, mod),
Value.zero_comptime_int.fmtValue(mod),
start_value.fmtValue(mod),
},
);
break :msg msg;
@@ -33138,8 +32945,8 @@ fn analyzeSlice(
msg,
"expected '{}', found '{}'",
.{
Value.one_comptime_int.fmtValue(Type.comptime_int, mod),
end_value.fmtValue(Type.comptime_int, mod),
Value.one_comptime_int.fmtValue(mod),
end_value.fmtValue(mod),
},
);
break :msg msg;
@@ -33152,7 +32959,7 @@ fn analyzeSlice(
block,
end_src,
"end index {} out of bounds for slice of single-item pointer",
.{end_value.fmtValue(Type.comptime_int, mod)},
.{end_value.fmtValue(mod)},
);
}
}
@@ -33247,8 +33054,8 @@ fn analyzeSlice(
end_src,
"end index {} out of bounds for array of length {}{s}",
.{
end_val.fmtValue(Type.usize, mod),
len_val.fmtValue(Type.usize, mod),
end_val.fmtValue(mod),
len_val.fmtValue(mod),
sentinel_label,
},
);
@@ -33278,7 +33085,7 @@ fn analyzeSlice(
return sema.fail(block, src, "slice of undefined", .{});
}
const has_sentinel = slice_ty.sentinel(mod) != null;
const slice_len = slice_val.sliceLen(mod);
const slice_len = try slice_val.sliceLen(sema);
const len_plus_sent = slice_len + @intFromBool(has_sentinel);
const slice_len_val_with_sentinel = try mod.intValue(Type.usize, len_plus_sent);
if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, Type.usize))) {
@@ -33292,8 +33099,8 @@ fn analyzeSlice(
end_src,
"end index {} out of bounds for slice of length {d}{s}",
.{
end_val.fmtValue(Type.usize, mod),
slice_val.sliceLen(mod),
end_val.fmtValue(mod),
try slice_val.sliceLen(sema),
sentinel_label,
},
);
@@ -33352,8 +33159,8 @@ fn analyzeSlice(
start_src,
"start index {} is larger than end index {}",
.{
start_val.fmtValue(Type.usize, mod),
end_val.fmtValue(Type.usize, mod),
start_val.fmtValue(mod),
end_val.fmtValue(mod),
},
);
}
@@ -33391,8 +33198,8 @@ fn analyzeSlice(
const msg = try sema.errMsg(block, src, "value in memory does not match slice sentinel", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "expected '{}', found '{}'", .{
expected_sentinel.fmtValue(elem_ty, mod),
actual_sentinel.fmtValue(elem_ty, mod),
expected_sentinel.fmtValue(mod),
actual_sentinel.fmtValue(mod),
});
 
break :msg msg;
@@ -33483,10 +33290,7 @@ fn analyzeSlice(
};
 
if (!new_ptr_val.isUndef(mod)) {
return Air.internedToRef((try mod.getCoerced(
Value.fromInterned((try new_ptr_val.intern(new_ptr_ty, mod))),
return_ty,
)).toIntern());
return Air.internedToRef((try mod.getCoerced(new_ptr_val, return_ty)).toIntern());
}
 
// Special case: @as([]i32, undefined)[x..x]
@@ -33525,7 +33329,7 @@ fn analyzeSlice(
if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| {
// we don't need to add one for sentinels because the
// underlying value data includes the sentinel
break :blk try mod.intRef(Type.usize, slice_val.sliceLen(mod));
break :blk try mod.intRef(Type.usize, try slice_val.sliceLen(sema));
}
 
const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
@@ -33998,7 +33802,7 @@ fn wrapErrorUnionPayload(
if (try sema.resolveValue(coerced)) |val| {
return Air.internedToRef((try mod.intern(.{ .error_union = .{
.ty = dest_ty.toIntern(),
.val = .{ .payload = try val.intern(dest_payload_ty, mod) },
.val = .{ .payload = val.toIntern() },
} })));
}
try sema.requireRuntimeBlock(block, inst_src, null);
@@ -36611,7 +36415,7 @@ fn resolveInferredErrorSet(
// inferred error sets, each call gets an adhoc InferredErrorSet object, which
// has no corresponding function body.
const ies_func_owner_decl = mod.declPtr(func.owner_decl);
const ies_func_info = mod.typeToFunc(ies_func_owner_decl.ty).?;
const ies_func_info = mod.typeToFunc(ies_func_owner_decl.typeOf(mod)).?;
// if ies declared by a inline function with generic return type, the return_type should be generic_poison,
// because inline function does not create a new declaration, and the ies has been filled with analyzeCall,
// so here we can simply skip this case.
@@ -37174,15 +36978,14 @@ fn semaStructFieldInits(
});
};
 
const field_init = try default_val.intern(field_ty, mod);
if (Value.fromInterned(field_init).canMutateComptimeVarState(mod)) {
if (default_val.canMutateComptimeVarState(mod)) {
const init_src = mod.fieldSrcLoc(decl_index, .{
.index = field_i,
.range = .value,
}).lazy;
return sema.fail(&block_scope, init_src, "field default value contains reference to comptime-mutable memory", .{});
}
struct_type.field_inits.get(ip)[field_i] = field_init;
struct_type.field_inits.get(ip)[field_i] = default_val.toIntern();
}
}
}
@@ -37410,7 +37213,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Loaded
const field_src = mod.fieldSrcLoc(union_type.decl, .{ .index = field_i }).lazy;
const other_field_src = mod.fieldSrcLoc(union_type.decl, .{ .index = gop.index }).lazy;
const msg = msg: {
const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{enum_tag_val.fmtValue(int_tag_ty, mod)});
const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{enum_tag_val.fmtValue(mod)});
errdefer msg.destroy(gpa);
try sema.errNote(&block_scope, other_field_src, msg, "other occurrence here", .{});
break :msg msg;
@@ -37607,10 +37410,12 @@ fn generateUnionTagTypeNumbered(
errdefer mod.destroyDecl(new_decl_index);
const fqn = try union_owner_decl.fullyQualifiedName(mod);
const name = try ip.getOrPutStringFmt(gpa, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(ip)});
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, .{
.ty = Type.noreturn,
.val = Value.@"unreachable",
}, name);
try mod.initNewAnonDecl(
new_decl_index,
src_decl.src_line,
Value.@"unreachable",
name,
);
errdefer mod.abortAnonDecl(new_decl_index);
 
const new_decl = mod.declPtr(new_decl_index);
@@ -37629,7 +37434,6 @@ fn generateUnionTagTypeNumbered(
.tag_mode = .explicit,
});
 
new_decl.ty = Type.type;
new_decl.val = Value.fromInterned(enum_ty);
 
try mod.finalizeAnonDecl(new_decl_index);
@@ -37652,10 +37456,12 @@ fn generateUnionTagTypeSimple(
const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node);
errdefer mod.destroyDecl(new_decl_index);
const name = try ip.getOrPutStringFmt(gpa, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(ip)});
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, .{
.ty = Type.noreturn,
.val = Value.@"unreachable",
}, name);
try mod.initNewAnonDecl(
new_decl_index,
src_decl.src_line,
Value.@"unreachable",
name,
);
mod.declPtr(new_decl_index).name_fully_qualified = true;
break :new_decl_index new_decl_index;
};
@@ -37675,7 +37481,6 @@ fn generateUnionTagTypeSimple(
 
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
new_decl.ty = Type.type;
new_decl.val = Value.fromInterned(enum_ty);
 
try mod.finalizeAnonDecl(new_decl_index);
@@ -37991,7 +37796,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
return sema.failWithOwnedErrorMsg(null, msg);
}
if (try sema.typeHasOnePossibleValue(field_ty)) |field_opv| {
field_val.* = try field_opv.intern(field_ty, mod);
field_val.* = field_opv.toIntern();
} else return null;
}
 
@@ -38290,17 +38095,18 @@ fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value
else => |e| return e,
};
 
if (deref.pointee) |tv| {
if (deref.pointee) |pointee| {
const uncoerced_val = Value.fromInterned(try pointee.intern(mod, sema.arena));
const ty = Type.fromInterned(mod.intern_pool.typeOf(uncoerced_val.toIntern()));
const coerce_in_mem_ok =
(try sema.coerceInMemoryAllowed(block, load_ty, tv.ty, false, target, src, src)) == .ok or
(try sema.coerceInMemoryAllowed(block, tv.ty, load_ty, false, target, src, src)) == .ok;
(try sema.coerceInMemoryAllowed(block, load_ty, ty, false, target, src, src)) == .ok or
(try sema.coerceInMemoryAllowed(block, ty, load_ty, false, target, src, src)) == .ok;
if (coerce_in_mem_ok) {
// We have a Value that lines up in virtual memory exactly with what we want to load,
// and it is in-memory coercible to load_ty. It may be returned without modifications.
// Move mutable decl values to the InternPool and assert other decls are already in
// the InternPool.
const uncoerced_val = if (deref.is_mutable) try tv.val.intern(tv.ty, mod) else tv.val.toIntern();
const coerced_val = try mod.getCoerced(Value.fromInterned(uncoerced_val), load_ty);
const coerced_val = try mod.getCoerced(uncoerced_val, load_ty);
return .{ .val = coerced_val };
}
}
@@ -38314,21 +38120,35 @@ fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value
const load_sz = try sema.typeAbiSize(load_ty);
 
// Try the smaller bit-cast first, since that's more efficient than using the larger `parent`
if (deref.pointee) |tv| if (load_sz <= try sema.typeAbiSize(tv.ty))
return DerefResult{ .val = (try sema.bitCastVal(block, src, tv.val, tv.ty, load_ty, 0)) orelse return .runtime_load };
if (deref.pointee) |pointee| {
const val_ip_index = try pointee.intern(mod, sema.arena);
const val = Value.fromInterned(val_ip_index);
const ty = Type.fromInterned(mod.intern_pool.typeOf(val_ip_index));
if (load_sz <= try sema.typeAbiSize(ty)) {
return .{ .val = (try sema.bitCastVal(block, src, val, ty, load_ty, 0)) orelse return .runtime_load };
}
}
 
// If that fails, try to bit-cast from the largest parent value with a well-defined layout
if (deref.parent) |parent| if (load_sz + parent.byte_offset <= try sema.typeAbiSize(parent.tv.ty))
return DerefResult{ .val = (try sema.bitCastVal(block, src, parent.tv.val, parent.tv.ty, load_ty, parent.byte_offset)) orelse return .runtime_load };
if (deref.parent) |parent| {
const parent_ip_index = try parent.val.intern(mod, sema.arena);
const parent_val = Value.fromInterned(parent_ip_index);
const parent_ty = Type.fromInterned(mod.intern_pool.typeOf(parent_ip_index));
if (load_sz + parent.byte_offset <= try sema.typeAbiSize(parent_ty)) {
return .{ .val = (try sema.bitCastVal(block, src, parent_val, parent_ty, load_ty, parent.byte_offset)) orelse return .runtime_load };
}
}
 
if (deref.ty_without_well_defined_layout) |bad_ty| {
// We got no parent for bit-casting, or the parent we got was too small. Either way, the problem
// is that some type we encountered when de-referencing does not have a well-defined layout.
return DerefResult{ .needed_well_defined = bad_ty };
return .{ .needed_well_defined = bad_ty };
} else {
// If all encountered types had well-defined layouts, the parent is the root decl and it just
// wasn't big enough for the load.
return DerefResult{ .out_of_bounds = deref.parent.?.tv.ty };
const parent_ip_index = try deref.parent.?.val.intern(mod, sema.arena);
const parent_ty = Type.fromInterned(mod.intern_pool.typeOf(parent_ip_index));
return .{ .out_of_bounds = parent_ty };
}
}
 
@@ -38530,7 +38350,7 @@ fn intAddInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usi
},
else => |e| return e,
};
scalar.* = try val.intern(scalar_ty, mod);
scalar.* = val.toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -38620,7 +38440,7 @@ fn intSubInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usi
},
else => |e| return e,
};
scalar.* = try val.intern(scalar_ty, mod);
scalar.* = val.toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -38690,8 +38510,8 @@ fn intSubWithOverflow(
const lhs_elem = try lhs.elemValue(sema.mod, i);
const rhs_elem = try rhs.elemValue(sema.mod, i);
const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty);
of.* = try of_math_result.overflow_bit.intern(Type.u1, mod);
scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
of.* = of_math_result.overflow_bit.toIntern();
scalar.* = of_math_result.wrapped_result.toIntern();
}
return Value.OverflowArithmeticResult{
.overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{
@@ -38746,19 +38566,17 @@ fn intFromFloat(
) CompileError!Value {
const mod = sema.mod;
if (float_ty.zigTypeTag(mod) == .Vector) {
const elem_ty = float_ty.scalarType(mod);
const result_data = try sema.arena.alloc(InternPool.Index, float_ty.vectorLen(mod));
const scalar_ty = int_ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(sema.mod, i);
scalar.* = try (try sema.intFromFloatScalar(block, src, elem_val, elem_ty, int_ty.scalarType(mod), mode)).intern(scalar_ty, mod);
scalar.* = (try sema.intFromFloatScalar(block, src, elem_val, int_ty.scalarType(mod), mode)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = int_ty.toIntern(),
.storage = .{ .elems = result_data },
} })));
}
return sema.intFromFloatScalar(block, src, val, float_ty, int_ty, mode);
return sema.intFromFloatScalar(block, src, val, int_ty, mode);
}
 
// float is expected to be finite and non-NaN
@@ -38791,7 +38609,6 @@ fn intFromFloatScalar(
block: *Block,
src: LazySrcLoc,
val: Value,
float_ty: Type,
int_ty: Type,
mode: IntFromFloatMode,
) CompileError!Value {
@@ -38803,7 +38620,7 @@ fn intFromFloatScalar(
block,
src,
"fractional component prevents float value '{}' from coercion to type '{}'",
.{ val.fmtValue(float_ty, mod), int_ty.fmt(mod) },
.{ val.fmtValue(mod), int_ty.fmt(mod) },
);
 
const float = val.toFloat(f128, mod);
@@ -38825,7 +38642,7 @@ fn intFromFloatScalar(
 
if (!(try sema.intFitsInType(cti_result, int_ty, null))) {
return sema.fail(block, src, "float value '{}' cannot be stored in integer type '{}'", .{
val.fmtValue(float_ty, sema.mod), int_ty.fmt(sema.mod),
val.fmtValue(sema.mod), int_ty.fmt(sema.mod),
});
}
return mod.getCoerced(cti_result, int_ty);
@@ -38944,8 +38761,8 @@ fn intAddWithOverflow(
const lhs_elem = try lhs.elemValue(sema.mod, i);
const rhs_elem = try rhs.elemValue(sema.mod, i);
const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty);
of.* = try of_math_result.overflow_bit.intern(Type.u1, mod);
scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
of.* = of_math_result.overflow_bit.toIntern();
scalar.* = of_math_result.wrapped_result.toIntern();
}
return Value.OverflowArithmeticResult{
.overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{
@@ -39055,7 +38872,7 @@ fn compareVector(
const lhs_elem = try lhs.elemValue(sema.mod, i);
const rhs_elem = try rhs.elemValue(sema.mod, i);
const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod));
scalar.* = try Value.makeBool(res_bool).intern(Type.bool, mod);
scalar.* = Value.makeBool(res_bool).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = (try mod.vectorType(.{ .len = ty.vectorLen(mod), .child = .bool_type })).toIntern(),
@@ -39232,45 +39049,22 @@ fn validateRuntimeValue(sema: *Sema, block: *Block, val_src: LazySrcLoc, val: Ai
}
 
/// Returns true if any value contained in `val` is undefined.
fn anyUndef(sema: *Sema, val: Value) !bool {
fn anyUndef(sema: *Sema, block: *Block, src: LazySrcLoc, val: Value) !bool {
const mod = sema.mod;
if (val.ip_index == .none) return switch (val.tag()) {
.eu_payload => try sema.anyUndef(val.castTag(.eu_payload).?.data),
.opt_payload => try sema.anyUndef(val.castTag(.opt_payload).?.data),
.repeated => try sema.anyUndef(val.castTag(.repeated).?.data),
.slice => {
const slice = val.castTag(.slice).?.data;
for (0..@intCast(slice.len.toUnsignedInt(mod))) |idx| {
if (try sema.anyUndef((try slice.ptr.maybeElemValueFull(sema, mod, idx)).?)) return true;
}
return false;
},
.bytes => false,
.aggregate => for (val.castTag(.aggregate).?.data) |elem| {
if (try sema.anyUndef(elem)) break true;
} else false,
.@"union" => {
const un = val.castTag(.@"union").?.data;
if (un.tag) |t| {
if (try sema.anyUndef(t)) return true;
}
return sema.anyUndef(un.val);
},
};
return switch (val.toIntern()) {
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.undef => true,
else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
.undef => true,
.simple_value => |v| v == .undefined,
.slice => |slice| for (0..@intCast(Value.fromInterned(slice.len).toUnsignedInt(mod))) |idx| {
if (try sema.anyUndef((try val.maybeElemValueFull(sema, mod, idx)).?)) break true;
} else false,
.aggregate => |aggregate| for (0..aggregate.storage.values().len) |i| {
const elem = mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.values()[i];
if (try sema.anyUndef(Value.fromInterned(elem))) break true;
} else false,
else => false,
.simple_value => |v| v == .undefined,
.slice => {
// If the slice contents are runtime-known, reification will fail later on with a
// specific error message.
const arr = try sema.maybeDerefSliceAsArray(block, src, val) orelse return false;
return sema.anyUndef(block, src, arr);
},
.aggregate => |aggregate| for (0..aggregate.storage.values().len) |i| {
const elem = mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.values()[i];
if (try sema.anyUndef(block, src, Value.fromInterned(elem))) break true;
} else false,
else => false,
};
}
 
@@ -39283,12 +39077,11 @@ fn sliceToIpString(
reason: NeededComptimeReason,
) CompileError!InternPool.NullTerminatedString {
const zcu = sema.mod;
const ip = &zcu.intern_pool;
const slice_ty = Type.fromInterned(ip.typeOf(slice_val.toIntern()));
const slice_ty = slice_val.typeOf(zcu);
assert(slice_ty.isSlice(zcu));
assert(slice_ty.childType(zcu).toIntern() == .u8_type);
const array_val = try sema.derefSliceAsArray(block, src, slice_val, reason);
const array_ty = Type.fromInterned(ip.typeOf(array_val.toIntern()));
const array_ty = array_val.typeOf(zcu);
return array_val.toIpString(array_ty, zcu);
}
 
@@ -39302,9 +39095,23 @@ fn derefSliceAsArray(
slice_val: Value,
reason: NeededComptimeReason,
) CompileError!Value {
return try sema.maybeDerefSliceAsArray(block, src, slice_val) orelse {
return sema.failWithNeededComptime(block, src, reason);
};
}
 
/// Given a slice value, attempts to dereference it into a comptime-known array.
/// Returns `null` if the contents of the slice are not comptime-known.
/// Asserts that `slice_val` is a slice.
fn maybeDerefSliceAsArray(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
slice_val: Value,
) CompileError!?Value {
const zcu = sema.mod;
const ip = &zcu.intern_pool;
assert(Type.fromInterned(ip.typeOf(slice_val.toIntern())).isSlice(zcu));
assert(slice_val.typeOf(zcu).isSlice(zcu));
const slice = switch (ip.indexToKey(slice_val.toIntern())) {
.undef => return sema.failWithUseOfUndef(block, src),
.slice => |slice| slice,
@@ -39324,7 +39131,5 @@ fn derefSliceAsArray(
break :p p;
});
const casted_ptr = try zcu.getCoerced(Value.fromInterned(slice.ptr), ptr_ty);
return try sema.pointerDeref(block, src, casted_ptr, ptr_ty) orelse {
return sema.failWithNeededComptime(block, src, reason);
};
return sema.pointerDeref(block, src, casted_ptr, ptr_ty);
}
 
ev/null added: 2056, removed: 2746, total 0
@@ -1,528 +0,0 @@
const std = @import("std");
const Type = @import("type.zig").Type;
const Value = @import("Value.zig");
const Module = @import("Module.zig");
const Allocator = std.mem.Allocator;
const TypedValue = @This();
const Target = std.Target;
 
ty: Type,
val: Value,
 
/// Memory management for TypedValue. The main purpose of this type
/// is to be small and have a deinit() function to free associated resources.
pub const Managed = struct {
/// If the tag value is less than Tag.no_payload_count, then no pointer
/// dereference is needed.
typed_value: TypedValue,
/// If this is `null` then there is no memory management needed.
arena: ?*std.heap.ArenaAllocator.State = null,
 
pub fn deinit(self: *Managed, allocator: Allocator) void {
if (self.arena) |a| a.promote(allocator).deinit();
self.* = undefined;
}
};
 
/// Assumes arena allocation. Does a recursive copy.
pub fn copy(self: TypedValue, arena: Allocator) error{OutOfMemory}!TypedValue {
return TypedValue{
.ty = self.ty,
.val = try self.val.copy(arena),
};
}
 
pub fn eql(a: TypedValue, b: TypedValue, mod: *Module) bool {
if (a.ty.toIntern() != b.ty.toIntern()) return false;
return a.val.eql(b.val, a.ty, mod);
}
 
pub fn hash(tv: TypedValue, hasher: *std.hash.Wyhash, mod: *Module) void {
return tv.val.hash(tv.ty, hasher, mod);
}
 
pub fn intFromEnum(tv: TypedValue, mod: *Module) Allocator.Error!Value {
return tv.val.intFromEnum(tv.ty, mod);
}
 
const max_aggregate_items = 100;
const max_string_len = 256;
 
const FormatContext = struct {
tv: TypedValue,
mod: *Module,
};
 
pub fn format(
ctx: FormatContext,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = options;
comptime std.debug.assert(fmt.len == 0);
return ctx.tv.print(writer, 3, ctx.mod) catch |err| switch (err) {
error.OutOfMemory => @panic("OOM"), // We're not allowed to return this from a format function
else => |e| return e,
};
}
 
/// Prints the Value according to the Type, not according to the Value Tag.
pub fn print(
tv: TypedValue,
writer: anytype,
level: u8,
mod: *Module,
) (@TypeOf(writer).Error || Allocator.Error)!void {
var val = tv.val;
var ty = tv.ty;
const ip = &mod.intern_pool;
while (true) switch (val.ip_index) {
.none => switch (val.tag()) {
.aggregate => return printAggregate(ty, val, writer, level, mod),
.@"union" => {
if (level == 0) {
return writer.writeAll(".{ ... }");
}
const payload = val.castTag(.@"union").?.data;
try writer.writeAll(".{ ");
 
if (payload.tag) |tag| {
try print(.{
.ty = Type.fromInterned(ip.loadUnionType(ty.toIntern()).enum_tag_ty),
.val = tag,
}, writer, level - 1, mod);
try writer.writeAll(" = ");
const field_ty = ty.unionFieldType(tag, mod).?;
try print(.{
.ty = field_ty,
.val = payload.val,
}, writer, level - 1, mod);
} else {
try writer.writeAll("(unknown tag) = ");
const backing_ty = try ty.unionBackingType(mod);
try print(.{
.ty = backing_ty,
.val = payload.val,
}, writer, level - 1, mod);
}
 
return writer.writeAll(" }");
},
.bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}),
.repeated => {
if (level == 0) {
return writer.writeAll(".{ ... }");
}
var i: u32 = 0;
try writer.writeAll(".{ ");
const elem_tv = TypedValue{
.ty = ty.elemType2(mod),
.val = val.castTag(.repeated).?.data,
};
const len = ty.arrayLen(mod);
const max_len = @min(len, max_aggregate_items);
while (i < max_len) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
try print(elem_tv, writer, level - 1, mod);
}
if (len > max_aggregate_items) {
try writer.writeAll(", ...");
}
return writer.writeAll(" }");
},
.slice => {
if (level == 0) {
return writer.writeAll(".{ ... }");
}
const payload = val.castTag(.slice).?.data;
const elem_ty = ty.elemType2(mod);
const len = payload.len.toUnsignedInt(mod);
 
if (elem_ty.eql(Type.u8, mod)) str: {
const max_len: usize = @min(len, max_string_len);
var buf: [max_string_len]u8 = undefined;
 
var i: u32 = 0;
while (i < max_len) : (i += 1) {
const maybe_elem_val = payload.ptr.maybeElemValue(mod, i) catch |err| switch (err) {
error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic
};
const elem_val = maybe_elem_val orelse return writer.writeAll(".{ (reinterpreted data) }");
if (elem_val.isUndef(mod)) break :str;
buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str;
}
 
// TODO would be nice if this had a bit of unicode awareness.
const truncated = if (len > max_string_len) " (truncated)" else "";
return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated });
}
 
try writer.writeAll(".{ ");
 
const max_len = @min(len, max_aggregate_items);
var i: u32 = 0;
while (i < max_len) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
const maybe_elem_val = payload.ptr.maybeElemValue(mod, i) catch |err| switch (err) {
error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic
};
const elem_val = maybe_elem_val orelse return writer.writeAll("(reinterpreted data) }");
try print(.{
.ty = elem_ty,
.val = elem_val,
}, writer, level - 1, mod);
}
if (len > max_aggregate_items) {
try writer.writeAll(", ...");
}
return writer.writeAll(" }");
},
.eu_payload => {
val = val.castTag(.eu_payload).?.data;
ty = ty.errorUnionPayload(mod);
},
.opt_payload => {
val = val.castTag(.opt_payload).?.data;
ty = ty.optionalChild(mod);
},
},
else => switch (ip.indexToKey(val.toIntern())) {
.int_type,
.ptr_type,
.array_type,
.vector_type,
.opt_type,
.anyframe_type,
.error_union_type,
.simple_type,
.struct_type,
.anon_struct_type,
.union_type,
.opaque_type,
.enum_type,
.func_type,
.error_set_type,
.inferred_error_set_type,
=> return Type.print(val.toType(), writer, mod),
.undef => return writer.writeAll("undefined"),
.simple_value => |simple_value| switch (simple_value) {
.void => return writer.writeAll("{}"),
.empty_struct => return printAggregate(ty, val, writer, level, mod),
.generic_poison => return writer.writeAll("(generic poison)"),
else => return writer.writeAll(@tagName(simple_value)),
},
.variable => return writer.writeAll("(variable)"),
.extern_func => |extern_func| return writer.print("(extern function '{}')", .{
mod.declPtr(extern_func.decl).name.fmt(ip),
}),
.func => |func| return writer.print("(function '{}')", .{
mod.declPtr(func.owner_decl).name.fmt(ip),
}),
.int => |int| switch (int.storage) {
inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}),
.lazy_align => |lazy_ty| return writer.print("{d}", .{
Type.fromInterned(lazy_ty).abiAlignment(mod),
}),
.lazy_size => |lazy_ty| return writer.print("{d}", .{
Type.fromInterned(lazy_ty).abiSize(mod),
}),
},
.err => |err| return writer.print("error.{}", .{
err.name.fmt(ip),
}),
.error_union => |error_union| switch (error_union.val) {
.err_name => |err_name| return writer.print("error.{}", .{
err_name.fmt(ip),
}),
.payload => |payload| {
val = Value.fromInterned(payload);
ty = ty.errorUnionPayload(mod);
},
},
.enum_literal => |enum_literal| return writer.print(".{}", .{
enum_literal.fmt(ip),
}),
.enum_tag => |enum_tag| {
if (level == 0) {
return writer.writeAll("(enum)");
}
const enum_type = ip.loadEnumType(ty.toIntern());
if (enum_type.tagValueIndex(ip, val.toIntern())) |tag_index| {
try writer.print(".{i}", .{enum_type.names.get(ip)[tag_index].fmt(ip)});
return;
}
try writer.writeAll("@enumFromInt(");
try print(.{
.ty = Type.fromInterned(ip.typeOf(enum_tag.int)),
.val = Value.fromInterned(enum_tag.int),
}, writer, level - 1, mod);
try writer.writeAll(")");
return;
},
.empty_enum_value => return writer.writeAll("(empty enum value)"),
.float => |float| switch (float.storage) {
inline else => |x| return writer.print("{d}", .{@as(f64, @floatCast(x))}),
},
.slice => |slice| {
const ptr_ty = switch (ip.indexToKey(slice.ptr)) {
.ptr => |ptr| ty: {
if (ptr.addr == .int) return print(.{
.ty = Type.fromInterned(ptr.ty),
.val = Value.fromInterned(slice.ptr),
}, writer, level - 1, mod);
break :ty ip.indexToKey(ptr.ty).ptr_type;
},
.undef => |ptr_ty| ip.indexToKey(ptr_ty).ptr_type,
else => unreachable,
};
if (level == 0) {
return writer.writeAll(".{ ... }");
}
const elem_ty = Type.fromInterned(ptr_ty.child);
const len = Value.fromInterned(slice.len).toUnsignedInt(mod);
if (elem_ty.eql(Type.u8, mod)) str: {
const max_len = @min(len, max_string_len);
var buf: [max_string_len]u8 = undefined;
for (buf[0..max_len], 0..) |*c, i| {
const maybe_elem = try val.maybeElemValue(mod, i);
const elem = maybe_elem orelse return writer.writeAll(".{ (reinterpreted data) }");
if (elem.isUndef(mod)) break :str;
c.* = @as(u8, @intCast(elem.toUnsignedInt(mod)));
}
const truncated = if (len > max_string_len) " (truncated)" else "";
return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated });
}
try writer.writeAll(".{ ");
const max_len = @min(len, max_aggregate_items);
for (0..max_len) |i| {
if (i != 0) try writer.writeAll(", ");
const maybe_elem = try val.maybeElemValue(mod, i);
const elem = maybe_elem orelse return writer.writeAll("(reinterpreted data) }");
try print(.{
.ty = elem_ty,
.val = elem,
}, writer, level - 1, mod);
}
if (len > max_aggregate_items) {
try writer.writeAll(", ...");
}
return writer.writeAll(" }");
},
.ptr => |ptr| {
switch (ptr.addr) {
.decl => |decl_index| {
const decl = mod.declPtr(decl_index);
if (level == 0) return writer.print("(decl '{}')", .{decl.name.fmt(ip)});
return print(.{
.ty = decl.ty,
.val = decl.val,
}, writer, level - 1, mod);
},
.anon_decl => |anon_decl| {
const decl_val = anon_decl.val;
if (level == 0) return writer.print("(anon decl '{d}')", .{
@intFromEnum(decl_val),
});
return print(.{
.ty = Type.fromInterned(ip.typeOf(decl_val)),
.val = Value.fromInterned(decl_val),
}, writer, level - 1, mod);
},
.comptime_alloc => {
// TODO: we need a Sema to print this!
return writer.writeAll("(comptime alloc)");
},
.comptime_field => |field_val_ip| {
return print(.{
.ty = Type.fromInterned(ip.typeOf(field_val_ip)),
.val = Value.fromInterned(field_val_ip),
}, writer, level - 1, mod);
},
.int => |int_ip| {
try writer.writeAll("@ptrFromInt(");
try print(.{
.ty = Type.usize,
.val = Value.fromInterned(int_ip),
}, writer, level - 1, mod);
try writer.writeByte(')');
},
.eu_payload => |eu_ip| {
try writer.writeAll("(payload of ");
try print(.{
.ty = Type.fromInterned(ip.typeOf(eu_ip)),
.val = Value.fromInterned(eu_ip),
}, writer, level - 1, mod);
try writer.writeAll(")");
},
.opt_payload => |opt_ip| {
try print(.{
.ty = Type.fromInterned(ip.typeOf(opt_ip)),
.val = Value.fromInterned(opt_ip),
}, writer, level - 1, mod);
try writer.writeAll(".?");
},
.elem => |elem| {
if (level == 0) {
try writer.writeAll("(...)");
} else {
try print(.{
.ty = Type.fromInterned(ip.typeOf(elem.base)),
.val = Value.fromInterned(elem.base),
}, writer, level - 1, mod);
}
try writer.print("[{}]", .{elem.index});
},
.field => |field| {
const ptr_container_ty = Type.fromInterned(ip.typeOf(field.base));
if (level == 0) {
try writer.writeAll("(...)");
} else {
try print(.{
.ty = ptr_container_ty,
.val = Value.fromInterned(field.base),
}, writer, level - 1, mod);
}
 
const container_ty = ptr_container_ty.childType(mod);
switch (container_ty.zigTypeTag(mod)) {
.Struct => {
if (container_ty.structFieldName(@intCast(field.index), mod).unwrap()) |field_name| {
try writer.print(".{i}", .{field_name.fmt(ip)});
} else {
try writer.print("[{d}]", .{field.index});
}
},
.Union => {
const field_name = mod.typeToUnion(container_ty).?.loadTagType(ip).names.get(ip)[@intCast(field.index)];
try writer.print(".{i}", .{field_name.fmt(ip)});
},
.Pointer => {
std.debug.assert(container_ty.isSlice(mod));
try writer.writeAll(switch (field.index) {
Value.slice_ptr_index => ".ptr",
Value.slice_len_index => ".len",
else => unreachable,
});
},
else => unreachable,
}
},
}
return;
},
.opt => |opt| switch (opt.val) {
.none => return writer.writeAll("null"),
else => |payload| {
val = Value.fromInterned(payload);
ty = ty.optionalChild(mod);
},
},
.aggregate => |aggregate| switch (aggregate.storage) {
.bytes => |bytes| {
// Strip the 0 sentinel off of strings before printing
const zero_sent = blk: {
const sent = ty.sentinel(mod) orelse break :blk false;
break :blk sent.eql(Value.zero_u8, Type.u8, mod);
};
const str = if (zero_sent) bytes[0 .. bytes.len - 1] else bytes;
return writer.print("\"{}\"", .{std.zig.fmtEscapes(str)});
},
.elems, .repeated_elem => return printAggregate(ty, val, writer, level, mod),
},
.un => |un| {
try writer.writeAll(".{ ");
if (level > 0) {
if (un.tag != .none) {
try print(.{
.ty = ty.unionTagTypeHypothetical(mod),
.val = Value.fromInterned(un.tag),
}, writer, level - 1, mod);
try writer.writeAll(" = ");
const field_ty = ty.unionFieldType(Value.fromInterned(un.tag), mod).?;
try print(.{
.ty = field_ty,
.val = Value.fromInterned(un.val),
}, writer, level - 1, mod);
} else {
try writer.writeAll("(unknown tag) = ");
const backing_ty = try ty.unionBackingType(mod);
try print(.{
.ty = backing_ty,
.val = Value.fromInterned(un.val),
}, writer, level - 1, mod);
}
} else try writer.writeAll("...");
return writer.writeAll(" }");
},
.memoized_call => unreachable,
},
};
}
 
fn printAggregate(
ty: Type,
val: Value,
writer: anytype,
level: u8,
mod: *Module,
) (@TypeOf(writer).Error || Allocator.Error)!void {
if (level == 0) {
return writer.writeAll(".{ ... }");
}
const ip = &mod.intern_pool;
if (ty.zigTypeTag(mod) == .Struct) {
try writer.writeAll(".{");
const max_len = @min(ty.structFieldCount(mod), max_aggregate_items);
 
for (0..max_len) |i| {
if (i != 0) try writer.writeAll(", ");
 
const field_name = ty.structFieldName(@intCast(i), mod);
 
if (field_name.unwrap()) |name| try writer.print(".{} = ", .{name.fmt(ip)});
try print(.{
.ty = ty.structFieldType(i, mod),
.val = try val.fieldValue(mod, i),
}, writer, level - 1, mod);
}
if (ty.structFieldCount(mod) > max_aggregate_items) {
try writer.writeAll(", ...");
}
return writer.writeAll("}");
} else {
const elem_ty = ty.elemType2(mod);
const len = ty.arrayLen(mod);
 
if (elem_ty.eql(Type.u8, mod)) str: {
const max_len: usize = @min(len, max_string_len);
var buf: [max_string_len]u8 = undefined;
 
var i: u32 = 0;
while (i < max_len) : (i += 1) {
const elem = try val.fieldValue(mod, i);
if (elem.isUndef(mod)) break :str;
buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str;
}
 
const truncated = if (len > max_string_len) " (truncated)" else "";
return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated });
}
 
try writer.writeAll(".{ ");
 
const max_len = @min(len, max_aggregate_items);
var i: u32 = 0;
while (i < max_len) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
try print(.{
.ty = elem_ty,
.val = try val.fieldValue(mod, i),
}, writer, level - 1, mod);
}
if (len > max_aggregate_items) {
try writer.writeAll(", ...");
}
return writer.writeAll(" }");
}
}
 
src/Value.zig added: 2056, removed: 2746, total 0
@@ -8,129 +8,13 @@ const Target = std.Target;
const Allocator = std.mem.Allocator;
const Zcu = @import("Module.zig");
const Module = Zcu;
const TypedValue = @import("TypedValue.zig");
const Sema = @import("Sema.zig");
const InternPool = @import("InternPool.zig");
const print_value = @import("print_value.zig");
const Value = @This();
 
/// We are migrating towards using this for every Value object. However, many
/// values are still represented the legacy way. This is indicated by using
/// InternPool.Index.none.
ip_index: InternPool.Index,
 
/// This is the raw data, with no bookkeeping, no memory awareness,
/// no de-duplication, and no type system awareness.
/// This union takes advantage of the fact that the first page of memory
/// is unmapped, giving us 4096 possible enum tags that have no payload.
legacy: extern union {
ptr_otherwise: *Payload,
},
 
// Keep in sync with tools/stage2_pretty_printers_common.py
pub const Tag = enum(usize) {
// The first section of this enum are tags that require no payload.
// After this, the tag requires a payload.
 
/// When the type is error union:
/// * If the tag is `.@"error"`, the error union is an error.
/// * If the tag is `.eu_payload`, the error union is a payload.
/// * A nested error such as `anyerror!(anyerror!T)` in which the the outer error union
/// is non-error, but the inner error union is an error, is represented as
/// a tag of `.eu_payload`, with a sub-tag of `.@"error"`.
eu_payload,
/// When the type is optional:
/// * If the tag is `.null_value`, the optional is null.
/// * If the tag is `.opt_payload`, the optional is a payload.
/// * A nested optional such as `??T` in which the the outer optional
/// is non-null, but the inner optional is null, is represented as
/// a tag of `.opt_payload`, with a sub-tag of `.null_value`.
opt_payload,
/// Pointer and length as sub `Value` objects.
slice,
/// A slice of u8 whose memory is managed externally.
bytes,
/// This value is repeated some number of times. The amount of times to repeat
/// is stored externally.
repeated,
/// An instance of a struct, array, or vector.
/// Each element/field stored as a `Value`.
/// In the case of sentinel-terminated arrays, the sentinel value *is* stored,
/// so the slice length will be one more than the type's array length.
aggregate,
/// An instance of a union.
@"union",
 
pub fn Type(comptime t: Tag) type {
return switch (t) {
.eu_payload,
.opt_payload,
.repeated,
=> Payload.SubValue,
.slice => Payload.Slice,
.bytes => Payload.Bytes,
.aggregate => Payload.Aggregate,
.@"union" => Payload.Union,
};
}
 
pub fn create(comptime t: Tag, ally: Allocator, data: Data(t)) error{OutOfMemory}!Value {
const ptr = try ally.create(t.Type());
ptr.* = .{
.base = .{ .tag = t },
.data = data,
};
return Value{
.ip_index = .none,
.legacy = .{ .ptr_otherwise = &ptr.base },
};
}
 
pub fn Data(comptime t: Tag) type {
return std.meta.fieldInfo(t.Type(), .data).type;
}
};
 
pub fn initPayload(payload: *Payload) Value {
return Value{
.ip_index = .none,
.legacy = .{ .ptr_otherwise = payload },
};
}
 
pub fn tag(self: Value) Tag {
assert(self.ip_index == .none);
return self.legacy.ptr_otherwise.tag;
}
 
/// Prefer `castTag` to this.
pub fn cast(self: Value, comptime T: type) ?*T {
if (self.ip_index != .none) {
return null;
}
if (@hasField(T, "base_tag")) {
return self.castTag(T.base_tag);
}
inline for (@typeInfo(Tag).Enum.fields) |field| {
const t = @as(Tag, @enumFromInt(field.value));
if (self.legacy.ptr_otherwise.tag == t) {
if (T == t.Type()) {
return @fieldParentPtr(T, "base", self.legacy.ptr_otherwise);
}
return null;
}
}
unreachable;
}
 
pub fn castTag(self: Value, comptime t: Tag) ?*t.Type() {
if (self.ip_index != .none) return null;
 
if (self.legacy.ptr_otherwise.tag == t)
return @fieldParentPtr(t.Type(), "base", self.legacy.ptr_otherwise);
 
return null;
}
 
pub fn format(val: Value, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = val;
_ = fmt;
@@ -148,42 +32,16 @@ pub fn dump(
out_stream: anytype,
) !void {
comptime assert(fmt.len == 0);
if (start_val.ip_index != .none) {
try out_stream.print("(interned: {})", .{start_val.toIntern()});
return;
}
var val = start_val;
while (true) switch (val.tag()) {
.aggregate => {
return out_stream.writeAll("(aggregate)");
},
.@"union" => {
return out_stream.writeAll("(union value)");
},
.bytes => return out_stream.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}),
.repeated => {
try out_stream.writeAll("(repeated) ");
val = val.castTag(.repeated).?.data;
},
.eu_payload => {
try out_stream.writeAll("(eu_payload) ");
val = val.castTag(.repeated).?.data;
},
.opt_payload => {
try out_stream.writeAll("(opt_payload) ");
val = val.castTag(.repeated).?.data;
},
.slice => return out_stream.writeAll("(slice)"),
};
try out_stream.print("(interned: {})", .{start_val.toIntern()});
}
 
pub fn fmtDebug(val: Value) std.fmt.Formatter(dump) {
return .{ .data = val };
}
 
pub fn fmtValue(val: Value, ty: Type, mod: *Module) std.fmt.Formatter(TypedValue.format) {
pub fn fmtValue(val: Value, mod: *Module) std.fmt.Formatter(print_value.format) {
return .{ .data = .{
.tv = .{ .ty = ty, .val = val },
.val = val,
.mod = mod,
} };
}
@@ -252,162 +110,9 @@ fn arrayToIpString(val: Value, len_u64: u64, mod: *Module) !InternPool.NullTermi
return ip.getOrPutTrailingString(gpa, len);
}
 
pub fn intern2(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index {
if (val.ip_index != .none) return val.ip_index;
return intern(val, ty, mod);
}
 
pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index {
if (val.ip_index != .none) return (try mod.getCoerced(val, ty)).toIntern();
const ip = &mod.intern_pool;
switch (val.tag()) {
.eu_payload => {
const pl = val.castTag(.eu_payload).?.data;
return mod.intern(.{ .error_union = .{
.ty = ty.toIntern(),
.val = .{ .payload = try pl.intern(ty.errorUnionPayload(mod), mod) },
} });
},
.opt_payload => {
const pl = val.castTag(.opt_payload).?.data;
return mod.intern(.{ .opt = .{
.ty = ty.toIntern(),
.val = try pl.intern(ty.optionalChild(mod), mod),
} });
},
.slice => {
const pl = val.castTag(.slice).?.data;
return mod.intern(.{ .slice = .{
.ty = ty.toIntern(),
.len = try pl.len.intern(Type.usize, mod),
.ptr = try pl.ptr.intern(ty.slicePtrFieldType(mod), mod),
} });
},
.bytes => {
const pl = val.castTag(.bytes).?.data;
return mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .bytes = pl },
} });
},
.repeated => {
const pl = val.castTag(.repeated).?.data;
return mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .repeated_elem = try pl.intern(ty.childType(mod), mod) },
} });
},
.aggregate => {
const len = @as(usize, @intCast(ty.arrayLen(mod)));
const old_elems = val.castTag(.aggregate).?.data[0..len];
const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len);
defer mod.gpa.free(new_elems);
const ty_key = ip.indexToKey(ty.toIntern());
for (new_elems, old_elems, 0..) |*new_elem, old_elem, field_i|
new_elem.* = try old_elem.intern(switch (ty_key) {
.struct_type => ty.structFieldType(field_i, mod),
.anon_struct_type => |info| Type.fromInterned(info.types.get(ip)[field_i]),
inline .array_type, .vector_type => |info| Type.fromInterned(info.child),
else => unreachable,
}, mod);
return mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = new_elems },
} });
},
.@"union" => {
const pl = val.castTag(.@"union").?.data;
if (pl.tag) |pl_tag| {
return mod.intern(.{ .un = .{
.ty = ty.toIntern(),
.tag = try pl_tag.intern(ty.unionTagTypeHypothetical(mod), mod),
.val = try pl.val.intern(ty.unionFieldType(pl_tag, mod).?, mod),
} });
} else {
return mod.intern(.{ .un = .{
.ty = ty.toIntern(),
.tag = .none,
.val = try pl.val.intern(try ty.unionBackingType(mod), mod),
} });
}
},
}
}
 
pub fn unintern(val: Value, arena: Allocator, mod: *Module) Allocator.Error!Value {
return if (val.ip_index == .none) val else switch (mod.intern_pool.indexToKey(val.toIntern())) {
.int_type,
.ptr_type,
.array_type,
.vector_type,
.opt_type,
.anyframe_type,
.error_union_type,
.simple_type,
.struct_type,
.anon_struct_type,
.union_type,
.opaque_type,
.enum_type,
.func_type,
.error_set_type,
.inferred_error_set_type,
 
.undef,
.simple_value,
.variable,
.extern_func,
.func,
.int,
.err,
.enum_literal,
.enum_tag,
.empty_enum_value,
.float,
.ptr,
=> val,
 
.error_union => |error_union| switch (error_union.val) {
.err_name => val,
.payload => |payload| Tag.eu_payload.create(arena, Value.fromInterned(payload)),
},
 
.slice => |slice| Tag.slice.create(arena, .{
.ptr = Value.fromInterned(slice.ptr),
.len = Value.fromInterned(slice.len),
}),
 
.opt => |opt| switch (opt.val) {
.none => val,
else => |payload| Tag.opt_payload.create(arena, Value.fromInterned(payload)),
},
 
.aggregate => |aggregate| switch (aggregate.storage) {
.bytes => |bytes| Tag.bytes.create(arena, try arena.dupe(u8, bytes)),
.elems => |old_elems| {
const new_elems = try arena.alloc(Value, old_elems.len);
for (new_elems, old_elems) |*new_elem, old_elem| new_elem.* = Value.fromInterned(old_elem);
return Tag.aggregate.create(arena, new_elems);
},
.repeated_elem => |elem| Tag.repeated.create(arena, Value.fromInterned(elem)),
},
 
.un => |un| Tag.@"union".create(arena, .{
// toValue asserts that the value cannot be .none which is valid on unions.
.tag = if (un.tag == .none) null else Value.fromInterned(un.tag),
.val = Value.fromInterned(un.val),
}),
 
.memoized_call => unreachable,
};
}
 
pub fn fromInterned(i: InternPool.Index) Value {
assert(i != .none);
return .{
.ip_index = i,
.legacy = undefined,
};
return .{ .ip_index = i };
}
 
pub fn toIntern(val: Value) InternPool.Index {
@@ -492,24 +197,24 @@ pub fn isFuncBody(val: Value, mod: *Module) bool {
}
 
pub fn getFunction(val: Value, mod: *Module) ?InternPool.Key.Func {
return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.toIntern())) {
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.func => |x| x,
else => null,
} else null;
};
}
 
pub fn getExternFunc(val: Value, mod: *Module) ?InternPool.Key.ExternFunc {
return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.toIntern())) {
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.extern_func => |extern_func| extern_func,
else => null,
} else null;
};
}
 
pub fn getVariable(val: Value, mod: *Module) ?InternPool.Key.Variable {
return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.toIntern())) {
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.variable => |variable| variable,
else => null,
} else null;
};
}
 
/// If the value fits in a u64, return it, otherwise null.
@@ -544,12 +249,12 @@ pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64
.int => |int| Value.fromInterned(int).getUnsignedIntAdvanced(mod, opt_sema),
.elem => |elem| {
const base_addr = (try Value.fromInterned(elem.base).getUnsignedIntAdvanced(mod, opt_sema)) orelse return null;
const elem_ty = Type.fromInterned(mod.intern_pool.typeOf(elem.base)).elemType2(mod);
const elem_ty = Value.fromInterned(elem.base).typeOf(mod).elemType2(mod);
return base_addr + elem.index * elem_ty.abiSize(mod);
},
.field => |field| {
const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(mod, opt_sema)) orelse return null;
const struct_ty = Type.fromInterned(mod.intern_pool.typeOf(field.base)).childType(mod);
const struct_ty = Value.fromInterned(field.base).typeOf(mod).childType(mod);
if (opt_sema) |sema| try sema.resolveTypeLayout(struct_ty);
return base_addr + struct_ty.structFieldOffset(@as(usize, @intCast(field.index)), mod);
},
@@ -600,16 +305,16 @@ pub fn toBool(val: Value) bool {
};
}
 
/// Returns whether this pointer value's address is ultimately derived from an
/// integer (e.g. `@ptrFromInt`), walking through error-union/optional payloads
/// and element/field derivations to the base pointer.
/// Asserts the value is a pointer (the `else => unreachable` arm).
fn ptrHasIntAddr(val: Value, mod: *Module) bool {
    var check = val;
    while (true) switch (mod.intern_pool.indexToKey(check.toIntern())) {
        .ptr => |ptr| switch (ptr.addr) {
            // Addresses rooted in a declaration or comptime memory are not integers.
            .decl, .comptime_alloc, .comptime_field, .anon_decl => return false,
            .int => return true,
            // Derived addresses: keep walking toward the base pointer.
            .eu_payload, .opt_payload => |base| check = Value.fromInterned(base),
            .elem, .field => |base_index| check = Value.fromInterned(base_index.base),
        },
        else => unreachable,
    };
}
 
@@ -677,25 +382,14 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{
.auto => return error.IllDefinedMemoryLayout,
.@"extern" => for (0..struct_type.field_types.len) |i| {
const off: usize = @intCast(ty.structFieldOffset(i, mod));
const field_val = switch (val.ip_index) {
.none => switch (val.tag()) {
.bytes => {
buffer[off] = val.castTag(.bytes).?.data[i];
continue;
},
.aggregate => val.castTag(.aggregate).?.data[i],
.repeated => val.castTag(.repeated).?.data,
else => unreachable,
const field_val = Value.fromInterned(switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| {
buffer[off] = bytes[i];
continue;
},
else => Value.fromInterned(switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| {
buffer[off] = bytes[i];
continue;
},
.elems => |elems| elems[i],
.repeated_elem => |elem| elem,
}),
};
.elems => |elems| elems[i],
.repeated_elem => |elem| elem,
});
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
try writeToMemory(field_val, field_ty, mod, buffer[off..]);
},
@@ -745,7 +439,7 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{
},
.Pointer => {
if (ty.isSlice(mod)) return error.IllDefinedMemoryLayout;
if (val.isDeclRef(mod)) return error.ReinterpretDeclRef;
if (!val.ptrHasIntAddr(mod)) return error.ReinterpretDeclRef;
return val.writeToMemory(Type.usize, mod, buffer);
},
.Optional => {
@@ -842,19 +536,11 @@ pub fn writeToPackedMemory(
assert(struct_type.layout == .@"packed");
var bits: u16 = 0;
for (0..struct_type.field_types.len) |i| {
const field_val = switch (val.ip_index) {
.none => switch (val.tag()) {
.bytes => unreachable,
.aggregate => val.castTag(.aggregate).?.data[i],
.repeated => val.castTag(.repeated).?.data,
else => unreachable,
},
else => Value.fromInterned(switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => unreachable,
.elems => |elems| elems[i],
.repeated_elem => |elem| elem,
}),
};
const field_val = Value.fromInterned(switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => unreachable,
.elems => |elems| elems[i],
.repeated_elem => |elem| elem,
});
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
const field_bits: u16 = @intCast(field_ty.bitSize(mod));
try field_val.writeToPackedMemory(field_ty, mod, buffer, bit_offset + bits);
@@ -880,7 +566,7 @@ pub fn writeToPackedMemory(
},
.Pointer => {
assert(!ty.isSlice(mod)); // No well defined layout.
if (val.isDeclRef(mod)) return error.ReinterpretDeclRef;
if (!val.ptrHasIntAddr(mod)) return error.ReinterpretDeclRef;
return val.writeToPackedMemory(Type.usize, mod, buffer, bit_offset);
},
.Optional => {
@@ -972,7 +658,7 @@ pub fn readFromMemory(
const elems = try arena.alloc(InternPool.Index, @as(usize, @intCast(ty.arrayLen(mod))));
var offset: usize = 0;
for (elems) |*elem| {
elem.* = try (try readFromMemory(elem_ty, mod, buffer[offset..], arena)).intern(elem_ty, mod);
elem.* = (try readFromMemory(elem_ty, mod, buffer[offset..], arena)).toIntern();
offset += @as(usize, @intCast(elem_size));
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
@@ -997,7 +683,7 @@ pub fn readFromMemory(
const field_ty = Type.fromInterned(field_types.get(ip)[i]);
const off: usize = @intCast(ty.structFieldOffset(i, mod));
const sz: usize = @intCast(field_ty.abiSize(mod));
field_val.* = try (try readFromMemory(field_ty, mod, buffer[off..(off + sz)], arena)).intern(field_ty, mod);
field_val.* = (try readFromMemory(field_ty, mod, buffer[off..(off + sz)], arena)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -1027,7 +713,7 @@ pub fn readFromMemory(
.@"extern" => {
const union_size = ty.abiSize(mod);
const array_ty = try mod.arrayType(.{ .len = union_size, .child = .u8_type });
const val = try (try readFromMemory(array_ty, mod, buffer, arena)).intern(array_ty, mod);
const val = (try readFromMemory(array_ty, mod, buffer, arena)).toIntern();
return Value.fromInterned((try mod.intern(.{ .un = .{
.ty = ty.toIntern(),
.tag = .none,
@@ -1094,30 +780,19 @@ pub fn readFromPackedMemory(
return Value.true;
}
},
.Int, .Enum => |ty_tag| {
.Int => {
if (buffer.len == 0) return mod.intValue(ty, 0);
const int_info = ty.intInfo(mod);
const bits = int_info.bits;
if (bits == 0) return mod.intValue(ty, 0);
 
// Fast path for integers <= u64
if (bits <= 64) {
const int_ty = switch (ty_tag) {
.Int => ty,
.Enum => ty.intTagType(mod),
else => unreachable,
};
return mod.getCoerced(switch (int_info.signedness) {
.signed => return mod.intValue(
int_ty,
std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed),
),
.unsigned => return mod.intValue(
int_ty,
std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned),
),
}, ty);
}
if (bits <= 64) switch (int_info.signedness) {
// Use different backing types for unsigned vs signed to avoid the need to go via
// a larger type like `i128`.
.unsigned => return mod.intValue(ty, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)),
.signed => return mod.intValue(ty, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)),
};
 
// Slow path, we have to construct a big-int
const abi_size = @as(usize, @intCast(ty.abiSize(mod)));
@@ -1129,6 +804,11 @@ pub fn readFromPackedMemory(
bigint.readPackedTwosComplement(buffer, bit_offset, bits, endian, int_info.signedness);
return mod.intValue_big(ty, bigint.toConst());
},
.Enum => {
const int_ty = ty.intTagType(mod);
const int_val = try Value.readFromPackedMemory(int_ty, mod, buffer, bit_offset, arena);
return mod.getCoerced(int_val, ty);
},
.Float => return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = ty.toIntern(),
.storage = switch (ty.floatBits(target)) {
@@ -1149,7 +829,7 @@ pub fn readFromPackedMemory(
for (elems, 0..) |_, i| {
// On big-endian systems, LLVM reverses the element order of vectors by default
const tgt_elem_i = if (endian == .big) elems.len - i - 1 else i;
elems[tgt_elem_i] = try (try readFromPackedMemory(elem_ty, mod, buffer, bit_offset + bits, arena)).intern(elem_ty, mod);
elems[tgt_elem_i] = (try readFromPackedMemory(elem_ty, mod, buffer, bit_offset + bits, arena)).toIntern();
bits += elem_bit_size;
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
@@ -1166,7 +846,7 @@ pub fn readFromPackedMemory(
for (field_vals, 0..) |*field_val, i| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
const field_bits: u16 = @intCast(field_ty.bitSize(mod));
field_val.* = try (try readFromPackedMemory(field_ty, mod, buffer, bit_offset + bits, arena)).intern(field_ty, mod);
field_val.* = (try readFromPackedMemory(field_ty, mod, buffer, bit_offset + bits, arena)).toIntern();
bits += field_bits;
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
@@ -1581,80 +1261,34 @@ pub fn slicePtr(val: Value, mod: *Module) Value {
return Value.fromInterned(mod.intern_pool.slicePtr(val.toIntern()));
}
 
/// Gets the `len` field of a slice value as a `u64`.
/// Resolves the length using the provided `Sema` if necessary.
pub fn sliceLen(val: Value, sema: *Sema) !u64 {
    return Value.fromInterned(sema.mod.intern_pool.sliceLen(val.toIntern())).toUnsignedIntAdvanced(sema);
}

/// Asserts the value is an aggregate, and returns the element value at the
/// given index. An index equal to the aggregate length returns the sentinel,
/// which is asserted to exist.
pub fn elemValue(val: Value, zcu: *Zcu, index: usize) Allocator.Error!Value {
    const ip = &zcu.intern_pool;
    switch (zcu.intern_pool.indexToKey(val.toIntern())) {
        .undef => |ty| {
            // An element of an undef aggregate is an undef of the child type.
            return Value.fromInterned(try zcu.intern(.{ .undef = Type.fromInterned(ty).childType(zcu).toIntern() }));
        },
        .aggregate => |aggregate| {
            const len = ip.aggregateTypeLen(aggregate.ty);
            if (index < len) return Value.fromInterned(switch (aggregate.storage) {
                // Byte storage: each element is a u8; intern it as an integer.
                .bytes => |bytes| try zcu.intern(.{ .int = .{
                    .ty = .u8_type,
                    .storage = .{ .u64 = bytes[index] },
                } }),
                .elems => |elems| elems[index],
                .repeated_elem => |elem| elem,
            });
            // One-past-the-end is only valid when a sentinel exists.
            assert(index == len);
            return Type.fromInterned(aggregate.ty).sentinel(zcu).?;
        },
        else => unreachable,
    }
}
 
/// Asserts the value is a single-item pointer to an array, or an array,
/// or an unknown-length pointer, and returns the element value at the index.
/// NOTE(review): this pointer-walking variant appears to be pre-refactor code
/// superseded by the aggregate-only `elemValue` — confirm which should remain.
pub fn elemValue(val: Value, mod: *Module, index: usize) Allocator.Error!Value {
    // `.?` asserts that resolution did not fail (no Sema was required).
    return (try val.maybeElemValue(mod, index)).?;
}
 
/// Like `elemValue`, but returns `null` instead of asserting on failure.
pub fn maybeElemValue(val: Value, mod: *Module, index: usize) Allocator.Error!?Value {
    // No Sema available: comptime-alloc-based pointers cannot be resolved and yield null.
    return val.maybeElemValueFull(null, mod, index);
}
 
/// Resolves the element value at `index` of an array/aggregate value, or of a
/// pointer-like value by recursively walking to its base. Returns `null` when
/// the element cannot be determined (e.g. runtime pointer, or a comptime alloc
/// with no `sema` available to look it up).
pub fn maybeElemValueFull(val: Value, sema: ?*Sema, mod: *Module, index: usize) Allocator.Error!?Value {
    return switch (val.ip_index) {
        // Legacy (non-interned) representation: inspect the payload tag directly.
        .none => switch (val.tag()) {
            .bytes => try mod.intValue(Type.u8, val.castTag(.bytes).?.data[index]),
            .repeated => val.castTag(.repeated).?.data,
            .aggregate => val.castTag(.aggregate).?.data[index],
            .slice => val.castTag(.slice).?.data.ptr.maybeElemValueFull(sema, mod, index),
            else => null,
        },
        // Interned representation: dispatch on the InternPool key.
        else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
            // Element of an undef value is undef of the element type.
            .undef => |ty| Value.fromInterned((try mod.intern(.{
                .undef = Type.fromInterned(ty).elemType2(mod).toIntern(),
            }))),
            // Slices delegate to their pointer part.
            .slice => |slice| return Value.fromInterned(slice.ptr).maybeElemValueFull(sema, mod, index),
            .ptr => |ptr| switch (ptr.addr) {
                // Follow the pointee value for decl-like bases.
                .decl => |decl| mod.declPtr(decl).val.maybeElemValueFull(sema, mod, index),
                .anon_decl => |anon_decl| Value.fromInterned(anon_decl.val).maybeElemValueFull(sema, mod, index),
                // Comptime allocs are only resolvable when a Sema is provided.
                .comptime_alloc => |idx| if (sema) |s| s.getComptimeAlloc(idx).val.maybeElemValueFull(sema, mod, index) else null,
                .int, .eu_payload => null,
                .opt_payload => |base| Value.fromInterned(base).maybeElemValueFull(sema, mod, index),
                .comptime_field => |field_val| Value.fromInterned(field_val).maybeElemValueFull(sema, mod, index),
                // An element pointer offsets the index into its base.
                .elem => |elem| Value.fromInterned(elem.base).maybeElemValueFull(sema, mod, index + @as(usize, @intCast(elem.index))),
                // A field pointer is resolvable only when its base is a decl pointer.
                .field => |field| if (Value.fromInterned(field.base).pointerDecl(mod)) |decl_index| {
                    const base_decl = mod.declPtr(decl_index);
                    const field_val = try base_decl.val.fieldValue(mod, @as(usize, @intCast(field.index)));
                    return field_val.maybeElemValueFull(sema, mod, index);
                } else null,
            },
            .opt => |opt| Value.fromInterned(opt.val).maybeElemValueFull(sema, mod, index),
            .aggregate => |aggregate| {
                const len = mod.intern_pool.aggregateTypeLen(aggregate.ty);
                if (index < len) return Value.fromInterned(switch (aggregate.storage) {
                    // Byte storage holds u8 elements; intern the byte as an int value.
                    .bytes => |bytes| try mod.intern(.{ .int = .{
                        .ty = .u8_type,
                        .storage = .{ .u64 = bytes[index] },
                    } }),
                    .elems => |elems| elems[index],
                    .repeated_elem => |elem| elem,
                });
                // One-past-the-end indexes the array's sentinel.
                assert(index == len);
                return Value.fromInterned(mod.intern_pool.indexToKey(aggregate.ty).array_type.sentinel);
            },
            else => null,
        },
    };
}
}
 
pub fn isLazyAlign(val: Value, mod: *Module) bool {
@@ -1686,83 +1320,48 @@ pub fn sliceArray(
) error{OutOfMemory}!Value {
// TODO: write something like getCoercedInts to avoid needing to dupe
const mod = sema.mod;
return switch (val.ip_index) {
.none => switch (val.tag()) {
.slice => val.castTag(.slice).?.data.ptr.sliceArray(sema, start, end),
.bytes => Tag.bytes.create(sema.arena, val.castTag(.bytes).?.data[start..end]),
.repeated => val,
.aggregate => Tag.aggregate.create(sema.arena, val.castTag(.aggregate).?.data[start..end]),
const aggregate = mod.intern_pool.indexToKey(val.toIntern()).aggregate;
return Value.fromInterned(try mod.intern(.{ .aggregate = .{
.ty = switch (mod.intern_pool.indexToKey(mod.intern_pool.typeOf(val.toIntern()))) {
.array_type => |array_type| try mod.arrayType(.{
.len = @as(u32, @intCast(end - start)),
.child = array_type.child,
.sentinel = if (end == array_type.len) array_type.sentinel else .none,
}),
.vector_type => |vector_type| try mod.vectorType(.{
.len = @as(u32, @intCast(end - start)),
.child = vector_type.child,
}),
else => unreachable,
}.toIntern(),
.storage = switch (aggregate.storage) {
.bytes => .{ .bytes = try sema.arena.dupe(u8, mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.bytes[start..end]) },
.elems => .{ .elems = try sema.arena.dupe(InternPool.Index, mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.elems[start..end]) },
.repeated_elem => |elem| .{ .repeated_elem = elem },
},
else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl| try mod.declPtr(decl).val.sliceArray(sema, start, end),
.comptime_alloc => |idx| sema.getComptimeAlloc(idx).val.sliceArray(sema, start, end),
.comptime_field => |comptime_field| Value.fromInterned(comptime_field)
.sliceArray(sema, start, end),
.elem => |elem| Value.fromInterned(elem.base)
.sliceArray(sema, start + @as(usize, @intCast(elem.index)), end + @as(usize, @intCast(elem.index))),
else => unreachable,
},
.aggregate => |aggregate| Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = switch (mod.intern_pool.indexToKey(mod.intern_pool.typeOf(val.toIntern()))) {
.array_type => |array_type| try mod.arrayType(.{
.len = @as(u32, @intCast(end - start)),
.child = array_type.child,
.sentinel = if (end == array_type.len) array_type.sentinel else .none,
}),
.vector_type => |vector_type| try mod.vectorType(.{
.len = @as(u32, @intCast(end - start)),
.child = vector_type.child,
}),
else => unreachable,
}.toIntern(),
.storage = switch (aggregate.storage) {
.bytes => .{ .bytes = try sema.arena.dupe(u8, mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.bytes[start..end]) },
.elems => .{ .elems = try sema.arena.dupe(InternPool.Index, mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.elems[start..end]) },
.repeated_elem => |elem| .{ .repeated_elem = elem },
},
} }))),
else => unreachable,
},
};
} }));
}
 
/// Returns the value of field `index` of an aggregate or union value.
/// For an `undef` value the result is an `undef` of the field's type.
pub fn fieldValue(val: Value, mod: *Module, index: usize) !Value {
    return switch (mod.intern_pool.indexToKey(val.toIntern())) {
        .undef => |ty| Value.fromInterned((try mod.intern(.{
            .undef = Type.fromInterned(ty).structFieldType(index, mod).toIntern(),
        }))),
        .aggregate => |aggregate| Value.fromInterned(switch (aggregate.storage) {
            // Byte storage: each field is a u8; intern it as an integer value.
            .bytes => |bytes| try mod.intern(.{ .int = .{
                .ty = .u8_type,
                .storage = .{ .u64 = bytes[index] },
            } }),
            .elems => |elems| elems[index],
            .repeated_elem => |elem| elem,
        }),
        // TODO assert the tag is correct
        .un => |un| Value.fromInterned(un.val),
        else => unreachable,
    };
}
 
pub fn unionTag(val: Value, mod: *Module) ?Value {
if (val.ip_index == .none) return val.castTag(.@"union").?.data.tag;
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.undef, .enum_tag => val,
.un => |un| if (un.tag != .none) Value.fromInterned(un.tag) else return null,
@@ -1771,7 +1370,6 @@ pub fn unionTag(val: Value, mod: *Module) ?Value {
}
 
pub fn unionValue(val: Value, mod: *Module) Value {
if (val.ip_index == .none) return val.castTag(.@"union").?.data.val;
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.un => |un| Value.fromInterned(un.val),
else => unreachable,
@@ -1792,7 +1390,7 @@ pub fn elemPtr(
};
switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
.elem => |elem| if (Type.fromInterned(mod.intern_pool.typeOf(elem.base)).elemType2(mod).eql(elem_ty, mod))
.elem => |elem| if (Value.fromInterned(elem.base).typeOf(mod).elemType2(mod).eql(elem_ty, mod))
return Value.fromInterned((try mod.intern(.{ .ptr = .{
.ty = elem_ptr_ty.toIntern(),
.addr = .{ .elem = .{
@@ -1817,7 +1415,7 @@ pub fn elemPtr(
}
 
/// Returns whether this value is comptime-known `undefined`.
pub fn isUndef(val: Value, mod: *Module) bool {
    return mod.intern_pool.isUndef(val.toIntern());
}
 
/// TODO: check for cases such as array that is not marked undef but all the element
@@ -1911,7 +1509,7 @@ pub fn floatFromIntAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty
const scalar_ty = float_ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try floatFromIntScalar(elem_val, scalar_ty, mod, opt_sema)).intern(scalar_ty, mod);
scalar.* = (try floatFromIntScalar(elem_val, scalar_ty, mod, opt_sema)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_ty.toIntern(),
@@ -1989,7 +1587,7 @@ pub fn intAddSat(
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try intAddSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
scalar.* = (try intAddSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -2039,7 +1637,7 @@ pub fn intSubSat(
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try intSubSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
scalar.* = (try intSubSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -2091,8 +1689,8 @@ pub fn intMulWithOverflow(
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod);
of.* = try of_math_result.overflow_bit.intern(Type.u1, mod);
scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
of.* = of_math_result.overflow_bit.toIntern();
scalar.* = of_math_result.wrapped_result.toIntern();
}
return OverflowArithmeticResult{
.overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{
@@ -2157,7 +1755,7 @@ pub fn numberMulWrap(
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try numberMulWrapScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
scalar.* = (try numberMulWrapScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -2203,7 +1801,7 @@ pub fn intMulSat(
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try intMulSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
scalar.* = (try intMulSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -2279,7 +1877,7 @@ pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
const scalar_ty = ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try bitwiseNotScalar(elem_val, scalar_ty, arena, mod)).intern(scalar_ty, mod);
scalar.* = (try bitwiseNotScalar(elem_val, scalar_ty, arena, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -2322,7 +1920,7 @@ pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try bitwiseAndScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
scalar.* = (try bitwiseAndScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -2361,7 +1959,7 @@ pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Mod
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try bitwiseNandScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
scalar.* = (try bitwiseNandScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -2389,7 +1987,7 @@ pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *M
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try bitwiseOrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
scalar.* = (try bitwiseOrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -2427,7 +2025,7 @@ pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
scalar.* = (try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -2493,7 +2091,7 @@ fn intDivInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator
},
else => |e| return e,
};
scalar.* = try val.intern(scalar_ty, mod);
scalar.* = val.toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -2541,7 +2139,7 @@ pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod:
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try intDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
scalar.* = (try intDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -2583,7 +2181,7 @@ pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Modu
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try intModScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
scalar.* = (try intModScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -2620,7 +2218,6 @@ pub fn intModScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod:
 
/// Returns true if the value is a floating point type and is NaN. Returns false otherwise.
pub fn isNan(val: Value, mod: *const Module) bool {
if (val.ip_index == .none) return false;
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.float => |float| switch (float.storage) {
inline else => |x| std.math.isNan(x),
@@ -2631,7 +2228,6 @@ pub fn isNan(val: Value, mod: *const Module) bool {
 
/// Returns true if the value is a floating point type and is infinite. Returns false otherwise.
pub fn isInf(val: Value, mod: *const Module) bool {
if (val.ip_index == .none) return false;
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.float => |float| switch (float.storage) {
inline else => |x| std.math.isInf(x),
@@ -2641,7 +2237,6 @@ pub fn isInf(val: Value, mod: *const Module) bool {
}
 
pub fn isNegativeInf(val: Value, mod: *const Module) bool {
if (val.ip_index == .none) return false;
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.float => |float| switch (float.storage) {
inline else => |x| std.math.isNegativeInf(x),
@@ -2657,7 +2252,7 @@ pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod:
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try floatRemScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try floatRemScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -2690,7 +2285,7 @@ pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod:
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try floatModScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try floatModScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -2751,7 +2346,7 @@ fn intMulInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator
},
else => |e| return e,
};
scalar.* = try val.intern(scalar_ty, mod);
scalar.* = val.toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -2793,7 +2388,7 @@ pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.buil
const scalar_ty = ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, mod)).intern(scalar_ty, mod);
scalar.* = (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -2818,7 +2413,7 @@ pub fn intTruncBitsAsValue(
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i);
const bits_elem = try bits.elemValue(mod, i);
scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @as(u16, @intCast(bits_elem.toUnsignedInt(mod))), mod)).intern(scalar_ty, mod);
scalar.* = (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @as(u16, @intCast(bits_elem.toUnsignedInt(mod))), mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -2858,7 +2453,7 @@ pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module)
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
scalar.* = (try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -2908,8 +2503,8 @@ pub fn shlWithOverflow(
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
of.* = try of_math_result.overflow_bit.intern(Type.u1, mod);
scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
of.* = of_math_result.overflow_bit.toIntern();
scalar.* = of_math_result.wrapped_result.toIntern();
}
return OverflowArithmeticResult{
.overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{
@@ -2969,7 +2564,7 @@ pub fn shlSat(
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try shlSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
scalar.* = (try shlSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -3019,7 +2614,7 @@ pub fn shlTrunc(
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try shlTruncScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
scalar.* = (try shlTruncScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -3049,7 +2644,7 @@ pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module)
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
scalar.* = (try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -3101,7 +2696,7 @@ pub fn floatNeg(
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try floatNegScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try floatNegScalar(elem_val, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3144,7 +2739,7 @@ pub fn floatAdd(
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try floatAddScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try floatAddScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3188,7 +2783,7 @@ pub fn floatSub(
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try floatSubScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try floatSubScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3232,7 +2827,7 @@ pub fn floatDiv(
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try floatDivScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try floatDivScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3276,7 +2871,7 @@ pub fn floatDivFloor(
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try floatDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try floatDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3320,7 +2915,7 @@ pub fn floatDivTrunc(
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try floatDivTruncScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try floatDivTruncScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3364,7 +2959,7 @@ pub fn floatMul(
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try floatMulScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try floatMulScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3401,7 +2996,7 @@ pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try sqrtScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try sqrtScalar(elem_val, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3433,7 +3028,7 @@ pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try sinScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try sinScalar(elem_val, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3465,7 +3060,7 @@ pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try cosScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try cosScalar(elem_val, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3497,7 +3092,7 @@ pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try tanScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try tanScalar(elem_val, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3529,7 +3124,7 @@ pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try expScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try expScalar(elem_val, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3561,7 +3156,7 @@ pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try exp2Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try exp2Scalar(elem_val, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3593,7 +3188,7 @@ pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try logScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try logScalar(elem_val, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3625,7 +3220,7 @@ pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try log2Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try log2Scalar(elem_val, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3657,7 +3252,7 @@ pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Valu
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try log10Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try log10Scalar(elem_val, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3689,7 +3284,7 @@ pub fn abs(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
const scalar_ty = ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try absScalar(elem_val, scalar_ty, mod, arena)).intern(scalar_ty, mod);
scalar.* = (try absScalar(elem_val, scalar_ty, mod, arena)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -3740,7 +3335,7 @@ pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Valu
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try floorScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try floorScalar(elem_val, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3772,7 +3367,7 @@ pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try ceilScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try ceilScalar(elem_val, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3804,7 +3399,7 @@ pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Valu
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try roundScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try roundScalar(elem_val, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3836,7 +3431,7 @@ pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Valu
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try truncScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
scalar.* = (try truncScalar(elem_val, scalar_ty, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3877,7 +3472,7 @@ pub fn mulAdd(
const mulend1_elem = try mulend1.elemValue(mod, i);
const mulend2_elem = try mulend2.elemValue(mod, i);
const addend_elem = try addend.elemValue(mod, i);
scalar.* = try (try mulAddScalar(scalar_ty, mulend1_elem, mulend2_elem, addend_elem, mod)).intern(scalar_ty, mod);
scalar.* = (try mulAddScalar(scalar_ty, mulend1_elem, mulend2_elem, addend_elem, mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
@@ -3939,6 +3534,10 @@ pub fn isGenericPoison(val: Value) bool {
return val.toIntern() == .generic_poison;
}
 
pub fn typeOf(val: Value, zcu: *const Zcu) Type {
return Type.fromInterned(zcu.intern_pool.typeOf(val.toIntern()));
}
 
/// For an integer (comptime or fixed-width) `val`, returns the comptime-known bounds of the value.
/// If `val` is not undef, the bounds are both `val`.
/// If `val` is undef and has a fixed-width type, the bounds are the bounds of the type.
@@ -3953,98 +3552,26 @@ pub fn intValueBounds(val: Value, mod: *Module) !?[2]Value {
};
}
 
/// This type is not copyable since it may contain pointers to its inner data.
pub const Payload = struct {
tag: Tag,
 
pub const Slice = struct {
base: Payload,
data: struct {
ptr: Value,
len: Value,
},
};
 
pub const Bytes = struct {
base: Payload,
/// Includes the sentinel, if any.
data: []const u8,
};
 
pub const SubValue = struct {
base: Payload,
data: Value,
};
 
pub const Aggregate = struct {
base: Payload,
/// Field values. The types are according to the struct or array type.
/// The length is provided here so that copying a Value does not depend on the Type.
data: []Value,
};
 
pub const Union = struct {
pub const base_tag = Tag.@"union";
 
base: Payload = .{ .tag = base_tag },
data: Data,
 
pub const Data = struct {
tag: ?Value,
val: Value,
};
};
};
 
pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace;
 
pub const zero_usize: Value = .{ .ip_index = .zero_usize, .legacy = undefined };
pub const zero_u8: Value = .{ .ip_index = .zero_u8, .legacy = undefined };
pub const zero_comptime_int: Value = .{ .ip_index = .zero, .legacy = undefined };
pub const one_comptime_int: Value = .{ .ip_index = .one, .legacy = undefined };
pub const negative_one_comptime_int: Value = .{ .ip_index = .negative_one, .legacy = undefined };
pub const undef: Value = .{ .ip_index = .undef, .legacy = undefined };
pub const @"void": Value = .{ .ip_index = .void_value, .legacy = undefined };
pub const @"null": Value = .{ .ip_index = .null_value, .legacy = undefined };
pub const @"false": Value = .{ .ip_index = .bool_false, .legacy = undefined };
pub const @"true": Value = .{ .ip_index = .bool_true, .legacy = undefined };
pub const @"unreachable": Value = .{ .ip_index = .unreachable_value, .legacy = undefined };
pub const zero_usize: Value = .{ .ip_index = .zero_usize };
pub const zero_u8: Value = .{ .ip_index = .zero_u8 };
pub const zero_comptime_int: Value = .{ .ip_index = .zero };
pub const one_comptime_int: Value = .{ .ip_index = .one };
pub const negative_one_comptime_int: Value = .{ .ip_index = .negative_one };
pub const undef: Value = .{ .ip_index = .undef };
pub const @"void": Value = .{ .ip_index = .void_value };
pub const @"null": Value = .{ .ip_index = .null_value };
pub const @"false": Value = .{ .ip_index = .bool_false };
pub const @"true": Value = .{ .ip_index = .bool_true };
pub const @"unreachable": Value = .{ .ip_index = .unreachable_value };
 
pub const generic_poison: Value = .{ .ip_index = .generic_poison, .legacy = undefined };
pub const generic_poison_type: Value = .{ .ip_index = .generic_poison_type, .legacy = undefined };
pub const empty_struct: Value = .{ .ip_index = .empty_struct, .legacy = undefined };
pub const generic_poison: Value = .{ .ip_index = .generic_poison };
pub const generic_poison_type: Value = .{ .ip_index = .generic_poison_type };
pub const empty_struct: Value = .{ .ip_index = .empty_struct };
 
pub fn makeBool(x: bool) Value {
return if (x) Value.true else Value.false;
}
 
pub const RuntimeIndex = InternPool.RuntimeIndex;
 
/// This function is used in the debugger pretty formatters in tools/ to fetch the
/// Tag to Payload mapping to facilitate fancy debug printing for this type.
fn dbHelper(self: *Value, tag_to_payload_map: *map: {
const tags = @typeInfo(Tag).Enum.fields;
var fields: [tags.len]std.builtin.Type.StructField = undefined;
for (&fields, tags) |*field, t| field.* = .{
.name = t.name ++ "",
.type = *@field(Tag, t.name).Type(),
.default_value = null,
.is_comptime = false,
.alignment = 0,
};
break :map @Type(.{ .Struct = .{
.layout = .@"extern",
.fields = &fields,
.decls = &.{},
.is_tuple = false,
} });
}) void {
_ = self;
_ = tag_to_payload_map;
}
 
comptime {
if (!builtin.strip_debug_info) {
_ = &dbHelper;
}
}
 
src/arch/aarch64/CodeGen.zig added: 2056, removed: 2746, total 0
@@ -10,7 +10,6 @@ const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig");
const Type = @import("../../type.zig").Type;
const Value = @import("../../Value.zig");
const TypedValue = @import("../../TypedValue.zig");
const link = @import("../../link.zig");
const Module = @import("../../Module.zig");
const InternPool = @import("../../InternPool.zig");
@@ -342,7 +341,7 @@ pub fn generate(
const func = zcu.funcInfo(func_index);
const fn_owner_decl = zcu.declPtr(func.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
const fn_type = fn_owner_decl.typeOf(zcu);
const namespace = zcu.namespacePtr(fn_owner_decl.src_namespace);
const target = &namespace.file_scope.mod.resolved_target.result;
 
@@ -6143,10 +6142,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod))
return MCValue{ .none = {} };
 
const inst_index = inst.toIndex() orelse return self.genTypedValue(.{
.ty = inst_ty,
.val = (try self.air.value(inst, mod)).?,
});
const inst_index = inst.toIndex() orelse return self.genTypedValue((try self.air.value(inst, mod)).?);
 
return self.getResolvedInstValue(inst_index);
}
@@ -6163,11 +6159,11 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
}
 
fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
const mcv: MCValue = switch (try codegen.genTypedValue(
self.bin_file,
self.src_loc,
arg_tv,
val,
self.owner_decl,
)) {
.mcv => |mcv| switch (mcv) {
 
src/arch/arm/CodeGen.zig added: 2056, removed: 2746, total 0
@@ -10,7 +10,6 @@ const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig");
const Type = @import("../../type.zig").Type;
const Value = @import("../../Value.zig");
const TypedValue = @import("../../TypedValue.zig");
const link = @import("../../link.zig");
const Module = @import("../../Module.zig");
const InternPool = @import("../../InternPool.zig");
@@ -349,7 +348,7 @@ pub fn generate(
const func = zcu.funcInfo(func_index);
const fn_owner_decl = zcu.declPtr(func.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
const fn_type = fn_owner_decl.typeOf(zcu);
const namespace = zcu.namespacePtr(fn_owner_decl.src_namespace);
const target = &namespace.file_scope.mod.resolved_target.result;
 
@@ -6097,10 +6096,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod))
return MCValue{ .none = {} };
 
const inst_index = inst.toIndex() orelse return self.genTypedValue(.{
.ty = inst_ty,
.val = (try self.air.value(inst, mod)).?,
});
const inst_index = inst.toIndex() orelse return self.genTypedValue((try self.air.value(inst, mod)).?);
 
return self.getResolvedInstValue(inst_index);
}
@@ -6117,12 +6113,12 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
}
 
fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
const mod = self.bin_file.comp.module.?;
const mcv: MCValue = switch (try codegen.genTypedValue(
self.bin_file,
self.src_loc,
arg_tv,
val,
mod.funcOwnerDeclIndex(self.func_index),
)) {
.mcv => |mcv| switch (mcv) {
 
src/arch/riscv64/CodeGen.zig added: 2056, removed: 2746, total 0
@@ -9,7 +9,6 @@ const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig");
const Type = @import("../../type.zig").Type;
const Value = @import("../../Value.zig");
const TypedValue = @import("../../TypedValue.zig");
const link = @import("../../link.zig");
const Module = @import("../../Module.zig");
const InternPool = @import("../../InternPool.zig");
@@ -230,7 +229,7 @@ pub fn generate(
const func = zcu.funcInfo(func_index);
const fn_owner_decl = zcu.declPtr(func.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
const fn_type = fn_owner_decl.typeOf(zcu);
const namespace = zcu.namespacePtr(fn_owner_decl.src_namespace);
const target = &namespace.file_scope.mod.resolved_target.result;
 
@@ -2552,10 +2551,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
if (!inst_ty.hasRuntimeBits(mod))
return MCValue{ .none = {} };
 
const inst_index = inst.toIndex() orelse return self.genTypedValue(.{
.ty = inst_ty,
.val = (try self.air.value(inst, mod)).?,
});
const inst_index = inst.toIndex() orelse return self.genTypedValue((try self.air.value(inst, mod)).?);
 
return self.getResolvedInstValue(inst_index);
}
@@ -2572,12 +2568,12 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
}
 
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
const mod = self.bin_file.comp.module.?;
const mcv: MCValue = switch (try codegen.genTypedValue(
self.bin_file,
self.src_loc,
typed_value,
val,
mod.funcOwnerDeclIndex(self.func_index),
)) {
.mcv => |mcv| switch (mcv) {
 
src/arch/sparc64/CodeGen.zig added: 2056, removed: 2746, total 0
@@ -12,7 +12,7 @@ const builtin = @import("builtin");
const link = @import("../../link.zig");
const Module = @import("../../Module.zig");
const InternPool = @import("../../InternPool.zig");
const TypedValue = @import("../../TypedValue.zig");
const Value = @import("../../Value.zig");
const ErrorMsg = Module.ErrorMsg;
const codegen = @import("../../codegen.zig");
const Air = @import("../../Air.zig");
@@ -273,7 +273,7 @@ pub fn generate(
const func = zcu.funcInfo(func_index);
const fn_owner_decl = zcu.declPtr(func.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
const fn_type = fn_owner_decl.typeOf(zcu);
const namespace = zcu.namespacePtr(fn_owner_decl.src_namespace);
const target = &namespace.file_scope.mod.resolved_target.result;
 
@@ -4118,12 +4118,12 @@ fn genStoreASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Re
}
}
 
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
const mod = self.bin_file.comp.module.?;
const mcv: MCValue = switch (try codegen.genTypedValue(
self.bin_file,
self.src_loc,
typed_value,
val,
mod.funcOwnerDeclIndex(self.func_index),
)) {
.mcv => |mcv| switch (mcv) {
@@ -4546,10 +4546,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
return self.getResolvedInstValue(inst);
}
 
return self.genTypedValue(.{
.ty = ty,
.val = (try self.air.value(ref, mod)).?,
});
return self.genTypedValue((try self.air.value(ref, mod)).?);
}
 
fn ret(self: *Self, mcv: MCValue) !void {
 
src/arch/wasm/CodeGen.zig added: 2056, removed: 2746, total 0
@@ -18,7 +18,6 @@ const Value = @import("../../Value.zig");
const Compilation = @import("../../Compilation.zig");
const LazySrcLoc = std.zig.LazySrcLoc;
const link = @import("../../link.zig");
const TypedValue = @import("../../TypedValue.zig");
const Air = @import("../../Air.zig");
const Liveness = @import("../../Liveness.zig");
const target_util = @import("../../target.zig");
@@ -805,7 +804,7 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
// In the other cases, we will simply lower the constant to a value that fits
// into a single local (such as a pointer, integer, bool, etc).
const result = if (isByRef(ty, mod)) blk: {
const sym_index = try func.bin_file.lowerUnnamedConst(.{ .ty = ty, .val = val }, func.decl_index);
const sym_index = try func.bin_file.lowerUnnamedConst(val, func.decl_index);
break :blk WValue{ .memory = sym_index };
} else try func.lowerConstant(val, ty);
 
@@ -1243,12 +1242,12 @@ pub fn generate(
fn genFunc(func: *CodeGen) InnerError!void {
const mod = func.bin_file.base.comp.module.?;
const ip = &mod.intern_pool;
const fn_info = mod.typeToFunc(func.decl.ty).?;
const fn_info = mod.typeToFunc(func.decl.typeOf(mod)).?;
var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), mod);
defer func_type.deinit(func.gpa);
_ = try func.bin_file.storeDeclType(func.decl_index, func_type);
 
var cc_result = try func.resolveCallingConventionValues(func.decl.ty);
var cc_result = try func.resolveCallingConventionValues(func.decl.typeOf(mod));
defer cc_result.deinit(func.gpa);
 
func.args = cc_result.args;
@@ -2087,7 +2086,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mod = func.bin_file.base.comp.module.?;
const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try func.resolveInst(un_op);
const fn_info = mod.typeToFunc(func.decl.ty).?;
const fn_info = mod.typeToFunc(func.decl.typeOf(mod)).?;
const ret_ty = Type.fromInterned(fn_info.return_type);
 
// result must be stored in the stack and we return a pointer
@@ -2135,7 +2134,7 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result try func.allocStack(Type.usize); // create pointer to void
}
 
const fn_info = mod.typeToFunc(func.decl.ty).?;
const fn_info = mod.typeToFunc(func.decl.typeOf(mod)).?;
if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), mod)) {
break :result func.return_value;
}
@@ -2152,7 +2151,7 @@ fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(un_op);
const ret_ty = func.typeOf(un_op).childType(mod);
 
const fn_info = mod.typeToFunc(func.decl.ty).?;
const fn_info = mod.typeToFunc(func.decl.typeOf(mod)).?;
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
if (ret_ty.isError(mod)) {
try func.addImm32(0);
@@ -2193,7 +2192,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
break :blk function.owner_decl;
} else if (func_val.getExternFunc(mod)) |extern_func| {
const ext_decl = mod.declPtr(extern_func.decl);
const ext_info = mod.typeToFunc(ext_decl.ty).?;
const ext_info = mod.typeToFunc(ext_decl.typeOf(mod)).?;
var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types.get(ip), Type.fromInterned(ext_info.return_type), mod);
defer func_type.deinit(func.gpa);
const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_func.decl);
@@ -2216,7 +2215,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
},
else => {},
}
return func.fail("Expected a function, but instead found type '{}'", .{func_val.tag()});
return func.fail("Expected a function, but instead found '{s}'", .{@tagName(ip.indexToKey(func_val.toIntern()))});
};
 
const sret = if (first_param_sret) blk: {
@@ -2530,7 +2529,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mod = func.bin_file.base.comp.module.?;
const arg_index = func.arg_index;
const arg = func.args[arg_index];
const cc = mod.typeToFunc(func.decl.ty).?.cc;
const cc = mod.typeToFunc(func.decl.typeOf(mod)).?.cc;
const arg_ty = func.typeOfIndex(inst);
if (cc == .C) {
const arg_classes = abi.classifyType(arg_ty, mod);
@@ -3119,11 +3118,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
}
 
fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: InternPool.DeclIndex, offset: u32) InnerError!WValue {
const mod = func.bin_file.base.comp.module.?;
const decl = mod.declPtr(decl_index);
try mod.markDeclAlive(decl);
const ptr_ty = try mod.singleMutPtrType(decl.ty);
return func.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index, offset);
return func.lowerDeclRefValue(ptr_val, decl_index, offset);
}
 
fn lowerAnonDeclRef(
@@ -3158,7 +3153,7 @@ fn lowerAnonDeclRef(
} else return WValue{ .memory_offset = .{ .pointer = target_sym_index, .offset = offset } };
}
 
fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: InternPool.DeclIndex, offset: u32) InnerError!WValue {
fn lowerDeclRefValue(func: *CodeGen, val: Value, decl_index: InternPool.DeclIndex, offset: u32) InnerError!WValue {
const mod = func.bin_file.base.comp.module.?;
 
const decl = mod.declPtr(decl_index);
@@ -3166,23 +3161,23 @@ fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: InternPool.Decl
// want to lower the actual decl, rather than the alias itself.
if (decl.val.getFunction(mod)) |func_val| {
if (func_val.owner_decl != decl_index) {
return func.lowerDeclRefValue(tv, func_val.owner_decl, offset);
return func.lowerDeclRefValue(val, func_val.owner_decl, offset);
}
} else if (decl.val.getExternFunc(mod)) |func_val| {
if (func_val.decl != decl_index) {
return func.lowerDeclRefValue(tv, func_val.decl, offset);
return func.lowerDeclRefValue(val, func_val.decl, offset);
}
}
if (decl.ty.zigTypeTag(mod) != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime(mod)) {
const decl_ty = decl.typeOf(mod);
if (decl_ty.zigTypeTag(mod) != .Fn and !decl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return WValue{ .imm32 = 0xaaaaaaaa };
}
 
try mod.markDeclAlive(decl);
const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index);
const atom = func.bin_file.getAtom(atom_index);
 
const target_sym_index = @intFromEnum(atom.sym_index);
if (decl.ty.zigTypeTag(mod) == .Fn) {
if (decl_ty.zigTypeTag(mod) == .Fn) {
return WValue{ .function_index = target_sym_index };
} else if (offset == 0) {
return WValue{ .memory = target_sym_index };
@@ -3281,23 +3276,23 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
},
.error_union => |error_union| {
const err_int_ty = try mod.errorIntType();
const err_tv: TypedValue = switch (error_union.val) {
const err_ty, const err_val = switch (error_union.val) {
.err_name => |err_name| .{
.ty = ty.errorUnionSet(mod),
.val = Value.fromInterned((try mod.intern(.{ .err = .{
ty.errorUnionSet(mod),
Value.fromInterned((try mod.intern(.{ .err = .{
.ty = ty.errorUnionSet(mod).toIntern(),
.name = err_name,
} }))),
},
.payload => .{
.ty = err_int_ty,
.val = try mod.intValue(err_int_ty, 0),
err_int_ty,
try mod.intValue(err_int_ty, 0),
},
};
const payload_type = ty.errorUnionPayload(mod);
if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) {
// We use the error type directly as the type.
return func.lowerConstant(err_tv.val, err_tv.ty);
return func.lowerConstant(err_val, err_ty);
}
 
return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{});
@@ -3321,10 +3316,10 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
.elem, .field => |base_index| ptr = ip.indexToKey(base_index.base).ptr,
.comptime_field, .comptime_alloc => unreachable,
};
return .{ .memory = try func.bin_file.lowerUnnamedConst(.{ .ty = ty, .val = val }, owner_decl) };
return .{ .memory = try func.bin_file.lowerUnnamedConst(val, owner_decl) };
},
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl, 0),
.decl => |decl| return func.lowerDeclRefValue(val, decl, 0),
.int => |int| return func.lowerConstant(Value.fromInterned(int), Type.fromInterned(ip.typeOf(int))),
.opt_payload, .elem, .field => return func.lowerParentPtr(val, 0),
.anon_decl => |ad| return func.lowerAnonDeclRef(ad, 0),
@@ -7286,7 +7281,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
.storage = .{ .bytes = tag_name },
} });
const tag_sym_index = try func.bin_file.lowerUnnamedConst(
.{ .ty = name_ty, .val = Value.fromInterned(name_val) },
Value.fromInterned(name_val),
enum_decl_index,
);
 
 
src/arch/x86_64/CodeGen.zig added: 2056, removed: 2746, total 0
@@ -32,7 +32,6 @@ const InternPool = @import("../../InternPool.zig");
const Alignment = InternPool.Alignment;
const Target = std.Target;
const Type = @import("../../type.zig").Type;
const TypedValue = @import("../../TypedValue.zig");
const Value = @import("../../Value.zig");
const Instruction = @import("encoder.zig").Instruction;
 
@@ -808,7 +807,7 @@ pub fn generate(
const func = zcu.funcInfo(func_index);
const fn_owner_decl = zcu.declPtr(func.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
const fn_type = fn_owner_decl.typeOf(zcu);
const namespace = zcu.namespacePtr(fn_owner_decl.src_namespace);
const mod = namespace.file_scope.mod;
 
@@ -2250,7 +2249,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
for (exitlude_jump_relocs, 0..) |*exitlude_jump_reloc, tag_index| {
const tag_name_len = ip.stringToSlice(tag_names.get(ip)[tag_index]).len;
const tag_val = try mod.enumValueFieldIndex(enum_ty, @intCast(tag_index));
const tag_mcv = try self.genTypedValue(.{ .ty = enum_ty, .val = tag_val });
const tag_mcv = try self.genTypedValue(tag_val);
try self.genBinOpMir(.{ ._, .cmp }, enum_ty, enum_mcv, tag_mcv);
const skip_reloc = try self.asmJccReloc(.ne, undefined);
 
@@ -3323,7 +3322,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
.storage = .{ .repeated_elem = mask_val.ip_index },
} });
 
const splat_mcv = try self.genTypedValue(.{ .ty = splat_ty, .val = Value.fromInterned(splat_val) });
const splat_mcv = try self.genTypedValue(Value.fromInterned(splat_val));
const splat_addr_mcv: MCValue = switch (splat_mcv) {
.memory, .indirect, .load_frame => splat_mcv.address(),
else => .{ .register = try self.copyToTmpRegister(Type.usize, splat_mcv.address()) },
@@ -4992,17 +4991,14 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
defer self.register_manager.unlockReg(shift_lock);
 
const mask_ty = try mod.vectorType(.{ .len = 16, .child = .u8_type });
const mask_mcv = try self.genTypedValue(.{
.ty = mask_ty,
.val = Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = mask_ty.toIntern(),
.storage = .{ .elems = &([1]InternPool.Index{
(try rhs_ty.childType(mod).maxIntScalar(mod, Type.u8)).toIntern(),
} ++ [1]InternPool.Index{
(try mod.intValue(Type.u8, 0)).toIntern(),
} ** 15) },
} }))),
});
const mask_mcv = try self.genTypedValue(Value.fromInterned(try mod.intern(.{ .aggregate = .{
.ty = mask_ty.toIntern(),
.storage = .{ .elems = &([1]InternPool.Index{
(try rhs_ty.childType(mod).maxIntScalar(mod, Type.u8)).toIntern(),
} ++ [1]InternPool.Index{
(try mod.intValue(Type.u8, 0)).toIntern(),
} ** 15) },
} })));
const mask_addr_reg =
try self.copyToTmpRegister(Type.usize, mask_mcv.address());
const mask_addr_lock = self.register_manager.lockRegAssumeUnused(mask_addr_reg);
@@ -6860,11 +6856,11 @@ fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type)
.child = (try mod.intType(.signed, scalar_bits)).ip_index,
});
 
const sign_mcv = try self.genTypedValue(.{ .ty = vec_ty, .val = switch (tag) {
const sign_mcv = try self.genTypedValue(switch (tag) {
.neg => try vec_ty.minInt(mod, vec_ty),
.abs => try vec_ty.maxInt(mod, vec_ty),
else => unreachable,
} });
});
const sign_mem: Memory = if (sign_mcv.isMemory())
try sign_mcv.mem(self, Memory.Size.fromSize(abi_size))
else
@@ -11130,10 +11126,7 @@ fn genBinOp(
.cmp_neq,
=> {
const unsigned_ty = try lhs_ty.toUnsigned(mod);
const not_mcv = try self.genTypedValue(.{
.ty = lhs_ty,
.val = try unsigned_ty.maxInt(mod, unsigned_ty),
});
const not_mcv = try self.genTypedValue(try unsigned_ty.maxInt(mod, unsigned_ty));
const not_mem: Memory = if (not_mcv.isMemory())
try not_mcv.mem(self, Memory.Size.fromSize(abi_size))
else
@@ -12258,12 +12251,11 @@ fn genCall(self: *Self, info: union(enum) {
switch (switch (func_key) {
else => func_key,
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl| mod.intern_pool.indexToKey(try mod.declPtr(decl).internValue(mod)),
.decl => |decl| mod.intern_pool.indexToKey(mod.declPtr(decl).val.toIntern()),
else => func_key,
},
}) {
.func => |func| {
try mod.markDeclAlive(mod.declPtr(func.owner_decl));
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const sym_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, func.owner_decl);
const sym = elf_file.symbol(sym_index);
@@ -12323,7 +12315,6 @@ fn genCall(self: *Self, info: union(enum) {
},
.extern_func => |extern_func| {
const owner_decl = mod.declPtr(extern_func.decl);
try mod.markDeclAlive(owner_decl);
const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name);
const decl_name = mod.intern_pool.stringToSlice(owner_decl.name);
try self.genExternSymbolRef(.call, lib_name, decl_name);
@@ -14694,10 +14685,7 @@ fn genSetReg(
),
else => unreachable,
},
.segment, .x87, .mmx, .sse => try self.genSetReg(dst_reg, ty, try self.genTypedValue(.{
.ty = ty,
.val = try mod.undefValue(ty),
}), opts),
.segment, .x87, .mmx, .sse => try self.genSetReg(dst_reg, ty, try self.genTypedValue(try mod.undefValue(ty)), opts),
},
.eflags => |cc| try self.asmSetccRegister(cc, dst_reg.to8()),
.immediate => |imm| {
@@ -16895,13 +16883,10 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
.ty = mask_elem_ty.toIntern(),
.storage = .{ .u64 = bit / elem_bits },
} });
const mask_mcv = try self.genTypedValue(.{
.ty = mask_ty,
.val = Value.fromInterned(try mod.intern(.{ .aggregate = .{
.ty = mask_ty.toIntern(),
.storage = .{ .elems = mask_elems[0..vec_len] },
} })),
});
const mask_mcv = try self.genTypedValue(Value.fromInterned(try mod.intern(.{ .aggregate = .{
.ty = mask_ty.toIntern(),
.storage = .{ .elems = mask_elems[0..vec_len] },
} })));
const mask_mem: Memory = .{
.base = .{ .reg = try self.copyToTmpRegister(Type.usize, mask_mcv.address()) },
.mod = .{ .rm = .{ .size = self.memSize(ty) } },
@@ -16923,13 +16908,10 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
.ty = mask_elem_ty.toIntern(),
.storage = .{ .u64 = @as(u32, 1) << @intCast(bit & (elem_bits - 1)) },
} });
const mask_mcv = try self.genTypedValue(.{
.ty = mask_ty,
.val = Value.fromInterned(try mod.intern(.{ .aggregate = .{
.ty = mask_ty.toIntern(),
.storage = .{ .elems = mask_elems[0..vec_len] },
} })),
});
const mask_mcv = try self.genTypedValue(Value.fromInterned(try mod.intern(.{ .aggregate = .{
.ty = mask_ty.toIntern(),
.storage = .{ .elems = mask_elems[0..vec_len] },
} })));
const mask_mem: Memory = .{
.base = .{ .reg = try self.copyToTmpRegister(Type.usize, mask_mcv.address()) },
.mod = .{ .rm = .{ .size = self.memSize(ty) } },
@@ -17660,13 +17642,10 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
else
try select_mask_elem_ty.minIntScalar(mod, select_mask_elem_ty)).toIntern();
}
const select_mask_mcv = try self.genTypedValue(.{
.ty = select_mask_ty,
.val = Value.fromInterned(try mod.intern(.{ .aggregate = .{
.ty = select_mask_ty.toIntern(),
.storage = .{ .elems = select_mask_elems[0..mask_elems.len] },
} })),
});
const select_mask_mcv = try self.genTypedValue(Value.fromInterned(try mod.intern(.{ .aggregate = .{
.ty = select_mask_ty.toIntern(),
.storage = .{ .elems = select_mask_elems[0..mask_elems.len] },
} })));
 
if (self.hasFeature(.sse4_1)) {
const mir_tag: Mir.Inst.FixedTag = .{
@@ -17811,13 +17790,10 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
} });
}
const lhs_mask_ty = try mod.vectorType(.{ .len = max_abi_size, .child = .u8_type });
const lhs_mask_mcv = try self.genTypedValue(.{
.ty = lhs_mask_ty,
.val = Value.fromInterned(try mod.intern(.{ .aggregate = .{
.ty = lhs_mask_ty.toIntern(),
.storage = .{ .elems = lhs_mask_elems[0..max_abi_size] },
} })),
});
const lhs_mask_mcv = try self.genTypedValue(Value.fromInterned(try mod.intern(.{ .aggregate = .{
.ty = lhs_mask_ty.toIntern(),
.storage = .{ .elems = lhs_mask_elems[0..max_abi_size] },
} })));
const lhs_mask_mem: Memory = .{
.base = .{ .reg = try self.copyToTmpRegister(Type.usize, lhs_mask_mcv.address()) },
.mod = .{ .rm = .{ .size = Memory.Size.fromSize(@max(max_abi_size, 16)) } },
@@ -17848,13 +17824,10 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
} });
}
const rhs_mask_ty = try mod.vectorType(.{ .len = max_abi_size, .child = .u8_type });
const rhs_mask_mcv = try self.genTypedValue(.{
.ty = rhs_mask_ty,
.val = Value.fromInterned(try mod.intern(.{ .aggregate = .{
.ty = rhs_mask_ty.toIntern(),
.storage = .{ .elems = rhs_mask_elems[0..max_abi_size] },
} })),
});
const rhs_mask_mcv = try self.genTypedValue(Value.fromInterned(try mod.intern(.{ .aggregate = .{
.ty = rhs_mask_ty.toIntern(),
.storage = .{ .elems = rhs_mask_elems[0..max_abi_size] },
} })));
const rhs_mask_mem: Memory = .{
.base = .{ .reg = try self.copyToTmpRegister(Type.usize, rhs_mask_mcv.address()) },
.mod = .{ .rm = .{ .size = Memory.Size.fromSize(@max(max_abi_size, 16)) } },
@@ -17903,11 +17876,8 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
 
break :result null;
}) orelse return self.fail("TODO implement airShuffle from {} and {} to {} with {}", .{
lhs_ty.fmt(mod), rhs_ty.fmt(mod), dst_ty.fmt(mod),
Value.fromInterned(extra.mask).fmtValue(
Type.fromInterned(mod.intern_pool.typeOf(extra.mask)),
mod,
),
lhs_ty.fmt(mod), rhs_ty.fmt(mod), dst_ty.fmt(mod),
Value.fromInterned(extra.mask).fmtValue(mod),
});
return self.finishAir(inst, result, .{ extra.a, extra.b, .none });
}
@@ -18140,7 +18110,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
.{ .frame = frame_index },
@intCast(elem_size * elements.len),
elem_ty,
try self.genTypedValue(.{ .ty = elem_ty, .val = sentinel }),
try self.genTypedValue(sentinel),
.{},
);
break :result .{ .load_frame = .{ .index = frame_index } };
@@ -18664,7 +18634,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
const ip_index = ref.toInterned().?;
const gop = try self.const_tracking.getOrPut(self.gpa, ip_index);
if (!gop.found_existing) gop.value_ptr.* = InstTracking.init(init: {
const const_mcv = try self.genTypedValue(.{ .ty = ty, .val = Value.fromInterned(ip_index) });
const const_mcv = try self.genTypedValue(Value.fromInterned(ip_index));
switch (const_mcv) {
.lea_tlv => |tlv_sym| switch (self.bin_file.tag) {
.elf, .macho => {
@@ -18729,9 +18699,9 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV
return mcv;
}
 
fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
const mod = self.bin_file.comp.module.?;
return switch (try codegen.genTypedValue(self.bin_file, self.src_loc, arg_tv, self.owner.getDecl(mod))) {
return switch (try codegen.genTypedValue(self.bin_file, self.src_loc, val, self.owner.getDecl(mod))) {
.mcv => |mcv| switch (mcv) {
.none => .none,
.undef => .undef,
 
src/codegen.zig added: 2056, removed: 2746, total 0
@@ -19,7 +19,6 @@ const Liveness = @import("Liveness.zig");
const Module = @import("Module.zig");
const Target = std.Target;
const Type = @import("type.zig").Type;
const TypedValue = @import("TypedValue.zig");
const Value = @import("Value.zig");
const Zir = std.zig.Zir;
const Alignment = InternPool.Alignment;
@@ -171,7 +170,7 @@ pub fn generateLazySymbol(
pub fn generateSymbol(
bin_file: *link.File,
src_loc: Module.SrcLoc,
arg_tv: TypedValue,
val: Value,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
reloc_info: RelocInfo,
@@ -181,23 +180,20 @@ pub fn generateSymbol(
 
const mod = bin_file.comp.module.?;
const ip = &mod.intern_pool;
const typed_value = arg_tv;
const ty = val.typeOf(mod);
 
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
 
log.debug("generateSymbol: ty = {}, val = {}", .{
typed_value.ty.fmt(mod),
typed_value.val.fmtValue(typed_value.ty, mod),
});
log.debug("generateSymbol: val = {}", .{val.fmtValue(mod)});
 
if (typed_value.val.isUndefDeep(mod)) {
const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
if (val.isUndefDeep(mod)) {
const abi_size = math.cast(usize, ty.abiSize(mod)) orelse return error.Overflow;
try code.appendNTimes(0xaa, abi_size);
return .ok;
}
 
switch (ip.indexToKey(typed_value.val.toIntern())) {
switch (ip.indexToKey(val.toIntern())) {
.int_type,
.ptr_type,
.array_type,
@@ -238,17 +234,17 @@ pub fn generateSymbol(
.empty_enum_value,
=> unreachable, // non-runtime values
.int => {
const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
const abi_size = math.cast(usize, ty.abiSize(mod)) orelse return error.Overflow;
var space: Value.BigIntSpace = undefined;
const val = typed_value.val.toBigInt(&space, mod);
val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian);
const int_val = val.toBigInt(&space, mod);
int_val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian);
},
.err => |err| {
const int = try mod.getErrorValue(err.name);
try code.writer().writeInt(u16, @as(u16, @intCast(int)), endian);
},
.error_union => |error_union| {
const payload_ty = typed_value.ty.errorUnionPayload(mod);
const payload_ty = ty.errorUnionPayload(mod);
const err_val = switch (error_union.val) {
.err_name => |err_name| @as(u16, @intCast(try mod.getErrorValue(err_name))),
.payload => @as(u16, 0),
@@ -261,7 +257,7 @@ pub fn generateSymbol(
 
const payload_align = payload_ty.abiAlignment(mod);
const error_align = Type.anyerror.abiAlignment(mod);
const abi_align = typed_value.ty.abiAlignment(mod);
const abi_align = ty.abiAlignment(mod);
 
// error value first when its type is larger than the error union's payload
if (error_align.order(payload_align) == .gt) {
@@ -271,13 +267,10 @@ pub fn generateSymbol(
// emit payload part of the error union
{
const begin = code.items.len;
switch (try generateSymbol(bin_file, src_loc, .{
.ty = payload_ty,
.val = Value.fromInterned(switch (error_union.val) {
.err_name => try mod.intern(.{ .undef = payload_ty.toIntern() }),
.payload => |payload| payload,
}),
}, code, debug_output, reloc_info)) {
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(switch (error_union.val) {
.err_name => try mod.intern(.{ .undef = payload_ty.toIntern() }),
.payload => |payload| payload,
}), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return .{ .fail = em },
}
@@ -304,11 +297,8 @@ pub fn generateSymbol(
}
},
.enum_tag => |enum_tag| {
const int_tag_ty = typed_value.ty.intTagType(mod);
switch (try generateSymbol(bin_file, src_loc, .{
.ty = int_tag_ty,
.val = try mod.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty),
}, code, debug_output, reloc_info)) {
const int_tag_ty = ty.intTagType(mod);
switch (try generateSymbol(bin_file, src_loc, try mod.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return .{ .fail = em },
}
@@ -319,42 +309,33 @@ pub fn generateSymbol(
.f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(8)),
.f80 => |f80_val| {
writeFloat(f80, f80_val, target, endian, try code.addManyAsArray(10));
const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
const abi_size = math.cast(usize, ty.abiSize(mod)) orelse return error.Overflow;
try code.appendNTimes(0, abi_size - 10);
},
.f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(16)),
},
.ptr => switch (try lowerParentPtr(bin_file, src_loc, typed_value.val.toIntern(), code, debug_output, reloc_info)) {
.ptr => switch (try lowerParentPtr(bin_file, src_loc, val.toIntern(), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return .{ .fail = em },
},
.slice => |slice| {
switch (try generateSymbol(bin_file, src_loc, .{
.ty = typed_value.ty.slicePtrFieldType(mod),
.val = Value.fromInterned(slice.ptr),
}, code, debug_output, reloc_info)) {
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(slice.ptr), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return .{ .fail = em },
}
switch (try generateSymbol(bin_file, src_loc, .{
.ty = Type.usize,
.val = Value.fromInterned(slice.len),
}, code, debug_output, reloc_info)) {
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(slice.len), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return .{ .fail = em },
}
},
.opt => {
const payload_type = typed_value.ty.optionalChild(mod);
const payload_val = typed_value.val.optionalValue(mod);
const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
const payload_type = ty.optionalChild(mod);
const payload_val = val.optionalValue(mod);
const abi_size = math.cast(usize, ty.abiSize(mod)) orelse return error.Overflow;
 
if (typed_value.ty.optionalReprIsPayload(mod)) {
if (ty.optionalReprIsPayload(mod)) {
if (payload_val) |value| {
switch (try generateSymbol(bin_file, src_loc, .{
.ty = payload_type,
.val = value,
}, code, debug_output, reloc_info)) {
switch (try generateSymbol(bin_file, src_loc, value, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
@@ -365,10 +346,7 @@ pub fn generateSymbol(
const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1;
if (payload_type.hasRuntimeBits(mod)) {
const value = payload_val orelse Value.fromInterned((try mod.intern(.{ .undef = payload_type.toIntern() })));
switch (try generateSymbol(bin_file, src_loc, .{
.ty = payload_type,
.val = value,
}, code, debug_output, reloc_info)) {
switch (try generateSymbol(bin_file, src_loc, value, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
@@ -377,7 +355,7 @@ pub fn generateSymbol(
try code.appendNTimes(0, padding);
}
},
.aggregate => |aggregate| switch (ip.indexToKey(typed_value.ty.toIntern())) {
.aggregate => |aggregate| switch (ip.indexToKey(ty.toIntern())) {
.array_type => |array_type| switch (aggregate.storage) {
.bytes => |bytes| try code.appendSlice(bytes),
.elems, .repeated_elem => {
@@ -385,17 +363,14 @@ pub fn generateSymbol(
const len_including_sentinel =
array_type.len + @intFromBool(array_type.sentinel != .none);
while (index < len_including_sentinel) : (index += 1) {
switch (try generateSymbol(bin_file, src_loc, .{
.ty = Type.fromInterned(array_type.child),
.val = Value.fromInterned(switch (aggregate.storage) {
.bytes => unreachable,
.elems => |elems| elems[@as(usize, @intCast(index))],
.repeated_elem => |elem| if (index < array_type.len)
elem
else
array_type.sentinel,
}),
}, code, debug_output, reloc_info)) {
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(switch (aggregate.storage) {
.bytes => unreachable,
.elems => |elems| elems[@as(usize, @intCast(index))],
.repeated_elem => |elem| if (index < array_type.len)
elem
else
array_type.sentinel,
}), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return .{ .fail = em },
}
@@ -403,7 +378,7 @@ pub fn generateSymbol(
},
},
.vector_type => |vector_type| {
const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse
const abi_size = math.cast(usize, ty.abiSize(mod)) orelse
return error.Overflow;
if (vector_type.child == .bool_type) {
const bytes = try code.addManyAsSlice(abi_size);
@@ -449,16 +424,13 @@ pub fn generateSymbol(
.elems, .repeated_elem => {
var index: u64 = 0;
while (index < vector_type.len) : (index += 1) {
switch (try generateSymbol(bin_file, src_loc, .{
.ty = Type.fromInterned(vector_type.child),
.val = Value.fromInterned(switch (aggregate.storage) {
.bytes => unreachable,
.elems => |elems| elems[
math.cast(usize, index) orelse return error.Overflow
],
.repeated_elem => |elem| elem,
}),
}, code, debug_output, reloc_info)) {
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(switch (aggregate.storage) {
.bytes => unreachable,
.elems => |elems| elems[
math.cast(usize, index) orelse return error.Overflow
],
.repeated_elem => |elem| elem,
}), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return .{ .fail = em },
}
@@ -491,17 +463,14 @@ pub fn generateSymbol(
.repeated_elem => |elem| elem,
};
 
switch (try generateSymbol(bin_file, src_loc, .{
.ty = Type.fromInterned(field_ty),
.val = Value.fromInterned(field_val),
}, code, debug_output, reloc_info)) {
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(field_val), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
const unpadded_field_end = code.items.len - struct_begin;
 
// Pad struct members if required
const padded_field_end = typed_value.ty.structFieldOffset(index + 1, mod);
const padded_field_end = ty.structFieldOffset(index + 1, mod);
const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse
return error.Overflow;
 
@@ -511,10 +480,10 @@ pub fn generateSymbol(
}
},
.struct_type => {
const struct_type = ip.loadStructType(typed_value.ty.toIntern());
const struct_type = ip.loadStructType(ty.toIntern());
switch (struct_type.layout) {
.@"packed" => {
const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse
const abi_size = math.cast(usize, ty.abiSize(mod)) orelse
return error.Overflow;
const current_pos = code.items.len;
try code.resize(current_pos + abi_size);
@@ -537,10 +506,7 @@ pub fn generateSymbol(
return error.Overflow;
var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
defer tmp_list.deinit();
switch (try generateSymbol(bin_file, src_loc, .{
.ty = Type.fromInterned(field_ty),
.val = Value.fromInterned(field_val),
}, &tmp_list, debug_output, reloc_info)) {
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(field_val), &tmp_list, debug_output, reloc_info)) {
.ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items),
.fail => |em| return Result{ .fail = em },
}
@@ -560,7 +526,7 @@ pub fn generateSymbol(
const field_ty = field_types[field_index];
if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
 
const field_val = switch (ip.indexToKey(typed_value.val.toIntern()).aggregate.storage) {
const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty,
.storage = .{ .u64 = bytes[field_index] },
@@ -575,10 +541,7 @@ pub fn generateSymbol(
) orelse return error.Overflow;
if (padding > 0) try code.appendNTimes(0, padding);
 
switch (try generateSymbol(bin_file, src_loc, .{
.ty = Type.fromInterned(field_ty),
.val = Value.fromInterned(field_val),
}, code, debug_output, reloc_info)) {
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(field_val), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
@@ -599,37 +562,28 @@ pub fn generateSymbol(
else => unreachable,
},
.un => |un| {
const layout = typed_value.ty.unionGetLayout(mod);
const layout = ty.unionGetLayout(mod);
 
if (layout.payload_size == 0) {
return generateSymbol(bin_file, src_loc, .{
.ty = typed_value.ty.unionTagTypeSafety(mod).?,
.val = Value.fromInterned(un.tag),
}, code, debug_output, reloc_info);
return generateSymbol(bin_file, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info);
}
 
// Check if we should store the tag first.
if (layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align)) {
switch (try generateSymbol(bin_file, src_loc, .{
.ty = typed_value.ty.unionTagTypeSafety(mod).?,
.val = Value.fromInterned(un.tag),
}, code, debug_output, reloc_info)) {
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
}
 
const union_obj = mod.typeToUnion(typed_value.ty).?;
const union_obj = mod.typeToUnion(ty).?;
if (un.tag != .none) {
const field_index = typed_value.ty.unionTagFieldIndex(Value.fromInterned(un.tag), mod).?;
const field_index = ty.unionTagFieldIndex(Value.fromInterned(un.tag), mod).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBits(mod)) {
try code.appendNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
} else {
switch (try generateSymbol(bin_file, src_loc, .{
.ty = field_ty,
.val = Value.fromInterned(un.val),
}, code, debug_output, reloc_info)) {
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(un.val), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
@@ -640,20 +594,14 @@ pub fn generateSymbol(
}
}
} else {
switch (try generateSymbol(bin_file, src_loc, .{
.ty = Type.fromInterned(ip.typeOf(un.val)),
.val = Value.fromInterned(un.val),
}, code, debug_output, reloc_info)) {
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(un.val), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
}
 
if (layout.tag_size > 0 and layout.tag_align.compare(.lt, layout.payload_align)) {
switch (try generateSymbol(bin_file, src_loc, .{
.ty = Type.fromInterned(union_obj.enum_tag_ty),
.val = Value.fromInterned(un.tag),
}, code, debug_output, reloc_info)) {
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
@@ -681,10 +629,7 @@ fn lowerParentPtr(
return switch (ptr.addr) {
.decl => |decl| try lowerDeclRef(bin_file, src_loc, decl, code, debug_output, reloc_info),
.anon_decl => |ad| try lowerAnonDeclRef(bin_file, src_loc, ad, code, debug_output, reloc_info),
.int => |int| try generateSymbol(bin_file, src_loc, .{
.ty = Type.usize,
.val = Value.fromInterned(int),
}, code, debug_output, reloc_info),
.int => |int| try generateSymbol(bin_file, src_loc, Value.fromInterned(int), code, debug_output, reloc_info),
.eu_payload => |eu_payload| try lowerParentPtr(
bin_file,
src_loc,
@@ -829,14 +774,12 @@ fn lowerDeclRef(
const target = namespace.file_scope.mod.resolved_target.result;
 
const ptr_width = target.ptrBitWidth();
const is_fn_body = decl.ty.zigTypeTag(zcu) == .Fn;
if (!is_fn_body and !decl.ty.hasRuntimeBits(zcu)) {
const is_fn_body = decl.typeOf(zcu).zigTypeTag(zcu) == .Fn;
if (!is_fn_body and !decl.typeOf(zcu).hasRuntimeBits(zcu)) {
try code.appendNTimes(0xaa, @divExact(ptr_width, 8));
return Result.ok;
}
 
try zcu.markDeclAlive(decl);
 
const vaddr = try lf.getDeclVAddr(decl_index, .{
.parent_atom_index = reloc_info.parent_atom_index,
.offset = code.items.len,
@@ -912,11 +855,12 @@ pub const GenResult = union(enum) {
fn genDeclRef(
lf: *link.File,
src_loc: Module.SrcLoc,
tv: TypedValue,
val: Value,
ptr_decl_index: InternPool.DeclIndex,
) CodeGenError!GenResult {
const zcu = lf.comp.module.?;
log.debug("genDeclRef: ty = {}, val = {}", .{ tv.ty.fmt(zcu), tv.val.fmtValue(tv.ty, zcu) });
const ty = val.typeOf(zcu);
log.debug("genDeclRef: val = {}", .{val.fmtValue(zcu)});
 
const ptr_decl = zcu.declPtr(ptr_decl_index);
const namespace = zcu.namespacePtr(ptr_decl.src_namespace);
@@ -925,14 +869,14 @@ fn genDeclRef(
const ptr_bits = target.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
 
const decl_index = switch (zcu.intern_pool.indexToKey(try ptr_decl.internValue(zcu))) {
const decl_index = switch (zcu.intern_pool.indexToKey(ptr_decl.val.toIntern())) {
.func => |func| func.owner_decl,
.extern_func => |extern_func| extern_func.decl,
else => ptr_decl_index,
};
const decl = zcu.declPtr(decl_index);
 
if (!decl.ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
if (!decl.typeOf(zcu).isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
const imm: u64 = switch (ptr_bytes) {
1 => 0xaa,
2 => 0xaaaa,
@@ -947,22 +891,20 @@ fn genDeclRef(
const gpa = comp.gpa;
 
// TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
if (tv.ty.castPtrToFn(zcu)) |fn_ty| {
if (ty.castPtrToFn(zcu)) |fn_ty| {
if (zcu.typeToFunc(fn_ty).?.is_generic) {
return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(zcu).toByteUnitsOptional().? });
}
} else if (tv.ty.zigTypeTag(zcu) == .Pointer) {
const elem_ty = tv.ty.elemType2(zcu);
} else if (ty.zigTypeTag(zcu) == .Pointer) {
const elem_ty = ty.elemType2(zcu);
if (!elem_ty.hasRuntimeBits(zcu)) {
return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(zcu).toByteUnitsOptional().? });
}
}
 
try zcu.markDeclAlive(decl);
 
const decl_namespace = zcu.namespacePtr(decl.src_namespace);
const single_threaded = decl_namespace.file_scope.mod.single_threaded;
const is_threadlocal = tv.val.isPtrToThreadLocal(zcu) and !single_threaded;
const is_threadlocal = val.isPtrToThreadLocal(zcu) and !single_threaded;
const is_extern = decl.isExtern(zcu);
 
if (lf.cast(link.File.Elf)) |elf_file| {
@@ -1027,14 +969,14 @@ fn genDeclRef(
fn genUnnamedConst(
lf: *link.File,
src_loc: Module.SrcLoc,
tv: TypedValue,
val: Value,
owner_decl_index: InternPool.DeclIndex,
) CodeGenError!GenResult {
const zcu = lf.comp.module.?;
const gpa = lf.comp.gpa;
log.debug("genUnnamedConst: ty = {}, val = {}", .{ tv.ty.fmt(zcu), tv.val.fmtValue(tv.ty, zcu) });
log.debug("genUnnamedConst: val = {}", .{val.fmtValue(zcu)});
 
const local_sym_index = lf.lowerUnnamedConst(tv, owner_decl_index) catch |err| {
const local_sym_index = lf.lowerUnnamedConst(val, owner_decl_index) catch |err| {
return GenResult.fail(gpa, src_loc, "lowering unnamed constant failed: {s}", .{@errorName(err)});
};
switch (lf.tag) {
@@ -1066,18 +1008,15 @@ fn genUnnamedConst(
pub fn genTypedValue(
lf: *link.File,
src_loc: Module.SrcLoc,
arg_tv: TypedValue,
val: Value,
owner_decl_index: InternPool.DeclIndex,
) CodeGenError!GenResult {
const zcu = lf.comp.module.?;
const typed_value = arg_tv;
const ty = val.typeOf(zcu);
 
log.debug("genTypedValue: ty = {}, val = {}", .{
typed_value.ty.fmt(zcu),
typed_value.val.fmtValue(typed_value.ty, zcu),
});
log.debug("genTypedValue: val = {}", .{val.fmtValue(zcu)});
 
if (typed_value.val.isUndef(zcu))
if (val.isUndef(zcu))
return GenResult.mcv(.undef);
 
const owner_decl = zcu.declPtr(owner_decl_index);
@@ -1085,85 +1024,92 @@ pub fn genTypedValue(
const target = namespace.file_scope.mod.resolved_target.result;
const ptr_bits = target.ptrBitWidth();
 
if (!typed_value.ty.isSlice(zcu)) switch (zcu.intern_pool.indexToKey(typed_value.val.toIntern())) {
if (!ty.isSlice(zcu)) switch (zcu.intern_pool.indexToKey(val.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl| return genDeclRef(lf, src_loc, typed_value, decl),
.decl => |decl| return genDeclRef(lf, src_loc, val, decl),
else => {},
},
else => {},
};
 
switch (typed_value.ty.zigTypeTag(zcu)) {
switch (ty.zigTypeTag(zcu)) {
.Void => return GenResult.mcv(.none),
.Pointer => switch (typed_value.ty.ptrSize(zcu)) {
.Pointer => switch (ty.ptrSize(zcu)) {
.Slice => {},
else => switch (typed_value.val.toIntern()) {
else => switch (val.toIntern()) {
.null_value => {
return GenResult.mcv(.{ .immediate = 0 });
},
.none => {},
else => switch (zcu.intern_pool.indexToKey(typed_value.val.toIntern())) {
else => switch (zcu.intern_pool.indexToKey(val.toIntern())) {
.int => {
return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(zcu) });
return GenResult.mcv(.{ .immediate = val.toUnsignedInt(zcu) });
},
else => {},
},
},
},
.Int => {
const info = typed_value.ty.intInfo(zcu);
const info = ty.intInfo(zcu);
if (info.bits <= ptr_bits) {
const unsigned = switch (info.signedness) {
.signed => @as(u64, @bitCast(typed_value.val.toSignedInt(zcu))),
.unsigned => typed_value.val.toUnsignedInt(zcu),
.signed => @as(u64, @bitCast(val.toSignedInt(zcu))),
.unsigned => val.toUnsignedInt(zcu),
};
return GenResult.mcv(.{ .immediate = unsigned });
}
},
.Bool => {
return GenResult.mcv(.{ .immediate = @intFromBool(typed_value.val.toBool()) });
return GenResult.mcv(.{ .immediate = @intFromBool(val.toBool()) });
},
.Optional => {
if (typed_value.ty.isPtrLikeOptional(zcu)) {
return genTypedValue(lf, src_loc, .{
.ty = typed_value.ty.optionalChild(zcu),
.val = typed_value.val.optionalValue(zcu) orelse return GenResult.mcv(.{ .immediate = 0 }),
}, owner_decl_index);
} else if (typed_value.ty.abiSize(zcu) == 1) {
return GenResult.mcv(.{ .immediate = @intFromBool(!typed_value.val.isNull(zcu)) });
if (ty.isPtrLikeOptional(zcu)) {
return genTypedValue(
lf,
src_loc,
val.optionalValue(zcu) orelse return GenResult.mcv(.{ .immediate = 0 }),
owner_decl_index,
);
} else if (ty.abiSize(zcu) == 1) {
return GenResult.mcv(.{ .immediate = @intFromBool(!val.isNull(zcu)) });
}
},
.Enum => {
const enum_tag = zcu.intern_pool.indexToKey(typed_value.val.toIntern()).enum_tag;
const int_tag_ty = zcu.intern_pool.typeOf(enum_tag.int);
return genTypedValue(lf, src_loc, .{
.ty = Type.fromInterned(int_tag_ty),
.val = Value.fromInterned(enum_tag.int),
}, owner_decl_index);
const enum_tag = zcu.intern_pool.indexToKey(val.toIntern()).enum_tag;
return genTypedValue(
lf,
src_loc,
Value.fromInterned(enum_tag.int),
owner_decl_index,
);
},
.ErrorSet => {
const err_name = zcu.intern_pool.indexToKey(typed_value.val.toIntern()).err.name;
const err_name = zcu.intern_pool.indexToKey(val.toIntern()).err.name;
const error_index = zcu.global_error_set.getIndex(err_name).?;
return GenResult.mcv(.{ .immediate = error_index });
},
.ErrorUnion => {
const err_type = typed_value.ty.errorUnionSet(zcu);
const payload_type = typed_value.ty.errorUnionPayload(zcu);
const err_type = ty.errorUnionSet(zcu);
const payload_type = ty.errorUnionPayload(zcu);
if (!payload_type.hasRuntimeBitsIgnoreComptime(zcu)) {
// We use the error type directly as the type.
const err_int_ty = try zcu.errorIntType();
switch (zcu.intern_pool.indexToKey(typed_value.val.toIntern()).error_union.val) {
.err_name => |err_name| return genTypedValue(lf, src_loc, .{
.ty = err_type,
.val = Value.fromInterned((try zcu.intern(.{ .err = .{
switch (zcu.intern_pool.indexToKey(val.toIntern()).error_union.val) {
.err_name => |err_name| return genTypedValue(
lf,
src_loc,
Value.fromInterned(try zcu.intern(.{ .err = .{
.ty = err_type.toIntern(),
.name = err_name,
} }))),
}, owner_decl_index),
.payload => return genTypedValue(lf, src_loc, .{
.ty = err_int_ty,
.val = try zcu.intValue(err_int_ty, 0),
}, owner_decl_index),
} })),
owner_decl_index,
),
.payload => return genTypedValue(
lf,
src_loc,
try zcu.intValue(err_int_ty, 0),
owner_decl_index,
),
}
}
},
@@ -1180,7 +1126,7 @@ pub fn genTypedValue(
else => {},
}
 
return genUnnamedConst(lf, src_loc, typed_value, owner_decl_index);
return genUnnamedConst(lf, src_loc, val, owner_decl_index);
}
 
pub fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u64 {
 
src/codegen/c.zig added: 2056, removed: 2746, total 0
@@ -9,7 +9,6 @@ const Module = @import("../Module.zig");
const Compilation = @import("../Compilation.zig");
const Value = @import("../Value.zig");
const Type = @import("../type.zig").Type;
const TypedValue = @import("../TypedValue.zig");
const C = link.File.C;
const Decl = Module.Decl;
const trace = @import("../tracy.zig").trace;
@@ -657,7 +656,7 @@ pub const DeclGen = struct {
assert(decl.has_tv);
 
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
if (ty.isPtrAtRuntime(mod) and !decl.ty.isFnOrHasRuntimeBits(mod)) {
if (ty.isPtrAtRuntime(mod) and !decl.typeOf(mod).isFnOrHasRuntimeBits(mod)) {
return dg.writeCValue(writer, .{ .undef = ty });
}
 
@@ -673,7 +672,7 @@ pub const DeclGen = struct {
// them). The analysis until now should ensure that the C function
// pointers are compatible. If they are not, then there is a bug
// somewhere and we should let the C compiler tell us about it.
const need_typecast = if (ty.castPtrToFn(mod)) |_| false else !ty.childType(mod).eql(decl.ty, mod);
const need_typecast = if (ty.castPtrToFn(mod)) |_| false else !ty.childType(mod).eql(decl.typeOf(mod), mod);
if (need_typecast) {
try writer.writeAll("((");
try dg.renderType(writer, ty);
@@ -1588,9 +1587,10 @@ pub const DeclGen = struct {
const ip = &mod.intern_pool;
 
const fn_decl = mod.declPtr(fn_decl_index);
const fn_cty_idx = try dg.typeToIndex(fn_decl.ty, kind);
const fn_ty = fn_decl.typeOf(mod);
const fn_cty_idx = try dg.typeToIndex(fn_ty, kind);
 
const fn_info = mod.typeToFunc(fn_decl.ty).?;
const fn_info = mod.typeToFunc(fn_ty).?;
if (fn_info.cc == .Naked) {
switch (kind) {
.forward => try w.writeAll("zig_naked_decl "),
@@ -1876,9 +1876,9 @@ pub const DeclGen = struct {
try renderTypeSuffix(dg.pass, store.*, mod, w, cty_idx, .suffix, .{});
}
 
fn declIsGlobal(dg: *DeclGen, tv: TypedValue) bool {
fn declIsGlobal(dg: *DeclGen, val: Value) bool {
const mod = dg.module;
return switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
return switch (mod.intern_pool.indexToKey(val.ip_index)) {
.variable => |variable| mod.decl_exports.contains(variable.decl),
.extern_func => true,
.func => |func| mod.decl_exports.contains(func.owner_decl),
@@ -1971,7 +1971,7 @@ pub const DeclGen = struct {
) !void {
const decl = dg.module.declPtr(decl_index);
const fwd = dg.fwdDeclWriter();
const is_global = variable.is_extern or dg.declIsGlobal(.{ .ty = decl.ty, .val = decl.val });
const is_global = variable.is_extern or dg.declIsGlobal(decl.val);
try fwd.writeAll(if (is_global) "zig_extern " else "static ");
const maybe_exports = dg.module.decl_exports.get(decl_index);
const export_weak_linkage = if (maybe_exports) |exports|
@@ -1982,7 +1982,7 @@ pub const DeclGen = struct {
if (variable.is_threadlocal) try fwd.writeAll("zig_threadlocal ");
try dg.renderTypeAndName(
fwd,
decl.ty,
decl.typeOf(dg.module),
.{ .decl = decl_index },
CQualifiers.init(.{ .@"const" = variable.is_const }),
decl.alignment,
@@ -2009,7 +2009,6 @@ pub const DeclGen = struct {
fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: InternPool.DeclIndex, export_index: u32) !void {
const mod = dg.module;
const decl = mod.declPtr(decl_index);
try mod.markDeclAlive(decl);
 
if (mod.decl_exports.get(decl_index)) |exports| {
try writer.print("{ }", .{
@@ -2656,13 +2655,12 @@ fn genExports(o: *Object) !void {
.anon, .flush => return,
};
const decl = mod.declPtr(decl_index);
const tv: TypedValue = .{ .ty = decl.ty, .val = Value.fromInterned((try decl.internValue(mod))) };
const fwd = o.dg.fwdDeclWriter();
 
const exports = mod.decl_exports.get(decl_index) orelse return;
if (exports.items.len < 2) return;
 
const is_variable_const = switch (ip.indexToKey(tv.val.toIntern())) {
const is_variable_const = switch (ip.indexToKey(decl.val.toIntern())) {
.func => return for (exports.items[1..], 1..) |@"export", i| {
try fwd.writeAll("zig_extern ");
if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage_fn ");
@@ -2687,7 +2685,7 @@ fn genExports(o: *Object) !void {
const export_name = ip.stringToSlice(@"export".opts.name);
try o.dg.renderTypeAndName(
fwd,
decl.ty,
decl.typeOf(mod),
.{ .identifier = export_name },
CQualifiers.init(.{ .@"const" = is_variable_const }),
decl.alignment,
@@ -2769,7 +2767,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
},
.never_tail, .never_inline => |fn_decl_index| {
const fn_decl = mod.declPtr(fn_decl_index);
const fn_cty = try o.dg.typeToCType(fn_decl.ty, .complete);
const fn_cty = try o.dg.typeToCType(fn_decl.typeOf(mod), .complete);
const fn_info = fn_cty.cast(CType.Payload.Function).?.data;
 
const fwd_decl_writer = o.dg.fwdDeclWriter();
@@ -2805,15 +2803,11 @@ pub fn genFunc(f: *Function) !void {
const gpa = o.dg.gpa;
const decl_index = o.dg.pass.decl;
const decl = mod.declPtr(decl_index);
const tv: TypedValue = .{
.ty = decl.ty,
.val = decl.val,
};
 
o.code_header = std.ArrayList(u8).init(gpa);
defer o.code_header.deinit();
 
const is_global = o.dg.declIsGlobal(tv);
const is_global = o.dg.declIsGlobal(decl.val);
const fwd_decl_writer = o.dg.fwdDeclWriter();
try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static ");
 
@@ -2893,22 +2887,23 @@ pub fn genDecl(o: *Object) !void {
const mod = o.dg.module;
const decl_index = o.dg.pass.decl;
const decl = mod.declPtr(decl_index);
const tv: TypedValue = .{ .ty = decl.ty, .val = Value.fromInterned((try decl.internValue(mod))) };
const decl_val = decl.val;
const decl_ty = decl_val.typeOf(mod);
 
if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return;
if (tv.val.getExternFunc(mod)) |_| {
if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return;
if (decl_val.getExternFunc(mod)) |_| {
const fwd_decl_writer = o.dg.fwdDeclWriter();
try fwd_decl_writer.writeAll("zig_extern ");
try o.dg.renderFunctionSignature(fwd_decl_writer, decl_index, .forward, .{ .export_index = 0 });
try fwd_decl_writer.writeAll(";\n");
try genExports(o);
} else if (tv.val.getVariable(mod)) |variable| {
} else if (decl_val.getVariable(mod)) |variable| {
try o.dg.renderFwdDecl(decl_index, variable, .final);
try genExports(o);
 
if (variable.is_extern) return;
 
const is_global = variable.is_extern or o.dg.declIsGlobal(tv);
const is_global = variable.is_extern or o.dg.declIsGlobal(decl_val);
const w = o.writer();
if (!is_global) try w.writeAll("static ");
if (variable.is_weak_linkage) try w.writeAll("zig_weak_linkage ");
@@ -2916,22 +2911,22 @@ pub fn genDecl(o: *Object) !void {
if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s|
try w.print("zig_linksection(\"{s}\", ", .{s});
const decl_c_value = .{ .decl = decl_index };
try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .{}, decl.alignment, .complete);
try o.dg.renderTypeAndName(w, decl_ty, decl_c_value, .{}, decl.alignment, .complete);
if (decl.@"linksection" != .none) try w.writeAll(", read, write)");
try w.writeAll(" = ");
try o.dg.renderValue(w, tv.ty, Value.fromInterned(variable.init), .StaticInitializer);
try o.dg.renderValue(w, decl_ty, Value.fromInterned(variable.init), .StaticInitializer);
try w.writeByte(';');
try o.indent_writer.insertNewline();
} else {
const is_global = o.dg.module.decl_exports.contains(decl_index);
const decl_c_value = .{ .decl = decl_index };
try genDeclValue(o, tv, is_global, decl_c_value, decl.alignment, decl.@"linksection");
try genDeclValue(o, decl_val, is_global, decl_c_value, decl.alignment, decl.@"linksection");
}
}
 
pub fn genDeclValue(
o: *Object,
tv: TypedValue,
val: Value,
is_global: bool,
decl_c_value: CValue,
alignment: Alignment,
@@ -2940,8 +2935,10 @@ pub fn genDeclValue(
const mod = o.dg.module;
const fwd_decl_writer = o.dg.fwdDeclWriter();
 
const ty = val.typeOf(mod);
 
try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static ");
try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, Const, alignment, .complete);
try o.dg.renderTypeAndName(fwd_decl_writer, ty, decl_c_value, Const, alignment, .complete);
switch (o.dg.pass) {
.decl => |decl_index| {
if (mod.decl_exports.get(decl_index)) |exports| {
@@ -2964,10 +2961,10 @@ pub fn genDeclValue(
 
if (mod.intern_pool.stringToSliceUnwrap(link_section)) |s|
try w.print("zig_linksection(\"{s}\", ", .{s});
try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, Const, alignment, .complete);
try o.dg.renderTypeAndName(w, ty, decl_c_value, Const, alignment, .complete);
if (link_section != .none) try w.writeAll(", read)");
try w.writeAll(" = ");
try o.dg.renderValue(w, tv.ty, tv.val, .StaticInitializer);
try o.dg.renderValue(w, ty, val, .StaticInitializer);
try w.writeAll(";\n");
}
 
@@ -2978,14 +2975,10 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void {
const mod = dg.module;
const decl_index = dg.pass.decl;
const decl = mod.declPtr(decl_index);
const tv: TypedValue = .{
.ty = decl.ty,
.val = decl.val,
};
const writer = dg.fwdDeclWriter();
 
switch (tv.ty.zigTypeTag(mod)) {
.Fn => if (dg.declIsGlobal(tv)) {
switch (decl.val.typeOf(mod).zigTypeTag(mod)) {
.Fn => if (dg.declIsGlobal(decl.val)) {
try writer.writeAll("zig_extern ");
try dg.renderFunctionSignature(writer, dg.pass.decl, .complete, .{ .export_index = 0 });
try dg.fwd_decl.appendSlice(";\n");
@@ -5304,25 +5297,25 @@ fn airIsNull(
const err_int_ty = try mod.errorIntType();
 
const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
TypedValue{ .ty = Type.bool, .val = Value.true }
Value.true
else if (optional_ty.isPtrLikeOptional(mod))
// operand is a regular pointer, test `operand !=/== NULL`
TypedValue{ .ty = optional_ty, .val = try mod.getCoerced(Value.null, optional_ty) }
try mod.getCoerced(Value.null, optional_ty)
else if (payload_ty.zigTypeTag(mod) == .ErrorSet)
TypedValue{ .ty = err_int_ty, .val = try mod.intValue(err_int_ty, 0) }
try mod.intValue(err_int_ty, 0)
else if (payload_ty.isSlice(mod) and optional_ty.optionalReprIsPayload(mod)) rhs: {
try writer.writeAll(".ptr");
const slice_ptr_ty = payload_ty.slicePtrFieldType(mod);
const opt_slice_ptr_ty = try mod.optionalType(slice_ptr_ty.toIntern());
break :rhs TypedValue{ .ty = opt_slice_ptr_ty, .val = try mod.nullValue(opt_slice_ptr_ty) };
break :rhs try mod.nullValue(opt_slice_ptr_ty);
} else rhs: {
try writer.writeAll(".is_null");
break :rhs TypedValue{ .ty = Type.bool, .val = Value.true };
break :rhs Value.true;
};
try writer.writeByte(' ');
try writer.writeAll(operator);
try writer.writeByte(' ');
try f.object.dg.renderValue(writer, rhs.ty, rhs.val, .Other);
try f.object.dg.renderValue(writer, rhs.typeOf(mod), rhs, .Other);
try writer.writeAll(";\n");
return local;
}
@@ -7392,7 +7385,7 @@ fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.typeOfIndex(inst);
const decl_index = f.object.dg.pass.decl;
const decl = mod.declPtr(decl_index);
const fn_cty = try f.typeToCType(decl.ty, .complete);
const fn_cty = try f.typeToCType(decl.typeOf(mod), .complete);
const param_len = fn_cty.castTag(.varargs_function).?.data.param_types.len;
 
const writer = f.object.writer();
 
src/codegen/llvm.zig added: 2056, removed: 2746, total 0
@@ -18,7 +18,6 @@ const Module = @import("../Module.zig");
const Zcu = Module;
const InternPool = @import("../InternPool.zig");
const Package = @import("../Package.zig");
const TypedValue = @import("../TypedValue.zig");
const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
const Value = @import("../Value.zig");
@@ -1384,7 +1383,7 @@ pub const Object = struct {
const decl = zcu.declPtr(decl_index);
const namespace = zcu.namespacePtr(decl.src_namespace);
const owner_mod = namespace.file_scope.mod;
const fn_info = zcu.typeToFunc(decl.ty).?;
const fn_info = zcu.typeToFunc(decl.typeOf(zcu)).?;
const target = zcu.getTarget();
const ip = &zcu.intern_pool;
 
@@ -1659,7 +1658,7 @@ pub const Object = struct {
const line_number = decl.src_line + 1;
const is_internal_linkage = decl.val.getExternFunc(zcu) == null and
!zcu.decl_exports.contains(decl_index);
const debug_decl_type = try o.lowerDebugType(decl.ty);
const debug_decl_type = try o.lowerDebugType(decl.typeOf(zcu));
 
const subprogram = try o.builder.debugSubprogram(
file,
@@ -1762,7 +1761,7 @@ pub const Object = struct {
const decl_name = decl_name: {
const decl_name = mod.intern_pool.stringToSlice(decl.name);
 
if (mod.getTarget().isWasm() and try decl.isFunction(mod)) {
if (mod.getTarget().isWasm() and decl.val.typeOf(mod).zigTypeTag(mod) == .Fn) {
if (mod.intern_pool.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| {
if (!std.mem.eql(u8, lib_name, "c")) {
break :decl_name try self.builder.strtabStringFmt("{s}|{s}", .{ decl_name, lib_name });
@@ -2881,7 +2880,7 @@ pub const Object = struct {
const decl = zcu.declPtr(decl_index);
const namespace = zcu.namespacePtr(decl.src_namespace);
const owner_mod = namespace.file_scope.mod;
const zig_fn_type = decl.ty;
const zig_fn_type = decl.typeOf(zcu);
const gop = try o.decl_map.getOrPut(gpa, decl_index);
if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.function;
 
@@ -3112,7 +3111,7 @@ pub const Object = struct {
try o.builder.strtabString(mod.intern_pool.stringToSlice(
if (is_extern) decl.name else try decl.fullyQualifiedName(mod),
)),
try o.lowerType(decl.ty),
try o.lowerType(decl.typeOf(mod)),
toLlvmGlobalAddressSpace(decl.@"addrspace", mod.getTarget()),
);
gop.value_ptr.* = variable_index.ptrConst(&o.builder).global;
@@ -3722,15 +3721,11 @@ pub const Object = struct {
=> unreachable, // non-runtime values
.extern_func => |extern_func| {
const fn_decl_index = extern_func.decl;
const fn_decl = mod.declPtr(fn_decl_index);
try mod.markDeclAlive(fn_decl);
const function_index = try o.resolveLlvmFunction(fn_decl_index);
return function_index.ptrConst(&o.builder).global.toConst();
},
.func => |func| {
const fn_decl_index = func.owner_decl;
const fn_decl = mod.declPtr(fn_decl_index);
try mod.markDeclAlive(fn_decl);
const function_index = try o.resolveLlvmFunction(fn_decl_index);
return function_index.ptrConst(&o.builder).global.toConst();
},
@@ -4262,8 +4257,7 @@ pub const Object = struct {
fn lowerParentPtrDecl(o: *Object, decl_index: InternPool.DeclIndex) Allocator.Error!Builder.Constant {
const mod = o.module;
const decl = mod.declPtr(decl_index);
try mod.markDeclAlive(decl);
const ptr_ty = try mod.singleMutPtrType(decl.ty);
const ptr_ty = try mod.singleMutPtrType(decl.typeOf(mod));
return o.lowerDeclRefValue(ptr_ty, decl_index);
}
 
@@ -4450,11 +4444,10 @@ pub const Object = struct {
}
}
 
const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn;
if ((!is_fn_body and !decl.ty.hasRuntimeBits(mod)) or
(is_fn_body and mod.typeToFunc(decl.ty).?.is_generic)) return o.lowerPtrToVoid(ty);
 
try mod.markDeclAlive(decl);
const decl_ty = decl.typeOf(mod);
const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn;
if ((!is_fn_body and !decl_ty.hasRuntimeBits(mod)) or
(is_fn_body and mod.typeToFunc(decl_ty).?.is_generic)) return o.lowerPtrToVoid(ty);
 
const llvm_global = if (is_fn_body)
(try o.resolveLlvmFunction(decl_index)).ptrConst(&o.builder).global
@@ -4740,7 +4733,7 @@ pub const DeclGen = struct {
debug_file, // File
debug_file, // Scope
line_number,
try o.lowerDebugType(decl.ty),
try o.lowerDebugType(decl.typeOf(zcu)),
variable_index,
.{ .local = is_internal_linkage },
);
@@ -4829,19 +4822,17 @@ pub const FuncGen = struct {
 
const o = self.dg.object;
const mod = o.module;
const llvm_val = try self.resolveValue(.{
.ty = self.typeOf(inst),
.val = (try self.air.value(inst, mod)).?,
});
const llvm_val = try self.resolveValue((try self.air.value(inst, mod)).?);
gop.value_ptr.* = llvm_val.toValue();
return llvm_val.toValue();
}
 
fn resolveValue(self: *FuncGen, tv: TypedValue) Error!Builder.Constant {
fn resolveValue(self: *FuncGen, val: Value) Error!Builder.Constant {
const o = self.dg.object;
const mod = o.module;
const llvm_val = try o.lowerValue(tv.val.toIntern());
if (!isByRef(tv.ty, mod)) return llvm_val;
const ty = val.typeOf(mod);
const llvm_val = try o.lowerValue(val.toIntern());
if (!isByRef(ty, mod)) return llvm_val;
 
// We have an LLVM value but we need to create a global constant and
// set the value as its initializer, and then return a pointer to the global.
@@ -4855,7 +4846,7 @@ pub const FuncGen = struct {
variable_index.setLinkage(.private, &o.builder);
variable_index.setMutability(.constant, &o.builder);
variable_index.setUnnamedAddr(.unnamed_addr, &o.builder);
variable_index.setAlignment(tv.ty.abiAlignment(mod).toLlvm(), &o.builder);
variable_index.setAlignment(ty.abiAlignment(mod).toLlvm(), &o.builder);
return o.builder.convConst(
.unneeded,
variable_index.toConst(&o.builder),
@@ -4867,11 +4858,10 @@ pub const FuncGen = struct {
const o = self.dg.object;
const mod = o.module;
if (o.null_opt_usize == .no_init) {
const ty = try mod.intern(.{ .opt_type = .usize_type });
o.null_opt_usize = try self.resolveValue(.{
.ty = Type.fromInterned(ty),
.val = Value.fromInterned((try mod.intern(.{ .opt = .{ .ty = ty, .val = .none } }))),
});
o.null_opt_usize = try self.resolveValue(Value.fromInterned(try mod.intern(.{ .opt = .{
.ty = try mod.intern(.{ .opt_type = .usize_type }),
.val = .none,
} })));
}
return o.null_opt_usize;
}
@@ -5530,8 +5520,8 @@ pub const FuncGen = struct {
const mod = o.module;
const msg_decl_index = mod.panic_messages[@intFromEnum(panic_id)].unwrap().?;
const msg_decl = mod.declPtr(msg_decl_index);
const msg_len = msg_decl.ty.childType(mod).arrayLen(mod);
const msg_ptr = try o.lowerValue(try msg_decl.internValue(mod));
const msg_len = msg_decl.typeOf(mod).childType(mod).arrayLen(mod);
const msg_ptr = try o.lowerValue(msg_decl.val.toIntern());
const null_opt_addr_global = try fg.resolveNullOptUsize();
const target = mod.getTarget();
const llvm_usize = try o.lowerType(Type.usize);
@@ -5544,7 +5534,7 @@ pub const FuncGen = struct {
// )
const panic_func = mod.funcInfo(mod.panic_func_index);
const panic_decl = mod.declPtr(panic_func.owner_decl);
const fn_info = mod.typeToFunc(panic_decl.ty).?;
const fn_info = mod.typeToFunc(panic_decl.typeOf(mod)).?;
const panic_global = try o.resolveLlvmFunction(panic_func.owner_decl);
_ = try fg.wip.call(
.normal,
@@ -5612,7 +5602,7 @@ pub const FuncGen = struct {
_ = try self.wip.retVoid();
return .none;
}
const fn_info = mod.typeToFunc(self.dg.decl.ty).?;
const fn_info = mod.typeToFunc(self.dg.decl.typeOf(mod)).?;
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
if (Type.fromInterned(fn_info.return_type).isError(mod)) {
// Functions with an empty error set are emitted with an error code
@@ -5674,7 +5664,7 @@ pub const FuncGen = struct {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const ptr_ty = self.typeOf(un_op);
const ret_ty = ptr_ty.childType(mod);
const fn_info = mod.typeToFunc(self.dg.decl.ty).?;
const fn_info = mod.typeToFunc(self.dg.decl.typeOf(mod)).?;
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
if (Type.fromInterned(fn_info.return_type).isError(mod)) {
// Functions with an empty error set are emitted with an error code
@@ -10067,10 +10057,7 @@ pub const FuncGen = struct {
const elem_ptr = try self.wip.gep(.inbounds, llvm_result_ty, alloca_inst, &.{
usize_zero, try o.builder.intValue(llvm_usize, array_info.len),
}, "");
const llvm_elem = try self.resolveValue(.{
.ty = array_info.elem_type,
.val = sent_val,
});
const llvm_elem = try self.resolveValue(sent_val);
try self.store(elem_ptr, elem_ptr_ty, llvm_elem.toValue(), .none);
}
 
 
src/codegen/spirv.zig added: 2056, removed: 2746, total 0
@@ -255,7 +255,6 @@ pub const Object = struct {
pub fn resolveDecl(self: *Object, mod: *Module, decl_index: InternPool.DeclIndex) !SpvModule.Decl.Index {
const decl = mod.declPtr(decl_index);
assert(decl.has_tv); // TODO: Do we need to handle a situation where this is false?
try mod.markDeclAlive(decl);
 
const entry = try self.decl_link.getOrPut(self.gpa, decl_index);
if (!entry.found_existing) {
@@ -861,7 +860,7 @@ const DeclGen = struct {
 
const val = arg_val;
 
log.debug("constant: ty = {}, val = {}", .{ ty.fmt(mod), val.fmtValue(ty, mod) });
log.debug("constant: ty = {}, val = {}", .{ ty.fmt(mod), val.fmtValue(mod) });
if (val.isUndefDeep(mod)) {
return self.spv.constUndef(result_ty_ref);
}
@@ -1221,7 +1220,7 @@ const DeclGen = struct {
else => {},
}
 
if (!decl.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
if (!decl.typeOf(mod).isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
// Pointer to nothing - return undefined.
return self.spv.constUndef(ty_ref);
}
@@ -1237,7 +1236,7 @@ const DeclGen = struct {
const final_storage_class = self.spvStorageClass(decl.@"addrspace");
try self.addFunctionDep(spv_decl_index, final_storage_class);
 
const decl_ptr_ty_ref = try self.ptrType(decl.ty, final_storage_class);
const decl_ptr_ty_ref = try self.ptrType(decl.typeOf(mod), final_storage_class);
 
const ptr_id = switch (final_storage_class) {
.Generic => try self.castToGeneric(self.typeId(decl_ptr_ty_ref), decl_id),
@@ -2044,11 +2043,11 @@ const DeclGen = struct {
 
switch (self.spv.declPtr(spv_decl_index).kind) {
.func => {
assert(decl.ty.zigTypeTag(mod) == .Fn);
const fn_info = mod.typeToFunc(decl.ty).?;
assert(decl.typeOf(mod).zigTypeTag(mod) == .Fn);
const fn_info = mod.typeToFunc(decl.typeOf(mod)).?;
const return_ty_ref = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type));
 
const prototype_ty_ref = try self.resolveType(decl.ty, .direct);
const prototype_ty_ref = try self.resolveType(decl.typeOf(mod), .direct);
try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
.id_result_type = self.typeId(return_ty_ref),
.id_result = result_id,
@@ -2121,7 +2120,7 @@ const DeclGen = struct {
const final_storage_class = self.spvStorageClass(decl.@"addrspace");
assert(final_storage_class != .Generic); // These should be instance globals
 
const ptr_ty_ref = try self.ptrType(decl.ty, final_storage_class);
const ptr_ty_ref = try self.ptrType(decl.typeOf(mod), final_storage_class);
 
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpVariable, .{
.id_result_type = self.typeId(ptr_ty_ref),
@@ -2144,7 +2143,7 @@ const DeclGen = struct {
 
try self.spv.declareDeclDeps(spv_decl_index, &.{});
 
const ptr_ty_ref = try self.ptrType(decl.ty, .Function);
const ptr_ty_ref = try self.ptrType(decl.typeOf(mod), .Function);
 
if (maybe_init_val) |init_val| {
// TODO: Combine with resolveAnonDecl?
@@ -2168,7 +2167,7 @@ const DeclGen = struct {
});
self.current_block_label = root_block_id;
 
const val_id = try self.constant(decl.ty, init_val, .indirect);
const val_id = try self.constant(decl.typeOf(mod), init_val, .indirect);
try self.func.body.emit(self.spv.gpa, .OpStore, .{
.pointer = result_id,
.object = val_id,
@@ -4785,7 +4784,7 @@ const DeclGen = struct {
const mod = self.module;
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const decl = mod.declPtr(self.decl_index);
const fn_info = mod.typeToFunc(decl.ty).?;
const fn_info = mod.typeToFunc(decl.typeOf(mod)).?;
if (Type.fromInterned(fn_info.return_type).isError(mod)) {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
@@ -4810,7 +4809,7 @@ const DeclGen = struct {
 
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const decl = mod.declPtr(self.decl_index);
const fn_info = mod.typeToFunc(decl.ty).?;
const fn_info = mod.typeToFunc(decl.typeOf(mod)).?;
if (Type.fromInterned(fn_info.return_type).isError(mod)) {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
 
src/link.zig added: 2056, removed: 2746, total 0
@@ -17,7 +17,7 @@ const Liveness = @import("Liveness.zig");
const Module = @import("Module.zig");
const InternPool = @import("InternPool.zig");
const Type = @import("type.zig").Type;
const TypedValue = @import("TypedValue.zig");
const Value = @import("Value.zig");
const LlvmObject = @import("codegen/llvm.zig").Object;
 
/// When adding a new field, remember to update `hashAddSystemLibs`.
@@ -376,14 +376,14 @@ pub const File = struct {
/// Called from within the CodeGen to lower a local variable instantion as an unnamed
/// constant. Returns the symbol index of the lowered constant in the read-only section
/// of the final binary.
pub fn lowerUnnamedConst(base: *File, tv: TypedValue, decl_index: InternPool.DeclIndex) UpdateDeclError!u32 {
pub fn lowerUnnamedConst(base: *File, val: Value, decl_index: InternPool.DeclIndex) UpdateDeclError!u32 {
if (build_options.only_c) @compileError("unreachable");
switch (base.tag) {
.spirv => unreachable,
.c => unreachable,
.nvptx => unreachable,
inline else => |t| {
return @fieldParentPtr(t.Type(), "base", base).lowerUnnamedConst(tv, decl_index);
return @fieldParentPtr(t.Type(), "base", base).lowerUnnamedConst(val, decl_index);
},
}
}
 
src/link/C.zig added: 2056, removed: 2746, total 0
@@ -209,7 +209,7 @@ pub fn updateFunc(
.module = module,
.error_msg = null,
.pass = .{ .decl = decl_index },
.is_naked_fn = decl.ty.fnCallingConvention(module) == .Naked,
.is_naked_fn = decl.typeOf(module).fnCallingConvention(module) == .Naked,
.fwd_decl = fwd_decl.toManaged(gpa),
.ctypes = ctypes.*,
.anon_decl_deps = self.anon_decls,
@@ -283,13 +283,9 @@ fn updateAnonDecl(self: *C, module: *Module, i: usize) !void {
code.* = object.code.moveToUnmanaged();
}
 
const tv: @import("../TypedValue.zig") = .{
.ty = Type.fromInterned(module.intern_pool.typeOf(anon_decl)),
.val = Value.fromInterned(anon_decl),
};
const c_value: codegen.CValue = .{ .constant = anon_decl };
const alignment: Alignment = self.aligned_anon_decls.get(anon_decl) orelse .none;
codegen.genDeclValue(&object, tv, false, c_value, alignment, .none) catch |err| switch (err) {
codegen.genDeclValue(&object, Value.fromInterned(anon_decl), false, c_value, alignment, .none) catch |err| switch (err) {
error.AnalysisFail => {
@panic("TODO: C backend AnalysisFail on anonymous decl");
//try module.failed_decls.put(gpa, decl_index, object.dg.error_msg.?);
 
src/link/Coff.zig added: 2056, removed: 2746, total 0
@@ -1167,7 +1167,7 @@ pub fn updateFunc(self: *Coff, mod: *Module, func_index: InternPool.Index, air:
return self.updateExports(mod, .{ .decl_index = decl_index }, mod.getDeclExports(decl_index));
}
 
pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: InternPool.DeclIndex) !u32 {
pub fn lowerUnnamedConst(self: *Coff, val: Value, decl_index: InternPool.DeclIndex) !u32 {
const gpa = self.base.comp.gpa;
const mod = self.base.comp.module.?;
const decl = mod.declPtr(decl_index);
@@ -1180,7 +1180,8 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: InternPool.Dec
const index = unnamed_consts.items.len;
const sym_name = try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
defer gpa.free(sym_name);
const atom_index = switch (try self.lowerConst(sym_name, tv, tv.ty.abiAlignment(mod), self.rdata_section_index.?, decl.srcLoc(mod))) {
const ty = val.typeOf(mod);
const atom_index = switch (try self.lowerConst(sym_name, val, ty.abiAlignment(mod), self.rdata_section_index.?, decl.srcLoc(mod))) {
.ok => |atom_index| atom_index,
.fail => |em| {
decl.analysis = .codegen_failure;
@@ -1198,7 +1199,7 @@ const LowerConstResult = union(enum) {
fail: *Module.ErrorMsg,
};
 
fn lowerConst(self: *Coff, name: []const u8, tv: TypedValue, required_alignment: InternPool.Alignment, sect_id: u16, src_loc: Module.SrcLoc) !LowerConstResult {
fn lowerConst(self: *Coff, name: []const u8, val: Value, required_alignment: InternPool.Alignment, sect_id: u16, src_loc: Module.SrcLoc) !LowerConstResult {
const gpa = self.base.comp.gpa;
 
var code_buffer = std.ArrayList(u8).init(gpa);
@@ -1209,7 +1210,7 @@ fn lowerConst(self: *Coff, name: []const u8, tv: TypedValue, required_alignment:
try self.setSymbolName(sym, name);
sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_id + 1));
 
const res = try codegen.generateSymbol(&self.base, src_loc, tv, &code_buffer, .none, .{
const res = try codegen.generateSymbol(&self.base, src_loc, val, &code_buffer, .none, .{
.parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
});
const code = switch (res) {
@@ -1271,10 +1272,7 @@ pub fn updateDecl(
defer code_buffer.deinit();
 
const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none, .{
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), decl_val, &code_buffer, .none, .{
.parent_atom_index = atom.getSymbolIndex().?,
});
const code = switch (res) {
@@ -1399,8 +1397,8 @@ pub fn getOrCreateAtomForDecl(self: *Coff, decl_index: InternPool.DeclIndex) !At
 
fn getDeclOutputSection(self: *Coff, decl_index: InternPool.DeclIndex) u16 {
const decl = self.base.comp.module.?.declPtr(decl_index);
const ty = decl.ty;
const mod = self.base.comp.module.?;
const ty = decl.typeOf(mod);
const zig_ty = ty.zigTypeTag(mod);
const val = decl.val;
const index: u16 = blk: {
@@ -1535,7 +1533,7 @@ pub fn updateExports(
.x86 => std.builtin.CallingConvention.Stdcall,
else => std.builtin.CallingConvention.C,
};
const decl_cc = exported_decl.ty.fnCallingConvention(mod);
const decl_cc = exported_decl.typeOf(mod).fnCallingConvention(mod);
if (decl_cc == .C and ip.stringEqlSlice(exp.opts.name, "main") and
comp.config.link_libc)
{
@@ -1887,14 +1885,13 @@ pub fn lowerAnonDecl(
}
 
const val = Value.fromInterned(decl_val);
const tv = TypedValue{ .ty = ty, .val = val };
var name_buf: [32]u8 = undefined;
const name = std.fmt.bufPrint(&name_buf, "__anon_{d}", .{
@intFromEnum(decl_val),
}) catch unreachable;
const res = self.lowerConst(
name,
tv,
val,
decl_alignment,
self.rdata_section_index.?,
src_loc,
@@ -2754,7 +2751,6 @@ const TableSection = @import("table_section.zig").TableSection;
const StringTable = @import("StringTable.zig");
const Type = @import("../type.zig").Type;
const Value = @import("../Value.zig");
const TypedValue = @import("../TypedValue.zig");
 
pub const base_tag: link.File.Tag = .coff;
 
 
src/link/Dwarf.zig added: 2056, removed: 2746, total 0
@@ -1109,7 +1109,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclInde
 
assert(decl.has_tv);
 
switch (decl.ty.zigTypeTag(mod)) {
switch (decl.typeOf(mod).zigTypeTag(mod)) {
.Fn => {
_ = try self.getOrCreateAtomForDecl(.src_fn, decl_index);
 
@@ -1162,7 +1162,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclInde
try dbg_info_buffer.ensureUnusedCapacity(1 + ptr_width_bytes + 4 + 4 +
(decl_name_slice.len + 1) + (decl_linkage_name_slice.len + 1));
 
const fn_ret_type = decl.ty.fnReturnType(mod);
const fn_ret_type = decl.typeOf(mod).fnReturnType(mod);
const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(mod);
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(
@as(AbbrevCode, if (fn_ret_has_bits) .subprogram else .subprogram_retvoid),
@@ -1215,7 +1215,7 @@ pub fn commitDeclState(
var dbg_info_buffer = &decl_state.dbg_info;
 
assert(decl.has_tv);
switch (decl.ty.zigTypeTag(zcu)) {
switch (decl.typeOf(zcu).zigTypeTag(zcu)) {
.Fn => {
try decl_state.setInlineFunc(decl.val.toIntern());
 
 
src/link/Elf.zig added: 2056, removed: 2746, total 0
@@ -3039,8 +3039,8 @@ pub fn updateDecl(
return self.zigObjectPtr().?.updateDecl(self, mod, decl_index);
}
 
pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: InternPool.DeclIndex) !u32 {
return self.zigObjectPtr().?.lowerUnnamedConst(self, typed_value, decl_index);
pub fn lowerUnnamedConst(self: *Elf, val: Value, decl_index: InternPool.DeclIndex) !u32 {
return self.zigObjectPtr().?.lowerUnnamedConst(self, val, decl_index);
}
 
pub fn updateExports(
@@ -6260,7 +6260,7 @@ const SharedObject = @import("Elf/SharedObject.zig");
const Symbol = @import("Elf/Symbol.zig");
const StringTable = @import("StringTable.zig");
const Thunk = thunks.Thunk;
const TypedValue = @import("../TypedValue.zig");
const Value = @import("../Value.zig");
const VerneedSection = synthetic_sections.VerneedSection;
const ZigGotSection = synthetic_sections.ZigGotSection;
const ZigObject = @import("Elf/ZigObject.zig");
 
src/link/Elf/ZigObject.zig added: 2056, removed: 2746, total 0
@@ -702,7 +702,6 @@ pub fn lowerAnonDecl(
}
 
const val = Value.fromInterned(decl_val);
const tv = TypedValue{ .ty = ty, .val = val };
var name_buf: [32]u8 = undefined;
const name = std.fmt.bufPrint(&name_buf, "__anon_{d}", .{
@intFromEnum(decl_val),
@@ -710,7 +709,7 @@ pub fn lowerAnonDecl(
const res = self.lowerConst(
elf_file,
name,
tv,
val,
decl_alignment,
elf_file.zig_data_rel_ro_section_index.?,
src_loc,
@@ -846,7 +845,7 @@ fn getDeclShdrIndex(
_ = self;
const mod = elf_file.base.comp.module.?;
const any_non_single_threaded = elf_file.base.comp.config.any_non_single_threaded;
const shdr_index = switch (decl.ty.zigTypeTag(mod)) {
const shdr_index = switch (decl.typeOf(mod).zigTypeTag(mod)) {
.Fn => elf_file.zig_text_section_index.?,
else => blk: {
if (decl.getOwnedVariable(mod)) |variable| {
@@ -1157,19 +1156,13 @@ pub fn updateDecl(
// TODO implement .debug_info for global variables
const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
const res = if (decl_state) |*ds|
try codegen.generateSymbol(&elf_file.base, decl.srcLoc(mod), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .{
try codegen.generateSymbol(&elf_file.base, decl.srcLoc(mod), decl_val, &code_buffer, .{
.dwarf = ds,
}, .{
.parent_atom_index = sym_index,
})
else
try codegen.generateSymbol(&elf_file.base, decl.srcLoc(mod), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none, .{
try codegen.generateSymbol(&elf_file.base, decl.srcLoc(mod), decl_val, &code_buffer, .none, .{
.parent_atom_index = sym_index,
});
 
@@ -1289,7 +1282,7 @@ fn updateLazySymbol(
pub fn lowerUnnamedConst(
self: *ZigObject,
elf_file: *Elf,
typed_value: TypedValue,
val: Value,
decl_index: InternPool.DeclIndex,
) !u32 {
const gpa = elf_file.base.comp.gpa;
@@ -1304,11 +1297,12 @@ pub fn lowerUnnamedConst(
const index = unnamed_consts.items.len;
const name = try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
defer gpa.free(name);
const ty = val.typeOf(mod);
const sym_index = switch (try self.lowerConst(
elf_file,
name,
typed_value,
typed_value.ty.abiAlignment(mod),
val,
ty.abiAlignment(mod),
elf_file.zig_data_rel_ro_section_index.?,
decl.srcLoc(mod),
)) {
@@ -1334,7 +1328,7 @@ fn lowerConst(
self: *ZigObject,
elf_file: *Elf,
name: []const u8,
tv: TypedValue,
val: Value,
required_alignment: InternPool.Alignment,
output_section_index: u32,
src_loc: Module.SrcLoc,
@@ -1346,7 +1340,7 @@ fn lowerConst(
 
const sym_index = try self.addAtom(elf_file);
 
const res = try codegen.generateSymbol(&elf_file.base, src_loc, tv, &code_buffer, .{
const res = try codegen.generateSymbol(&elf_file.base, src_loc, val, &code_buffer, .{
.none = {},
}, .{
.parent_atom_index = sym_index,
@@ -1657,5 +1651,4 @@ const Symbol = @import("Symbol.zig");
const StringTable = @import("../StringTable.zig");
const Type = @import("../../type.zig").Type;
const Value = @import("../../Value.zig");
const TypedValue = @import("../../TypedValue.zig");
const ZigObject = @This();
 
src/link/MachO.zig added: 2056, removed: 2746, total 0
@@ -3127,8 +3127,8 @@ pub fn updateFunc(self: *MachO, mod: *Module, func_index: InternPool.Index, air:
return self.getZigObject().?.updateFunc(self, mod, func_index, air, liveness);
}
 
pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: InternPool.DeclIndex) !u32 {
return self.getZigObject().?.lowerUnnamedConst(self, typed_value, decl_index);
pub fn lowerUnnamedConst(self: *MachO, val: Value, decl_index: InternPool.DeclIndex) !u32 {
return self.getZigObject().?.lowerUnnamedConst(self, val, decl_index);
}
 
pub fn updateDecl(self: *MachO, mod: *Module, decl_index: InternPool.DeclIndex) !void {
@@ -4689,7 +4689,7 @@ const StubsHelperSection = synthetic.StubsHelperSection;
const Symbol = @import("MachO/Symbol.zig");
const Thunk = thunks.Thunk;
const TlvPtrSection = synthetic.TlvPtrSection;
const TypedValue = @import("../TypedValue.zig");
const Value = @import("../Value.zig");
const UnwindInfo = @import("MachO/UnwindInfo.zig");
const WeakBindSection = synthetic.WeakBindSection;
const ZigGotSection = synthetic.ZigGotSection;
 
src/link/MachO/ZigObject.zig added: 2056, removed: 2746, total 0
@@ -567,8 +567,6 @@ pub fn lowerAnonDecl(
return .ok;
}
 
const val = Value.fromInterned(decl_val);
const tv = TypedValue{ .ty = ty, .val = val };
var name_buf: [32]u8 = undefined;
const name = std.fmt.bufPrint(&name_buf, "__anon_{d}", .{
@intFromEnum(decl_val),
@@ -576,7 +574,7 @@ pub fn lowerAnonDecl(
const res = self.lowerConst(
macho_file,
name,
tv,
Value.fromInterned(decl_val),
decl_alignment,
macho_file.zig_const_sect_index.?,
src_loc,
@@ -738,11 +736,7 @@ pub fn updateDecl(
 
const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
const dio: codegen.DebugInfoOutput = if (decl_state) |*ds| .{ .dwarf = ds } else .none;
const res =
try codegen.generateSymbol(&macho_file.base, decl.srcLoc(mod), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, dio, .{
const res = try codegen.generateSymbol(&macho_file.base, decl.srcLoc(mod), decl_val, &code_buffer, dio, .{
.parent_atom_index = sym_index,
});
 
@@ -1021,7 +1015,7 @@ fn getDeclOutputSection(
_ = self;
const mod = macho_file.base.comp.module.?;
const any_non_single_threaded = macho_file.base.comp.config.any_non_single_threaded;
const sect_id: u8 = switch (decl.ty.zigTypeTag(mod)) {
const sect_id: u8 = switch (decl.typeOf(mod).zigTypeTag(mod)) {
.Fn => macho_file.zig_text_sect_index.?,
else => blk: {
if (decl.getOwnedVariable(mod)) |variable| {
@@ -1068,7 +1062,7 @@ fn getDeclOutputSection(
pub fn lowerUnnamedConst(
self: *ZigObject,
macho_file: *MachO,
typed_value: TypedValue,
val: Value,
decl_index: InternPool.DeclIndex,
) !u32 {
const gpa = macho_file.base.comp.gpa;
@@ -1086,8 +1080,8 @@ pub fn lowerUnnamedConst(
const sym_index = switch (try self.lowerConst(
macho_file,
name,
typed_value,
typed_value.ty.abiAlignment(mod),
val,
val.typeOf(mod).abiAlignment(mod),
macho_file.zig_const_sect_index.?,
decl.srcLoc(mod),
)) {
@@ -1113,7 +1107,7 @@ fn lowerConst(
self: *ZigObject,
macho_file: *MachO,
name: []const u8,
tv: TypedValue,
val: Value,
required_alignment: Atom.Alignment,
output_section_index: u8,
src_loc: Module.SrcLoc,
@@ -1125,7 +1119,7 @@ fn lowerConst(
 
const sym_index = try self.addAtom(macho_file);
 
const res = try codegen.generateSymbol(&macho_file.base, src_loc, tv, &code_buffer, .{
const res = try codegen.generateSymbol(&macho_file.base, src_loc, val, &code_buffer, .{
.none = {},
}, .{
.parent_atom_index = sym_index,
@@ -1580,5 +1574,4 @@ const Symbol = @import("Symbol.zig");
const StringTable = @import("../StringTable.zig");
const Type = @import("../../type.zig").Type;
const Value = @import("../../Value.zig");
const TypedValue = @import("../../TypedValue.zig");
const ZigObject = @This();
 
src/link/Plan9.zig added: 2056, removed: 2746, total 0
@@ -15,7 +15,6 @@ const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
const Type = @import("../type.zig").Type;
const Value = @import("../Value.zig");
const TypedValue = @import("../TypedValue.zig");
 
const std = @import("std");
const builtin = @import("builtin");
@@ -177,7 +176,7 @@ pub const Atom = struct {
return if (self.code_ptr) |p| p[0..self.other.code_len] else blk: {
const decl_index = self.other.decl_index;
const decl = mod.declPtr(decl_index);
if (decl.ty.zigTypeTag(mod) == .Fn) {
if (decl.typeOf(mod).zigTypeTag(mod) == .Fn) {
const table = plan9.fn_decl_table.get(decl.getFileScope(mod)).?.functions;
const output = table.get(decl_index).?;
break :blk output.code;
@@ -463,7 +462,7 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air:
return self.updateFinish(decl_index);
}
 
pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: InternPool.DeclIndex) !u32 {
pub fn lowerUnnamedConst(self: *Plan9, val: Value, decl_index: InternPool.DeclIndex) !u32 {
const gpa = self.base.comp.gpa;
_ = try self.seeDecl(decl_index);
var code_buffer = std.ArrayList(u8).init(gpa);
@@ -500,7 +499,7 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: InternPool.De
};
self.syms.items[info.sym_index.?] = sym;
 
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), tv, &code_buffer, .{
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), val, &code_buffer, .{
.none = {},
}, .{
.parent_atom_index = new_atom_idx,
@@ -539,10 +538,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex)
defer code_buffer.deinit();
const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
// TODO we need the symbol index for symbol in the table of locals for the containing atom
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .{ .none = {} }, .{
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), decl_val, &code_buffer, .{ .none = {} }, .{
.parent_atom_index = @as(Atom.Index, @intCast(atom_idx)),
});
const code = switch (res) {
@@ -566,7 +562,7 @@ fn updateFinish(self: *Plan9, decl_index: InternPool.DeclIndex) !void {
const gpa = self.base.comp.gpa;
const mod = self.base.comp.module.?;
const decl = mod.declPtr(decl_index);
const is_fn = (decl.ty.zigTypeTag(mod) == .Fn);
const is_fn = (decl.typeOf(mod).zigTypeTag(mod) == .Fn);
const sym_t: aout.Sym.Type = if (is_fn) .t else .d;
 
const atom = self.getAtomPtr(self.decls.get(decl_index).?.index);
@@ -1545,11 +1541,8 @@ pub fn lowerAnonDecl(
// ...
const gpa = self.base.comp.gpa;
const gop = try self.anon_decls.getOrPut(gpa, decl_val);
const mod = self.base.comp.module.?;
if (!gop.found_existing) {
const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
const val = Value.fromInterned(decl_val);
const tv = TypedValue{ .ty = ty, .val = val };
const name = try std.fmt.allocPrint(gpa, "__anon_{d}", .{@intFromEnum(decl_val)});
 
const index = try self.createAtom();
@@ -1557,7 +1550,7 @@ pub fn lowerAnonDecl(
gop.value_ptr.* = index;
// we need to free name later
var code_buffer = std.ArrayList(u8).init(gpa);
const res = try codegen.generateSymbol(&self.base, src_loc, tv, &code_buffer, .{ .none = {} }, .{ .parent_atom_index = index });
const res = try codegen.generateSymbol(&self.base, src_loc, val, &code_buffer, .{ .none = {} }, .{ .parent_atom_index = index });
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| return .{ .fail = em },
 
src/link/SpirV.zig added: 2056, removed: 2746, total 0
@@ -163,7 +163,7 @@ pub fn updateExports(
if (decl.val.isFuncBody(mod)) {
const target = mod.getTarget();
const spv_decl_index = try self.object.resolveDecl(mod, decl_index);
const execution_model = switch (decl.ty.fnCallingConvention(mod)) {
const execution_model = switch (decl.typeOf(mod).fnCallingConvention(mod)) {
.Vertex => spec.ExecutionModel.Vertex,
.Fragment => spec.ExecutionModel.Fragment,
.Kernel => spec.ExecutionModel.Kernel,
 
src/link/Wasm.zig added: 2056, removed: 2746, total 0
@@ -32,7 +32,7 @@ const Module = @import("../Module.zig");
const Object = @import("Wasm/Object.zig");
const Symbol = @import("Wasm/Symbol.zig");
const Type = @import("../type.zig").Type;
const TypedValue = @import("../TypedValue.zig");
const Value = @import("../Value.zig");
const ZigObject = @import("Wasm/ZigObject.zig");
 
pub const Atom = @import("Wasm/Atom.zig");
@@ -1504,8 +1504,8 @@ fn getFunctionSignature(wasm: *const Wasm, loc: SymbolLoc) std.wasm.Type {
/// Lowers a constant typed value to a local symbol and atom.
/// Returns the symbol index of the local
/// The given `decl` is the parent decl who owns the constant.
pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: InternPool.DeclIndex) !u32 {
return wasm.zigObjectPtr().?.lowerUnnamedConst(wasm, tv, decl_index);
pub fn lowerUnnamedConst(wasm: *Wasm, val: Value, decl_index: InternPool.DeclIndex) !u32 {
return wasm.zigObjectPtr().?.lowerUnnamedConst(wasm, val, decl_index);
}
 
/// Returns the symbol index from a symbol of which its flag is set global,
 
src/link/Wasm/ZigObject.zig added: 2056, removed: 2746, total 0
@@ -270,7 +270,7 @@ pub fn updateDecl(
const res = try codegen.generateSymbol(
&wasm_file.base,
decl.srcLoc(mod),
.{ .ty = decl.ty, .val = val },
val,
&code_writer,
.none,
.{ .parent_atom_index = @intFromEnum(atom.sym_index) },
@@ -346,7 +346,7 @@ fn finishUpdateDecl(
try atom.code.appendSlice(gpa, code);
atom.size = @intCast(code.len);
 
switch (decl.ty.zigTypeTag(mod)) {
switch (decl.typeOf(mod).zigTypeTag(mod)) {
.Fn => {
sym.index = try zig_object.appendFunction(gpa, .{ .type_index = zig_object.atom_types.get(atom_index).? });
sym.tag = .function;
@@ -444,15 +444,12 @@ pub fn lowerAnonDecl(
const gpa = wasm_file.base.comp.gpa;
const gop = try zig_object.anon_decls.getOrPut(gpa, decl_val);
if (!gop.found_existing) {
const mod = wasm_file.base.comp.module.?;
const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
const tv: TypedValue = .{ .ty = ty, .val = Value.fromInterned(decl_val) };
var name_buf: [32]u8 = undefined;
const name = std.fmt.bufPrint(&name_buf, "__anon_{d}", .{
@intFromEnum(decl_val),
}) catch unreachable;
 
switch (try zig_object.lowerConst(wasm_file, name, tv, src_loc)) {
switch (try zig_object.lowerConst(wasm_file, name, Value.fromInterned(decl_val), src_loc)) {
.ok => |atom_index| zig_object.anon_decls.values()[gop.index] = atom_index,
.fail => |em| return .{ .fail = em },
}
@@ -472,10 +469,10 @@ pub fn lowerAnonDecl(
/// Lowers a constant typed value to a local symbol and atom.
/// Returns the symbol index of the local
/// The given `decl` is the parent decl who owns the constant.
pub fn lowerUnnamedConst(zig_object: *ZigObject, wasm_file: *Wasm, tv: TypedValue, decl_index: InternPool.DeclIndex) !u32 {
pub fn lowerUnnamedConst(zig_object: *ZigObject, wasm_file: *Wasm, val: Value, decl_index: InternPool.DeclIndex) !u32 {
const gpa = wasm_file.base.comp.gpa;
const mod = wasm_file.base.comp.module.?;
std.debug.assert(tv.ty.zigTypeTag(mod) != .Fn); // cannot create local symbols for functions
std.debug.assert(val.typeOf(mod).zigTypeTag(mod) != .Fn); // cannot create local symbols for functions
const decl = mod.declPtr(decl_index);
 
const parent_atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, decl_index);
@@ -487,7 +484,7 @@ pub fn lowerUnnamedConst(zig_object: *ZigObject, wasm_file: *Wasm, tv: TypedValu
});
defer gpa.free(name);
 
switch (try zig_object.lowerConst(wasm_file, name, tv, decl.srcLoc(mod))) {
switch (try zig_object.lowerConst(wasm_file, name, val, decl.srcLoc(mod))) {
.ok => |atom_index| {
try wasm_file.getAtomPtr(parent_atom_index).locals.append(gpa, atom_index);
return @intFromEnum(wasm_file.getAtom(atom_index).sym_index);
@@ -505,10 +502,12 @@ const LowerConstResult = union(enum) {
fail: *Module.ErrorMsg,
};
 
fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, tv: TypedValue, src_loc: Module.SrcLoc) !LowerConstResult {
fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: Value, src_loc: Module.SrcLoc) !LowerConstResult {
const gpa = wasm_file.base.comp.gpa;
const mod = wasm_file.base.comp.module.?;
 
const ty = val.typeOf(mod);
 
// Create and initialize a new local symbol and atom
const sym_index = try zig_object.allocateSymbol(gpa);
const atom_index = try wasm_file.createAtom(sym_index, zig_object.index);
@@ -517,7 +516,7 @@ fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, tv: Ty
 
const code = code: {
const atom = wasm_file.getAtomPtr(atom_index);
atom.alignment = tv.ty.abiAlignment(mod);
atom.alignment = ty.abiAlignment(mod);
const segment_name = try std.mem.concat(gpa, u8, &.{ ".rodata.", name });
errdefer gpa.free(segment_name);
zig_object.symbol(sym_index).* = .{
@@ -527,7 +526,7 @@ fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, tv: Ty
.index = try zig_object.createDataSegment(
gpa,
segment_name,
tv.ty.abiAlignment(mod),
ty.abiAlignment(mod),
),
.virtual_address = undefined,
};
@@ -535,7 +534,7 @@ fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, tv: Ty
const result = try codegen.generateSymbol(
&wasm_file.base,
src_loc,
tv,
val,
&value_bytes,
.none,
.{
@@ -764,7 +763,7 @@ pub fn getDeclVAddr(
const atom_index = wasm_file.symbol_atom.get(.{ .file = zig_object.index, .index = @enumFromInt(reloc_info.parent_atom_index) }).?;
const atom = wasm_file.getAtomPtr(atom_index);
const is_wasm32 = target.cpu.arch == .wasm32;
if (decl.ty.zigTypeTag(mod) == .Fn) {
if (decl.typeOf(mod).zigTypeTag(mod) == .Fn) {
std.debug.assert(reloc_info.addend == 0); // addend not allowed for function relocations
try atom.relocs.append(gpa, .{
.index = target_symbol_index,
@@ -964,7 +963,7 @@ pub fn freeDecl(zig_object: *ZigObject, wasm_file: *Wasm, decl_index: InternPool
if (sym.isGlobal()) {
std.debug.assert(zig_object.global_syms.remove(atom.sym_index));
}
switch (decl.ty.zigTypeTag(mod)) {
switch (decl.typeOf(mod).zigTypeTag(mod)) {
.Fn => {
zig_object.functions_free_list.append(gpa, sym.index) catch {};
std.debug.assert(zig_object.atom_types.remove(atom_index));
@@ -1242,7 +1241,6 @@ const Module = @import("../../Module.zig");
const StringTable = @import("../StringTable.zig");
const Symbol = @import("Symbol.zig");
const Type = @import("../../type.zig").Type;
const TypedValue = @import("../../TypedValue.zig");
const Value = @import("../../Value.zig");
const Wasm = @import("../Wasm.zig");
const ZigObject = @This();
 
filename was Deleted added: 2056, removed: 2746, total 0
@@ -0,0 +1,508 @@
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Zcu = @import("Module.zig");
const InternPool = @import("InternPool.zig");
const Type = @import("type.zig").Type;
const Value = @import("Value.zig");
 
/// We use a tagged union here because while it wastes a few bytes for some tags, having a fixed
/// size for the type makes the common `aggregate` representation more efficient.
/// For aggregates, the sentinel value, if any, *is* stored.
pub const MutableValue = union(enum) {
    /// An interned value.
    interned: InternPool.Index,
    /// An error union value which is a payload (not an error).
    eu_payload: SubValue,
    /// An optional value which is a payload (not `null`).
    opt_payload: SubValue,
    /// An aggregate consisting of a single repeated value.
    repeated: SubValue,
    /// An aggregate of `u8` consisting of "plain" bytes (no lazy or undefined elements).
    bytes: Bytes,
    /// An aggregate with arbitrary sub-values.
    aggregate: Aggregate,
    /// A slice, containing a pointer and length.
    slice: Slice,
    /// An instance of a union.
    un: Union,

    pub const SubValue = struct {
        ty: InternPool.Index,
        child: *MutableValue,
    };
    pub const Bytes = struct {
        ty: InternPool.Index,
        data: []u8,
    };
    pub const Aggregate = struct {
        ty: InternPool.Index,
        elems: []MutableValue,
    };
    pub const Slice = struct {
        ty: InternPool.Index,
        /// Must have the appropriate many-ptr type.
        /// TODO: we want this to be an `InternPool.Index`, but `Sema.beginComptimePtrMutation` doesn't support it.
        ptr: *MutableValue,
        /// Must be of type `usize`.
        /// TODO: we want this to be an `InternPool.Index`, but `Sema.beginComptimePtrMutation` doesn't support it.
        len: *MutableValue,
    };
    pub const Union = struct {
        ty: InternPool.Index,
        tag: InternPool.Index,
        payload: *MutableValue,
    };

    /// Recursively intern this value (and all sub-values), returning its `InternPool.Index`.
    pub fn intern(mv: MutableValue, zcu: *Zcu, arena: Allocator) Allocator.Error!InternPool.Index {
        const ip = &zcu.intern_pool;
        const gpa = zcu.gpa;
        return switch (mv) {
            .interned => |ip_index| ip_index,
            .eu_payload => |sv| try ip.get(gpa, .{ .error_union = .{
                .ty = sv.ty,
                .val = .{ .payload = try sv.child.intern(zcu, arena) },
            } }),
            .opt_payload => |sv| try ip.get(gpa, .{ .opt = .{
                .ty = sv.ty,
                .val = try sv.child.intern(zcu, arena),
            } }),
            .repeated => |sv| try ip.get(gpa, .{ .aggregate = .{
                .ty = sv.ty,
                .storage = .{ .repeated_elem = try sv.child.intern(zcu, arena) },
            } }),
            .bytes => |b| try ip.get(gpa, .{ .aggregate = .{
                .ty = b.ty,
                .storage = .{ .bytes = b.data },
            } }),
            .aggregate => |a| {
                const elems = try arena.alloc(InternPool.Index, a.elems.len);
                for (a.elems, elems) |mut_elem, *interned_elem| {
                    interned_elem.* = try mut_elem.intern(zcu, arena);
                }
                return ip.get(gpa, .{ .aggregate = .{
                    .ty = a.ty,
                    .storage = .{ .elems = elems },
                } });
            },
            .slice => |s| try ip.get(gpa, .{ .slice = .{
                .ty = s.ty,
                .ptr = try s.ptr.intern(zcu, arena),
                .len = try s.len.intern(zcu, arena),
            } }),
            .un => |u| try ip.get(gpa, .{ .un = .{
                .ty = u.ty,
                .tag = u.tag,
                .val = try u.payload.intern(zcu, arena),
            } }),
        };
    }

    /// Un-interns the top level of this `MutableValue`, if applicable.
    /// * Non-error error unions use `eu_payload`
    /// * Non-null optionals use `opt_payload`
    /// * Slices use `slice`
    /// * Unions use `un`
    /// * Aggregates use `repeated` or `bytes` or `aggregate`
    /// If `!allow_bytes`, the `bytes` representation will not be used.
    /// If `!allow_repeated`, the `repeated` representation will not be used.
    pub fn unintern(
        mv: *MutableValue,
        zcu: *Zcu,
        arena: Allocator,
        allow_bytes: bool,
        allow_repeated: bool,
    ) Allocator.Error!void {
        const ip = &zcu.intern_pool;
        const gpa = zcu.gpa;
        switch (mv.*) {
            .interned => |ip_index| switch (ip.indexToKey(ip_index)) {
                .opt => |opt| if (opt.val != .none) {
                    const mut_payload = try arena.create(MutableValue);
                    mut_payload.* = .{ .interned = opt.val };
                    mv.* = .{ .opt_payload = .{
                        .ty = opt.ty,
                        .child = mut_payload,
                    } };
                },
                .error_union => |eu| switch (eu.val) {
                    .err_name => {},
                    .payload => |payload| {
                        const mut_payload = try arena.create(MutableValue);
                        mut_payload.* = .{ .interned = payload };
                        mv.* = .{ .eu_payload = .{
                            .ty = eu.ty,
                            .child = mut_payload,
                        } };
                    },
                },
                .slice => |slice| {
                    const ptr = try arena.create(MutableValue);
                    const len = try arena.create(MutableValue);
                    ptr.* = .{ .interned = slice.ptr };
                    len.* = .{ .interned = slice.len };
                    mv.* = .{ .slice = .{
                        .ty = slice.ty,
                        .ptr = ptr,
                        .len = len,
                    } };
                },
                .un => |un| {
                    const payload = try arena.create(MutableValue);
                    payload.* = .{ .interned = un.val };
                    mv.* = .{ .un = .{
                        .ty = un.ty,
                        .tag = un.tag,
                        .payload = payload,
                    } };
                },
                .aggregate => |agg| switch (agg.storage) {
                    .bytes => |bytes| {
                        assert(bytes.len == ip.aggregateTypeLenIncludingSentinel(agg.ty));
                        assert(ip.childType(agg.ty) == .u8_type);
                        if (allow_bytes) {
                            // The caller may mutate `data`, so copy it out of the InternPool.
                            const arena_bytes = try arena.alloc(u8, bytes.len);
                            @memcpy(arena_bytes, bytes);
                            mv.* = .{ .bytes = .{
                                .ty = agg.ty,
                                .data = arena_bytes,
                            } };
                        } else {
                            const mut_elems = try arena.alloc(MutableValue, bytes.len);
                            for (bytes, mut_elems) |b, *mut_elem| {
                                mut_elem.* = .{ .interned = try ip.get(gpa, .{ .int = .{
                                    .ty = .u8_type,
                                    .storage = .{ .u64 = b },
                                } }) };
                            }
                            mv.* = .{ .aggregate = .{
                                .ty = agg.ty,
                                .elems = mut_elems,
                            } };
                        }
                    },
                    .elems => |elems| {
                        assert(elems.len == ip.aggregateTypeLenIncludingSentinel(agg.ty));
                        const mut_elems = try arena.alloc(MutableValue, elems.len);
                        for (elems, mut_elems) |interned_elem, *mut_elem| {
                            mut_elem.* = .{ .interned = interned_elem };
                        }
                        mv.* = .{ .aggregate = .{
                            .ty = agg.ty,
                            .elems = mut_elems,
                        } };
                    },
                    .repeated_elem => |val| {
                        if (allow_repeated) {
                            const repeated_val = try arena.create(MutableValue);
                            repeated_val.* = .{ .interned = val };
                            mv.* = .{ .repeated = .{
                                .ty = agg.ty,
                                .child = repeated_val,
                            } };
                        } else {
                            const len = ip.aggregateTypeLenIncludingSentinel(agg.ty);
                            const mut_elems = try arena.alloc(MutableValue, @intCast(len));
                            @memset(mut_elems, .{ .interned = val });
                            mv.* = .{ .aggregate = .{
                                .ty = agg.ty,
                                .elems = mut_elems,
                            } };
                        }
                    },
                },
                .undef => |ty_ip| switch (Type.fromInterned(ty_ip).zigTypeTag(zcu)) {
                    .Struct, .Array, .Vector => |type_tag| {
                        const ty = Type.fromInterned(ty_ip);
                        const opt_sent = ty.sentinel(zcu);
                        if (type_tag == .Struct or opt_sent != null or !allow_repeated) {
                            const len_no_sent = ip.aggregateTypeLen(ty_ip);
                            const elems = try arena.alloc(MutableValue, @intCast(len_no_sent + @intFromBool(opt_sent != null)));
                            switch (type_tag) {
                                .Array, .Vector => {
                                    const elem_ty = ip.childType(ty_ip);
                                    const undef_elem = try ip.get(gpa, .{ .undef = elem_ty });
                                    @memset(elems[0..@intCast(len_no_sent)], .{ .interned = undef_elem });
                                },
                                .Struct => for (elems[0..@intCast(len_no_sent)], 0..) |*mut_elem, i| {
                                    const field_ty = ty.structFieldType(i, zcu).toIntern();
                                    mut_elem.* = .{ .interned = try ip.get(gpa, .{ .undef = field_ty }) };
                                },
                                else => unreachable,
                            }
                            if (opt_sent) |s| elems[@intCast(len_no_sent)] = .{ .interned = s.toIntern() };
                            mv.* = .{ .aggregate = .{
                                .ty = ty_ip,
                                .elems = elems,
                            } };
                        } else {
                            const repeated_val = try arena.create(MutableValue);
                            repeated_val.* = .{
                                .interned = try ip.get(gpa, .{ .undef = ip.childType(ty_ip) }),
                            };
                            mv.* = .{ .repeated = .{
                                .ty = ty_ip,
                                .child = repeated_val,
                            } };
                        }
                    },
                    .Union => {
                        const payload = try arena.create(MutableValue);
                        // HACKHACK: this logic is silly, but Sema detects it and reverts the change where needed.
                        // See comment at the top of `Sema.beginComptimePtrMutationInner`.
                        payload.* = .{ .interned = .undef };
                        mv.* = .{ .un = .{
                            .ty = ty_ip,
                            .tag = .none,
                            .payload = payload,
                        } };
                    },
                    .Pointer => {
                        const ptr_ty = ip.indexToKey(ty_ip).ptr_type;
                        // Only slices have sub-values to expose; other pointers stay interned.
                        if (ptr_ty.flags.size != .Slice) return;
                        const ptr = try arena.create(MutableValue);
                        const len = try arena.create(MutableValue);
                        ptr.* = .{ .interned = try ip.get(gpa, .{ .undef = ip.slicePtrType(ty_ip) }) };
                        len.* = .{ .interned = try ip.get(gpa, .{ .undef = .usize_type }) };
                        mv.* = .{ .slice = .{
                            .ty = ty_ip,
                            .ptr = ptr,
                            .len = len,
                        } };
                    },
                    else => {},
                },
                else => {},
            },
            .bytes => |bytes| if (!allow_bytes) {
                const elems = try arena.alloc(MutableValue, bytes.data.len);
                for (bytes.data, elems) |byte, *interned_byte| {
                    interned_byte.* = .{ .interned = try ip.get(gpa, .{ .int = .{
                        .ty = .u8_type,
                        .storage = .{ .u64 = byte },
                    } }) };
                }
                mv.* = .{ .aggregate = .{
                    .ty = bytes.ty,
                    .elems = elems,
                } };
            },
            else => {},
        }
    }

    /// Get a pointer to the `MutableValue` associated with a field/element.
    /// The returned pointer can be safely mutated through to modify the field value.
    /// The returned pointer is valid until the representation of `mv` changes.
    /// This function does *not* support accessing the ptr/len field of slices.
    pub fn elem(
        mv: *MutableValue,
        zcu: *Zcu,
        arena: Allocator,
        field_idx: usize,
    ) Allocator.Error!*MutableValue {
        const ip = &zcu.intern_pool;
        const gpa = zcu.gpa;
        // Convert to the `aggregate` representation so we can hand out a stable element pointer.
        switch (mv.*) {
            .eu_payload, .opt_payload, .slice, .un => unreachable,
            .interned => {
                try mv.unintern(zcu, arena, false, false);
            },
            .bytes => |bytes| {
                const elems = try arena.alloc(MutableValue, bytes.data.len);
                for (bytes.data, elems) |byte, *mut_elem| {
                    mut_elem.* = .{ .interned = try ip.get(gpa, .{ .int = .{
                        .ty = .u8_type,
                        .storage = .{ .u64 = byte },
                    } }) };
                }
                mv.* = .{ .aggregate = .{
                    .ty = bytes.ty,
                    .elems = elems,
                } };
            },
            .repeated => |repeated| {
                const len = ip.aggregateTypeLenIncludingSentinel(repeated.ty);
                const elems = try arena.alloc(MutableValue, @intCast(len));
                @memset(elems, repeated.child.*);
                mv.* = .{ .aggregate = .{
                    .ty = repeated.ty,
                    .elems = elems,
                } };
            },
            .aggregate => {},
        }
        return &mv.aggregate.elems[field_idx];
    }

    /// Modify a single field of a `MutableValue` which represents an aggregate or slice, leaving others
    /// untouched. When an entire field must be modified, this should be used in preference to `elem`
    /// to allow for an optimal representation.
    /// For slices, uses `Value.slice_ptr_index` and `Value.slice_len_index`.
    pub fn setElem(
        mv: *MutableValue,
        zcu: *Zcu,
        arena: Allocator,
        field_idx: usize,
        field_val: MutableValue,
    ) Allocator.Error!void {
        const ip = &zcu.intern_pool;
        const is_trivial_int = field_val.isTrivialInt(zcu);
        // Only permit the `bytes` representation when the incoming value is a plain int.
        try mv.unintern(zcu, arena, is_trivial_int, true);
        switch (mv.*) {
            .interned,
            .eu_payload,
            .opt_payload,
            .un,
            => unreachable,
            .slice => |*s| switch (field_idx) {
                Value.slice_ptr_index => s.ptr.* = field_val,
                Value.slice_len_index => s.len.* = field_val,
                else => unreachable,
            },
            .bytes => |b| {
                assert(is_trivial_int);
                assert(field_val.typeOf(zcu).toIntern() == .u8_type);
                b.data[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu));
            },
            .repeated => |r| {
                if (field_val.eqlTrivial(r.child.*)) return;
                // We must switch to either the `aggregate` or the `bytes` representation.
                const len_inc_sent = ip.aggregateTypeLenIncludingSentinel(r.ty);
                if (Type.fromInterned(r.ty).zigTypeTag(zcu) != .Struct and
                    is_trivial_int and
                    Type.fromInterned(r.ty).childType(zcu).toIntern() == .u8_type and
                    r.child.isTrivialInt(zcu))
                {
                    // We can use the `bytes` representation.
                    const bytes = try arena.alloc(u8, @intCast(len_inc_sent));
                    const repeated_byte: u8 = @intCast(Value.fromInterned(r.child.interned).toUnsignedInt(zcu));
                    @memset(bytes, repeated_byte);
                    bytes[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu));
                    mv.* = .{ .bytes = .{
                        .ty = r.ty,
                        .data = bytes,
                    } };
                } else {
                    // We must use the `aggregate` representation.
                    const mut_elems = try arena.alloc(MutableValue, @intCast(len_inc_sent));
                    @memset(mut_elems, r.child.*);
                    mut_elems[field_idx] = field_val;
                    mv.* = .{ .aggregate = .{
                        .ty = r.ty,
                        .elems = mut_elems,
                    } };
                }
            },
            .aggregate => |a| {
                a.elems[field_idx] = field_val;
                const is_struct = Type.fromInterned(a.ty).zigTypeTag(zcu) == .Struct;
                // Attempt to switch to a more efficient representation.
                const is_repeated = for (a.elems) |e| {
                    if (!e.eqlTrivial(field_val)) break false;
                } else true;
                if (is_repeated) {
                    // Switch to `repeated` repr
                    const mut_repeated = try arena.create(MutableValue);
                    mut_repeated.* = field_val;
                    mv.* = .{ .repeated = .{
                        .ty = a.ty,
                        .child = mut_repeated,
                    } };
                } else if (!is_struct and is_trivial_int and Type.fromInterned(a.ty).childType(zcu).toIntern() == .u8_type) {
                    // See if we can switch to `bytes` repr: every element must be a
                    // non-lazy interned integer.
                    for (a.elems) |e| {
                        switch (e) {
                            else => break,
                            .interned => |ip_index| switch (ip.indexToKey(ip_index)) {
                                else => break,
                                .int => |int| switch (int.storage) {
                                    .u64, .i64, .big_int => {},
                                    .lazy_align, .lazy_size => break,
                                },
                            },
                        }
                    } else {
                        const bytes = try arena.alloc(u8, a.elems.len);
                        for (a.elems, bytes) |elem_val, *b| {
                            b.* = @intCast(Value.fromInterned(elem_val.interned).toUnsignedInt(zcu));
                        }
                        mv.* = .{ .bytes = .{
                            .ty = a.ty,
                            .data = bytes,
                        } };
                    }
                }
            },
        }
    }

    /// Get the value of a single field of a `MutableValue` which represents an aggregate or slice.
    /// For slices, uses `Value.slice_ptr_index` and `Value.slice_len_index`.
    pub fn getElem(
        mv: MutableValue,
        zcu: *Zcu,
        field_idx: usize,
    ) Allocator.Error!MutableValue {
        return switch (mv) {
            .eu_payload,
            .opt_payload,
            => unreachable,
            .interned => |ip_index| {
                const ty = Type.fromInterned(zcu.intern_pool.typeOf(ip_index));
                switch (ty.zigTypeTag(zcu)) {
                    .Array, .Vector => return .{ .interned = (try Value.fromInterned(ip_index).elemValue(zcu, field_idx)).toIntern() },
                    .Struct, .Union => return .{ .interned = (try Value.fromInterned(ip_index).fieldValue(zcu, field_idx)).toIntern() },
                    .Pointer => {
                        assert(ty.isSlice(zcu));
                        return switch (field_idx) {
                            Value.slice_ptr_index => .{ .interned = Value.fromInterned(ip_index).slicePtr(zcu).toIntern() },
                            Value.slice_len_index => .{ .interned = switch (zcu.intern_pool.indexToKey(ip_index)) {
                                .undef => try zcu.intern(.{ .undef = .usize_type }),
                                .slice => |s| s.len,
                                else => unreachable,
                            } },
                            else => unreachable,
                        };
                    },
                    else => unreachable,
                }
            },
            .un => |un| {
                // TODO assert the tag is correct
                return un.payload.*;
            },
            .slice => |s| switch (field_idx) {
                Value.slice_ptr_index => s.ptr.*,
                Value.slice_len_index => s.len.*,
                else => unreachable,
            },
            .bytes => |b| .{ .interned = try zcu.intern(.{ .int = .{
                .ty = .u8_type,
                .storage = .{ .u64 = b.data[field_idx] },
            } }) },
            .repeated => |r| r.child.*,
            .aggregate => |a| a.elems[field_idx],
        };
    }

    /// Whether this is an interned integer with non-lazy (`u64`/`i64`/`big_int`) storage.
    /// Such values can be stored directly in the `bytes` representation.
    fn isTrivialInt(mv: MutableValue, zcu: *Zcu) bool {
        return switch (mv) {
            else => false,
            .interned => |ip_index| switch (zcu.intern_pool.indexToKey(ip_index)) {
                else => false,
                .int => |int| switch (int.storage) {
                    .u64, .i64, .big_int => true,
                    .lazy_align, .lazy_size => false,
                },
            },
        };
    }

    /// The type of the value this `MutableValue` represents.
    pub fn typeOf(mv: MutableValue, zcu: *Zcu) Type {
        return switch (mv) {
            .interned => |ip_index| Type.fromInterned(zcu.intern_pool.typeOf(ip_index)),
            inline else => |x| Type.fromInterned(x.ty),
        };
    }
};
 
src/print_air.zig added: 2056, removed: 2746, total 0
@@ -951,7 +951,7 @@ const Writer = struct {
const ty = Type.fromInterned(mod.intern_pool.indexToKey(ip_index).typeOf());
try s.print("<{}, {}>", .{
ty.fmt(mod),
Value.fromInterned(ip_index).fmtValue(ty, mod),
Value.fromInterned(ip_index).fmtValue(mod),
});
} else {
return w.writeInstIndex(s, operand.toIndex().?, dies);
 
filename was Deleted added: 2056, removed: 2746, total 0
@@ -0,0 +1,354 @@
//! This type exists only for legacy purposes, and will be removed in the future.
//! It is a thin wrapper around a `Value` which also, redundantly, stores its `Type`.
 
const std = @import("std");
const Type = @import("type.zig").Type;
const Value = @import("Value.zig");
const Zcu = @import("Module.zig");
const Module = Zcu;
const Sema = @import("Sema.zig");
const InternPool = @import("InternPool.zig");
const Allocator = std.mem.Allocator;
const Target = std.Target;
 
const max_aggregate_items = 100;
const max_string_len = 256;
 
/// Bundles a value with its module so the pair can travel through `std.fmt`
/// as a single format argument (consumed by `format`).
const FormatContext = struct {
    val: Value,
    mod: *Module,
};
 
/// `std.fmt` adapter: prints `ctx.val` via `print` with a fixed recursion
/// depth of 3 and no `Sema` (so lazy values print as builtin calls).
/// Only the empty format specifier `{}` is accepted.
pub fn format(
    ctx: FormatContext,
    comptime fmt: []const u8,
    options: std.fmt.FormatOptions,
    writer: anytype,
) !void {
    comptime std.debug.assert(fmt.len == 0);
    _ = options;
    print(ctx.val, writer, 3, ctx.mod, null) catch |err| switch (err) {
        error.OutOfMemory => @panic("OOM"), // We're not allowed to return this from a format function
        error.ComptimeBreak, error.ComptimeReturn => unreachable,
        error.AnalysisFail, error.NeededSourceLocation => unreachable, // TODO: re-evaluate when we actually pass `opt_sema`
        else => |e| return e,
    };
}
 
/// Render `val` as Zig-like source syntax.
/// `level` bounds recursion depth: each nested `print`/`printPtr` call passes
/// `level - 1`, and at 0 nested content is elided (`...`, `@enumFromInt(...)`,
/// `.{ ... }`).
pub fn print(
    val: Value,
    writer: anytype,
    level: u8,
    mod: *Module,
    /// If this `Sema` is provided, we will recurse through pointers where possible to provide friendly output.
    opt_sema: ?*Sema,
) (@TypeOf(writer).Error || Module.CompileError)!void {
    const ip = &mod.intern_pool;
    switch (ip.indexToKey(val.toIntern())) {
        // `val` is itself a type; delegate to the type printer.
        .int_type,
        .ptr_type,
        .array_type,
        .vector_type,
        .opt_type,
        .anyframe_type,
        .error_union_type,
        .simple_type,
        .struct_type,
        .anon_struct_type,
        .union_type,
        .opaque_type,
        .enum_type,
        .func_type,
        .error_set_type,
        .inferred_error_set_type,
        => try Type.print(val.toType(), writer, mod),
        .undef => try writer.writeAll("undefined"),
        .simple_value => |simple_value| switch (simple_value) {
            .void => try writer.writeAll("{}"),
            .empty_struct => try writer.writeAll(".{}"),
            .generic_poison => try writer.writeAll("(generic poison)"),
            // Remaining simple values print as their tag name (e.g. `true`, `null`).
            else => try writer.writeAll(@tagName(simple_value)),
        },
        .variable => try writer.writeAll("(variable)"),
        .extern_func => |extern_func| try writer.print("(extern function '{}')", .{
            mod.declPtr(extern_func.decl).name.fmt(ip),
        }),
        .func => |func| try writer.print("(function '{}')", .{
            mod.declPtr(func.owner_decl).name.fmt(ip),
        }),
        .int => |int| switch (int.storage) {
            inline .u64, .i64, .big_int => |x| try writer.print("{}", .{x}),
            // Lazy values are resolved when a `Sema` is available; otherwise
            // print the builtin call that would produce them.
            .lazy_align => |ty| if (opt_sema) |sema| {
                const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar;
                try writer.print("{}", .{a.toByteUnits(0)});
            } else try writer.print("@alignOf({})", .{Type.fromInterned(ty).fmt(mod)}),
            .lazy_size => |ty| if (opt_sema) |sema| {
                const s = (try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar;
                try writer.print("{}", .{s});
            } else try writer.print("@sizeOf({})", .{Type.fromInterned(ty).fmt(mod)}),
        },
        .err => |err| try writer.print("error.{}", .{
            err.name.fmt(ip),
        }),
        .error_union => |error_union| switch (error_union.val) {
            .err_name => |err_name| try writer.print("error.{}", .{
                err_name.fmt(ip),
            }),
            .payload => |payload| try print(Value.fromInterned(payload), writer, level, mod, opt_sema),
        },
        .enum_literal => |enum_literal| try writer.print(".{}", .{
            enum_literal.fmt(ip),
        }),
        .enum_tag => |enum_tag| {
            // Prefer the named tag; fall back to `@enumFromInt(...)` for
            // values outside the declared names (non-exhaustive enums).
            const enum_type = ip.loadEnumType(val.typeOf(mod).toIntern());
            if (enum_type.tagValueIndex(ip, val.toIntern())) |tag_index| {
                return writer.print(".{i}", .{enum_type.names.get(ip)[tag_index].fmt(ip)});
            }
            if (level == 0) {
                return writer.writeAll("@enumFromInt(...)");
            }
            try writer.writeAll("@enumFromInt(");
            try print(Value.fromInterned(enum_tag.int), writer, level - 1, mod, opt_sema);
            try writer.writeAll(")");
        },
        .empty_enum_value => try writer.writeAll("(empty enum value)"),
        .float => |float| switch (float.storage) {
            inline else => |x| try writer.print("{d}", .{@as(f64, @floatCast(x))}),
        },
        .slice => |slice| {
            // Printed as `ptr[0..len]`.
            const print_contents = switch (ip.getBackingAddrTag(slice.ptr).?) {
                .field, .elem, .eu_payload, .opt_payload => unreachable,
                .anon_decl, .comptime_alloc, .comptime_field => true,
                .decl, .int => false,
            };
            if (print_contents) {
                // TODO: eventually we want to load the slice as an array with `opt_sema`, but that's
                // currently not possible without e.g. triggering compile errors.
            }
            try printPtr(slice.ptr, writer, false, false, 0, level, mod, opt_sema);
            try writer.writeAll("[0..");
            if (level == 0) {
                try writer.writeAll("(...)");
            } else {
                try print(Value.fromInterned(slice.len), writer, level - 1, mod, opt_sema);
            }
            try writer.writeAll("]");
        },
        .ptr => {
            const print_contents = switch (ip.getBackingAddrTag(val.toIntern()).?) {
                .field, .elem, .eu_payload, .opt_payload => unreachable,
                .anon_decl, .comptime_alloc, .comptime_field => true,
                .decl, .int => false,
            };
            if (print_contents) {
                // TODO: eventually we want to load the pointer with `opt_sema`, but that's
                // currently not possible without e.g. triggering compile errors.
            }
            try printPtr(val.toIntern(), writer, false, false, 0, level, mod, opt_sema);
        },
        .opt => |opt| switch (opt.val) {
            .none => try writer.writeAll("null"),
            // Non-null optionals print as their payload directly.
            else => |payload| try print(Value.fromInterned(payload), writer, level, mod, opt_sema),
        },
        .aggregate => |aggregate| try printAggregate(val, aggregate, writer, level, false, mod, opt_sema),
        .un => |un| {
            if (level == 0) {
                try writer.writeAll(".{ ... }");
                return;
            }
            if (un.tag == .none) {
                // Untagged representation: show the backing-integer reinterpretation.
                const backing_ty = try val.typeOf(mod).unionBackingType(mod);
                try writer.print("@bitCast(@as({}, ", .{backing_ty.fmt(mod)});
                try print(Value.fromInterned(un.val), writer, level - 1, mod, opt_sema);
                try writer.writeAll("))");
            } else {
                try writer.writeAll(".{ ");
                try print(Value.fromInterned(un.tag), writer, level - 1, mod, opt_sema);
                try writer.writeAll(" = ");
                try print(Value.fromInterned(un.val), writer, level - 1, mod, opt_sema);
                try writer.writeAll(" }");
            }
        },
        .memoized_call => unreachable,
    }
}
 
/// Render an aggregate value (struct, tuple, array, or vector) as Zig-like syntax.
/// When `is_ref` is set, the address-of form is printed (`&.{ ... }`, or a string
/// literal without the trailing `.*`), as used for pointers to anonymous decls.
/// Output is capped at `max_aggregate_items` elements/fields.
fn printAggregate(
    val: Value,
    aggregate: InternPool.Key.Aggregate,
    writer: anytype,
    level: u8,
    is_ref: bool,
    zcu: *Zcu,
    opt_sema: ?*Sema,
) (@TypeOf(writer).Error || Module.CompileError)!void {
    if (level == 0) {
        return writer.writeAll(".{ ... }");
    }
    const ip = &zcu.intern_pool;
    const ty = Type.fromInterned(aggregate.ty);
    switch (ty.zigTypeTag(zcu)) {
        .Struct => if (!ty.isTuple(zcu)) {
            // Named struct: `.{ .a = ..., .b = ... }`. Tuples fall through to
            // the positional path at the bottom.
            if (is_ref) try writer.writeByte('&');
            if (ty.structFieldCount(zcu) == 0) {
                return writer.writeAll(".{}");
            }
            try writer.writeAll(".{ ");
            const max_len = @min(ty.structFieldCount(zcu), max_aggregate_items);
            for (0..max_len) |i| {
                if (i != 0) try writer.writeAll(", ");
                const field_name = ty.structFieldName(@intCast(i), zcu).unwrap().?;
                try writer.print(".{i} = ", .{field_name.fmt(ip)});
                try print(try val.fieldValue(zcu, i), writer, level - 1, zcu, opt_sema);
            }
            try writer.writeAll(" }");
            return;
        },
        .Array => if (aggregate.storage == .bytes and aggregate.storage.bytes.len > 0) {
            // Byte-storage arrays print as string literals; a trailing 0 is
            // treated as a sentinel and not escaped into the literal.
            const skip_terminator = aggregate.storage.bytes[aggregate.storage.bytes.len - 1] == 0;
            const bytes = if (skip_terminator) b: {
                break :b aggregate.storage.bytes[0 .. aggregate.storage.bytes.len - 1];
            } else aggregate.storage.bytes;
            try writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)});
            if (!is_ref) try writer.writeAll(".*");
            return;
        } else if (ty.arrayLen(zcu) == 0) {
            if (is_ref) try writer.writeByte('&');
            return writer.writeAll(".{}");
        } else if (ty.arrayLen(zcu) == 1) one_byte_str: {
            // The repr isn't `bytes`, but we might still be able to print this as a string
            if (ty.childType(zcu).toIntern() != .u8_type) break :one_byte_str;
            const elem_val = Value.fromInterned(aggregate.storage.values()[0]);
            if (elem_val.isUndef(zcu)) break :one_byte_str;
            const byte = elem_val.toUnsignedInt(zcu);
            try writer.print("\"{}\"", .{std.zig.fmtEscapes(&.{@intCast(byte)})});
            if (!is_ref) try writer.writeAll(".*");
            return;
        },
        .Vector => if (ty.arrayLen(zcu) == 0) {
            if (is_ref) try writer.writeByte('&');
            return writer.writeAll(".{}");
        },
        else => unreachable,
    }

    // Positional path: tuples, non-string arrays, and non-empty vectors.
    const len = ty.arrayLen(zcu);

    if (is_ref) try writer.writeByte('&');
    try writer.writeAll(".{ ");

    const max_len = @min(len, max_aggregate_items);
    for (0..max_len) |i| {
        if (i != 0) try writer.writeAll(", ");
        try print(try val.fieldValue(zcu, i), writer, level - 1, zcu, opt_sema);
    }
    if (len > max_aggregate_items) {
        try writer.writeAll(", ...");
    }
    return writer.writeAll(" }");
}
 
/// Render the pointer value `ptr_val` as Zig-like syntax, e.g. `&foo.bar[3]`.
/// `leading_parens` open-parens are emitted at the innermost base of the chain
/// (the `.int`/`.undef` terminals, after any `&`), so that wrapping arms such as
/// `.eu_payload` can request one extra paren and close it after their suffix,
/// keeping output balanced. `force_type` wraps an integer base in `@as(T, ...)`;
/// `force_addrof` prefixes `&` on undefined/integer bases.
fn printPtr(
    ptr_val: InternPool.Index,
    writer: anytype,
    force_type: bool,
    force_addrof: bool,
    leading_parens: u32,
    level: u8,
    zcu: *Zcu,
    opt_sema: ?*Sema,
) (@TypeOf(writer).Error || Module.CompileError)!void {
    const ip = &zcu.intern_pool;
    const ptr = switch (ip.indexToKey(ptr_val)) {
        .undef => |ptr_ty| {
            if (force_addrof) try writer.writeAll("&");
            try writer.writeByteNTimes('(', leading_parens);
            try writer.print("@as({}, undefined)", .{Type.fromInterned(ptr_ty).fmt(zcu)});
            return;
        },
        .ptr => |ptr| ptr,
        else => unreachable,
    };
    if (level == 0) {
        return writer.writeAll("&...");
    }
    switch (ptr.addr) {
        .int => |int| {
            if (force_addrof) try writer.writeAll("&");
            try writer.writeByteNTimes('(', leading_parens);
            if (force_type) {
                try writer.print("@as({}, @ptrFromInt(", .{Type.fromInterned(ptr.ty).fmt(zcu)});
                try print(Value.fromInterned(int), writer, level - 1, zcu, opt_sema);
                try writer.writeAll("))");
            } else {
                try writer.writeAll("@ptrFromInt(");
                try print(Value.fromInterned(int), writer, level - 1, zcu, opt_sema);
                try writer.writeAll(")");
            }
        },
        .decl => |index| {
            try writer.writeAll("&");
            try zcu.declPtr(index).renderFullyQualifiedName(zcu, writer);
        },
        .comptime_alloc => try writer.writeAll("&(comptime alloc)"),
        .anon_decl => |anon| switch (ip.indexToKey(anon.val)) {
            // Aggregates print directly as `&.{ ... }` rather than via `&@as(T, ...)`.
            .aggregate => |aggregate| try printAggregate(
                Value.fromInterned(anon.val),
                aggregate,
                writer,
                level - 1,
                true,
                zcu,
                opt_sema,
            ),
            else => {
                const ty = Type.fromInterned(ip.typeOf(anon.val));
                try writer.print("&@as({}, ", .{ty.fmt(zcu)});
                try print(Value.fromInterned(anon.val), writer, level - 1, zcu, opt_sema);
                try writer.writeAll(")");
            },
        },
        .comptime_field => |val| {
            const ty = Type.fromInterned(ip.typeOf(val));
            try writer.print("&@as({}, ", .{ty.fmt(zcu)});
            try print(Value.fromInterned(val), writer, level - 1, zcu, opt_sema);
            try writer.writeAll(")");
        },
        // FIX: the error-union and optional unwrap renderings were swapped and
        // unbalanced. `catch unreachable` unwraps an error union and needs wrapping
        // parens — the open paren is emitted at the innermost base by passing
        // `leading_parens + 1`, and closed here after the suffix. Postfix `.?`
        // unwraps an optional and binds tightly, so it needs no parens.
        .eu_payload => |base| {
            try printPtr(base, writer, true, true, leading_parens + 1, level, zcu, opt_sema);
            try writer.writeAll(" catch unreachable)");
        },
        .opt_payload => |base| {
            try printPtr(base, writer, true, true, leading_parens, level, zcu, opt_sema);
            try writer.writeAll(".?");
        },
        .elem => |elem| {
            try printPtr(elem.base, writer, true, true, leading_parens, level, zcu, opt_sema);
            try writer.print("[{d}]", .{elem.index});
        },
        .field => |field| {
            try printPtr(field.base, writer, true, true, leading_parens, level, zcu, opt_sema);
            const base_ty = Type.fromInterned(ip.typeOf(field.base)).childType(zcu);
            switch (base_ty.zigTypeTag(zcu)) {
                .Struct => if (base_ty.isTuple(zcu)) {
                    // Tuple fields are accessed positionally.
                    try writer.print("[{d}]", .{field.index});
                } else {
                    const field_name = base_ty.structFieldName(@intCast(field.index), zcu).unwrap().?;
                    try writer.print(".{i}", .{field_name.fmt(ip)});
                },
                .Union => {
                    const tag_ty = base_ty.unionTagTypeHypothetical(zcu);
                    const field_name = tag_ty.enumFieldName(@intCast(field.index), zcu);
                    try writer.print(".{i}", .{field_name.fmt(ip)});
                },
                // Slice pointers expose exactly two pseudo-fields: `.ptr` and `.len`.
                .Pointer => switch (field.index) {
                    Value.slice_ptr_index => try writer.writeAll(".ptr"),
                    Value.slice_len_index => try writer.writeAll(".len"),
                    else => unreachable,
                },
                else => unreachable,
            }
        },
    }
}
 
src/type.zig added: 2056, removed: 2746, total 0
@@ -7,7 +7,6 @@ const Module = @import("Module.zig");
const Zcu = Module;
const log = std.log.scoped(.Type);
const target_util = @import("target.zig");
const TypedValue = @import("TypedValue.zig");
const Sema = @import("Sema.zig");
const InternPool = @import("InternPool.zig");
const Alignment = InternPool.Alignment;
@@ -188,8 +187,8 @@ pub const Type = struct {
 
if (info.sentinel != .none) switch (info.flags.size) {
.One, .C => unreachable,
.Many => try writer.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(Type.fromInterned(info.child), mod)}),
.Slice => try writer.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(Type.fromInterned(info.child), mod)}),
.Many => try writer.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod)}),
.Slice => try writer.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod)}),
} else switch (info.flags.size) {
.One => try writer.writeAll("*"),
.Many => try writer.writeAll("[*]"),
@@ -235,7 +234,7 @@ pub const Type = struct {
} else {
try writer.print("[{d}:{}]", .{
array_type.len,
Value.fromInterned(array_type.sentinel).fmtValue(Type.fromInterned(array_type.child), mod),
Value.fromInterned(array_type.sentinel).fmtValue(mod),
});
try print(Type.fromInterned(array_type.child), writer, mod);
}
@@ -353,7 +352,7 @@ pub const Type = struct {
try print(Type.fromInterned(field_ty), writer, mod);
 
if (val != .none) {
try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(Type.fromInterned(field_ty), mod)});
try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(mod)});
}
}
try writer.writeAll("}");
@@ -2481,7 +2480,7 @@ pub const Type = struct {
}
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
if (try field_ty.onePossibleValue(mod)) |field_opv| {
field_val.* = try field_opv.intern(field_ty, mod);
field_val.* = field_opv.toIntern();
} else return null;
}
 
 
test/behavior/basic.zig added: 2056, removed: 2746, total 0
@@ -693,31 +693,6 @@ test "string concatenation" {
try expect(b[len] == 0);
}
 
fn manyptrConcat(comptime s: [*:0]const u8) [*:0]const u8 {
return "very " ++ s;
}
 
test "comptime manyptr concatenation" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
const s = "epic";
const actual = manyptrConcat(s);
const expected = "very epic";
 
const len = mem.len(actual);
const len_with_null = len + 1;
{
var i: u32 = 0;
while (i < len_with_null) : (i += 1) {
try expect(actual[i] == expected[i]);
}
}
try expect(actual[len] == 0);
try expect(expected[len] == 0);
}
 
test "result location is optional inside error union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
 
test/cases/compile_errors/compile_log.zig added: 2056, removed: 2746, total 0
@@ -21,7 +21,7 @@ export fn baz() void {
//
// Compile Log Output:
// @as(*const [5:0]u8, "begin")
// @as(*const [1:0]u8, "a"), @as(i32, 12), @as(*const [1:0]u8, "b"), @as([]const u8, "hi")
// @as(*const [1:0]u8, "a"), @as(i32, 12), @as(*const [1:0]u8, "b"), @as([]const u8, "hi"[0..2])
// @as(*const [3:0]u8, "end")
// @as(comptime_int, 4)
// @as(*const [5:0]u8, "begin")
 
test/cases/compile_errors/compile_log_a_pointer_to_an_opaque_value.zig added: 2056, removed: 2746, total 0
@@ -9,4 +9,4 @@ export fn entry() void {
// :2:5: error: found compile log statement
//
// Compile Log Output:
// @as(*const anyopaque, (function 'entry'))
// @as(*const anyopaque, &tmp.entry)
 
test/cases/compile_errors/reify_type_for_exhaustive_enum_with_undefined_tag_type.zig added: 2056, removed: 2746, total 0
@@ -14,4 +14,4 @@ export fn entry() void {
// backend=stage2
// target=native
//
// :1:13: error: use of undefined value here causes undefined behavior
// :1:20: error: use of undefined value here causes undefined behavior
 
test/cases/compile_errors/reify_type_union_payload_is_undefined.zig added: 2056, removed: 2746, total 0
@@ -9,4 +9,4 @@ comptime {
// backend=stage2
// target=native
//
// :1:13: error: use of undefined value here causes undefined behavior
// :1:20: error: use of undefined value here causes undefined behavior
 
test/cases/compile_errors/reify_type_with_undefined.zig added: 2056, removed: 2746, total 0
@@ -28,6 +28,6 @@ comptime {
// backend=stage2
// target=native
//
// :2:9: error: use of undefined value here causes undefined behavior
// :5:9: error: use of undefined value here causes undefined behavior
// :17:9: error: use of undefined value here causes undefined behavior
// :2:16: error: use of undefined value here causes undefined behavior
// :5:16: error: use of undefined value here causes undefined behavior
// :17:16: error: use of undefined value here causes undefined behavior
 
test/cases/comptime_aggregate_print.zig added: 2056, removed: 2746, total 0
@@ -31,5 +31,5 @@ pub fn main() !void {}
// :20:5: error: found compile log statement
//
// Compile Log Output:
// @as([]i32, .{ (reinterpreted data) })
// @as([]i32, .{ (reinterpreted data) })
// @as([]i32, &(comptime alloc).buf[0..2])
// @as([]i32, &(comptime alloc).buf[0..2])