Andrew Kelley · parent 77abd3a9 23062a5b · 1fb23813
Merge pull request #19630 from mlugg/comptime-ptr-access-5

compiler: rework comptime pointer representation and access

lib/compiler/resinator/ico.zig
@@ -232,7 +232,7 @@ test "icon data size too small" {
try std.testing.expectError(error.ImpossibleDataSize, read(std.testing.allocator, fbs.reader(), data.len));
}
 
pub const ImageFormat = enum {
pub const ImageFormat = enum(u2) {
dib,
png,
riff,
@@ -272,7 +272,7 @@ pub const BitmapHeader = extern struct {
}
 
/// https://en.wikipedia.org/wiki/BMP_file_format#DIB_header_(bitmap_information_header)
pub const Version = enum {
pub const Version = enum(u3) {
unknown,
@"win2.0", // Windows 2.0 or later
@"nt3.1", // Windows NT, 3.1x or later
 
lib/docs/wasm/markdown/Document.zig
@@ -131,7 +131,7 @@ pub const Node = struct {
}
};
 
pub const TableCellAlignment = enum {
pub const TableCellAlignment = enum(u2) {
unset,
left,
center,
 
lib/std/net.zig
@@ -271,7 +271,7 @@ pub const Ip4Address = extern struct {
sa: posix.sockaddr.in,
 
pub fn parse(buf: []const u8, port: u16) IPv4ParseError!Ip4Address {
var result = Ip4Address{
var result: Ip4Address = .{
.sa = .{
.port = mem.nativeToBig(u16, port),
.addr = undefined,
 
src/InternPool.zig
@@ -565,7 +565,7 @@ pub const OptionalNullTerminatedString = enum(u32) {
/// * decl val (so that we can analyze the value lazily)
/// * decl ref (so that we can analyze the reference lazily)
pub const CaptureValue = packed struct(u32) {
tag: enum { @"comptime", runtime, decl_val, decl_ref },
tag: enum(u2) { @"comptime", runtime, decl_val, decl_ref },
idx: u30,
 
pub fn wrap(val: Unwrapped) CaptureValue {
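
A standalone sketch, not part of this diff: `CaptureValue` packs a 2-bit tag together with a 30-bit index into a single `u32`; the explicit `enum(u2)` spells out the tag's width in that layout. The test below (the `Packed` name is hypothetical, not from the compiler) only demonstrates the layout arithmetic.

const std = @import("std");

test "two-bit tag and 30-bit index share one u32" {
    const Packed = packed struct(u32) {
        tag: enum(u2) { a, b, c, d },
        idx: u30,
    };
    // 2 + 30 bits fill the backing u32 exactly.
    try std.testing.expectEqual(32, @bitSizeOf(Packed));
    const v: Packed = .{ .tag = .c, .idx = 123 };
    try std.testing.expectEqual(@as(u30, 123), v.idx);
}
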
@@ -1026,22 +1026,76 @@ pub const Key = union(enum) {
pub const Ptr = struct {
/// This is the pointer type, not the element type.
ty: Index,
/// The value of the address that the pointer points to.
addr: Addr,
/// The base address which this pointer is offset from.
base_addr: BaseAddr,
/// The offset of this pointer from `base_addr` in bytes.
byte_offset: u64,
 
pub const Addr = union(enum) {
const Tag = @typeInfo(Addr).Union.tag_type.?;
pub const BaseAddr = union(enum) {
const Tag = @typeInfo(BaseAddr).Union.tag_type.?;
 
/// Points to the value of a single `Decl`, which may be constant or a `variable`.
decl: DeclIndex,
 
/// Points to the value of a single comptime alloc stored in `Sema`.
comptime_alloc: ComptimeAllocIndex,
 
/// Points to a single unnamed constant value.
anon_decl: AnonDecl,
 
/// Points to a comptime field of a struct. Index is the field's value.
///
/// TODO: this exists because these fields are semantically mutable. We
/// should probably change the language so that this isn't the case.
comptime_field: Index,
int: Index,
 
/// A pointer with a fixed integer address, usually from `@ptrFromInt`.
///
/// The address is stored entirely by `byte_offset`, which will be positive
/// and in-range of a `usize`. The base address is, for all intents and purposes, 0.
int,
 
/// A pointer to the payload of an error union. Index is the error union pointer.
/// To ensure a canonical representation, the type of the base pointer must:
/// * be a one-pointer
/// * be `const`, `volatile` and `allowzero`
/// * have alignment 1
/// * have the same address space as this pointer
/// * have a host size, bit offset, and vector index of 0
/// See `Value.canonicalizeBasePtr` which enforces these properties.
eu_payload: Index,
 
/// A pointer to the payload of a non-pointer-like optional. Index is the
/// optional pointer. To ensure a canonical representation, the base
/// pointer is subject to the same restrictions as in `eu_payload`.
opt_payload: Index,
elem: BaseIndex,
 
/// A pointer to a field of a slice, or of an auto-layout struct or union. Slice fields
/// are referenced according to `Value.slice_ptr_index` and `Value.slice_len_index`.
/// Base is the aggregate pointer, which is subject to the same restrictions as
/// in `eu_payload`.
field: BaseIndex,
 
/// A pointer to an element of a comptime-only array. Base is the
/// many-pointer we are indexing into. It is subject to the same restrictions
/// as in `eu_payload`, except it must be a many-pointer rather than a one-pointer.
///
/// The element type of the base pointer must NOT be an array. Additionally, the
/// base pointer is guaranteed to not be an `arr_elem` into a pointer with the
/// same child type. Thus, since there are no two comptime-only types which are
/// IMC to one another, the only case where the base pointer may also be an
/// `arr_elem` is when this pointer is semantically invalid (e.g. it reinterprets
/// a `type` as a `comptime_int`). These restrictions are in place to ensure
/// a canonical representation.
///
/// This kind of base address differs from others in that it may refer to any
/// sequence of values; for instance, an `arr_elem` at index 2 may refer to
/// any number of elements starting from index 2.
///
/// Index must not be 0. To refer to the element at index 0, simply reinterpret
/// the aggregate pointer.
arr_elem: BaseIndex,
 
pub const MutDecl = struct {
decl: DeclIndex,
runtime_index: RuntimeIndex,
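
A standalone sketch, not part of this diff: the reworked `Key.Ptr` describes every comptime pointer as a base address plus a byte offset, instead of a chain of nested field/element tags. As a language-level analogy only (local names, not compiler internals), a field pointer decomposes the same way:

const std = @import("std");

test "a field pointer is its base address plus a byte offset" {
    const S = extern struct { a: u32, b: u16 };
    var s: S = .{ .a = 1, .b = 2 };
    s.a += 1; // keep `s` observably mutable at runtime
    const base = @intFromPtr(&s);
    const field = @intFromPtr(&s.b);
    try std.testing.expectEqual(base + @offsetOf(S, "b"), field);
}
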
@@ -1222,10 +1276,11 @@ pub const Key = union(enum) {
.ptr => |ptr| {
// Int-to-ptr pointers are hashed separately from decl-referencing pointers.
// This is sound due to pointer provenance rules.
const addr: @typeInfo(Key.Ptr.Addr).Union.tag_type.? = ptr.addr;
const seed2 = seed + @intFromEnum(addr);
const common = asBytes(&ptr.ty);
return switch (ptr.addr) {
const addr_tag: Key.Ptr.BaseAddr.Tag = ptr.base_addr;
const seed2 = seed + @intFromEnum(addr_tag);
const big_offset: i128 = ptr.byte_offset;
const common = asBytes(&ptr.ty) ++ asBytes(&big_offset);
return switch (ptr.base_addr) {
inline .decl,
.comptime_alloc,
.anon_decl,
@@ -1235,7 +1290,7 @@ pub const Key = union(enum) {
.comptime_field,
=> |x| Hash.hash(seed2, common ++ asBytes(&x)),
 
.elem, .field => |x| Hash.hash(
.arr_elem, .field => |x| Hash.hash(
seed2,
common ++ asBytes(&x.base) ++ asBytes(&x.index),
),
@@ -1494,21 +1549,21 @@ pub const Key = union(enum) {
.ptr => |a_info| {
const b_info = b.ptr;
if (a_info.ty != b_info.ty) return false;
if (a_info.byte_offset != b_info.byte_offset) return false;
 
const AddrTag = @typeInfo(Key.Ptr.Addr).Union.tag_type.?;
if (@as(AddrTag, a_info.addr) != @as(AddrTag, b_info.addr)) return false;
if (@as(Key.Ptr.BaseAddr.Tag, a_info.base_addr) != @as(Key.Ptr.BaseAddr.Tag, b_info.base_addr)) return false;
 
return switch (a_info.addr) {
.decl => |a_decl| a_decl == b_info.addr.decl,
.comptime_alloc => |a_alloc| a_alloc == b_info.addr.comptime_alloc,
.anon_decl => |ad| ad.val == b_info.addr.anon_decl.val and
ad.orig_ty == b_info.addr.anon_decl.orig_ty,
.int => |a_int| a_int == b_info.addr.int,
.eu_payload => |a_eu_payload| a_eu_payload == b_info.addr.eu_payload,
.opt_payload => |a_opt_payload| a_opt_payload == b_info.addr.opt_payload,
.comptime_field => |a_comptime_field| a_comptime_field == b_info.addr.comptime_field,
.elem => |a_elem| std.meta.eql(a_elem, b_info.addr.elem),
.field => |a_field| std.meta.eql(a_field, b_info.addr.field),
return switch (a_info.base_addr) {
.decl => |a_decl| a_decl == b_info.base_addr.decl,
.comptime_alloc => |a_alloc| a_alloc == b_info.base_addr.comptime_alloc,
.anon_decl => |ad| ad.val == b_info.base_addr.anon_decl.val and
ad.orig_ty == b_info.base_addr.anon_decl.orig_ty,
.int => true,
.eu_payload => |a_eu_payload| a_eu_payload == b_info.base_addr.eu_payload,
.opt_payload => |a_opt_payload| a_opt_payload == b_info.base_addr.opt_payload,
.comptime_field => |a_comptime_field| a_comptime_field == b_info.base_addr.comptime_field,
.arr_elem => |a_elem| std.meta.eql(a_elem, b_info.base_addr.arr_elem),
.field => |a_field| std.meta.eql(a_field, b_info.base_addr.field),
};
},
 
@@ -2271,6 +2326,46 @@ pub const LoadedStructType = struct {
.struct_type = s,
};
}
 
pub const ReverseRuntimeOrderIterator = struct {
ip: *InternPool,
last_index: u32,
struct_type: InternPool.LoadedStructType,
 
pub fn next(it: *@This()) ?u32 {
if (it.last_index == 0)
return null;
 
if (it.struct_type.hasReorderedFields()) {
it.last_index -= 1;
const order = it.struct_type.runtime_order.get(it.ip);
while (order[it.last_index] == .omitted) {
it.last_index -= 1;
if (it.last_index == 0)
return null;
}
return order[it.last_index].toInt();
}
 
it.last_index -= 1;
while (it.struct_type.fieldIsComptime(it.ip, it.last_index)) {
it.last_index -= 1;
if (it.last_index == 0)
return null;
}
 
return it.last_index;
}
};
 
pub fn iterateRuntimeOrderReverse(s: @This(), ip: *InternPool) ReverseRuntimeOrderIterator {
assert(s.layout != .@"packed");
return .{
.ip = ip,
.last_index = s.field_types.len,
.struct_type = s,
};
}
};
 
pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
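
A standalone sketch, not part of this diff: `ReverseRuntimeOrderIterator` walks field indices backwards, treating index 0 as the stop condition and skipping entries that have no runtime representation. The analogue below reproduces only that skip-while-decrementing pattern; the `omitted` sentinel is hypothetical:

const std = @import("std");

test "reverse walk that skips omitted entries" {
    const omitted: u32 = std.math.maxInt(u32);
    const order = [_]u32{ 2, omitted, 0, 1 };
    var seen: [3]u32 = undefined;
    var n: usize = 0;
    var i: usize = order.len;
    while (i > 0) {
        i -= 1;
        if (order[i] == omitted) continue; // no runtime slot for this field
        seen[n] = order[i];
        n += 1;
    }
    try std.testing.expectEqualSlices(u32, &.{ 1, 0, 2 }, seen[0..n]);
}
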
@@ -2836,7 +2931,7 @@ pub const Index = enum(u32) {
ptr_anon_decl: struct { data: *PtrAnonDecl },
ptr_anon_decl_aligned: struct { data: *PtrAnonDeclAligned },
ptr_comptime_field: struct { data: *PtrComptimeField },
ptr_int: struct { data: *PtrBase },
ptr_int: struct { data: *PtrInt },
ptr_eu_payload: struct { data: *PtrBase },
ptr_opt_payload: struct { data: *PtrBase },
ptr_elem: struct { data: *PtrBaseIndex },
@@ -3304,7 +3399,7 @@ pub const Tag = enum(u8) {
/// data is extra index of `PtrComptimeField`, which contains the pointer type and field value.
ptr_comptime_field,
/// A pointer with an integer value.
/// data is extra index of `PtrBase`, which contains the type and address.
/// data is extra index of `PtrInt`, which contains the type and address (byte offset from 0).
/// Only pointer types are allowed to have this encoding. Optional types must use
/// `opt_payload` or `opt_null`.
ptr_int,
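
A standalone sketch, not part of this diff: with this encoding, a pointer produced by `@ptrFromInt` has `base_addr = .int` and its entire address in `byte_offset`. The observable language-level property the encoding must preserve is that the integer round-trips at comptime:

const std = @import("std");

test "integer-addressed pointer round-trips at comptime" {
    comptime {
        const addr: usize = 0x1000;
        const p: *const u8 = @ptrFromInt(addr);
        try std.testing.expectEqual(addr, @intFromPtr(p));
    }
}
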
@@ -3497,7 +3592,7 @@ pub const Tag = enum(u8) {
.ptr_anon_decl => PtrAnonDecl,
.ptr_anon_decl_aligned => PtrAnonDeclAligned,
.ptr_comptime_field => PtrComptimeField,
.ptr_int => PtrBase,
.ptr_int => PtrInt,
.ptr_eu_payload => PtrBase,
.ptr_opt_payload => PtrBase,
.ptr_elem => PtrBaseIndex,
@@ -4153,11 +4248,37 @@ pub const PackedU64 = packed struct(u64) {
pub const PtrDecl = struct {
ty: Index,
decl: DeclIndex,
byte_offset_a: u32,
byte_offset_b: u32,
fn init(ty: Index, decl: DeclIndex, byte_offset: u64) @This() {
return .{
.ty = ty,
.decl = decl,
.byte_offset_a = @intCast(byte_offset >> 32),
.byte_offset_b = @truncate(byte_offset),
};
}
fn byteOffset(data: @This()) u64 {
return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
}
};
 
pub const PtrAnonDecl = struct {
ty: Index,
val: Index,
byte_offset_a: u32,
byte_offset_b: u32,
fn init(ty: Index, val: Index, byte_offset: u64) @This() {
return .{
.ty = ty,
.val = val,
.byte_offset_a = @intCast(byte_offset >> 32),
.byte_offset_b = @truncate(byte_offset),
};
}
fn byteOffset(data: @This()) u64 {
return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
}
};
 
pub const PtrAnonDeclAligned = struct {
@@ -4165,27 +4286,110 @@ pub const PtrAnonDeclAligned = struct {
val: Index,
/// Must be nonequal to `ty`. Only the alignment from this value is important.
orig_ty: Index,
byte_offset_a: u32,
byte_offset_b: u32,
fn init(ty: Index, val: Index, orig_ty: Index, byte_offset: u64) @This() {
return .{
.ty = ty,
.val = val,
.orig_ty = orig_ty,
.byte_offset_a = @intCast(byte_offset >> 32),
.byte_offset_b = @truncate(byte_offset),
};
}
fn byteOffset(data: @This()) u64 {
return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
}
};
 
pub const PtrComptimeAlloc = struct {
ty: Index,
index: ComptimeAllocIndex,
byte_offset_a: u32,
byte_offset_b: u32,
fn init(ty: Index, index: ComptimeAllocIndex, byte_offset: u64) @This() {
return .{
.ty = ty,
.index = index,
.byte_offset_a = @intCast(byte_offset >> 32),
.byte_offset_b = @truncate(byte_offset),
};
}
fn byteOffset(data: @This()) u64 {
return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
}
};
 
pub const PtrComptimeField = struct {
ty: Index,
field_val: Index,
byte_offset_a: u32,
byte_offset_b: u32,
fn init(ty: Index, field_val: Index, byte_offset: u64) @This() {
return .{
.ty = ty,
.field_val = field_val,
.byte_offset_a = @intCast(byte_offset >> 32),
.byte_offset_b = @truncate(byte_offset),
};
}
fn byteOffset(data: @This()) u64 {
return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
}
};
 
pub const PtrBase = struct {
ty: Index,
base: Index,
byte_offset_a: u32,
byte_offset_b: u32,
fn init(ty: Index, base: Index, byte_offset: u64) @This() {
return .{
.ty = ty,
.base = base,
.byte_offset_a = @intCast(byte_offset >> 32),
.byte_offset_b = @truncate(byte_offset),
};
}
fn byteOffset(data: @This()) u64 {
return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
}
};
 
pub const PtrBaseIndex = struct {
ty: Index,
base: Index,
index: Index,
byte_offset_a: u32,
byte_offset_b: u32,
fn init(ty: Index, base: Index, index: Index, byte_offset: u64) @This() {
return .{
.ty = ty,
.base = base,
.index = index,
.byte_offset_a = @intCast(byte_offset >> 32),
.byte_offset_b = @truncate(byte_offset),
};
}
fn byteOffset(data: @This()) u64 {
return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
}
};
 
pub const PtrInt = struct {
ty: Index,
byte_offset_a: u32,
byte_offset_b: u32,
fn init(ty: Index, byte_offset: u64) @This() {
return .{
.ty = ty,
.byte_offset_a = @intCast(byte_offset >> 32),
.byte_offset_b = @truncate(byte_offset),
};
}
fn byteOffset(data: @This()) u64 {
return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
}
};
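
A standalone sketch, not part of this diff: each `Ptr*` record above stores its 64-bit byte offset as two `u32` halves (`byte_offset_a` holds the high bits, `byte_offset_b` the low bits), because InternPool extra data is serialized as 32-bit words; `init` performs the split and `byteOffset` the reassembly. The round trip uses the same arithmetic:

const std = @import("std");

test "u64 byte offset split into two u32 halves round-trips" {
    const byte_offset: u64 = 0x1234_5678_9abc_def0;
    const hi: u32 = @intCast(byte_offset >> 32); // byte_offset_a
    const lo: u32 = @truncate(byte_offset); // byte_offset_b
    const rebuilt = @as(u64, hi) << 32 | lo;
    try std.testing.expectEqual(byte_offset, rebuilt);
}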
 
pub const PtrSlice = struct {
@@ -4569,78 +4773,55 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
},
.ptr_decl => {
const info = ip.extraData(PtrDecl, data);
return .{ .ptr = .{
.ty = info.ty,
.addr = .{ .decl = info.decl },
} };
return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .decl = info.decl }, .byte_offset = info.byteOffset() } };
},
.ptr_comptime_alloc => {
const info = ip.extraData(PtrComptimeAlloc, data);
return .{ .ptr = .{
.ty = info.ty,
.addr = .{ .comptime_alloc = info.index },
} };
return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .comptime_alloc = info.index }, .byte_offset = info.byteOffset() } };
},
.ptr_anon_decl => {
const info = ip.extraData(PtrAnonDecl, data);
return .{ .ptr = .{
.ty = info.ty,
.addr = .{ .anon_decl = .{
.val = info.val,
.orig_ty = info.ty,
} },
} };
return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .anon_decl = .{
.val = info.val,
.orig_ty = info.ty,
} }, .byte_offset = info.byteOffset() } };
},
.ptr_anon_decl_aligned => {
const info = ip.extraData(PtrAnonDeclAligned, data);
return .{ .ptr = .{
.ty = info.ty,
.addr = .{ .anon_decl = .{
.val = info.val,
.orig_ty = info.orig_ty,
} },
} };
return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .anon_decl = .{
.val = info.val,
.orig_ty = info.orig_ty,
} }, .byte_offset = info.byteOffset() } };
},
.ptr_comptime_field => {
const info = ip.extraData(PtrComptimeField, data);
return .{ .ptr = .{
.ty = info.ty,
.addr = .{ .comptime_field = info.field_val },
} };
return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .comptime_field = info.field_val }, .byte_offset = info.byteOffset() } };
},
.ptr_int => {
const info = ip.extraData(PtrBase, data);
const info = ip.extraData(PtrInt, data);
return .{ .ptr = .{
.ty = info.ty,
.addr = .{ .int = info.base },
.base_addr = .int,
.byte_offset = info.byteOffset(),
} };
},
.ptr_eu_payload => {
const info = ip.extraData(PtrBase, data);
return .{ .ptr = .{
.ty = info.ty,
.addr = .{ .eu_payload = info.base },
} };
return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .eu_payload = info.base }, .byte_offset = info.byteOffset() } };
},
.ptr_opt_payload => {
const info = ip.extraData(PtrBase, data);
return .{ .ptr = .{
.ty = info.ty,
.addr = .{ .opt_payload = info.base },
} };
return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .opt_payload = info.base }, .byte_offset = info.byteOffset() } };
},
.ptr_elem => {
// Avoid `indexToKey` recursion by asserting the tag encoding.
const info = ip.extraData(PtrBaseIndex, data);
const index_item = ip.items.get(@intFromEnum(info.index));
return switch (index_item.tag) {
.int_usize => .{ .ptr = .{
.ty = info.ty,
.addr = .{ .elem = .{
.base = info.base,
.index = index_item.data,
} },
} },
.int_usize => .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .arr_elem = .{
.base = info.base,
.index = index_item.data,
} }, .byte_offset = info.byteOffset() } },
.int_positive => @panic("TODO"), // implement along with behavior test coverage
else => unreachable,
};
@@ -4650,13 +4831,10 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
const info = ip.extraData(PtrBaseIndex, data);
const index_item = ip.items.get(@intFromEnum(info.index));
return switch (index_item.tag) {
.int_usize => .{ .ptr = .{
.ty = info.ty,
.addr = .{ .field = .{
.base = info.base,
.index = index_item.data,
} },
} },
.int_usize => .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .field = .{
.base = info.base,
.index = index_item.data,
} }, .byte_offset = info.byteOffset() } },
.int_positive => @panic("TODO"), // implement along with behavior test coverage
else => unreachable,
};
@@ -5211,57 +5389,40 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.ptr => |ptr| {
const ptr_type = ip.indexToKey(ptr.ty).ptr_type;
assert(ptr_type.flags.size != .Slice);
ip.items.appendAssumeCapacity(switch (ptr.addr) {
ip.items.appendAssumeCapacity(switch (ptr.base_addr) {
.decl => |decl| .{
.tag = .ptr_decl,
.data = try ip.addExtra(gpa, PtrDecl{
.ty = ptr.ty,
.decl = decl,
}),
.data = try ip.addExtra(gpa, PtrDecl.init(ptr.ty, decl, ptr.byte_offset)),
},
.comptime_alloc => |alloc_index| .{
.tag = .ptr_comptime_alloc,
.data = try ip.addExtra(gpa, PtrComptimeAlloc{
.ty = ptr.ty,
.index = alloc_index,
}),
.data = try ip.addExtra(gpa, PtrComptimeAlloc.init(ptr.ty, alloc_index, ptr.byte_offset)),
},
.anon_decl => |anon_decl| if (ptrsHaveSameAlignment(ip, ptr.ty, ptr_type, anon_decl.orig_ty)) item: {
if (ptr.ty != anon_decl.orig_ty) {
_ = ip.map.pop();
var new_key = key;
new_key.ptr.addr.anon_decl.orig_ty = ptr.ty;
new_key.ptr.base_addr.anon_decl.orig_ty = ptr.ty;
const new_gop = try ip.map.getOrPutAdapted(gpa, new_key, adapter);
if (new_gop.found_existing) return @enumFromInt(new_gop.index);
}
break :item .{
.tag = .ptr_anon_decl,
.data = try ip.addExtra(gpa, PtrAnonDecl{
.ty = ptr.ty,
.val = anon_decl.val,
}),
.data = try ip.addExtra(gpa, PtrAnonDecl.init(ptr.ty, anon_decl.val, ptr.byte_offset)),
};
} else .{
.tag = .ptr_anon_decl_aligned,
.data = try ip.addExtra(gpa, PtrAnonDeclAligned{
.ty = ptr.ty,
.val = anon_decl.val,
.orig_ty = anon_decl.orig_ty,
}),
.data = try ip.addExtra(gpa, PtrAnonDeclAligned.init(ptr.ty, anon_decl.val, anon_decl.orig_ty, ptr.byte_offset)),
},
.comptime_field => |field_val| item: {
assert(field_val != .none);
break :item .{
.tag = .ptr_comptime_field,
.data = try ip.addExtra(gpa, PtrComptimeField{
.ty = ptr.ty,
.field_val = field_val,
}),
.data = try ip.addExtra(gpa, PtrComptimeField.init(ptr.ty, field_val, ptr.byte_offset)),
};
},
.int, .eu_payload, .opt_payload => |base| item: {
switch (ptr.addr) {
.int => assert(ip.typeOf(base) == .usize_type),
.eu_payload, .opt_payload => |base| item: {
switch (ptr.base_addr) {
.eu_payload => assert(ip.indexToKey(
ip.indexToKey(ip.typeOf(base)).ptr_type.child,
) == .error_union_type),
@@ -5271,40 +5432,40 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
else => unreachable,
}
break :item .{
.tag = switch (ptr.addr) {
.int => .ptr_int,
.tag = switch (ptr.base_addr) {
.eu_payload => .ptr_eu_payload,
.opt_payload => .ptr_opt_payload,
else => unreachable,
},
.data = try ip.addExtra(gpa, PtrBase{
.ty = ptr.ty,
.base = base,
}),
.data = try ip.addExtra(gpa, PtrBase.init(ptr.ty, base, ptr.byte_offset)),
};
},
.elem, .field => |base_index| item: {
.int => .{
.tag = .ptr_int,
.data = try ip.addExtra(gpa, PtrInt.init(ptr.ty, ptr.byte_offset)),
},
.arr_elem, .field => |base_index| item: {
const base_ptr_type = ip.indexToKey(ip.typeOf(base_index.base)).ptr_type;
switch (ptr.addr) {
.elem => assert(base_ptr_type.flags.size == .Many),
switch (ptr.base_addr) {
.arr_elem => assert(base_ptr_type.flags.size == .Many),
.field => {
assert(base_ptr_type.flags.size == .One);
switch (ip.indexToKey(base_ptr_type.child)) {
.anon_struct_type => |anon_struct_type| {
assert(ptr.addr == .field);
assert(ptr.base_addr == .field);
assert(base_index.index < anon_struct_type.types.len);
},
.struct_type => {
assert(ptr.addr == .field);
assert(ptr.base_addr == .field);
assert(base_index.index < ip.loadStructType(base_ptr_type.child).field_types.len);
},
.union_type => {
const union_type = ip.loadUnionType(base_ptr_type.child);
assert(ptr.addr == .field);
assert(ptr.base_addr == .field);
assert(base_index.index < union_type.field_types.len);
},
.ptr_type => |slice_type| {
assert(ptr.addr == .field);
assert(ptr.base_addr == .field);
assert(slice_type.flags.size == .Slice);
assert(base_index.index < 2);
},
@@ -5321,16 +5482,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing);
try ip.items.ensureUnusedCapacity(gpa, 1);
break :item .{
.tag = switch (ptr.addr) {
.elem => .ptr_elem,
.tag = switch (ptr.base_addr) {
.arr_elem => .ptr_elem,
.field => .ptr_field,
else => unreachable,
},
.data = try ip.addExtra(gpa, PtrBaseIndex{
.ty = ptr.ty,
.base = base_index.base,
.index = index_index,
}),
.data = try ip.addExtra(gpa, PtrBaseIndex.init(ptr.ty, base_index.base, index_index, ptr.byte_offset)),
};
},
});
@@ -7584,13 +7741,15 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
if (ip.isPointerType(new_ty)) switch (ip.indexToKey(new_ty).ptr_type.flags.size) {
.One, .Many, .C => return ip.get(gpa, .{ .ptr = .{
.ty = new_ty,
.addr = .{ .int = .zero_usize },
.base_addr = .int,
.byte_offset = 0,
} }),
.Slice => return ip.get(gpa, .{ .slice = .{
.ty = new_ty,
.ptr = try ip.get(gpa, .{ .ptr = .{
.ty = ip.slicePtrType(new_ty),
.addr = .{ .int = .zero_usize },
.base_addr = .int,
.byte_offset = 0,
} }),
.len = try ip.get(gpa, .{ .undef = .usize_type }),
} }),
@@ -7630,10 +7789,15 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
.ty = new_ty,
.int = try ip.getCoerced(gpa, val, ip.loadEnumType(new_ty).tag_ty),
} }),
.ptr_type => return ip.get(gpa, .{ .ptr = .{
.ty = new_ty,
.addr = .{ .int = try ip.getCoerced(gpa, val, .usize_type) },
} }),
.ptr_type => switch (int.storage) {
inline .u64, .i64 => |int_val| return ip.get(gpa, .{ .ptr = .{
.ty = new_ty,
.base_addr = .int,
.byte_offset = @intCast(int_val),
} }),
.big_int => unreachable, // must be a usize
.lazy_align, .lazy_size => {},
},
else => if (ip.isIntegerType(new_ty))
return getCoercedInts(ip, gpa, int, new_ty),
},
@@ -7684,11 +7848,15 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
.ptr => |ptr| if (ip.isPointerType(new_ty) and ip.indexToKey(new_ty).ptr_type.flags.size != .Slice)
return ip.get(gpa, .{ .ptr = .{
.ty = new_ty,
.addr = ptr.addr,
.base_addr = ptr.base_addr,
.byte_offset = ptr.byte_offset,
} })
else if (ip.isIntegerType(new_ty))
switch (ptr.addr) {
.int => |int| return ip.getCoerced(gpa, int, new_ty),
switch (ptr.base_addr) {
.int => return ip.get(gpa, .{ .int = .{
.ty = .usize_type,
.storage = .{ .u64 = @intCast(ptr.byte_offset) },
} }),
else => {},
},
.opt => |opt| switch (ip.indexToKey(new_ty)) {
@@ -7696,13 +7864,15 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
.none => switch (ptr_type.flags.size) {
.One, .Many, .C => try ip.get(gpa, .{ .ptr = .{
.ty = new_ty,
.addr = .{ .int = .zero_usize },
.base_addr = .int,
.byte_offset = 0,
} }),
.Slice => try ip.get(gpa, .{ .slice = .{
.ty = new_ty,
.ptr = try ip.get(gpa, .{ .ptr = .{
.ty = ip.slicePtrType(new_ty),
.addr = .{ .int = .zero_usize },
.base_addr = .int,
.byte_offset = 0,
} }),
.len = try ip.get(gpa, .{ .undef = .usize_type }),
} }),
@@ -8181,7 +8351,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
.ptr_anon_decl => @sizeOf(PtrAnonDecl),
.ptr_anon_decl_aligned => @sizeOf(PtrAnonDeclAligned),
.ptr_comptime_field => @sizeOf(PtrComptimeField),
.ptr_int => @sizeOf(PtrBase),
.ptr_int => @sizeOf(PtrInt),
.ptr_eu_payload => @sizeOf(PtrBase),
.ptr_opt_payload => @sizeOf(PtrBase),
.ptr_elem => @sizeOf(PtrBaseIndex),
@@ -8854,13 +9024,15 @@ pub fn getBackingDecl(ip: *const InternPool, val: Index) OptionalDeclIndex {
}
}
 
pub fn getBackingAddrTag(ip: *const InternPool, val: Index) ?Key.Ptr.Addr.Tag {
pub fn getBackingAddrTag(ip: *const InternPool, val: Index) ?Key.Ptr.BaseAddr.Tag {
var base = @intFromEnum(val);
while (true) {
switch (ip.items.items(.tag)[base]) {
.ptr_decl => return .decl,
.ptr_comptime_alloc => return .comptime_alloc,
.ptr_anon_decl, .ptr_anon_decl_aligned => return .anon_decl,
.ptr_anon_decl,
.ptr_anon_decl_aligned,
=> return .anon_decl,
.ptr_comptime_field => return .comptime_field,
.ptr_int => return .int,
inline .ptr_eu_payload,
 
src/Module.zig
@@ -528,21 +528,6 @@ pub const Decl = struct {
return zcu.namespacePtrUnwrap(decl.getInnerNamespaceIndex(zcu));
}
 
pub fn dump(decl: *Decl) void {
const loc = std.zig.findLineColumn(decl.scope.source.bytes, decl.src);
std.debug.print("{s}:{d}:{d} name={d} status={s}", .{
decl.scope.sub_file_path,
loc.line + 1,
loc.column + 1,
@intFromEnum(decl.name),
@tagName(decl.analysis),
});
if (decl.has_tv) {
std.debug.print(" val={}", .{decl.val});
}
std.debug.print("\n", .{});
}
 
pub fn getFileScope(decl: Decl, zcu: *Zcu) *File {
return zcu.namespacePtr(decl.src_namespace).file_scope;
}
@@ -660,6 +645,22 @@ pub const Decl = struct {
},
};
}
 
pub fn declPtrType(decl: Decl, zcu: *Zcu) !Type {
assert(decl.has_tv);
const decl_ty = decl.typeOf(zcu);
return zcu.ptrType(.{
.child = decl_ty.toIntern(),
.flags = .{
.alignment = if (decl.alignment == decl_ty.abiAlignment(zcu))
.none
else
decl.alignment,
.address_space = decl.@"addrspace",
.is_const = decl.getOwnedVariable(zcu) == null,
},
});
}
};
 
/// This state is attached to every Decl when Module emit_h is non-null.
@@ -3535,6 +3536,10 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
}
 
log.debug("semaDecl '{d}'", .{@intFromEnum(decl_index)});
log.debug("decl name '{}'", .{(try decl.fullyQualifiedName(mod)).fmt(ip)});
defer blk: {
log.debug("finish decl name '{}'", .{(decl.fullyQualifiedName(mod) catch break :blk).fmt(ip)});
}
 
const old_has_tv = decl.has_tv;
// The following values are ignored if `!old_has_tv`
@@ -4122,10 +4127,11 @@ fn newEmbedFile(
})).toIntern();
const ptr_val = try ip.get(gpa, .{ .ptr = .{
.ty = ptr_ty,
.addr = .{ .anon_decl = .{
.base_addr = .{ .anon_decl = .{
.val = array_val,
.orig_ty = ptr_ty,
} },
.byte_offset = 0,
} });
 
result.* = new_file;
@@ -4489,6 +4495,11 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
 
log.debug("func name '{}'", .{(try decl.fullyQualifiedName(mod)).fmt(ip)});
defer blk: {
log.debug("finish func name '{}'", .{(decl.fullyQualifiedName(mod) catch break :blk).fmt(ip)});
}
 
mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.Depender.wrap(.{ .func = func_index }));
 
var comptime_err_ret_trace = std.ArrayList(SrcLoc).init(gpa);
@@ -5332,7 +5343,7 @@ pub fn populateTestFunctions(
const decl = mod.declPtr(decl_index);
const test_fn_ty = decl.typeOf(mod).slicePtrFieldType(mod).childType(mod);
 
const array_anon_decl: InternPool.Key.Ptr.Addr.AnonDecl = array: {
const array_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = array: {
// Add mod.test_functions to an array decl then make the test_functions
// decl reference it as a slice.
const test_fn_vals = try gpa.alloc(InternPool.Index, mod.test_functions.count());
@@ -5342,7 +5353,7 @@ pub fn populateTestFunctions(
const test_decl = mod.declPtr(test_decl_index);
const test_decl_name = try test_decl.fullyQualifiedName(mod);
const test_decl_name_len = test_decl_name.length(ip);
const test_name_anon_decl: InternPool.Key.Ptr.Addr.AnonDecl = n: {
const test_name_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = n: {
const test_name_ty = try mod.arrayType(.{
.len = test_decl_name_len,
.child = .u8_type,
@@ -5363,7 +5374,8 @@ pub fn populateTestFunctions(
.ty = .slice_const_u8_type,
.ptr = try mod.intern(.{ .ptr = .{
.ty = .manyptr_const_u8_type,
.addr = .{ .anon_decl = test_name_anon_decl },
.base_addr = .{ .anon_decl = test_name_anon_decl },
.byte_offset = 0,
} }),
.len = try mod.intern(.{ .int = .{
.ty = .usize_type,
@@ -5378,7 +5390,8 @@ pub fn populateTestFunctions(
.is_const = true,
},
} }),
.addr = .{ .decl = test_decl_index },
.base_addr = .{ .decl = test_decl_index },
.byte_offset = 0,
} }),
};
test_fn_val.* = try mod.intern(.{ .aggregate = .{
@@ -5415,7 +5428,8 @@ pub fn populateTestFunctions(
.ty = new_ty.toIntern(),
.ptr = try mod.intern(.{ .ptr = .{
.ty = new_ty.slicePtrFieldType(mod).toIntern(),
.addr = .{ .anon_decl = array_anon_decl },
.base_addr = .{ .anon_decl = array_anon_decl },
.byte_offset = 0,
} }),
.len = (try mod.intValue(Type.usize, mod.test_functions.count())).toIntern(),
} });
@@ -5680,9 +5694,11 @@ pub fn errorSetFromUnsortedNames(
/// Supports only pointers, not pointer-like optionals.
pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value {
assert(ty.zigTypeTag(mod) == .Pointer and !ty.isSlice(mod));
assert(x != 0 or ty.isAllowzeroPtr(mod));
const i = try intern(mod, .{ .ptr = .{
.ty = ty.toIntern(),
.addr = .{ .int = (try mod.intValue_u64(Type.usize, x)).toIntern() },
.base_addr = .int,
.byte_offset = x,
} });
return Value.fromInterned(i);
}
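
A standalone sketch, not part of this diff: the new assertion encodes the language rule that only `allowzero` pointers may carry address zero. A minimal illustration:

const std = @import("std");

test "address zero requires an allowzero pointer" {
    const p: *allowzero const u8 = @ptrFromInt(0);
    try std.testing.expectEqual(@as(usize, 0), @intFromPtr(p));
}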
 
src/Sema.zig
@@ -126,16 +126,14 @@ const MaybeComptimeAlloc = struct {
runtime_index: Value.RuntimeIndex,
/// Backed by sema.arena. Tracks all comptime-known stores to this `alloc`. Due to
/// RLS, a single comptime-known allocation may have arbitrarily many stores.
/// This may also contain `set_union_tag` instructions.
/// This list also contains `set_union_tag`, `optional_payload_ptr_set`, and
/// `errunion_payload_ptr_set` instructions.
/// If the instruction is one of these three tags, `src` may be `.unneeded`.
stores: std.MultiArrayList(struct {
inst: Air.Inst.Index,
src_decl: InternPool.DeclIndex,
src: LazySrcLoc,
}) = .{},
/// Backed by sema.arena. Contains instructions such as `optional_payload_ptr_set`
/// which have side effects so will not be elided by Liveness: we must rewrite these
/// instructions to be nops instead of relying on Liveness.
non_elideable_pointers: std.ArrayListUnmanaged(Air.Inst.Index) = .{},
};
 
const ComptimeAlloc = struct {
@@ -177,7 +175,8 @@ const MutableValue = @import("mutable_value.zig").MutableValue;
const Type = @import("type.zig").Type;
const Air = @import("Air.zig");
const Zir = std.zig.Zir;
const Module = @import("Module.zig");
const Zcu = @import("Module.zig");
const Module = Zcu;
const trace = @import("tracy.zig").trace;
const Namespace = Module.Namespace;
const CompileError = Module.CompileError;
@@ -2138,7 +2137,7 @@ fn resolveValueIntable(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value {
if (sema.mod.intern_pool.getBackingAddrTag(val.toIntern())) |addr| switch (addr) {
.decl, .anon_decl, .comptime_alloc, .comptime_field => return null,
.int => {},
.eu_payload, .opt_payload, .elem, .field => unreachable,
.eu_payload, .opt_payload, .arr_elem, .field => unreachable,
};
return try sema.resolveLazyValue(val);
}
@@ -2268,11 +2267,11 @@ fn failWithErrorSetCodeMissing(
}
 
fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty: Type, val: Value, vector_index: usize) CompileError {
const mod = sema.mod;
if (int_ty.zigTypeTag(mod) == .Vector) {
const zcu = sema.mod;
if (int_ty.zigTypeTag(zcu) == .Vector) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "overflow of vector type '{}' with value '{}'", .{
int_ty.fmt(sema.mod), val.fmtValue(sema.mod),
int_ty.fmt(zcu), val.fmtValue(zcu, sema),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "when computing vector element at index '{d}'", .{vector_index});
@@ -2281,7 +2280,7 @@ fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty:
return sema.failWithOwnedErrorMsg(block, msg);
}
return sema.fail(block, src, "overflow of integer type '{}' with value '{}'", .{
int_ty.fmt(sema.mod), val.fmtValue(sema.mod),
int_ty.fmt(zcu), val.fmtValue(zcu, sema),
});
}
 
@@ -2440,7 +2439,7 @@ fn addFieldErrNote(
try mod.errNoteNonLazy(field_src, parent, format, args);
}
 
fn errMsg(
pub fn errMsg(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
@@ -2469,7 +2468,7 @@ pub fn fail(
return sema.failWithOwnedErrorMsg(block, err_msg);
}
 
fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.ErrorMsg) error{ AnalysisFail, OutOfMemory } {
pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.ErrorMsg) error{ AnalysisFail, OutOfMemory } {
@setCold(true);
const gpa = sema.gpa;
const mod = sema.mod;
@@ -2922,7 +2921,7 @@ fn createAnonymousDeclTypeNamed(
return sema.createAnonymousDeclTypeNamed(block, src, val, .anon, anon_prefix, null);
 
if (arg_i != 0) try writer.writeByte(',');
try writer.print("{}", .{arg_val.fmtValue(sema.mod)});
try writer.print("{}", .{arg_val.fmtValue(sema.mod, sema)});
 
arg_i += 1;
continue;
@@ -3193,7 +3192,7 @@ fn zirEnumDecl(
}).lazy;
const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = conflict.prev_field_idx }).lazy;
const msg = msg: {
const msg = try sema.errMsg(block, value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(sema.mod)});
const msg = try sema.errMsg(block, value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(sema.mod, sema)});
errdefer msg.destroy(gpa);
try sema.errNote(block, other_field_src, msg, "other occurrence here", .{});
break :msg msg;
@@ -3213,7 +3212,7 @@ fn zirEnumDecl(
const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy;
const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = conflict.prev_field_idx }).lazy;
const msg = msg: {
const msg = try sema.errMsg(block, field_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(sema.mod)});
const msg = try sema.errMsg(block, field_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(sema.mod, sema)});
errdefer msg.destroy(gpa);
try sema.errNote(block, other_field_src, msg, "other occurrence here", .{});
break :msg msg;
@@ -3235,7 +3234,7 @@ fn zirEnumDecl(
.range = if (has_tag_value) .value else .name,
}).lazy;
const msg = try sema.errMsg(block, value_src, "enumeration value '{}' too large for type '{}'", .{
last_tag_val.?.fmtValue(mod), int_tag_ty.fmt(mod),
last_tag_val.?.fmtValue(mod, sema), int_tag_ty.fmt(mod),
});
return sema.failWithOwnedErrorMsg(block, msg);
}
@@ -3766,7 +3765,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
// If this was a comptime inferred alloc, then `storeToInferredAllocComptime`
// might have already done our job and created an anon decl ref.
switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
.ptr => |ptr| switch (ptr.base_addr) {
.anon_decl => {
// The comptime-ification was already done for us.
// Just make sure the pointer is const.
@@ -3778,22 +3777,25 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
}
 
if (!sema.isComptimeMutablePtr(ptr_val)) break :already_ct;
const alloc_index = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr.comptime_alloc;
const ptr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr;
assert(ptr.byte_offset == 0);
const alloc_index = ptr.base_addr.comptime_alloc;
const ct_alloc = sema.getComptimeAlloc(alloc_index);
const interned = try ct_alloc.val.intern(mod, sema.arena);
if (Value.fromInterned(interned).canMutateComptimeVarState(mod)) {
if (interned.canMutateComptimeVarState(mod)) {
// Preserve the comptime alloc, just make the pointer const.
ct_alloc.val = .{ .interned = interned };
ct_alloc.val = .{ .interned = interned.toIntern() };
ct_alloc.is_const = true;
return sema.makePtrConst(block, alloc);
} else {
// Promote the constant to an anon decl.
const new_mut_ptr = Air.internedToRef(try mod.intern(.{ .ptr = .{
.ty = alloc_ty.toIntern(),
.addr = .{ .anon_decl = .{
.val = interned,
.base_addr = .{ .anon_decl = .{
.val = interned.toIntern(),
.orig_ty = alloc_ty.toIntern(),
} },
.byte_offset = 0,
} }));
return sema.makePtrConst(block, new_mut_ptr);
}
@@ -3818,10 +3820,10 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
/// If `alloc` is an inferred allocation, `resolved_inferred_ty` is taken to be its resolved
/// type. Otherwise, it may be `null`, and the type will be inferred from `alloc`.
fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, resolved_alloc_ty: ?Type) CompileError!?InternPool.Index {
const mod = sema.mod;
const zcu = sema.mod;
 
const alloc_ty = resolved_alloc_ty orelse sema.typeOf(alloc);
const ptr_info = alloc_ty.ptrInfo(mod);
const ptr_info = alloc_ty.ptrInfo(zcu);
const elem_ty = Type.fromInterned(ptr_info.child);
 
const alloc_inst = alloc.toIndex() orelse return null;
@@ -3843,12 +3845,16 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
 
simple: {
if (stores.len != 1) break :simple;
const store_inst = stores[0];
const store_data = sema.air_instructions.items(.data)[@intFromEnum(store_inst)].bin_op;
if (store_data.lhs != alloc) break :simple;
const store_inst = sema.air_instructions.get(@intFromEnum(stores[0]));
switch (store_inst.tag) {
.store, .store_safe => {},
.set_union_tag, .optional_payload_ptr_set, .errunion_payload_ptr_set => break :simple, // there's OPV stuff going on!
else => unreachable,
}
if (store_inst.data.bin_op.lhs != alloc) break :simple;
 
const val = store_data.rhs.toInterned().?;
assert(mod.intern_pool.typeOf(val) == elem_ty.toIntern());
const val = store_inst.data.bin_op.rhs.toInterned().?;
assert(zcu.intern_pool.typeOf(val) == elem_ty.toIntern());
return sema.finishResolveComptimeKnownAllocPtr(block, alloc_ty, val, null, alloc_inst, comptime_info.value);
}
 
@@ -3857,9 +3863,10 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
 
const ct_alloc = try sema.newComptimeAlloc(block, elem_ty, ptr_info.flags.alignment);
 
const alloc_ptr = try mod.intern(.{ .ptr = .{
const alloc_ptr = try zcu.intern(.{ .ptr = .{
.ty = alloc_ty.toIntern(),
.addr = .{ .comptime_alloc = ct_alloc },
.base_addr = .{ .comptime_alloc = ct_alloc },
.byte_offset = 0,
} });
 
// Maps from pointers into the runtime allocs, to comptime-mutable pointers into the comptime alloc
@@ -3867,10 +3874,18 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
try ptr_mapping.ensureTotalCapacity(@intCast(stores.len));
ptr_mapping.putAssumeCapacity(alloc_inst, alloc_ptr);
 
// Whilst constructing our mapping, we will also initialize optional and error union payloads when
// we encounter the corresponding pointers. For this reason, the ordering of `to_map` matters.
var to_map = try std.ArrayList(Air.Inst.Index).initCapacity(sema.arena, stores.len);
for (stores) |store_inst| {
const bin_op = sema.air_instructions.items(.data)[@intFromEnum(store_inst)].bin_op;
to_map.appendAssumeCapacity(bin_op.lhs.toIndex().?);
for (stores) |store_inst_idx| {
const store_inst = sema.air_instructions.get(@intFromEnum(store_inst_idx));
const ptr_to_map = switch (store_inst.tag) {
.store, .store_safe => store_inst.data.bin_op.lhs.toIndex().?, // Map the pointer being stored to.
.set_union_tag => continue, // We can completely ignore these: we'll do it implicitly when we get the field pointer.
.optional_payload_ptr_set, .errunion_payload_ptr_set => store_inst_idx, // Map the generated pointer itself.
else => unreachable,
};
to_map.appendAssumeCapacity(ptr_to_map);
}
 
const tmp_air = sema.getTmpAir();
@@ -3950,53 +3965,68 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
try to_map.appendSlice(&.{ air_ptr, air_parent_ptr.toIndex().? });
continue;
};
const new_ptr_ty = tmp_air.typeOfIndex(air_ptr, &mod.intern_pool).toIntern();
const new_ptr_ty = tmp_air.typeOfIndex(air_ptr, &zcu.intern_pool).toIntern();
const new_ptr = switch (method) {
.same_addr => try mod.intern_pool.getCoerced(sema.gpa, decl_parent_ptr, new_ptr_ty),
.opt_payload => try mod.intern(.{ .ptr = .{
.ty = new_ptr_ty,
.addr = .{ .opt_payload = decl_parent_ptr },
} }),
.eu_payload => try mod.intern(.{ .ptr = .{
.ty = new_ptr_ty,
.addr = .{ .eu_payload = decl_parent_ptr },
} }),
.field => |field_idx| try mod.intern(.{ .ptr = .{
.ty = new_ptr_ty,
.addr = .{ .field = .{
.base = decl_parent_ptr,
.index = field_idx,
} },
} }),
.elem => |elem_idx| (try Value.fromInterned(decl_parent_ptr).elemPtr(Type.fromInterned(new_ptr_ty), @intCast(elem_idx), mod)).toIntern(),
.same_addr => try zcu.intern_pool.getCoerced(sema.gpa, decl_parent_ptr, new_ptr_ty),
.opt_payload => ptr: {
// Set the optional to non-null at comptime.
// If the payload is OPV, we must use that value instead of undef.
const opt_ty = Value.fromInterned(decl_parent_ptr).typeOf(zcu).childType(zcu);
const payload_ty = opt_ty.optionalChild(zcu);
const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try zcu.undefValue(payload_ty);
const opt_val = try zcu.intern(.{ .opt = .{
.ty = opt_ty.toIntern(),
.val = payload_val.toIntern(),
} });
try sema.storePtrVal(block, .unneeded, Value.fromInterned(decl_parent_ptr), Value.fromInterned(opt_val), opt_ty);
break :ptr (try Value.fromInterned(decl_parent_ptr).ptrOptPayload(sema)).toIntern();
},
.eu_payload => ptr: {
// Set the error union to non-error at comptime.
// If the payload is OPV, we must use that value instead of undef.
const eu_ty = Value.fromInterned(decl_parent_ptr).typeOf(zcu).childType(zcu);
const payload_ty = eu_ty.errorUnionPayload(zcu);
const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try zcu.undefValue(payload_ty);
const eu_val = try zcu.intern(.{ .error_union = .{
.ty = eu_ty.toIntern(),
.val = .{ .payload = payload_val.toIntern() },
} });
try sema.storePtrVal(block, .unneeded, Value.fromInterned(decl_parent_ptr), Value.fromInterned(eu_val), eu_ty);
break :ptr (try Value.fromInterned(decl_parent_ptr).ptrEuPayload(sema)).toIntern();
},
.field => |idx| ptr: {
const maybe_union_ty = Value.fromInterned(decl_parent_ptr).typeOf(zcu).childType(zcu);
if (zcu.typeToUnion(maybe_union_ty)) |union_obj| {
// As this is a union field, we must store to the pointer now to set the tag.
// If the payload is OPV, there will not be a payload store, so we store that value.
// Otherwise, there will be a payload store to process later, so undef will suffice.
const payload_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[idx]);
const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try zcu.undefValue(payload_ty);
const tag_val = try zcu.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), idx);
const store_val = try zcu.unionValue(maybe_union_ty, tag_val, payload_val);
try sema.storePtrVal(block, .unneeded, Value.fromInterned(decl_parent_ptr), store_val, maybe_union_ty);
}
break :ptr (try Value.fromInterned(decl_parent_ptr).ptrField(idx, sema)).toIntern();
},
.elem => |idx| (try Value.fromInterned(decl_parent_ptr).ptrElem(idx, sema)).toIntern(),
};
try ptr_mapping.put(air_ptr, new_ptr);
}
 
// We have a correlation between AIR pointers and decl pointers. Perform all stores at comptime.
// Any implicit stores performed by `optional_payload_ptr_set`, `errunion_payload_ptr_set`, or
// `set_union_tag` instructions were already done above.
 
for (stores) |store_inst| {
switch (sema.air_instructions.items(.tag)[@intFromEnum(store_inst)]) {
.set_union_tag => {
// If this tag has an OPV payload, there won't be a corresponding
// store instruction, so we must set the union payload now.
const bin_op = sema.air_instructions.items(.data)[@intFromEnum(store_inst)].bin_op;
const air_ptr_inst = bin_op.lhs.toIndex().?;
const tag_val = (try sema.resolveValue(bin_op.rhs)).?;
const union_ty = sema.typeOf(bin_op.lhs).childType(mod);
const payload_ty = union_ty.unionFieldType(tag_val, mod).?;
if (try sema.typeHasOnePossibleValue(payload_ty)) |payload_val| {
const new_ptr = ptr_mapping.get(air_ptr_inst).?;
const store_val = try mod.unionValue(union_ty, tag_val, payload_val);
try sema.storePtrVal(block, .unneeded, Value.fromInterned(new_ptr), store_val, union_ty);
}
},
for (stores) |store_inst_idx| {
const store_inst = sema.air_instructions.get(@intFromEnum(store_inst_idx));
switch (store_inst.tag) {
.set_union_tag => {}, // Handled implicitly by field pointers above
.optional_payload_ptr_set, .errunion_payload_ptr_set => {}, // Handled explicitly above
.store, .store_safe => {
const bin_op = sema.air_instructions.items(.data)[@intFromEnum(store_inst)].bin_op;
const air_ptr_inst = bin_op.lhs.toIndex().?;
const store_val = (try sema.resolveValue(bin_op.rhs)).?;
const air_ptr_inst = store_inst.data.bin_op.lhs.toIndex().?;
const store_val = (try sema.resolveValue(store_inst.data.bin_op.rhs)).?;
const new_ptr = ptr_mapping.get(air_ptr_inst).?;
try sema.storePtrVal(block, .unneeded, Value.fromInterned(new_ptr), store_val, Type.fromInterned(mod.intern_pool.typeOf(store_val.toIntern())));
try sema.storePtrVal(block, .unneeded, Value.fromInterned(new_ptr), store_val, Type.fromInterned(zcu.intern_pool.typeOf(store_val.toIntern())));
},
else => unreachable,
}
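
A standalone sketch, not part of this diff: "OPV" in the comments above means a type with exactly one possible value. Sema never emits a payload store for such a payload, which is why the comptime initialization substitutes that value instead of `undef`. A language-level illustration of such a payload type:

const std = @import("std");

test "a one-possible-value payload needs no payload data" {
    // u0 has exactly one value (0); making the optional non-null involves
    // no payload bits at all.
    var opt: ?u0 = null;
    opt = 0;
    try std.testing.expect(opt != null);
    try std.testing.expectEqual(@as(u0, 0), opt.?);
}
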
@@ -4040,9 +4070,6 @@ fn finishResolveComptimeKnownAllocPtr(
for (comptime_info.stores.items(.inst)) |store_inst| {
sema.air_instructions.set(@intFromEnum(store_inst), nop_inst);
}
for (comptime_info.non_elideable_pointers.items) |ptr_inst| {
sema.air_instructions.set(@intFromEnum(ptr_inst), nop_inst);
}
 
if (Value.fromInterned(result_val).canMutateComptimeVarState(zcu)) {
const alloc_index = existing_comptime_alloc orelse a: {
@@ -4054,15 +4081,17 @@ fn finishResolveComptimeKnownAllocPtr(
sema.getComptimeAlloc(alloc_index).is_const = true;
return try zcu.intern(.{ .ptr = .{
.ty = alloc_ty.toIntern(),
.addr = .{ .comptime_alloc = alloc_index },
.base_addr = .{ .comptime_alloc = alloc_index },
.byte_offset = 0,
} });
} else {
return try zcu.intern(.{ .ptr = .{
.ty = alloc_ty.toIntern(),
.addr = .{ .anon_decl = .{
.base_addr = .{ .anon_decl = .{
.orig_ty = alloc_ty.toIntern(),
.val = result_val,
} },
.byte_offset = 0,
} });
}
}
@@ -4207,11 +4236,11 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
sema.air_instructions.set(@intFromEnum(ptr_inst), .{ .tag = undefined, .data = undefined });
}
 
const val = switch (mod.intern_pool.indexToKey(resolved_ptr).ptr.addr) {
const val = switch (mod.intern_pool.indexToKey(resolved_ptr).ptr.base_addr) {
.anon_decl => |a| a.val,
.comptime_alloc => |i| val: {
const alloc = sema.getComptimeAlloc(i);
break :val try alloc.val.intern(mod, sema.arena);
break :val (try alloc.val.intern(mod, sema.arena)).toIntern();
},
else => unreachable,
};
@@ -4388,10 +4417,10 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.input_index = len_idx,
} };
try sema.errNote(block, a_src, msg, "length {} here", .{
v.fmtValue(sema.mod),
v.fmtValue(sema.mod, sema),
});
try sema.errNote(block, arg_src, msg, "length {} here", .{
arg_val.fmtValue(sema.mod),
arg_val.fmtValue(sema.mod, sema),
});
break :msg msg;
};
@@ -4869,7 +4898,7 @@ fn validateUnionInit(
 
const new_tag = Air.internedToRef(tag_val.toIntern());
const set_tag_inst = try block.addBinOp(.set_union_tag, union_ptr, new_tag);
try sema.checkComptimeKnownStore(block, set_tag_inst, init_src);
try sema.checkComptimeKnownStore(block, set_tag_inst, .unneeded); // `unneeded` since this isn't a "proper" store
}
 
fn validateStructInit(
@@ -5331,7 +5360,7 @@ fn zirValidatePtrArrayInit(
if (array_is_comptime) {
if (try sema.resolveDefinedValue(block, init_src, array_ptr)) |ptr_val| {
switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
.ptr => |ptr| switch (ptr.base_addr) {
.comptime_field => return, // This store was validated by the individual elem ptrs.
else => {},
},
@@ -5619,17 +5648,19 @@ fn storeToInferredAllocComptime(
if (iac.is_const and !operand_val.canMutateComptimeVarState(zcu)) {
iac.ptr = try zcu.intern(.{ .ptr = .{
.ty = alloc_ty.toIntern(),
.addr = .{ .anon_decl = .{
.base_addr = .{ .anon_decl = .{
.val = operand_val.toIntern(),
.orig_ty = alloc_ty.toIntern(),
} },
.byte_offset = 0,
} });
} else {
const alloc_index = try sema.newComptimeAlloc(block, operand_ty, iac.alignment);
sema.getComptimeAlloc(alloc_index).val = .{ .interned = operand_val.toIntern() };
iac.ptr = try zcu.intern(.{ .ptr = .{
.ty = alloc_ty.toIntern(),
.addr = .{ .comptime_alloc = alloc_index },
.base_addr = .{ .comptime_alloc = alloc_index },
.byte_offset = 0,
} });
}
}
@@ -5724,10 +5755,11 @@ fn refValue(sema: *Sema, val: InternPool.Index) CompileError!InternPool.Index {
})).toIntern();
return mod.intern(.{ .ptr = .{
.ty = ptr_ty,
.addr = .{ .anon_decl = .{
.base_addr = .{ .anon_decl = .{
.val = val,
.orig_ty = ptr_ty,
} },
.byte_offset = 0,
} });
}
 
@@ -5813,7 +5845,7 @@ fn zirCompileLog(
const arg_ty = sema.typeOf(arg);
if (try sema.resolveValueResolveLazy(arg)) |val| {
try writer.print("@as({}, {})", .{
arg_ty.fmt(mod), val.fmtValue(mod),
arg_ty.fmt(mod), val.fmtValue(mod, sema),
});
} else {
try writer.print("@as({}, [runtime value])", .{arg_ty.fmt(mod)});
@@ -6404,7 +6436,7 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
else => |e| return e,
};
{
try mod.ensureDeclAnalyzed(decl_index);
try sema.ensureDeclAnalyzed(decl_index);
const exported_decl = mod.declPtr(decl_index);
if (exported_decl.val.getFunction(mod)) |function| {
return sema.analyzeExport(block, src, options, function.owner_decl);
@@ -6457,7 +6489,7 @@ pub fn analyzeExport(
if (options.linkage == .internal)
return;
 
try mod.ensureDeclAnalyzed(exported_decl_index);
try sema.ensureDeclAnalyzed(exported_decl_index);
const exported_decl = mod.declPtr(exported_decl_index);
const export_ty = exported_decl.typeOf(mod);
 
@@ -6880,8 +6912,8 @@ fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl {
const owner_decl_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
.extern_func => |extern_func| extern_func.decl,
.func => |func| func.owner_decl,
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl| mod.declPtr(decl).val.getFunction(mod).?.owner_decl,
.ptr => |ptr| switch (ptr.base_addr) {
.decl => |decl| if (ptr.byte_offset == 0) mod.declPtr(decl).val.getFunction(mod).?.owner_decl else return null,
else => return null,
},
else => return null,
@@ -7638,22 +7670,23 @@ fn analyzeCall(
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
}),
.func => func_val.toIntern(),
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl| blk: {
const func_val_ptr = mod.declPtr(decl).val.toIntern();
const intern_index = mod.intern_pool.indexToKey(func_val_ptr);
if (intern_index == .extern_func or (intern_index == .variable and intern_index.variable.is_extern))
return sema.fail(block, call_src, "{s} call of extern function pointer", .{
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
});
break :blk func_val_ptr;
},
else => {
assert(callee_ty.isPtrAtRuntime(mod));
return sema.fail(block, call_src, "{s} call of function pointer", .{
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
});
},
.ptr => |ptr| blk: {
switch (ptr.base_addr) {
.decl => |decl| if (ptr.byte_offset == 0) {
const func_val_ptr = mod.declPtr(decl).val.toIntern();
const intern_index = mod.intern_pool.indexToKey(func_val_ptr);
if (intern_index == .extern_func or (intern_index == .variable and intern_index.variable.is_extern))
return sema.fail(block, call_src, "{s} call of extern function pointer", .{
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
});
break :blk func_val_ptr;
},
else => {},
}
assert(callee_ty.isPtrAtRuntime(mod));
return sema.fail(block, call_src, "{s} call of function pointer", .{
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
});
},
else => unreachable,
};
@@ -7971,7 +8004,7 @@ fn analyzeCall(
if (try sema.resolveValue(func)) |func_val| {
switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
.func => break :skip_safety,
.ptr => |ptr| switch (ptr.addr) {
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
.decl => |decl| if (!mod.declPtr(decl).isExtern(mod)) break :skip_safety,
else => {},
},
@@ -8167,7 +8200,7 @@ fn instantiateGenericCall(
});
const generic_owner = switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
.func => func_val.toIntern(),
.ptr => |ptr| mod.declPtr(ptr.addr.decl).val.toIntern(),
.ptr => |ptr| mod.declPtr(ptr.base_addr.decl).val.toIntern(),
else => unreachable,
};
const generic_owner_func = mod.intern_pool.indexToKey(generic_owner).func;
@@ -8919,7 +8952,7 @@ fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
return Air.internedToRef((try mod.getCoerced(int_val, dest_ty)).toIntern());
}
return sema.fail(block, src, "int value '{}' out of range of non-exhaustive enum '{}'", .{
int_val.fmtValue(mod), dest_ty.fmt(mod),
int_val.fmtValue(mod, sema), dest_ty.fmt(mod),
});
}
if (int_val.isUndef(mod)) {
@@ -8927,7 +8960,7 @@ fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
}
if (!(try sema.enumHasInt(dest_ty, int_val))) {
return sema.fail(block, src, "enum '{}' has no tag with value '{}'", .{
dest_ty.fmt(mod), int_val.fmtValue(mod),
dest_ty.fmt(mod), int_val.fmtValue(mod, sema),
});
}
return Air.internedToRef((try mod.getCoerced(int_val, dest_ty)).toIntern());
@@ -8984,47 +9017,47 @@ fn analyzeOptionalPayloadPtr(
safety_check: bool,
initializing: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const zcu = sema.mod;
const optional_ptr_ty = sema.typeOf(optional_ptr);
assert(optional_ptr_ty.zigTypeTag(mod) == .Pointer);
assert(optional_ptr_ty.zigTypeTag(zcu) == .Pointer);
 
const opt_type = optional_ptr_ty.childType(mod);
if (opt_type.zigTypeTag(mod) != .Optional) {
return sema.fail(block, src, "expected optional type, found '{}'", .{opt_type.fmt(mod)});
const opt_type = optional_ptr_ty.childType(zcu);
if (opt_type.zigTypeTag(zcu) != .Optional) {
return sema.fail(block, src, "expected optional type, found '{}'", .{opt_type.fmt(zcu)});
}
 
const child_type = opt_type.optionalChild(mod);
const child_type = opt_type.optionalChild(zcu);
const child_pointer = try sema.ptrType(.{
.child = child_type.toIntern(),
.flags = .{
.is_const = optional_ptr_ty.isConstPtr(mod),
.address_space = optional_ptr_ty.ptrAddressSpace(mod),
.is_const = optional_ptr_ty.isConstPtr(zcu),
.address_space = optional_ptr_ty.ptrAddressSpace(zcu),
},
});
 
if (try sema.resolveDefinedValue(block, src, optional_ptr)) |ptr_val| {
if (initializing) {
if (!sema.isComptimeMutablePtr(ptr_val)) {
// If the pointer resulting from this function was stored at comptime,
// the optional non-null bit would be set that way. But in this case,
// we need to emit a runtime instruction to do it.
if (sema.isComptimeMutablePtr(ptr_val)) {
// Set the optional to non-null at comptime.
// If the payload is OPV, we must use that value instead of undef.
const payload_val = try sema.typeHasOnePossibleValue(child_type) orelse try zcu.undefValue(child_type);
const opt_val = try zcu.intern(.{ .opt = .{
.ty = opt_type.toIntern(),
.val = payload_val.toIntern(),
} });
try sema.storePtrVal(block, src, ptr_val, Value.fromInterned(opt_val), opt_type);
} else {
// Emit runtime instructions to set the optional non-null bit.
const opt_payload_ptr = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr);
try sema.checkKnownAllocPtr(block, optional_ptr, opt_payload_ptr);
}
return Air.internedToRef((try mod.intern(.{ .ptr = .{
.ty = child_pointer.toIntern(),
.addr = .{ .opt_payload = ptr_val.toIntern() },
} })));
return Air.internedToRef((try ptr_val.ptrOptPayload(sema)).toIntern());
}
if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| {
if (val.isNull(mod)) {
if (val.isNull(zcu)) {
return sema.fail(block, src, "unable to unwrap null", .{});
}
// The same Value represents the pointer to the optional and the payload.
return Air.internedToRef((try mod.intern(.{ .ptr = .{
.ty = child_pointer.toIntern(),
.addr = .{ .opt_payload = ptr_val.toIntern() },
} })));
return Air.internedToRef((try ptr_val.ptrOptPayload(sema)).toIntern());
}
}
 
@@ -9173,49 +9206,50 @@ fn analyzeErrUnionPayloadPtr(
safety_check: bool,
initializing: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const zcu = sema.mod;
const operand_ty = sema.typeOf(operand);
assert(operand_ty.zigTypeTag(mod) == .Pointer);
assert(operand_ty.zigTypeTag(zcu) == .Pointer);
 
if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) {
if (operand_ty.childType(zcu).zigTypeTag(zcu) != .ErrorUnion) {
return sema.fail(block, src, "expected error union type, found '{}'", .{
operand_ty.childType(mod).fmt(mod),
operand_ty.childType(zcu).fmt(zcu),
});
}
 
const err_union_ty = operand_ty.childType(mod);
const payload_ty = err_union_ty.errorUnionPayload(mod);
const err_union_ty = operand_ty.childType(zcu);
const payload_ty = err_union_ty.errorUnionPayload(zcu);
const operand_pointer_ty = try sema.ptrType(.{
.child = payload_ty.toIntern(),
.flags = .{
.is_const = operand_ty.isConstPtr(mod),
.address_space = operand_ty.ptrAddressSpace(mod),
.is_const = operand_ty.isConstPtr(zcu),
.address_space = operand_ty.ptrAddressSpace(zcu),
},
});
 
if (try sema.resolveDefinedValue(block, src, operand)) |ptr_val| {
if (initializing) {
if (!sema.isComptimeMutablePtr(ptr_val)) {
// If the pointer resulting from this function was stored at comptime,
// the error union error code would be set that way. But in this case,
// we need to emit a runtime instruction to do it.
if (sema.isComptimeMutablePtr(ptr_val)) {
// Set the error union to non-error at comptime.
// If the payload is OPV, we must use that value instead of undef.
const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try zcu.undefValue(payload_ty);
const eu_val = try zcu.intern(.{ .error_union = .{
.ty = err_union_ty.toIntern(),
.val = .{ .payload = payload_val.toIntern() },
} });
try sema.storePtrVal(block, src, ptr_val, Value.fromInterned(eu_val), err_union_ty);
} else {
// Emit runtime instructions to set the error union error code.
try sema.requireRuntimeBlock(block, src, null);
const eu_payload_ptr = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand);
try sema.checkKnownAllocPtr(block, operand, eu_payload_ptr);
}
return Air.internedToRef((try mod.intern(.{ .ptr = .{
.ty = operand_pointer_ty.toIntern(),
.addr = .{ .eu_payload = ptr_val.toIntern() },
} })));
return Air.internedToRef((try ptr_val.ptrEuPayload(sema)).toIntern());
}
if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| {
if (val.getErrorName(mod).unwrap()) |name| {
if (val.getErrorName(zcu).unwrap()) |name| {
return sema.failWithComptimeErrorRetTrace(block, src, name);
}
return Air.internedToRef((try mod.intern(.{ .ptr = .{
.ty = operand_pointer_ty.toIntern(),
.addr = .{ .eu_payload = ptr_val.toIntern() },
} })));
return Air.internedToRef((try ptr_val.ptrEuPayload(sema)).toIntern());
}
}
 
@@ -9223,7 +9257,7 @@ fn analyzeErrUnionPayloadPtr(
 
// If the error set has no fields then no safety check is needed.
if (safety_check and block.wantSafety() and
!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod))
!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu))
{
try sema.panicUnwrapError(block, src, operand, .unwrap_errunion_err_ptr, .is_non_err_ptr);
}
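The error-union counterpart follows the same pattern; a hedged sketch, assuming the struct-literal assignment is lowered through `errunion_payload_ptr_set`. As the hunk notes, when the payload type has only one possible value, that value is stored rather than `undef`.

```zig
const E = error{Oops};
const S = struct { x: u32 };

comptime {
    var eu: E!S = error.Oops;
    eu = .{ .x = 1 }; // assumed to clear the error and store the payload at comptime
    _ = eu catch unreachable;
}
```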
@@ -10186,49 +10220,56 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const tracy = trace(@src());
defer tracy.end();
 
const mod = sema.mod;
const zcu = sema.mod;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
const ptr_ty = operand_ty.scalarType(mod);
const is_vector = operand_ty.zigTypeTag(mod) == .Vector;
if (!ptr_ty.isPtrAtRuntime(mod)) {
return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(mod)});
const ptr_ty = operand_ty.scalarType(zcu);
const is_vector = operand_ty.zigTypeTag(zcu) == .Vector;
if (!ptr_ty.isPtrAtRuntime(zcu)) {
return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(zcu)});
}
const pointee_ty = ptr_ty.childType(mod);
const pointee_ty = ptr_ty.childType(zcu);
if (try sema.typeRequiresComptime(ptr_ty)) {
const msg = msg: {
const msg = try sema.errMsg(block, ptr_src, "comptime-only type '{}' has no pointer address", .{pointee_ty.fmt(mod)});
const msg = try sema.errMsg(block, ptr_src, "comptime-only type '{}' has no pointer address", .{pointee_ty.fmt(zcu)});
errdefer msg.destroy(sema.gpa);
const src_decl = mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsComptime(msg, src_decl.toSrcLoc(ptr_src, mod), pointee_ty);
const src_decl = zcu.declPtr(block.src_decl);
try sema.explainWhyTypeIsComptime(msg, src_decl.toSrcLoc(ptr_src, zcu), pointee_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
}
if (try sema.resolveValueIntable(operand)) |operand_val| ct: {
if (!is_vector) {
return Air.internedToRef((try mod.intValue(
if (operand_val.isUndef(zcu)) {
return Air.internedToRef((try zcu.undefValue(Type.usize)).toIntern());
}
return Air.internedToRef((try zcu.intValue(
Type.usize,
(try operand_val.getUnsignedIntAdvanced(mod, sema)).?,
(try operand_val.getUnsignedIntAdvanced(zcu, sema)).?,
)).toIntern());
}
const len = operand_ty.vectorLen(mod);
const dest_ty = try mod.vectorType(.{ .child = .usize_type, .len = len });
const len = operand_ty.vectorLen(zcu);
const dest_ty = try zcu.vectorType(.{ .child = .usize_type, .len = len });
const new_elems = try sema.arena.alloc(InternPool.Index, len);
for (new_elems, 0..) |*new_elem, i| {
const ptr_val = try operand_val.elemValue(mod, i);
const addr = try ptr_val.getUnsignedIntAdvanced(mod, sema) orelse {
const ptr_val = try operand_val.elemValue(zcu, i);
if (ptr_val.isUndef(zcu)) {
new_elem.* = (try zcu.undefValue(Type.usize)).toIntern();
continue;
}
const addr = try ptr_val.getUnsignedIntAdvanced(zcu, sema) orelse {
// A vector element wasn't an integer pointer. This is a runtime operation.
break :ct;
};
new_elem.* = (try mod.intValue(
new_elem.* = (try zcu.intValue(
Type.usize,
addr,
)).toIntern();
}
return Air.internedToRef(try mod.intern(.{ .aggregate = .{
return Air.internedToRef(try zcu.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
.storage = .{ .elems = new_elems },
} }));
@@ -10238,11 +10279,11 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
if (!is_vector) {
return block.addUnOp(.int_from_ptr, operand);
}
const len = operand_ty.vectorLen(mod);
const dest_ty = try mod.vectorType(.{ .child = .usize_type, .len = len });
const len = operand_ty.vectorLen(zcu);
const dest_ty = try zcu.vectorType(.{ .child = .usize_type, .len = len });
const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
for (new_elems, 0..) |*new_elem, i| {
const idx_ref = try mod.intRef(Type.usize, i);
const idx_ref = try zcu.intRef(Type.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
new_elem.* = try block.addUnOp(.int_from_ptr, old_elem);
}
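For reference, a small comptime example of the cases handled above; the new undefined-element branch appears to make `@intFromPtr` of an undefined pointer yield an undefined `usize` rather than being rejected. The vector result type is my assumption from the hunk.

```zig
comptime {
    const p: *const u8 = @ptrFromInt(0x1000);
    if (@intFromPtr(p) != 0x1000) unreachable; // folded entirely at comptime

    const v: @Vector(2, *const u8) = .{ p, p };
    const addrs = @intFromPtr(v); // element-wise; assumed to produce @Vector(2, usize)
    if (addrs[1] != 0x1000) unreachable;
}
```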
@@ -11077,8 +11118,8 @@ const SwitchProngAnalysis = struct {
inline_case_capture: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
const sema = spa.sema;
const mod = sema.mod;
const ip = &mod.intern_pool;
const zcu = sema.mod;
const ip = &zcu.intern_pool;
 
const zir_datas = sema.code.instructions.items(.data);
const switch_node_offset = zir_datas[@intFromEnum(spa.switch_block_inst)].pl_node.src_node;
@@ -11089,27 +11130,21 @@ const SwitchProngAnalysis = struct {
 
if (inline_case_capture != .none) {
const item_val = sema.resolveConstDefinedValue(block, .unneeded, inline_case_capture, undefined) catch unreachable;
if (operand_ty.zigTypeTag(mod) == .Union) {
const field_index: u32 = @intCast(operand_ty.unionTagFieldIndex(item_val, mod).?);
const union_obj = mod.typeToUnion(operand_ty).?;
if (operand_ty.zigTypeTag(zcu) == .Union) {
const field_index: u32 = @intCast(operand_ty.unionTagFieldIndex(item_val, zcu).?);
const union_obj = zcu.typeToUnion(operand_ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
if (capture_byref) {
const ptr_field_ty = try sema.ptrType(.{
.child = field_ty.toIntern(),
.flags = .{
.is_const = !operand_ptr_ty.ptrIsMutable(mod),
.is_volatile = operand_ptr_ty.isVolatilePtr(mod),
.address_space = operand_ptr_ty.ptrAddressSpace(mod),
.is_const = !operand_ptr_ty.ptrIsMutable(zcu),
.is_volatile = operand_ptr_ty.isVolatilePtr(zcu),
.address_space = operand_ptr_ty.ptrAddressSpace(zcu),
},
});
if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |union_ptr| {
return Air.internedToRef((try mod.intern(.{ .ptr = .{
.ty = ptr_field_ty.toIntern(),
.addr = .{ .field = .{
.base = union_ptr.toIntern(),
.index = field_index,
} },
} })));
return Air.internedToRef((try union_ptr.ptrField(field_index, sema)).toIntern());
}
return block.addStructFieldPtr(spa.operand_ptr, field_index, ptr_field_ty);
} else {
@@ -11131,7 +11166,7 @@ const SwitchProngAnalysis = struct {
return spa.operand_ptr;
}
 
switch (operand_ty.zigTypeTag(mod)) {
switch (operand_ty.zigTypeTag(zcu)) {
.ErrorSet => if (spa.else_error_ty) |ty| {
return sema.bitCast(block, ty, spa.operand, operand_src, null);
} else {
@@ -11142,25 +11177,25 @@ const SwitchProngAnalysis = struct {
}
}
 
switch (operand_ty.zigTypeTag(mod)) {
switch (operand_ty.zigTypeTag(zcu)) {
.Union => {
const union_obj = mod.typeToUnion(operand_ty).?;
const union_obj = zcu.typeToUnion(operand_ty).?;
const first_item_val = sema.resolveConstDefinedValue(block, .unneeded, case_vals[0], undefined) catch unreachable;
 
const first_field_index: u32 = mod.unionTagFieldIndex(union_obj, first_item_val).?;
const first_field_index: u32 = zcu.unionTagFieldIndex(union_obj, first_item_val).?;
const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[first_field_index]);
 
const field_indices = try sema.arena.alloc(u32, case_vals.len);
for (case_vals, field_indices) |item, *field_idx| {
const item_val = sema.resolveConstDefinedValue(block, .unneeded, item, undefined) catch unreachable;
field_idx.* = mod.unionTagFieldIndex(union_obj, item_val).?;
field_idx.* = zcu.unionTagFieldIndex(union_obj, item_val).?;
}
 
// Fast path: if all the operands are the same type already, we don't need to hit
// PTR! This will also allow us to emit simpler code.
const same_types = for (field_indices[1..]) |field_idx| {
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
if (!field_ty.eql(first_field_ty, sema.mod)) break false;
if (!field_ty.eql(first_field_ty, zcu)) break false;
} else true;
 
const capture_ty = if (same_types) first_field_ty else capture_ty: {
@@ -11168,7 +11203,7 @@ const SwitchProngAnalysis = struct {
const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len);
for (dummy_captures, field_indices) |*dummy, field_idx| {
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
dummy.* = try mod.undefRef(field_ty);
dummy.* = try zcu.undefRef(field_ty);
}
 
const case_srcs = try sema.arena.alloc(?LazySrcLoc, case_vals.len);
@@ -11178,12 +11213,12 @@ const SwitchProngAnalysis = struct {
error.NeededSourceLocation => {
// This must be a multi-prong so this must be a `multi_capture` src
const multi_idx = raw_capture_src.multi_capture;
const src_decl_ptr = sema.mod.declPtr(block.src_decl);
const src_decl_ptr = zcu.declPtr(block.src_decl);
for (case_srcs, 0..) |*case_src, i| {
const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @intCast(i) } };
case_src.* = raw_case_src.resolve(mod, src_decl_ptr, switch_node_offset, .none);
case_src.* = raw_case_src.resolve(zcu, src_decl_ptr, switch_node_offset, .none);
}
const capture_src = raw_capture_src.resolve(mod, src_decl_ptr, switch_node_offset, .none);
const capture_src = raw_capture_src.resolve(zcu, src_decl_ptr, switch_node_offset, .none);
_ = sema.resolvePeerTypes(block, capture_src, dummy_captures, .{ .override = case_srcs }) catch |err1| switch (err1) {
error.AnalysisFail => {
const msg = sema.err orelse return error.AnalysisFail;
@@ -11200,7 +11235,7 @@ const SwitchProngAnalysis = struct {
 
// By-reference captures have some further restrictions which make them easier to emit
if (capture_byref) {
const operand_ptr_info = operand_ptr_ty.ptrInfo(mod);
const operand_ptr_info = operand_ptr_ty.ptrInfo(zcu);
const capture_ptr_ty = resolve: {
// By-ref captures of heterogeneous types are only allowed if all field
// pointer types are peer resolvable to each other.
@@ -11217,7 +11252,7 @@ const SwitchProngAnalysis = struct {
.alignment = union_obj.fieldAlign(ip, field_idx),
},
});
dummy.* = try mod.undefRef(field_ptr_ty);
dummy.* = try zcu.undefRef(field_ptr_ty);
}
const case_srcs = try sema.arena.alloc(?LazySrcLoc, case_vals.len);
@memset(case_srcs, .unneeded);
@@ -11226,12 +11261,12 @@ const SwitchProngAnalysis = struct {
error.NeededSourceLocation => {
// This must be a multi-prong so this must be a `multi_capture` src
const multi_idx = raw_capture_src.multi_capture;
const src_decl_ptr = sema.mod.declPtr(block.src_decl);
const src_decl_ptr = zcu.declPtr(block.src_decl);
for (case_srcs, 0..) |*case_src, i| {
const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @intCast(i) } };
case_src.* = raw_case_src.resolve(mod, src_decl_ptr, switch_node_offset, .none);
case_src.* = raw_case_src.resolve(zcu, src_decl_ptr, switch_node_offset, .none);
}
const capture_src = raw_capture_src.resolve(mod, src_decl_ptr, switch_node_offset, .none);
const capture_src = raw_capture_src.resolve(zcu, src_decl_ptr, switch_node_offset, .none);
_ = sema.resolvePeerTypes(block, capture_src, dummy_captures, .{ .override = case_srcs }) catch |err1| switch (err1) {
error.AnalysisFail => {
const msg = sema.err orelse return error.AnalysisFail;
@@ -11248,14 +11283,9 @@ const SwitchProngAnalysis = struct {
};
 
if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |op_ptr_val| {
if (op_ptr_val.isUndef(mod)) return mod.undefRef(capture_ptr_ty);
return Air.internedToRef((try mod.intern(.{ .ptr = .{
.ty = capture_ptr_ty.toIntern(),
.addr = .{ .field = .{
.base = op_ptr_val.toIntern(),
.index = first_field_index,
} },
} })));
if (op_ptr_val.isUndef(zcu)) return zcu.undefRef(capture_ptr_ty);
const field_ptr_val = try op_ptr_val.ptrField(first_field_index, sema);
return Air.internedToRef((try zcu.getCoerced(field_ptr_val, capture_ptr_ty)).toIntern());
}
 
try sema.requireRuntimeBlock(block, operand_src, null);
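A hedged sketch of the switch capture this path produces a field pointer for; the by-ref capture on a comptime-known union pointer is now built with `ptrField` instead of a hand-rolled `.field` intern.

```zig
const U = union(enum) { a: u32, b: u32 };

comptime {
    var u: U = .{ .a = 1 };
    switch (u) {
        // Multi-prong by-ref capture; both prongs share a payload type,
        // so the field pointer is comptime-known and mutation is permitted.
        .a, .b => |*p| p.* += 1,
    }
    if (u.a != 2) unreachable;
}
```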
@@ -11263,9 +11293,9 @@ const SwitchProngAnalysis = struct {
}
 
if (try sema.resolveDefinedValue(block, operand_src, spa.operand)) |operand_val| {
if (operand_val.isUndef(mod)) return mod.undefRef(capture_ty);
if (operand_val.isUndef(zcu)) return zcu.undefRef(capture_ty);
const union_val = ip.indexToKey(operand_val.toIntern()).un;
if (Value.fromInterned(union_val.tag).isUndef(mod)) return mod.undefRef(capture_ty);
if (Value.fromInterned(union_val.tag).isUndef(zcu)) return zcu.undefRef(capture_ty);
const uncoerced = Air.internedToRef(union_val.val);
return sema.coerce(block, capture_ty, uncoerced, operand_src);
}
@@ -11281,7 +11311,7 @@ const SwitchProngAnalysis = struct {
const first_non_imc = in_mem: {
for (field_indices, 0..) |field_idx, i| {
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
if (.ok != try sema.coerceInMemoryAllowed(block, capture_ty, field_ty, false, sema.mod.getTarget(), .unneeded, .unneeded)) {
if (.ok != try sema.coerceInMemoryAllowed(block, capture_ty, field_ty, false, zcu.getTarget(), .unneeded, .unneeded)) {
break :in_mem i;
}
}
@@ -11304,7 +11334,7 @@ const SwitchProngAnalysis = struct {
const next = first_non_imc + 1;
for (field_indices[next..], next..) |field_idx, i| {
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
if (.ok != try sema.coerceInMemoryAllowed(block, capture_ty, field_ty, false, sema.mod.getTarget(), .unneeded, .unneeded)) {
if (.ok != try sema.coerceInMemoryAllowed(block, capture_ty, field_ty, false, zcu.getTarget(), .unneeded, .unneeded)) {
in_mem_coercible.unset(i);
}
}
@@ -11339,9 +11369,9 @@ const SwitchProngAnalysis = struct {
const coerced = sema.coerce(&coerce_block, capture_ty, uncoerced, .unneeded) catch |err| switch (err) {
error.NeededSourceLocation => {
const multi_idx = raw_capture_src.multi_capture;
const src_decl_ptr = sema.mod.declPtr(block.src_decl);
const src_decl_ptr = zcu.declPtr(block.src_decl);
const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @intCast(idx) } };
const case_src = raw_case_src.resolve(mod, src_decl_ptr, switch_node_offset, .none);
const case_src = raw_case_src.resolve(zcu, src_decl_ptr, switch_node_offset, .none);
_ = try sema.coerce(&coerce_block, capture_ty, uncoerced, case_src);
unreachable;
},
@@ -11400,7 +11430,7 @@ const SwitchProngAnalysis = struct {
},
.ErrorSet => {
if (capture_byref) {
const capture_src = raw_capture_src.resolve(mod, mod.declPtr(block.src_decl), switch_node_offset, .none);
const capture_src = raw_capture_src.resolve(zcu, zcu.declPtr(block.src_decl), switch_node_offset, .none);
return sema.fail(
block,
capture_src,
@@ -11411,7 +11441,7 @@ const SwitchProngAnalysis = struct {
 
if (case_vals.len == 1) {
const item_val = sema.resolveConstDefinedValue(block, .unneeded, case_vals[0], undefined) catch unreachable;
const item_ty = try mod.singleErrorSetType(item_val.getErrorName(mod).unwrap().?);
const item_ty = try zcu.singleErrorSetType(item_val.getErrorName(zcu).unwrap().?);
return sema.bitCast(block, item_ty, spa.operand, operand_src, null);
}
 
@@ -11419,9 +11449,9 @@ const SwitchProngAnalysis = struct {
try names.ensureUnusedCapacity(sema.arena, case_vals.len);
for (case_vals) |err| {
const err_val = sema.resolveConstDefinedValue(block, .unneeded, err, undefined) catch unreachable;
names.putAssumeCapacityNoClobber(err_val.getErrorName(mod).unwrap().?, {});
names.putAssumeCapacityNoClobber(err_val.getErrorName(zcu).unwrap().?, {});
}
const error_ty = try mod.errorSetFromUnsortedNames(names.keys());
const error_ty = try zcu.errorSetFromUnsortedNames(names.keys());
return sema.bitCast(block, error_ty, spa.operand, operand_src, null);
},
else => {
@@ -13989,7 +14019,7 @@ fn zirShl(
const rhs_elem = try rhs_val.elemValue(mod, i);
if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
rhs_elem.fmtValue(mod),
rhs_elem.fmtValue(mod, sema),
i,
scalar_ty.fmt(mod),
});
@@ -13997,7 +14027,7 @@ fn zirShl(
}
} else if (rhs_val.compareHetero(.gte, bit_value, mod)) {
return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
rhs_val.fmtValue(mod),
rhs_val.fmtValue(mod, sema),
scalar_ty.fmt(mod),
});
}
@@ -14008,14 +14038,14 @@ fn zirShl(
const rhs_elem = try rhs_val.elemValue(mod, i);
if (rhs_elem.compareHetero(.lt, try mod.intValue(scalar_rhs_ty, 0), mod)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
rhs_elem.fmtValue(mod),
rhs_elem.fmtValue(mod, sema),
i,
});
}
}
} else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
rhs_val.fmtValue(mod),
rhs_val.fmtValue(mod, sema),
});
}
}
@@ -14154,7 +14184,7 @@ fn zirShr(
const rhs_elem = try rhs_val.elemValue(mod, i);
if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
rhs_elem.fmtValue(mod),
rhs_elem.fmtValue(mod, sema),
i,
scalar_ty.fmt(mod),
});
@@ -14162,7 +14192,7 @@ fn zirShr(
}
} else if (rhs_val.compareHetero(.gte, bit_value, mod)) {
return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
rhs_val.fmtValue(mod),
rhs_val.fmtValue(mod, sema),
scalar_ty.fmt(mod),
});
}
@@ -14173,14 +14203,14 @@ fn zirShr(
const rhs_elem = try rhs_val.elemValue(mod, i);
if (rhs_elem.compareHetero(.lt, try mod.intValue(rhs_ty.childType(mod), 0), mod)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
rhs_elem.fmtValue(mod),
rhs_elem.fmtValue(mod, sema),
i,
});
}
}
} else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
rhs_val.fmtValue(mod),
rhs_val.fmtValue(mod, sema),
});
}
if (maybe_lhs_val) |lhs_val| {
@@ -15101,7 +15131,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
block,
src,
"ambiguous coercion of division operands '{}' and '{}'; non-zero remainder '{}'",
.{ lhs_ty.fmt(mod), rhs_ty.fmt(mod), rem.fmtValue(mod) },
.{ lhs_ty.fmt(mod), rhs_ty.fmt(mod), rem.fmtValue(mod, sema) },
);
}
}
@@ -16903,21 +16933,14 @@ fn analyzePtrArithmetic(
 
const offset_int = try sema.usizeCast(block, offset_src, try offset_val.toUnsignedIntAdvanced(sema));
if (offset_int == 0) return ptr;
if (try ptr_val.getUnsignedIntAdvanced(mod, sema)) |addr| {
if (air_tag == .ptr_sub) {
const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child));
const new_addr = switch (air_tag) {
.ptr_add => addr + elem_size * offset_int,
.ptr_sub => addr - elem_size * offset_int,
else => unreachable,
};
const new_ptr_val = try mod.ptrIntValue(new_ptr_ty, new_addr);
const new_ptr_val = try sema.ptrSubtract(block, op_src, ptr_val, offset_int * elem_size, new_ptr_ty);
return Air.internedToRef(new_ptr_val.toIntern());
} else {
const new_ptr_val = try mod.getCoerced(try ptr_val.ptrElem(offset_int, sema), new_ptr_ty);
return Air.internedToRef(new_ptr_val.toIntern());
}
if (air_tag == .ptr_sub) {
return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{});
}
const new_ptr_val = try ptr_val.elemPtr(new_ptr_ty, offset_int, mod);
return Air.internedToRef(new_ptr_val.toIntern());
} else break :rs offset_src;
} else break :rs ptr_src;
};
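With the `base_addr` + `byte_offset` representation, comptime pointer addition becomes a byte-offset adjustment via `ptrElem`, and subtraction now goes through the new `ptrSubtract` instead of hitting the old "TODO implement Sema comptime pointer subtraction" error for non-integer-address pointers. A sketch:

```zig
comptime {
    var arr = [_]u32{ 1, 2, 3, 4 };
    const p: [*]u32 = &arr;
    const q = p + 2; // byte_offset grows by 2 * @sizeOf(u32)
    const r = q - 1; // subtraction on a non-integer-address pointer, previously a TODO error
    if (r[0] != 2) unreachable;
}
```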
@@ -17611,13 +17634,14 @@ fn zirBuiltinSrc(
.ty = .slice_const_u8_sentinel_0_type,
.ptr = try ip.get(gpa, .{ .ptr = .{
.ty = .manyptr_const_u8_sentinel_0_type,
.addr = .{ .anon_decl = .{
.base_addr = .{ .anon_decl = .{
.orig_ty = .slice_const_u8_sentinel_0_type,
.val = try ip.get(gpa, .{ .aggregate = .{
.ty = array_ty,
.storage = .{ .bytes = fn_owner_decl.name.toString() },
} }),
} },
.byte_offset = 0,
} }),
.len = (try mod.intValue(Type.usize, func_name_len)).toIntern(),
} });
@@ -17635,7 +17659,7 @@ fn zirBuiltinSrc(
.ty = .slice_const_u8_sentinel_0_type,
.ptr = try ip.get(gpa, .{ .ptr = .{
.ty = .manyptr_const_u8_sentinel_0_type,
.addr = .{ .anon_decl = .{
.base_addr = .{ .anon_decl = .{
.orig_ty = .slice_const_u8_sentinel_0_type,
.val = try ip.get(gpa, .{ .aggregate = .{
.ty = array_ty,
@@ -17644,6 +17668,7 @@ fn zirBuiltinSrc(
},
} }),
} },
.byte_offset = 0,
} }),
.len = (try mod.intValue(Type.usize, file_name.len)).toIntern(),
} });
@@ -17766,10 +17791,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.ty = slice_ty,
.ptr = try mod.intern(.{ .ptr = .{
.ty = manyptr_ty,
.addr = .{ .anon_decl = .{
.base_addr = .{ .anon_decl = .{
.orig_ty = manyptr_ty,
.val = new_decl_val,
} },
.byte_offset = 0,
} }),
.len = (try mod.intValue(Type.usize, param_vals.len)).toIntern(),
} });
@@ -18046,10 +18072,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.ty = .slice_const_u8_sentinel_0_type,
.ptr = try mod.intern(.{ .ptr = .{
.ty = .manyptr_const_u8_sentinel_0_type,
.addr = .{ .anon_decl = .{
.base_addr = .{ .anon_decl = .{
.val = new_decl_val,
.orig_ty = .slice_const_u8_sentinel_0_type,
} },
.byte_offset = 0,
} }),
.len = (try mod.intValue(Type.usize, error_name_len)).toIntern(),
} });
@@ -18092,10 +18119,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.ty = slice_errors_ty.toIntern(),
.ptr = try mod.intern(.{ .ptr = .{
.ty = manyptr_errors_ty,
.addr = .{ .anon_decl = .{
.base_addr = .{ .anon_decl = .{
.orig_ty = manyptr_errors_ty,
.val = new_decl_val,
} },
.byte_offset = 0,
} }),
.len = (try mod.intValue(Type.usize, vals.len)).toIntern(),
} });
@@ -18184,10 +18212,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.ty = .slice_const_u8_sentinel_0_type,
.ptr = try mod.intern(.{ .ptr = .{
.ty = .manyptr_const_u8_sentinel_0_type,
.addr = .{ .anon_decl = .{
.base_addr = .{ .anon_decl = .{
.val = new_decl_val,
.orig_ty = .slice_const_u8_sentinel_0_type,
} },
.byte_offset = 0,
} }),
.len = (try mod.intValue(Type.usize, tag_name_len)).toIntern(),
} });
@@ -18226,10 +18255,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.ty = slice_ty,
.ptr = try mod.intern(.{ .ptr = .{
.ty = manyptr_ty,
.addr = .{ .anon_decl = .{
.base_addr = .{ .anon_decl = .{
.val = new_decl_val,
.orig_ty = manyptr_ty,
} },
.byte_offset = 0,
} }),
.len = (try mod.intValue(Type.usize, enum_field_vals.len)).toIntern(),
} });
@@ -18318,10 +18348,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.ty = .slice_const_u8_sentinel_0_type,
.ptr = try mod.intern(.{ .ptr = .{
.ty = .manyptr_const_u8_sentinel_0_type,
.addr = .{ .anon_decl = .{
.base_addr = .{ .anon_decl = .{
.val = new_decl_val,
.orig_ty = .slice_const_u8_sentinel_0_type,
} },
.byte_offset = 0,
} }),
.len = (try mod.intValue(Type.usize, field_name_len)).toIntern(),
} });
@@ -18368,10 +18399,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.ty = slice_ty,
.ptr = try mod.intern(.{ .ptr = .{
.ty = manyptr_ty,
.addr = .{ .anon_decl = .{
.base_addr = .{ .anon_decl = .{
.orig_ty = manyptr_ty,
.val = new_decl_val,
} },
.byte_offset = 0,
} }),
.len = (try mod.intValue(Type.usize, union_field_vals.len)).toIntern(),
} });
@@ -18471,10 +18503,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.ty = .slice_const_u8_sentinel_0_type,
.ptr = try mod.intern(.{ .ptr = .{
.ty = .manyptr_const_u8_sentinel_0_type,
.addr = .{ .anon_decl = .{
.base_addr = .{ .anon_decl = .{
.val = new_decl_val,
.orig_ty = .slice_const_u8_sentinel_0_type,
} },
.byte_offset = 0,
} }),
.len = (try mod.intValue(Type.usize, field_name_len)).toIntern(),
} });
@@ -18534,10 +18567,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.ty = .slice_const_u8_sentinel_0_type,
.ptr = try mod.intern(.{ .ptr = .{
.ty = .manyptr_const_u8_sentinel_0_type,
.addr = .{ .anon_decl = .{
.base_addr = .{ .anon_decl = .{
.val = new_decl_val,
.orig_ty = .slice_const_u8_sentinel_0_type,
} },
.byte_offset = 0,
} }),
.len = (try mod.intValue(Type.usize, field_name_len)).toIntern(),
} });
@@ -18594,10 +18628,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.ty = slice_ty,
.ptr = try mod.intern(.{ .ptr = .{
.ty = manyptr_ty,
.addr = .{ .anon_decl = .{
.base_addr = .{ .anon_decl = .{
.orig_ty = manyptr_ty,
.val = new_decl_val,
} },
.byte_offset = 0,
} }),
.len = (try mod.intValue(Type.usize, struct_field_vals.len)).toIntern(),
} });
@@ -18733,10 +18768,11 @@ fn typeInfoDecls(
.ty = slice_ty,
.ptr = try mod.intern(.{ .ptr = .{
.ty = manyptr_ty,
.addr = .{ .anon_decl = .{
.base_addr = .{ .anon_decl = .{
.orig_ty = manyptr_ty,
.val = new_decl_val,
} },
.byte_offset = 0,
} }),
.len = (try mod.intValue(Type.usize, decl_vals.items.len)).toIntern(),
} });
@@ -18765,7 +18801,7 @@ fn typeInfoNamespaceDecls(
if (!decl.is_pub) continue;
if (decl.kind == .@"usingnamespace") {
if (decl.analysis == .in_progress) continue;
try mod.ensureDeclAnalyzed(decl_index);
try sema.ensureDeclAnalyzed(decl_index);
try sema.typeInfoNamespaceDecls(block, decl.val.toType().getNamespaceIndex(mod), declaration_ty, decl_vals, seen_namespaces);
continue;
}
@@ -18785,10 +18821,11 @@ fn typeInfoNamespaceDecls(
.ty = .slice_const_u8_sentinel_0_type,
.ptr = try mod.intern(.{ .ptr = .{
.ty = .manyptr_const_u8_sentinel_0_type,
.addr = .{ .anon_decl = .{
.base_addr = .{ .anon_decl = .{
.orig_ty = .slice_const_u8_sentinel_0_type,
.val = new_decl_val,
} },
.byte_offset = 0,
} }),
.len = (try mod.intValue(Type.usize, decl_name_len)).toIntern(),
} });
@@ -19907,6 +19944,16 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
}
}
 
if (host_size != 0 and !try sema.validatePackedType(elem_ty)) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(block, elem_ty_src, "bit-pointer cannot refer to value of type '{}'", .{elem_ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
const src_decl = mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsNotPacked(msg, src_decl.toSrcLoc(elem_ty_src, mod), elem_ty);
break :msg msg;
});
}
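The new check rejects bit-pointers whose element type cannot participate in a packed layout. An illustrative sketch using the `align(alignment:bit_offset:host_size)` pointer syntax:

```zig
// Fine: a u7 can live inside a one-byte packed host.
const OkPtr = *align(1:0:1) u7;

// Rejected by the new check with
// "bit-pointer cannot refer to value of type '[]u8'":
// const BadPtr = *align(1:0:2) []u8;
```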
 
const ty = try sema.ptrType(.{
.child = elem_ty.toIntern(),
.sentinel = sentinel,
@@ -21176,7 +21223,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const enum_decl = mod.declPtr(enum_decl_index);
const msg = msg: {
const msg = try sema.errMsg(block, src, "no field with value '{}' in enum '{}'", .{
val.fmtValue(sema.mod), enum_decl.name.fmt(ip),
val.fmtValue(sema.mod, sema), enum_decl.name.fmt(ip),
});
errdefer msg.destroy(sema.gpa);
try mod.errNoteNonLazy(enum_decl.srcLoc(mod), msg, "declared here", .{});
@@ -21811,7 +21858,7 @@ fn reifyEnum(
// TODO: better source location
return sema.fail(block, src, "field '{}' with enumeration value '{}' is too large for backing int type '{}'", .{
field_name.fmt(ip),
field_value_val.fmtValue(mod),
field_value_val.fmtValue(mod, sema),
tag_ty.fmt(mod),
});
}
@@ -21827,7 +21874,7 @@ fn reifyEnum(
break :msg msg;
},
.value => msg: {
const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{field_value_val.fmtValue(mod)});
const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{field_value_val.fmtValue(mod, sema)});
errdefer msg.destroy(gpa);
_ = conflict.prev_field_idx; // TODO: this note is incorrect
try sema.errNote(block, src, msg, "other enum tag value here", .{});
@@ -22681,19 +22728,25 @@ fn ptrFromIntVal(
ptr_ty: Type,
ptr_align: Alignment,
) !Value {
const mod = sema.mod;
const zcu = sema.mod;
if (operand_val.isUndef(zcu)) {
if (ptr_ty.isAllowzeroPtr(zcu) and ptr_align == .@"1") {
return zcu.undefValue(ptr_ty);
}
return sema.failWithUseOfUndef(block, operand_src);
}
const addr = try operand_val.toUnsignedIntAdvanced(sema);
if (!ptr_ty.isAllowzeroPtr(mod) and addr == 0)
return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(sema.mod)});
if (!ptr_ty.isAllowzeroPtr(zcu) and addr == 0)
return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(zcu)});
if (addr != 0 and ptr_align != .none and !ptr_align.check(addr))
return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(sema.mod)});
return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(zcu)});
 
return switch (ptr_ty.zigTypeTag(mod)) {
.Optional => Value.fromInterned((try mod.intern(.{ .opt = .{
return switch (ptr_ty.zigTypeTag(zcu)) {
.Optional => Value.fromInterned((try zcu.intern(.{ .opt = .{
.ty = ptr_ty.toIntern(),
.val = if (addr == 0) .none else (try mod.ptrIntValue(ptr_ty.childType(mod), addr)).toIntern(),
.val = if (addr == 0) .none else (try zcu.ptrIntValue(ptr_ty.childType(zcu), addr)).toIntern(),
} }))),
.Pointer => try mod.ptrIntValue(ptr_ty, addr),
.Pointer => try zcu.ptrIntValue(ptr_ty, addr),
else => unreachable,
};
}
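A short sketch of the comptime rules enforced here: address zero is only representable through an optional or `allowzero` pointer, and an undefined operand is now accepted only when the destination is an `allowzero` pointer with alignment 1 (yielding an undefined pointer); otherwise it reports a use of undefined value.

```zig
comptime {
    const a: ?*u8 = @ptrFromInt(0); // address zero becomes null
    const b: *allowzero u8 = @ptrFromInt(0); // allowzero permits address zero
    // const c: *u8 = @ptrFromInt(0); // error: '*u8' does not allow address zero
    _ = a;
    _ = b;
}
```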
@@ -22980,12 +23033,12 @@ fn ptrCastFull(
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = if (src_info.sentinel == .none) blk: {
break :blk try sema.errMsg(block, src, "destination pointer requires '{}' sentinel", .{
Value.fromInterned(dest_info.sentinel).fmtValue(mod),
Value.fromInterned(dest_info.sentinel).fmtValue(mod, sema),
});
} else blk: {
break :blk try sema.errMsg(block, src, "pointer sentinel '{}' cannot coerce into pointer sentinel '{}'", .{
Value.fromInterned(src_info.sentinel).fmtValue(mod),
Value.fromInterned(dest_info.sentinel).fmtValue(mod),
Value.fromInterned(src_info.sentinel).fmtValue(mod, sema),
Value.fromInterned(dest_info.sentinel).fmtValue(mod, sema),
});
};
errdefer msg.destroy(sema.gpa);
@@ -23159,11 +23212,13 @@ fn ptrCastFull(
if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) {
if (ptr_val.isUndef(mod)) return mod.undefRef(dest_ty);
const arr_len = try mod.intValue(Type.usize, Type.fromInterned(src_info.child).arrayLen(mod));
const ptr_val_key = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr;
return Air.internedToRef((try mod.intern(.{ .slice = .{
.ty = dest_ty.toIntern(),
.ptr = try mod.intern(.{ .ptr = .{
.ty = dest_ty.slicePtrFieldType(mod).toIntern(),
.addr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr,
.base_addr = ptr_val_key.base_addr,
.byte_offset = ptr_val_key.byte_offset,
} }),
.len = arr_len.toIntern(),
} })));
@@ -23834,36 +23889,6 @@ fn checkPtrIsNotComptimeMutable(
}
}
 
fn checkComptimeVarStore(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
alloc_index: ComptimeAllocIndex,
) CompileError!void {
const runtime_index = sema.getComptimeAlloc(alloc_index).runtime_index;
if (@intFromEnum(runtime_index) < @intFromEnum(block.runtime_index)) {
if (block.runtime_cond) |cond_src| {
const msg = msg: {
const msg = try sema.errMsg(block, src, "store to comptime variable depends on runtime condition", .{});
errdefer msg.destroy(sema.gpa);
try sema.mod.errNoteNonLazy(cond_src, msg, "runtime condition here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
}
if (block.runtime_loop) |loop_src| {
const msg = msg: {
const msg = try sema.errMsg(block, src, "cannot store to comptime variable in non-inline loop", .{});
errdefer msg.destroy(sema.gpa);
try sema.mod.errNoteNonLazy(loop_src, msg, "non-inline loop here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
}
unreachable;
}
}
 
fn checkIntOrVector(
sema: *Sema,
block: *Block,
@@ -24926,8 +24951,8 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
}
 
fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const ip = &mod.intern_pool;
const zcu = sema.mod;
const ip = &zcu.intern_pool;
 
const extra = sema.code.extraData(Zir.Inst.FieldParentPtr, extended.operand).data;
const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?;
@@ -24939,23 +24964,23 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
 
const parent_ptr_ty = try sema.resolveDestType(block, inst_src, extra.parent_ptr_type, .remove_eu, "@fieldParentPtr");
try sema.checkPtrType(block, inst_src, parent_ptr_ty, true);
const parent_ptr_info = parent_ptr_ty.ptrInfo(mod);
const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu);
if (parent_ptr_info.flags.size != .One) {
return sema.fail(block, inst_src, "expected single pointer type, found '{}'", .{parent_ptr_ty.fmt(sema.mod)});
return sema.fail(block, inst_src, "expected single pointer type, found '{}'", .{parent_ptr_ty.fmt(zcu)});
}
const parent_ty = Type.fromInterned(parent_ptr_info.child);
switch (parent_ty.zigTypeTag(mod)) {
switch (parent_ty.zigTypeTag(zcu)) {
.Struct, .Union => {},
else => return sema.fail(block, inst_src, "expected pointer to struct or union type, found '{}'", .{parent_ptr_ty.fmt(sema.mod)}),
else => return sema.fail(block, inst_src, "expected pointer to struct or union type, found '{}'", .{parent_ptr_ty.fmt(zcu)}),
}
try sema.resolveTypeLayout(parent_ty);
 
const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, .{
.needed_comptime_reason = "field name must be comptime-known",
});
const field_index = switch (parent_ty.zigTypeTag(mod)) {
const field_index = switch (parent_ty.zigTypeTag(zcu)) {
.Struct => blk: {
if (parent_ty.isTuple(mod)) {
if (parent_ty.isTuple(zcu)) {
if (field_name.eqlSlice("len", ip)) {
return sema.fail(block, inst_src, "cannot get @fieldParentPtr of 'len' field of tuple", .{});
}
@@ -24967,19 +24992,19 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
.Union => try sema.unionFieldIndex(block, parent_ty, field_name, field_name_src),
else => unreachable,
};
if (parent_ty.zigTypeTag(mod) == .Struct and parent_ty.structFieldIsComptime(field_index, mod)) {
if (parent_ty.zigTypeTag(zcu) == .Struct and parent_ty.structFieldIsComptime(field_index, zcu)) {
return sema.fail(block, field_name_src, "cannot get @fieldParentPtr of a comptime field", .{});
}
 
const field_ptr = try sema.resolveInst(extra.field_ptr);
const field_ptr_ty = sema.typeOf(field_ptr);
try sema.checkPtrOperand(block, field_ptr_src, field_ptr_ty);
const field_ptr_info = field_ptr_ty.ptrInfo(mod);
const field_ptr_info = field_ptr_ty.ptrInfo(zcu);
 
var actual_parent_ptr_info: InternPool.Key.PtrType = .{
.child = parent_ty.toIntern(),
.flags = .{
.alignment = try parent_ptr_ty.ptrAlignmentAdvanced(mod, sema),
.alignment = try parent_ptr_ty.ptrAlignmentAdvanced(zcu, sema),
.is_const = field_ptr_info.flags.is_const,
.is_volatile = field_ptr_info.flags.is_volatile,
.is_allowzero = field_ptr_info.flags.is_allowzero,
@@ -24987,11 +25012,11 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
},
.packed_offset = parent_ptr_info.packed_offset,
};
const field_ty = parent_ty.structFieldType(field_index, mod);
const field_ty = parent_ty.structFieldType(field_index, zcu);
var actual_field_ptr_info: InternPool.Key.PtrType = .{
.child = field_ty.toIntern(),
.flags = .{
.alignment = try field_ptr_ty.ptrAlignmentAdvanced(mod, sema),
.alignment = try field_ptr_ty.ptrAlignmentAdvanced(zcu, sema),
.is_const = field_ptr_info.flags.is_const,
.is_volatile = field_ptr_info.flags.is_volatile,
.is_allowzero = field_ptr_info.flags.is_allowzero,
@@ -24999,14 +25024,14 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
},
.packed_offset = field_ptr_info.packed_offset,
};
switch (parent_ty.containerLayout(mod)) {
switch (parent_ty.containerLayout(zcu)) {
.auto => {
actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(
if (mod.typeToStruct(parent_ty)) |struct_obj| try sema.structFieldAlignment(
if (zcu.typeToStruct(parent_ty)) |struct_obj| try sema.structFieldAlignment(
struct_obj.fieldAlign(ip, field_index),
field_ty,
struct_obj.layout,
) else if (mod.typeToUnion(parent_ty)) |union_obj|
) else if (zcu.typeToUnion(parent_ty)) |union_obj|
try sema.unionFieldAlignment(union_obj, field_index)
else
actual_field_ptr_info.flags.alignment,
@@ -25016,7 +25041,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
actual_field_ptr_info.packed_offset = .{ .bit_offset = 0, .host_size = 0 };
},
.@"extern" => {
const field_offset = parent_ty.structFieldOffset(field_index, mod);
const field_offset = parent_ty.structFieldOffset(field_index, zcu);
actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(if (field_offset > 0)
Alignment.fromLog2Units(@ctz(field_offset))
else
@@ -25027,7 +25052,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
},
.@"packed" => {
const byte_offset = std.math.divExact(u32, @abs(@as(i32, actual_parent_ptr_info.packed_offset.bit_offset) +
(if (mod.typeToStruct(parent_ty)) |struct_obj| mod.structPackedFieldBitOffset(struct_obj, field_index) else 0) -
(if (zcu.typeToStruct(parent_ty)) |struct_obj| zcu.structPackedFieldBitOffset(struct_obj, field_index) else 0) -
actual_field_ptr_info.packed_offset.bit_offset), 8) catch
return sema.fail(block, inst_src, "pointer bit-offset mismatch", .{});
actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(if (byte_offset > 0)
@@ -25040,18 +25065,60 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
const actual_field_ptr_ty = try sema.ptrType(actual_field_ptr_info);
const casted_field_ptr = try sema.coerce(block, actual_field_ptr_ty, field_ptr, field_ptr_src);
const actual_parent_ptr_ty = try sema.ptrType(actual_parent_ptr_info);
 
const result = if (try sema.resolveDefinedValue(block, field_ptr_src, casted_field_ptr)) |field_ptr_val| result: {
const field = switch (ip.indexToKey(field_ptr_val.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
switch (parent_ty.zigTypeTag(zcu)) {
.Struct => switch (parent_ty.containerLayout(zcu)) {
.auto => {},
.@"extern" => {
const byte_offset = parent_ty.structFieldOffset(field_index, zcu);
const parent_ptr_val = try sema.ptrSubtract(block, field_ptr_src, field_ptr_val, byte_offset, actual_parent_ptr_ty);
break :result Air.internedToRef(parent_ptr_val.toIntern());
},
.@"packed" => {
// Logic lifted from type computation above - I'm just assuming it's correct.
// `catch unreachable` since error case handled above.
const byte_offset = std.math.divExact(u32, @abs(@as(i32, actual_parent_ptr_info.packed_offset.bit_offset) +
zcu.structPackedFieldBitOffset(zcu.typeToStruct(parent_ty).?, field_index) -
actual_field_ptr_info.packed_offset.bit_offset), 8) catch unreachable;
const parent_ptr_val = try sema.ptrSubtract(block, field_ptr_src, field_ptr_val, byte_offset, actual_parent_ptr_ty);
break :result Air.internedToRef(parent_ptr_val.toIntern());
},
},
.Union => switch (parent_ty.containerLayout(zcu)) {
.auto => {},
.@"extern", .@"packed" => {
// For an extern or packed union, just coerce the pointer.
const parent_ptr_val = try zcu.getCoerced(field_ptr_val, actual_parent_ptr_ty);
break :result Air.internedToRef(parent_ptr_val.toIntern());
},
},
else => unreachable,
}
 
const opt_field: ?InternPool.Key.Ptr.BaseAddr.BaseIndex = opt_field: {
const ptr = switch (ip.indexToKey(field_ptr_val.toIntern())) {
.ptr => |ptr| ptr,
else => break :opt_field null,
};
if (ptr.byte_offset != 0) break :opt_field null;
break :opt_field switch (ptr.base_addr) {
.field => |field| field,
else => null,
},
else => null,
} orelse return sema.fail(block, field_ptr_src, "pointer value not based on parent struct", .{});
};
};
 
const field = opt_field orelse {
return sema.fail(block, field_ptr_src, "pointer value not based on parent struct", .{});
};
 
if (Value.fromInterned(field.base).typeOf(zcu).childType(zcu).toIntern() != parent_ty.toIntern()) {
return sema.fail(block, field_ptr_src, "pointer value not based on parent struct", .{});
}
 
if (field.index != field_index) {
return sema.fail(block, inst_src, "field '{}' has index '{d}' but pointer value is index '{d}' of struct '{}'", .{
field_name.fmt(ip), field_index, field.index, parent_ty.fmt(sema.mod),
field_name.fmt(ip), field_index, field.index, parent_ty.fmt(zcu),
});
}
break :result try sema.coerce(block, actual_parent_ptr_ty, Air.internedToRef(field.base), inst_src);
@@ -25072,6 +25139,27 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
return sema.ptrCastFull(block, flags, inst_src, result, inst_src, parent_ptr_ty, "@fieldParentPtr");
}
 
fn ptrSubtract(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, byte_subtract: u64, new_ty: Type) !Value {
const zcu = sema.mod;
if (byte_subtract == 0) return zcu.getCoerced(ptr_val, new_ty);
var ptr = switch (zcu.intern_pool.indexToKey(ptr_val.toIntern())) {
.undef => return sema.failWithUseOfUndef(block, src),
.ptr => |ptr| ptr,
else => unreachable,
};
if (ptr.byte_offset < byte_subtract) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(block, src, "pointer computation here causes undefined behavior", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "resulting pointer exceeds bounds of containing value which may trigger overflow", .{});
break :msg msg;
});
}
ptr.byte_offset -= byte_subtract;
ptr.ty = new_ty.toIntern();
return Value.fromInterned(try zcu.intern(.{ .ptr = ptr }));
}
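`ptrSubtract` walks the pointer's `byte_offset` backwards and re-types it, failing when the result would point below the containing value. Its main comptime client is `@fieldParentPtr` on well-defined layouts; a hedged sketch, assuming the two-argument builtin form whose parent pointer type comes from the result location:

```zig
const S = extern struct { a: u32, b: u32 };

comptime {
    var s: S = .{ .a = 1, .b = 2 };
    const bp = &s.b;
    // Recovering &s subtracts @offsetOf(S, "b") bytes from bp's byte_offset.
    const sp: *S = @fieldParentPtr("b", bp);
    if (sp != &s) unreachable;
}
```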
 
fn zirMinMax(
sema: *Sema,
block: *Block,
@@ -25424,10 +25512,10 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
const msg = try sema.errMsg(block, src, "non-matching @memcpy lengths", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, dest_src, msg, "length {} here", .{
dest_len_val.fmtValue(sema.mod),
dest_len_val.fmtValue(sema.mod, sema),
});
try sema.errNote(block, src_src, msg, "length {} here", .{
src_len_val.fmtValue(sema.mod),
src_len_val.fmtValue(sema.mod, sema),
});
break :msg msg;
};
@@ -26340,7 +26428,8 @@ fn zirBuiltinExtern(
.opt_type => |child_type| child_type,
else => unreachable,
},
.addr = .{ .decl = new_decl_index },
.base_addr = .{ .decl = new_decl_index },
.byte_offset = 0,
} }))), ty)).toIntern());
}
 
@@ -26745,8 +26834,8 @@ fn explainWhyTypeIsNotExtern(
/// Returns true if `ty` is allowed in packed types.
/// Does not require `ty` to be resolved in any way, but may resolve whether it is comptime-only.
fn validatePackedType(sema: *Sema, ty: Type) !bool {
const mod = sema.mod;
switch (ty.zigTypeTag(mod)) {
const zcu = sema.mod;
return switch (ty.zigTypeTag(zcu)) {
.Type,
.ComptimeFloat,
.ComptimeInt,
@@ -26761,18 +26850,21 @@ fn validatePackedType(sema: *Sema, ty: Type) !bool {
.AnyFrame,
.Fn,
.Array,
=> return false,
.Optional => return ty.isPtrLikeOptional(mod),
=> false,
.Optional => return ty.isPtrLikeOptional(zcu),
.Void,
.Bool,
.Float,
.Int,
.Vector,
.Enum,
=> return true,
.Pointer => return !ty.isSlice(mod) and !try sema.typeRequiresComptime(ty),
.Struct, .Union => return ty.containerLayout(mod) == .@"packed",
}
=> true,
.Enum => switch (zcu.intern_pool.loadEnumType(ty.toIntern()).tag_mode) {
.auto => false,
.explicit, .nonexhaustive => true,
},
.Pointer => !ty.isSlice(zcu) and !try sema.typeRequiresComptime(ty),
.Struct, .Union => ty.containerLayout(zcu) == .@"packed",
};
}
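As I read the rewritten switch, the practical change is that enums relying on a compiler-chosen tag type are no longer considered packable, while enums with an explicit backing integer (or non-exhaustive enums) still are. An illustrative sketch:

```zig
const AutoTag = enum { a, b }; // tag type inferred by the compiler
const FixedTag = enum(u1) { a, b }; // explicit backing integer

const P = packed struct {
    ok: FixedTag,
    // bad: AutoTag, // rejected: no guaranteed bit layout inside a packed struct
};
```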
 
fn explainWhyTypeIsNotPacked(
@@ -27443,13 +27535,7 @@ fn fieldPtr(
});
 
if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
return Air.internedToRef((try mod.intern(.{ .ptr = .{
.ty = result_ty.toIntern(),
.addr = .{ .field = .{
.base = val.toIntern(),
.index = Value.slice_ptr_index,
} },
} })));
return Air.internedToRef((try val.ptrField(Value.slice_ptr_index, sema)).toIntern());
}
try sema.requireRuntimeBlock(block, src, null);
 
@@ -27467,13 +27553,7 @@ fn fieldPtr(
});
 
if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
return Air.internedToRef((try mod.intern(.{ .ptr = .{
.ty = result_ty.toIntern(),
.addr = .{ .field = .{
.base = val.toIntern(),
.index = Value.slice_len_index,
} },
} })));
return Air.internedToRef((try val.ptrField(Value.slice_len_index, sema)).toIntern());
}
try sema.requireRuntimeBlock(block, src, null);
 
@@ -27785,13 +27865,8 @@ fn finishFieldCallBind(
}
 
if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| {
const pointer = Air.internedToRef((try mod.intern(.{ .ptr = .{
.ty = ptr_field_ty.toIntern(),
.addr = .{ .field = .{
.base = struct_ptr_val.toIntern(),
.index = field_index,
} },
} })));
const ptr_val = try struct_ptr_val.ptrField(field_index, sema);
const pointer = Air.internedToRef(ptr_val.toIntern());
return .{ .direct = try sema.analyzeLoad(block, src, pointer, src) };
}
 
@@ -27903,6 +27978,11 @@ fn structFieldPtrByIndex(
return sema.tupleFieldPtr(block, src, struct_ptr, field_src, field_index, initializing);
}
 
if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| {
const val = try struct_ptr_val.ptrField(field_index, sema);
return Air.internedToRef(val.toIntern());
}
 
const struct_type = mod.typeToStruct(struct_ty).?;
const field_ty = struct_type.field_types.get(ip)[field_index];
const struct_ptr_ty = sema.typeOf(struct_ptr);
@@ -27917,57 +27997,20 @@ fn structFieldPtrByIndex(
},
};
 
const target = mod.getTarget();
 
const parent_align = if (struct_ptr_ty_info.flags.alignment != .none)
struct_ptr_ty_info.flags.alignment
else
try sema.typeAbiAlignment(Type.fromInterned(struct_ptr_ty_info.child));
 
if (struct_type.layout == .@"packed") {
comptime assert(Type.packed_struct_layout_version == 2);
 
var running_bits: u16 = 0;
for (0..struct_type.field_types.len) |i| {
const f_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
if (!(try sema.typeHasRuntimeBits(f_ty))) continue;
 
if (i == field_index) {
ptr_ty_data.packed_offset.bit_offset = running_bits;
}
running_bits += @intCast(f_ty.bitSize(mod));
}
ptr_ty_data.packed_offset.host_size = (running_bits + 7) / 8;
 
// If this is a packed struct embedded in another one, we need to offset
// the bits against each other.
if (struct_ptr_ty_info.packed_offset.host_size != 0) {
ptr_ty_data.packed_offset.host_size = struct_ptr_ty_info.packed_offset.host_size;
ptr_ty_data.packed_offset.bit_offset += struct_ptr_ty_info.packed_offset.bit_offset;
}
 
ptr_ty_data.flags.alignment = parent_align;
 
// If the field happens to be byte-aligned, simplify the pointer type.
// The pointee type bit size must match its ABI byte size so that loads and stores
// do not interfere with the surrounding packed bits.
// We do not attempt this with big-endian targets yet because of nested
// structs and floats. I need to double-check the desired behavior for big endian
// targets before adding the necessary complications to this code. This will not
// cause miscompilations; it only means the field pointer uses bit masking when it
// might not be strictly necessary.
if (parent_align != .none and ptr_ty_data.packed_offset.bit_offset % 8 == 0 and
target.cpu.arch.endian() == .little)
{
const elem_size_bytes = try sema.typeAbiSize(Type.fromInterned(ptr_ty_data.child));
const elem_size_bits = Type.fromInterned(ptr_ty_data.child).bitSize(mod);
if (elem_size_bytes * 8 == elem_size_bits) {
const byte_offset = ptr_ty_data.packed_offset.bit_offset / 8;
const new_align: Alignment = @enumFromInt(@ctz(byte_offset | parent_align.toByteUnits().?));
assert(new_align != .none);
ptr_ty_data.flags.alignment = new_align;
ptr_ty_data.packed_offset = .{ .host_size = 0, .bit_offset = 0 };
}
switch (struct_ty.packedStructFieldPtrInfo(struct_ptr_ty, field_index, mod)) {
.bit_ptr => |packed_offset| {
ptr_ty_data.flags.alignment = parent_align;
ptr_ty_data.packed_offset = packed_offset;
},
.byte_ptr => |ptr_info| {
ptr_ty_data.flags.alignment = ptr_info.alignment;
},
}
} else if (struct_type.layout == .@"extern") {
// For extern structs, field alignment might be bigger than type's
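The packed-field pointer computation now lives in `packedStructFieldPtrInfo`, which I take to preserve the earlier behavior: degrade to an ordinary byte pointer when the field happens to be byte-aligned (on little-endian targets), and produce a bit-pointer otherwise. A sketch under that assumption:

```zig
const P = packed struct { a: u3, b: u5, c: u8 };

var p: P = .{ .a = 1, .b = 2, .c = 3 };
const pb = &p.b; // mid-byte field: a bit-pointer carrying bit_offset/host_size
const pc = &p.c; // byte-aligned field: assumed to be a plain (non-bit) pointer on little-endian
```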
@@ -27997,18 +28040,8 @@ fn structFieldPtrByIndex(
try sema.resolveStructFieldInits(struct_ty);
const val = try mod.intern(.{ .ptr = .{
.ty = ptr_field_ty.toIntern(),
.addr = .{ .comptime_field = struct_type.field_inits.get(ip)[field_index] },
} });
return Air.internedToRef(val);
}
 
if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| {
const val = try mod.intern(.{ .ptr = .{
.ty = ptr_field_ty.toIntern(),
.addr = .{ .field = .{
.base = struct_ptr_val.toIntern(),
.index = field_index,
} },
.base_addr = .{ .comptime_field = struct_type.field_inits.get(ip)[field_index] },
.byte_offset = 0,
} });
return Air.internedToRef(val);
}
@@ -28206,7 +28239,13 @@ fn unionFieldPtr(
 
if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| ct: {
switch (union_obj.getLayout(ip)) {
.auto => if (!initializing) {
.auto => if (initializing) {
// Store to the union to initialize the tag.
const field_tag = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const payload_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
const new_union_val = try mod.unionValue(union_ty, field_tag, try mod.undefValue(payload_ty));
try sema.storePtrVal(block, src, union_ptr_val, new_union_val, union_ty);
} else {
const union_val = (try sema.pointerDeref(block, src, union_ptr_val, union_ptr_ty)) orelse
break :ct;
if (union_val.isUndef(mod)) {
@@ -28232,13 +28271,8 @@ fn unionFieldPtr(
},
.@"packed", .@"extern" => {},
}
return Air.internedToRef((try mod.intern(.{ .ptr = .{
.ty = ptr_field_ty.toIntern(),
.addr = .{ .field = .{
.base = union_ptr_val.toIntern(),
.index = field_index,
} },
} })));
const field_ptr_val = try union_ptr_val.ptrField(field_index, sema);
return Air.internedToRef(field_ptr_val.toIntern());
}
 
try sema.requireRuntimeBlock(block, src, null);
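A hedged sketch of the initializing case: writing a union through its field pointer at comptime now also stores the tag, so a subsequent tag check observes the active field. Whether a given piece of user code takes this exact path is an assumption on my part.

```zig
const U = union(enum) { a: u32, b: f32 };

comptime {
    var u: U = undefined;
    u = .{ .a = 1 }; // assumed to initialize through the union field pointer
    if (u != .a) unreachable; // the tag was stored at comptime
}
```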
@@ -28268,21 +28302,21 @@ fn unionFieldVal(
field_name_src: LazySrcLoc,
union_ty: Type,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const ip = &mod.intern_pool;
assert(union_ty.zigTypeTag(mod) == .Union);
const zcu = sema.mod;
const ip = &zcu.intern_pool;
assert(union_ty.zigTypeTag(zcu) == .Union);
 
try sema.resolveTypeFields(union_ty);
const union_obj = mod.typeToUnion(union_ty).?;
const union_obj = zcu.typeToUnion(union_ty).?;
const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
const enum_field_index: u32 = @intCast(Type.fromInterned(union_obj.enum_tag_ty).enumFieldIndex(field_name, mod).?);
const enum_field_index: u32 = @intCast(Type.fromInterned(union_obj.enum_tag_ty).enumFieldIndex(field_name, zcu).?);
 
if (try sema.resolveValue(union_byval)) |union_val| {
if (union_val.isUndef(mod)) return mod.undefRef(field_ty);
if (union_val.isUndef(zcu)) return zcu.undefRef(field_ty);
 
const un = ip.indexToKey(union_val.toIntern()).un;
const field_tag = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const field_tag = try zcu.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const tag_matches = un.tag == field_tag.toIntern();
switch (union_obj.getLayout(ip)) {
.auto => {
@@ -28290,8 +28324,8 @@ fn unionFieldVal(
return Air.internedToRef(un.val);
} else {
const msg = msg: {
const active_index = Type.fromInterned(union_obj.enum_tag_ty).enumTagFieldIndex(Value.fromInterned(un.tag), mod).?;
const active_field_name = Type.fromInterned(union_obj.enum_tag_ty).enumFieldName(active_index, mod);
const active_index = Type.fromInterned(union_obj.enum_tag_ty).enumTagFieldIndex(Value.fromInterned(un.tag), zcu).?;
const active_field_name = Type.fromInterned(union_obj.enum_tag_ty).enumFieldName(active_index, zcu);
const msg = try sema.errMsg(block, src, "access of union field '{}' while field '{}' is active", .{
field_name.fmt(ip), active_field_name.fmt(ip),
});
@@ -28302,33 +28336,31 @@ fn unionFieldVal(
return sema.failWithOwnedErrorMsg(block, msg);
}
},
.@"packed", .@"extern" => |layout| {
if (tag_matches) {
return Air.internedToRef(un.val);
} else {
const old_ty = if (un.tag == .none)
Type.fromInterned(ip.typeOf(un.val))
else
union_ty.unionFieldType(Value.fromInterned(un.tag), mod).?;
 
if (try sema.bitCastUnionFieldVal(block, src, Value.fromInterned(un.val), old_ty, field_ty, layout)) |new_val| {
return Air.internedToRef(new_val.toIntern());
}
}
.@"extern" => if (tag_matches) {
// Fast path - no need to use bitcast logic.
return Air.internedToRef(un.val);
} else if (try sema.bitCastVal(union_val, field_ty, 0, 0, 0)) |field_val| {
return Air.internedToRef(field_val.toIntern());
},
.@"packed" => if (tag_matches) {
// Fast path - no need to use bitcast logic.
return Air.internedToRef(un.val);
} else if (try sema.bitCastVal(union_val, field_ty, 0, try union_ty.bitSizeAdvanced(zcu, sema), 0)) |field_val| {
return Air.internedToRef(field_val.toIntern());
},
}
}
 
try sema.requireRuntimeBlock(block, src, null);
if (union_obj.getLayout(ip) == .auto and block.wantSafety() and
union_ty.unionTagTypeSafety(mod) != null and union_obj.field_types.len > 1)
union_ty.unionTagTypeSafety(zcu) != null and union_obj.field_types.len > 1)
{
const wanted_tag_val = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const wanted_tag_val = try zcu.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern());
const active_tag = try block.addTyOp(.get_union_tag, Type.fromInterned(union_obj.enum_tag_ty), union_byval);
try sema.panicInactiveUnionField(block, src, active_tag, wanted_tag);
}
if (field_ty.zigTypeTag(mod) == .NoReturn) {
if (field_ty.zigTypeTag(zcu) == .NoReturn) {
_ = try block.addNoOp(.unreach);
return .unreachable_value;
}
@@ -28402,8 +28434,7 @@ fn elemPtrOneLayerOnly(
const ptr_val = maybe_ptr_val orelse break :rs indexable_src;
const index_val = maybe_index_val orelse break :rs elem_index_src;
const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema));
const result_ty = try sema.elemPtrType(indexable_ty, index);
const elem_ptr = try ptr_val.elemPtr(result_ty, index, mod);
const elem_ptr = try ptr_val.ptrElem(index, sema);
return Air.internedToRef(elem_ptr.toIntern());
};
const result_ty = try sema.elemPtrType(indexable_ty, null);
@@ -28465,7 +28496,7 @@ fn elemVal(
const many_ptr_ty = try mod.manyConstPtrType(elem_ty);
const many_ptr_val = try mod.getCoerced(indexable_val, many_ptr_ty);
const elem_ptr_ty = try mod.singleConstPtrType(elem_ty);
const elem_ptr_val = try many_ptr_val.elemPtr(elem_ptr_ty, index, mod);
const elem_ptr_val = try many_ptr_val.ptrElem(index, sema);
if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, elem_ptr_ty)) |elem_val| {
return Air.internedToRef((try mod.getCoerced(elem_val, elem_ty)).toIntern());
}
@@ -28571,21 +28602,18 @@ fn tupleFieldPtr(
 
if (tuple_ty.structFieldIsComptime(field_index, mod))
try sema.resolveStructFieldInits(tuple_ty);
 
if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| {
return Air.internedToRef((try mod.intern(.{ .ptr = .{
.ty = ptr_field_ty.toIntern(),
.addr = .{ .comptime_field = default_val.toIntern() },
.base_addr = .{ .comptime_field = default_val.toIntern() },
.byte_offset = 0,
} })));
}
 
if (try sema.resolveValue(tuple_ptr)) |tuple_ptr_val| {
return Air.internedToRef((try mod.intern(.{ .ptr = .{
.ty = ptr_field_ty.toIntern(),
.addr = .{ .field = .{
.base = tuple_ptr_val.toIntern(),
.index = field_index,
} },
} })));
const field_ptr_val = try tuple_ptr_val.ptrField(field_index, sema);
return Air.internedToRef(field_ptr_val.toIntern());
}
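// Illustrative sketch (not from the changeset) of the comptime-field case in
// tupleFieldPtr above: a comptime-known tuple field has exactly one possible
// value, so a pointer to it uses a `comptime_field` base address.
comptime {
    const tup = .{ @as(u32, 42), @as(u8, 1) };
    const ptr = &tup[0]; // points at the field's value, not at memory
    if (ptr.* != 42) @compileError("unexpected comptime field value");
}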
 
if (!init) {
@@ -28747,7 +28775,7 @@ fn elemPtrArray(
return mod.undefRef(elem_ptr_ty);
}
if (offset) |index| {
const elem_ptr = try array_ptr_val.elemPtr(elem_ptr_ty, index, mod);
const elem_ptr = try array_ptr_val.ptrElem(index, sema);
return Air.internedToRef(elem_ptr.toIntern());
}
}
@@ -28804,7 +28832,7 @@ fn elemValSlice(
return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
}
const elem_ptr_ty = try sema.elemPtrType(slice_ty, index);
const elem_ptr_val = try slice_val.elemPtr(elem_ptr_ty, index, mod);
const elem_ptr_val = try slice_val.ptrElem(index, sema);
if (try sema.pointerDeref(block, slice_src, elem_ptr_val, elem_ptr_ty)) |elem_val| {
return Air.internedToRef(elem_val.toIntern());
}
@@ -28864,7 +28892,7 @@ fn elemPtrSlice(
const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else "";
return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
}
const elem_ptr_val = try slice_val.elemPtr(elem_ptr_ty, index, mod);
const elem_ptr_val = try slice_val.ptrElem(index, sema);
return Air.internedToRef(elem_ptr_val.toIntern());
}
}
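// Illustrative sketch (not from the changeset) of the element accesses above:
// with the reworked representation, `ptrElem` derives an element pointer from
// the base pointer plus a byte offset when the element type has a fixed layout.
comptime {
    const arr = [_]u16{ 10, 20, 30 };
    const slice: []const u16 = &arr;
    if (slice[1] != 20) @compileError("unexpected element value");
}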
@@ -28943,14 +28971,14 @@ fn coerceExtra(
opts: CoerceOpts,
) CoersionError!Air.Inst.Ref {
if (dest_ty.isGenericPoison()) return inst;
const mod = sema.mod;
const zcu = sema.mod;
const dest_ty_src = inst_src; // TODO better source location
try sema.resolveTypeFields(dest_ty);
const inst_ty = sema.typeOf(inst);
try sema.resolveTypeFields(inst_ty);
const target = mod.getTarget();
const target = zcu.getTarget();
// If the types are the same, we can return the operand.
if (dest_ty.eql(inst_ty, mod))
if (dest_ty.eql(inst_ty, zcu))
return inst;
 
const maybe_inst_val = try sema.resolveValue(inst);
@@ -28967,17 +28995,17 @@ fn coerceExtra(
return new_val;
}
 
switch (dest_ty.zigTypeTag(mod)) {
switch (dest_ty.zigTypeTag(zcu)) {
.Optional => optional: {
if (maybe_inst_val) |val| {
// undefined also sets the optional bit to undefined.
if (val.toIntern() == .undef) {
return mod.undefRef(dest_ty);
return zcu.undefRef(dest_ty);
}
 
// null to ?T
if (val.toIntern() == .null_value) {
return Air.internedToRef((try mod.intern(.{ .opt = .{
return Air.internedToRef((try zcu.intern(.{ .opt = .{
.ty = dest_ty.toIntern(),
.val = .none,
} })));
@@ -28986,13 +29014,13 @@ fn coerceExtra(
 
// cast from ?*T and ?[*]T to ?*anyopaque
// but don't do it if the source type is a double pointer
if (dest_ty.isPtrLikeOptional(mod) and
dest_ty.elemType2(mod).toIntern() == .anyopaque_type and
inst_ty.isPtrAtRuntime(mod))
if (dest_ty.isPtrLikeOptional(zcu) and
dest_ty.elemType2(zcu).toIntern() == .anyopaque_type and
inst_ty.isPtrAtRuntime(zcu))
anyopaque_check: {
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :optional;
const elem_ty = inst_ty.elemType2(mod);
if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) {
const elem_ty = inst_ty.elemType2(zcu);
if (elem_ty.zigTypeTag(zcu) == .Pointer or elem_ty.isPtrLikeOptional(zcu)) {
in_memory_result = .{ .double_ptr_to_anyopaque = .{
.actual = inst_ty,
.wanted = dest_ty,
@@ -29001,12 +29029,12 @@ fn coerceExtra(
}
// Let the logic below handle wrapping the optional now that
// it has been checked to correctly coerce.
if (!inst_ty.isPtrLikeOptional(mod)) break :anyopaque_check;
if (!inst_ty.isPtrLikeOptional(zcu)) break :anyopaque_check;
return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
}
 
// T to ?T
const child_type = dest_ty.optionalChild(mod);
const child_type = dest_ty.optionalChild(zcu);
const intermediate = sema.coerceExtra(block, child_type, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
error.NotCoercible => {
if (in_memory_result == .no_match) {
@@ -29020,12 +29048,12 @@ fn coerceExtra(
return try sema.wrapOptional(block, dest_ty, intermediate, inst_src);
},
.Pointer => pointer: {
const dest_info = dest_ty.ptrInfo(mod);
const dest_info = dest_ty.ptrInfo(zcu);
 
// Function body to function pointer.
if (inst_ty.zigTypeTag(mod) == .Fn) {
if (inst_ty.zigTypeTag(zcu) == .Fn) {
const fn_val = try sema.resolveConstDefinedValue(block, .unneeded, inst, undefined);
const fn_decl = fn_val.pointerDecl(mod).?;
const fn_decl = fn_val.pointerDecl(zcu).?;
const inst_as_ptr = try sema.analyzeDeclRef(fn_decl);
return sema.coerce(block, dest_ty, inst_as_ptr, inst_src);
}
@@ -29033,13 +29061,13 @@ fn coerceExtra(
// *T to *[1]T
single_item: {
if (dest_info.flags.size != .One) break :single_item;
if (!inst_ty.isSinglePointer(mod)) break :single_item;
if (!inst_ty.isSinglePointer(zcu)) break :single_item;
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
const ptr_elem_ty = inst_ty.childType(mod);
const ptr_elem_ty = inst_ty.childType(zcu);
const array_ty = Type.fromInterned(dest_info.child);
if (array_ty.zigTypeTag(mod) != .Array) break :single_item;
const array_elem_ty = array_ty.childType(mod);
if (array_ty.arrayLen(mod) != 1) break :single_item;
if (array_ty.zigTypeTag(zcu) != .Array) break :single_item;
const array_elem_ty = array_ty.childType(zcu);
if (array_ty.arrayLen(zcu) != 1) break :single_item;
const dest_is_mut = !dest_info.flags.is_const;
switch (try sema.coerceInMemoryAllowed(block, array_elem_ty, ptr_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) {
.ok => {},
@@ -29050,11 +29078,11 @@ fn coerceExtra(
 
// Coercions where the source is a single pointer to an array.
src_array_ptr: {
if (!inst_ty.isSinglePointer(mod)) break :src_array_ptr;
if (!inst_ty.isSinglePointer(zcu)) break :src_array_ptr;
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
const array_ty = inst_ty.childType(mod);
if (array_ty.zigTypeTag(mod) != .Array) break :src_array_ptr;
const array_elem_type = array_ty.childType(mod);
const array_ty = inst_ty.childType(zcu);
if (array_ty.zigTypeTag(zcu) != .Array) break :src_array_ptr;
const array_elem_type = array_ty.childType(zcu);
const dest_is_mut = !dest_info.flags.is_const;
 
const dst_elem_type = Type.fromInterned(dest_info.child);
@@ -29072,7 +29100,7 @@ fn coerceExtra(
}
 
if (dest_info.sentinel != .none) {
if (array_ty.sentinel(mod)) |inst_sent| {
if (array_ty.sentinel(zcu)) |inst_sent| {
if (Air.internedToRef(dest_info.sentinel) !=
try sema.coerceInMemory(inst_sent, dst_elem_type))
{
@@ -29111,12 +29139,12 @@ fn coerceExtra(
}
 
// coercion from C pointer
if (inst_ty.isCPtr(mod)) src_c_ptr: {
if (inst_ty.isCPtr(zcu)) src_c_ptr: {
if (dest_info.flags.size == .Slice) break :src_c_ptr;
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :src_c_ptr;
// In this case we must add a safety check because the C pointer
// could be null.
const src_elem_ty = inst_ty.childType(mod);
const src_elem_ty = inst_ty.childType(zcu);
const dest_is_mut = !dest_info.flags.is_const;
const dst_elem_type = Type.fromInterned(dest_info.child);
switch (try sema.coerceInMemoryAllowed(block, dst_elem_type, src_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) {
@@ -29128,18 +29156,18 @@ fn coerceExtra(
 
// cast from *T and [*]T to *anyopaque
// but don't do it if the source type is a double pointer
if (dest_info.child == .anyopaque_type and inst_ty.zigTypeTag(mod) == .Pointer) to_anyopaque: {
if (dest_info.child == .anyopaque_type and inst_ty.zigTypeTag(zcu) == .Pointer) to_anyopaque: {
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
const elem_ty = inst_ty.elemType2(mod);
if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) {
const elem_ty = inst_ty.elemType2(zcu);
if (elem_ty.zigTypeTag(zcu) == .Pointer or elem_ty.isPtrLikeOptional(zcu)) {
in_memory_result = .{ .double_ptr_to_anyopaque = .{
.actual = inst_ty,
.wanted = dest_ty,
} };
break :pointer;
}
if (dest_ty.isSlice(mod)) break :to_anyopaque;
if (inst_ty.isSlice(mod)) {
if (dest_ty.isSlice(zcu)) break :to_anyopaque;
if (inst_ty.isSlice(zcu)) {
in_memory_result = .{ .slice_to_anyopaque = .{
.actual = inst_ty,
.wanted = dest_ty,
@@ -29151,10 +29179,11 @@ fn coerceExtra(
 
switch (dest_info.flags.size) {
// coercion to C pointer
.C => switch (inst_ty.zigTypeTag(mod)) {
.Null => return Air.internedToRef(try mod.intern(.{ .ptr = .{
.C => switch (inst_ty.zigTypeTag(zcu)) {
.Null => return Air.internedToRef(try zcu.intern(.{ .ptr = .{
.ty = dest_ty.toIntern(),
.addr = .{ .int = .zero_usize },
.base_addr = .int,
.byte_offset = 0,
} })),
.ComptimeInt => {
const addr = sema.coerceExtra(block, Type.usize, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
@@ -29164,7 +29193,7 @@ fn coerceExtra(
return try sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src);
},
.Int => {
const ptr_size_ty = switch (inst_ty.intInfo(mod).signedness) {
const ptr_size_ty = switch (inst_ty.intInfo(zcu).signedness) {
.signed => Type.isize,
.unsigned => Type.usize,
};
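// Illustrative sketch (not from the changeset) of the C-pointer coercions
// above: `null` becomes a pointer with `base_addr = .int` and a zero
// `byte_offset`, and integers coerce through `usize` into a fixed address.
const sketch_null: [*c]const u8 = null; // names are illustrative only
const sketch_addr: [*c]const u8 = 0x1000;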
@@ -29180,7 +29209,7 @@ fn coerceExtra(
},
.Pointer => p: {
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p;
const inst_info = inst_ty.ptrInfo(mod);
const inst_info = inst_ty.ptrInfo(zcu);
switch (try sema.coerceInMemoryAllowed(
block,
Type.fromInterned(dest_info.child),
@@ -29196,7 +29225,7 @@ fn coerceExtra(
if (inst_info.flags.size == .Slice) {
assert(dest_info.sentinel == .none);
if (inst_info.sentinel == .none or
inst_info.sentinel != (try mod.intValue(Type.fromInterned(inst_info.child), 0)).toIntern())
inst_info.sentinel != (try zcu.intValue(Type.fromInterned(inst_info.child), 0)).toIntern())
break :p;
 
const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty);
@@ -29206,11 +29235,11 @@ fn coerceExtra(
},
else => {},
},
.One => switch (Type.fromInterned(dest_info.child).zigTypeTag(mod)) {
.One => switch (Type.fromInterned(dest_info.child).zigTypeTag(zcu)) {
.Union => {
// pointer to anonymous struct to pointer to union
if (inst_ty.isSinglePointer(mod) and
inst_ty.childType(mod).isAnonStruct(mod) and
if (inst_ty.isSinglePointer(zcu) and
inst_ty.childType(zcu).isAnonStruct(zcu) and
sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
{
return sema.coerceAnonStructToUnionPtrs(block, dest_ty, dest_ty_src, inst, inst_src);
@@ -29218,8 +29247,8 @@ fn coerceExtra(
},
.Struct => {
// pointer to anonymous struct to pointer to struct
if (inst_ty.isSinglePointer(mod) and
inst_ty.childType(mod).isAnonStruct(mod) and
if (inst_ty.isSinglePointer(zcu) and
inst_ty.childType(zcu).isAnonStruct(zcu) and
sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
{
return sema.coerceAnonStructToStructPtrs(block, dest_ty, dest_ty_src, inst, inst_src) catch |err| switch (err) {
@@ -29230,8 +29259,8 @@ fn coerceExtra(
},
.Array => {
// pointer to tuple to pointer to array
if (inst_ty.isSinglePointer(mod) and
inst_ty.childType(mod).isTuple(mod) and
if (inst_ty.isSinglePointer(zcu) and
inst_ty.childType(zcu).isTuple(zcu) and
sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
{
return sema.coerceTupleToArrayPtrs(block, dest_ty, dest_ty_src, inst, inst_src);
@@ -29240,50 +29269,38 @@ fn coerceExtra(
else => {},
},
.Slice => to_slice: {
if (inst_ty.zigTypeTag(mod) == .Array) {
if (inst_ty.zigTypeTag(zcu) == .Array) {
return sema.fail(
block,
inst_src,
"array literal requires address-of operator (&) to coerce to slice type '{}'",
.{dest_ty.fmt(mod)},
.{dest_ty.fmt(zcu)},
);
}
 
if (!inst_ty.isSinglePointer(mod)) break :to_slice;
const inst_child_ty = inst_ty.childType(mod);
if (!inst_child_ty.isTuple(mod)) break :to_slice;
if (!inst_ty.isSinglePointer(zcu)) break :to_slice;
const inst_child_ty = inst_ty.childType(zcu);
if (!inst_child_ty.isTuple(zcu)) break :to_slice;
 
// empty tuple to zero-length slice
// note that this allows coercing to a mutable slice.
if (inst_child_ty.structFieldCount(mod) == 0) {
// Optional slice is represented with a null pointer so
// we use a dummy pointer value with the required alignment.
return Air.internedToRef((try mod.intern(.{ .slice = .{
if (inst_child_ty.structFieldCount(zcu) == 0) {
const align_val = try dest_ty.ptrAlignmentAdvanced(zcu, sema);
return Air.internedToRef(try zcu.intern(.{ .slice = .{
.ty = dest_ty.toIntern(),
.ptr = try mod.intern(.{ .ptr = .{
.ty = dest_ty.slicePtrFieldType(mod).toIntern(),
.addr = .{ .int = if (dest_info.flags.alignment != .none)
(try mod.intValue(
Type.usize,
dest_info.flags.alignment.toByteUnits().?,
)).toIntern()
else
try mod.intern_pool.getCoercedInts(
mod.gpa,
mod.intern_pool.indexToKey(
(try Type.fromInterned(dest_info.child).lazyAbiAlignment(mod)).toIntern(),
).int,
.usize_type,
) },
.ptr = try zcu.intern(.{ .ptr = .{
.ty = dest_ty.slicePtrFieldType(zcu).toIntern(),
.base_addr = .int,
.byte_offset = align_val.toByteUnits().?,
} }),
.len = (try mod.intValue(Type.usize, 0)).toIntern(),
} })));
.len = .zero_usize,
} }));
}
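// Illustrative sketch (not from the changeset) of the branch above: an empty
// tuple coerces to a zero-length slice whose pointer is a dangling but
// suitably aligned integer address, since there is no memory to reference.
const sketch_empty: []const u32 = &.{}; // name is illustrative only
comptime {
    if (sketch_empty.len != 0) @compileError("expected an empty slice");
}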
 
// pointer to tuple to slice
if (!dest_info.flags.is_const) {
const err_msg = err_msg: {
const err_msg = try sema.errMsg(block, inst_src, "cannot cast pointer to tuple to '{}'", .{dest_ty.fmt(mod)});
const err_msg = try sema.errMsg(block, inst_src, "cannot cast pointer to tuple to '{}'", .{dest_ty.fmt(zcu)});
errdefer err_msg.destroy(sema.gpa);
try sema.errNote(block, dest_ty_src, err_msg, "pointers to tuples can only coerce to constant pointers", .{});
break :err_msg err_msg;
@@ -29293,9 +29310,9 @@ fn coerceExtra(
return sema.coerceTupleToSlicePtrs(block, dest_ty, dest_ty_src, inst, inst_src);
},
.Many => p: {
if (!inst_ty.isSlice(mod)) break :p;
if (!inst_ty.isSlice(zcu)) break :p;
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p;
const inst_info = inst_ty.ptrInfo(mod);
const inst_info = inst_ty.ptrInfo(zcu);
 
switch (try sema.coerceInMemoryAllowed(
block,
@@ -29320,10 +29337,10 @@ fn coerceExtra(
},
}
},
.Int, .ComptimeInt => switch (inst_ty.zigTypeTag(mod)) {
.Int, .ComptimeInt => switch (inst_ty.zigTypeTag(zcu)) {
.Float, .ComptimeFloat => float: {
const val = maybe_inst_val orelse {
if (dest_ty.zigTypeTag(mod) == .ComptimeInt) {
if (dest_ty.zigTypeTag(zcu) == .ComptimeInt) {
if (!opts.report_err) return error.NotCoercible;
return sema.failWithNeededComptime(block, inst_src, .{
.needed_comptime_reason = "value being casted to 'comptime_int' must be comptime-known",
@@ -29339,17 +29356,17 @@ fn coerceExtra(
// comptime-known integer to other number
if (!(try sema.intFitsInType(val, dest_ty, null))) {
if (!opts.report_err) return error.NotCoercible;
return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(mod), val.fmtValue(mod) });
return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(zcu), val.fmtValue(zcu, sema) });
}
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.undef => try mod.undefRef(dest_ty),
return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
.undef => try zcu.undefRef(dest_ty),
.int => |int| Air.internedToRef(
try mod.intern_pool.getCoercedInts(mod.gpa, int, dest_ty.toIntern()),
try zcu.intern_pool.getCoercedInts(zcu.gpa, int, dest_ty.toIntern()),
),
else => unreachable,
};
}
if (dest_ty.zigTypeTag(mod) == .ComptimeInt) {
if (dest_ty.zigTypeTag(zcu) == .ComptimeInt) {
if (!opts.report_err) return error.NotCoercible;
if (opts.no_cast_to_comptime_int) return inst;
return sema.failWithNeededComptime(block, inst_src, .{
@@ -29358,8 +29375,8 @@ fn coerceExtra(
}
 
// integer widening
const dst_info = dest_ty.intInfo(mod);
const src_info = inst_ty.intInfo(mod);
const dst_info = dest_ty.intInfo(zcu);
const src_info = inst_ty.intInfo(zcu);
if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or
// small enough unsigned ints can get casted to large enough signed ints
(dst_info.signedness == .signed and dst_info.bits > src_info.bits))
@@ -29370,25 +29387,25 @@ fn coerceExtra(
},
else => {},
},
.Float, .ComptimeFloat => switch (inst_ty.zigTypeTag(mod)) {
.Float, .ComptimeFloat => switch (inst_ty.zigTypeTag(zcu)) {
.ComptimeFloat => {
const val = try sema.resolveConstDefinedValue(block, .unneeded, inst, undefined);
const result_val = try val.floatCast(dest_ty, mod);
const result_val = try val.floatCast(dest_ty, zcu);
return Air.internedToRef(result_val.toIntern());
},
.Float => {
if (maybe_inst_val) |val| {
const result_val = try val.floatCast(dest_ty, mod);
if (!val.eql(try result_val.floatCast(inst_ty, mod), inst_ty, mod)) {
const result_val = try val.floatCast(dest_ty, zcu);
if (!val.eql(try result_val.floatCast(inst_ty, zcu), inst_ty, zcu)) {
return sema.fail(
block,
inst_src,
"type '{}' cannot represent float value '{}'",
.{ dest_ty.fmt(mod), val.fmtValue(mod) },
.{ dest_ty.fmt(zcu), val.fmtValue(zcu, sema) },
);
}
return Air.internedToRef(result_val.toIntern());
} else if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) {
} else if (dest_ty.zigTypeTag(zcu) == .ComptimeFloat) {
if (!opts.report_err) return error.NotCoercible;
return sema.failWithNeededComptime(block, inst_src, .{
.needed_comptime_reason = "value being casted to 'comptime_float' must be comptime-known",
@@ -29405,7 +29422,7 @@ fn coerceExtra(
},
.Int, .ComptimeInt => int: {
const val = maybe_inst_val orelse {
if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) {
if (dest_ty.zigTypeTag(zcu) == .ComptimeFloat) {
if (!opts.report_err) return error.NotCoercible;
return sema.failWithNeededComptime(block, inst_src, .{
.needed_comptime_reason = "value being casted to 'comptime_float' must be comptime-known",
@@ -29413,52 +29430,52 @@ fn coerceExtra(
}
break :int;
};
const result_val = try val.floatFromIntAdvanced(sema.arena, inst_ty, dest_ty, mod, sema);
const result_val = try val.floatFromIntAdvanced(sema.arena, inst_ty, dest_ty, zcu, sema);
// TODO implement this compile error
//const int_again_val = try result_val.intFromFloat(sema.arena, inst_ty);
//if (!int_again_val.eql(val, inst_ty, mod)) {
//if (!int_again_val.eql(val, inst_ty, zcu)) {
// return sema.fail(
// block,
// inst_src,
// "type '{}' cannot represent integer value '{}'",
// .{ dest_ty.fmt(mod), val },
// .{ dest_ty.fmt(zcu), val },
// );
//}
return Air.internedToRef(result_val.toIntern());
},
else => {},
},
.Enum => switch (inst_ty.zigTypeTag(mod)) {
.Enum => switch (inst_ty.zigTypeTag(zcu)) {
.EnumLiteral => {
// enum literal to enum
const val = try sema.resolveConstDefinedValue(block, .unneeded, inst, undefined);
const string = mod.intern_pool.indexToKey(val.toIntern()).enum_literal;
const field_index = dest_ty.enumFieldIndex(string, mod) orelse {
const string = zcu.intern_pool.indexToKey(val.toIntern()).enum_literal;
const field_index = dest_ty.enumFieldIndex(string, zcu) orelse {
return sema.fail(block, inst_src, "no field named '{}' in enum '{}'", .{
string.fmt(&mod.intern_pool), dest_ty.fmt(mod),
string.fmt(&zcu.intern_pool), dest_ty.fmt(zcu),
});
};
return Air.internedToRef((try mod.enumValueFieldIndex(dest_ty, @intCast(field_index))).toIntern());
return Air.internedToRef((try zcu.enumValueFieldIndex(dest_ty, @intCast(field_index))).toIntern());
},
.Union => blk: {
// union to its own tag type
const union_tag_ty = inst_ty.unionTagType(mod) orelse break :blk;
if (union_tag_ty.eql(dest_ty, mod)) {
const union_tag_ty = inst_ty.unionTagType(zcu) orelse break :blk;
if (union_tag_ty.eql(dest_ty, zcu)) {
return sema.unionToTag(block, dest_ty, inst, inst_src);
}
},
else => {},
},
.ErrorUnion => switch (inst_ty.zigTypeTag(mod)) {
.ErrorUnion => switch (inst_ty.zigTypeTag(zcu)) {
.ErrorUnion => eu: {
if (maybe_inst_val) |inst_val| {
switch (inst_val.toIntern()) {
.undef => return mod.undefRef(dest_ty),
else => switch (mod.intern_pool.indexToKey(inst_val.toIntern())) {
.undef => return zcu.undefRef(dest_ty),
else => switch (zcu.intern_pool.indexToKey(inst_val.toIntern())) {
.error_union => |error_union| switch (error_union.val) {
.err_name => |err_name| {
const error_set_ty = inst_ty.errorUnionSet(mod);
const error_set_val = Air.internedToRef((try mod.intern(.{ .err = .{
const error_set_ty = inst_ty.errorUnionSet(zcu);
const error_set_val = Air.internedToRef((try zcu.intern(.{ .err = .{
.ty = error_set_ty.toIntern(),
.name = err_name,
} })));
@@ -29489,31 +29506,54 @@ fn coerceExtra(
};
},
},
.Union => switch (inst_ty.zigTypeTag(mod)) {
.Union => switch (inst_ty.zigTypeTag(zcu)) {
.Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src),
.Struct => {
if (inst_ty.isAnonStruct(mod)) {
if (inst_ty.isAnonStruct(zcu)) {
return sema.coerceAnonStructToUnion(block, dest_ty, dest_ty_src, inst, inst_src);
}
},
else => {},
},
.Array => switch (inst_ty.zigTypeTag(mod)) {
.Array => switch (inst_ty.zigTypeTag(zcu)) {
.Array => array_to_array: {
// Array coercions are allowed only if the child type is in-memory coercible and the sentinel is unchanged or removed.
if (.ok != try sema.coerceInMemoryAllowed(
block,
dest_ty.childType(zcu),
inst_ty.childType(zcu),
false,
target,
dest_ty_src,
inst_src,
)) {
break :array_to_array;
}
 
if (dest_ty.sentinel(zcu)) |dest_sent| {
const src_sent = inst_ty.sentinel(zcu) orelse break :array_to_array;
if (dest_sent.toIntern() != (try zcu.getCoerced(src_sent, dest_ty.childType(zcu))).toIntern()) {
break :array_to_array;
}
}
 
return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src);
},
.Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src),
.Struct => {
if (inst == .empty_struct) {
return sema.arrayInitEmpty(block, inst_src, dest_ty);
}
if (inst_ty.isTuple(mod)) {
if (inst_ty.isTuple(zcu)) {
return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src);
}
},
else => {},
},
.Vector => switch (inst_ty.zigTypeTag(mod)) {
.Vector => switch (inst_ty.zigTypeTag(zcu)) {
.Array, .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src),
.Struct => {
if (inst_ty.isTuple(mod)) {
if (inst_ty.isTuple(zcu)) {
return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src);
}
},
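// Illustrative sketch (not from the changeset) of the array-to-array rule
// added above: coercion succeeds when the element types coerce in memory and
// the sentinel is unchanged or removed.
comptime {
    const with_sent: [3:0]u8 = .{ 1, 2, 3 };
    const no_sent: [3]u8 = with_sent; // sentinel removed; element types identical
    if (no_sent[1] != 2) @compileError("unexpected element value");
}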
@@ -29523,7 +29563,7 @@ fn coerceExtra(
if (inst == .empty_struct) {
return sema.structInitEmpty(block, dest_ty, dest_ty_src, inst_src);
}
if (inst_ty.isTupleOrAnonStruct(mod)) {
if (inst_ty.isTupleOrAnonStruct(zcu)) {
return sema.coerceTupleToStruct(block, dest_ty, inst, inst_src) catch |err| switch (err) {
error.NotCoercible => break :blk,
else => |e| return e,
@@ -29536,38 +29576,38 @@ fn coerceExtra(
// undefined to anything. We do this after the big switch above so that
// special logic has a chance to run first, such as `*[N]T` to `[]T` which
// should initialize the length field of the slice.
if (maybe_inst_val) |val| if (val.toIntern() == .undef) return mod.undefRef(dest_ty);
if (maybe_inst_val) |val| if (val.toIntern() == .undef) return zcu.undefRef(dest_ty);
 
if (!opts.report_err) return error.NotCoercible;
 
if (opts.is_ret and dest_ty.zigTypeTag(mod) == .NoReturn) {
if (opts.is_ret and dest_ty.zigTypeTag(zcu) == .NoReturn) {
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "function declared 'noreturn' returns", .{});
errdefer msg.destroy(sema.gpa);
 
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
const src_decl = mod.funcOwnerDeclPtr(sema.func_index);
try mod.errNoteNonLazy(src_decl.toSrcLoc(ret_ty_src, mod), msg, "'noreturn' declared here", .{});
const src_decl = zcu.funcOwnerDeclPtr(sema.func_index);
try zcu.errNoteNonLazy(src_decl.toSrcLoc(ret_ty_src, zcu), msg, "'noreturn' declared here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
}
 
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(mod), inst_ty.fmt(mod) });
const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(zcu), inst_ty.fmt(zcu) });
errdefer msg.destroy(sema.gpa);
 
// E!T to T
if (inst_ty.zigTypeTag(mod) == .ErrorUnion and
(try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(mod), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
if (inst_ty.zigTypeTag(zcu) == .ErrorUnion and
(try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(zcu), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
{
try sema.errNote(block, inst_src, msg, "cannot convert error union to payload type", .{});
try sema.errNote(block, inst_src, msg, "consider using 'try', 'catch', or 'if'", .{});
}
 
// ?T to T
if (inst_ty.zigTypeTag(mod) == .Optional and
(try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(mod), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
if (inst_ty.zigTypeTag(zcu) == .Optional and
(try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(zcu), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
{
try sema.errNote(block, inst_src, msg, "cannot convert optional to payload type", .{});
try sema.errNote(block, inst_src, msg, "consider using '.?', 'orelse', or 'if'", .{});
@@ -29577,19 +29617,19 @@ fn coerceExtra(
 
// Add notes about function return type
if (opts.is_ret and
mod.test_functions.get(mod.funcOwnerDeclIndex(sema.func_index)) == null)
zcu.test_functions.get(zcu.funcOwnerDeclIndex(sema.func_index)) == null)
{
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
const src_decl = mod.funcOwnerDeclPtr(sema.func_index);
if (inst_ty.isError(mod) and !dest_ty.isError(mod)) {
try mod.errNoteNonLazy(src_decl.toSrcLoc(ret_ty_src, mod), msg, "function cannot return an error", .{});
const src_decl = zcu.funcOwnerDeclPtr(sema.func_index);
if (inst_ty.isError(zcu) and !dest_ty.isError(zcu)) {
try zcu.errNoteNonLazy(src_decl.toSrcLoc(ret_ty_src, zcu), msg, "function cannot return an error", .{});
} else {
try mod.errNoteNonLazy(src_decl.toSrcLoc(ret_ty_src, mod), msg, "function return type declared here", .{});
try zcu.errNoteNonLazy(src_decl.toSrcLoc(ret_ty_src, zcu), msg, "function return type declared here", .{});
}
}
 
if (try opts.param_src.get(sema)) |param_src| {
try mod.errNoteNonLazy(param_src, msg, "parameter type declared here", .{});
try zcu.errNoteNonLazy(param_src, msg, "parameter type declared here", .{});
}
 
// TODO maybe add "cannot store an error in type '{}'" note
@@ -29755,11 +29795,11 @@ const InMemoryCoercionResult = union(enum) {
.array_sentinel => |sentinel| {
if (sentinel.actual.toIntern() != .unreachable_value) {
try sema.errNote(block, src, msg, "array sentinel '{}' cannot cast into array sentinel '{}'", .{
sentinel.actual.fmtValue(mod), sentinel.wanted.fmtValue(mod),
sentinel.actual.fmtValue(mod, sema), sentinel.wanted.fmtValue(mod, sema),
});
} else {
try sema.errNote(block, src, msg, "destination array requires '{}' sentinel", .{
sentinel.wanted.fmtValue(mod),
sentinel.wanted.fmtValue(mod, sema),
});
}
break;
@@ -29881,11 +29921,11 @@ const InMemoryCoercionResult = union(enum) {
.ptr_sentinel => |sentinel| {
if (sentinel.actual.toIntern() != .unreachable_value) {
try sema.errNote(block, src, msg, "pointer sentinel '{}' cannot cast into pointer sentinel '{}'", .{
sentinel.actual.fmtValue(mod), sentinel.wanted.fmtValue(mod),
sentinel.actual.fmtValue(mod, sema), sentinel.wanted.fmtValue(mod, sema),
});
} else {
try sema.errNote(block, src, msg, "destination pointer requires '{}' sentinel", .{
sentinel.wanted.fmtValue(mod),
sentinel.wanted.fmtValue(mod, sema),
});
}
break;
@@ -29972,7 +30012,7 @@ fn pointerSizeString(size: std.builtin.Type.Pointer.Size) []const u8 {
/// * bit offset attributes must match exactly
/// * `*`/`[*]` must match exactly, but `[*c]` matches either one
/// * sentinel-terminated pointers can coerce into `[*]`
fn coerceInMemoryAllowed(
pub fn coerceInMemoryAllowed(
sema: *Sema,
block: *Block,
dest_ty: Type,
@@ -30082,8 +30122,9 @@ fn coerceInMemoryAllowed(
.wanted = dest_info.elem_type,
} };
}
const ok_sent = dest_info.sentinel == null or
const ok_sent = (dest_info.sentinel == null and src_info.sentinel == null) or
(src_info.sentinel != null and
dest_info.sentinel != null and
dest_info.sentinel.?.eql(
try mod.getCoerced(src_info.sentinel.?, dest_info.elem_type),
dest_info.elem_type,
@@ -30420,9 +30461,9 @@ fn coerceInMemoryAllowedPtrs(
dest_src: LazySrcLoc,
src_src: LazySrcLoc,
) !InMemoryCoercionResult {
const mod = sema.mod;
const dest_info = dest_ptr_ty.ptrInfo(mod);
const src_info = src_ptr_ty.ptrInfo(mod);
const zcu = sema.mod;
const dest_info = dest_ptr_ty.ptrInfo(zcu);
const src_info = src_ptr_ty.ptrInfo(zcu);
 
const ok_ptr_size = src_info.flags.size == dest_info.flags.size or
src_info.flags.size == .C or dest_info.flags.size == .C;
@@ -30453,8 +30494,18 @@ fn coerceInMemoryAllowedPtrs(
} };
}
 
const child = try sema.coerceInMemoryAllowed(block, Type.fromInterned(dest_info.child), Type.fromInterned(src_info.child), !dest_info.flags.is_const, target, dest_src, src_src);
if (child != .ok) {
const dest_child = Type.fromInterned(dest_info.child);
const src_child = Type.fromInterned(src_info.child);
const child = try sema.coerceInMemoryAllowed(block, dest_child, src_child, !dest_info.flags.is_const, target, dest_src, src_src);
if (child != .ok) allow: {
// As a special case, we also allow coercing `*[n:s]T` to `*[n]T`, akin to dropping the sentinel from a slice.
// `*[n:s]T` cannot coerce in memory to `*[n]T` since they have different sizes.
if (src_child.zigTypeTag(zcu) == .Array and dest_child.zigTypeTag(zcu) == .Array and
src_child.sentinel(zcu) != null and dest_child.sentinel(zcu) == null and
.ok == try sema.coerceInMemoryAllowed(block, dest_child.childType(zcu), src_child.childType(zcu), !dest_info.flags.is_const, target, dest_src, src_src))
{
break :allow;
}
return InMemoryCoercionResult{ .ptr_child = .{
.child = try child.dupe(sema.arena),
.actual = Type.fromInterned(src_info.child),
@@ -30462,8 +30513,8 @@ fn coerceInMemoryAllowedPtrs(
} };
}
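// Illustrative sketch (not from the changeset) of the special case above: a
// pointer to a sentinel-terminated array coerces to a pointer to the same
// array without the sentinel, even though the pointee sizes differ.
var sketch_buf: [3:0]u8 = .{ 1, 2, 3 }; // names are illustrative only
const sketch_ptr: *[3]u8 = &sketch_buf;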
 
const dest_allow_zero = dest_ty.ptrAllowsZero(mod);
const src_allow_zero = src_ty.ptrAllowsZero(mod);
const dest_allow_zero = dest_ty.ptrAllowsZero(zcu);
const src_allow_zero = src_ty.ptrAllowsZero(zcu);
 
const ok_allows_zero = (dest_allow_zero and
(src_allow_zero or !dest_is_mut)) or
@@ -30488,7 +30539,7 @@ fn coerceInMemoryAllowedPtrs(
 
const ok_sent = dest_info.sentinel == .none or src_info.flags.size == .C or
(src_info.sentinel != .none and
dest_info.sentinel == try mod.intern_pool.getCoerced(sema.gpa, src_info.sentinel, dest_info.child));
dest_info.sentinel == try zcu.intern_pool.getCoerced(sema.gpa, src_info.sentinel, dest_info.child));
if (!ok_sent) {
return InMemoryCoercionResult{ .ptr_sentinel = .{
.actual = switch (src_info.sentinel) {
@@ -30787,7 +30838,18 @@ fn checkKnownAllocPtr(sema: *Sema, block: *Block, base_ptr: Air.Inst.Ref, new_pt
switch (sema.air_instructions.items(.tag)[@intFromEnum(new_ptr_inst)]) {
.optional_payload_ptr_set, .errunion_payload_ptr_set => {
const maybe_comptime_alloc = sema.maybe_comptime_allocs.getPtr(alloc_inst) orelse return;
try maybe_comptime_alloc.non_elideable_pointers.append(sema.arena, new_ptr_inst);
 
// This is functionally a store, since it writes the optional payload bit.
// Thus, if it is behind a runtime condition, we must mark the alloc as runtime appropriately.
if (block.runtime_index != maybe_comptime_alloc.runtime_index) {
return sema.markMaybeComptimeAllocRuntime(block, alloc_inst);
}
 
try maybe_comptime_alloc.stores.append(sema.arena, .{
.inst = new_ptr_inst,
.src_decl = block.src_decl,
.src = .unneeded,
});
},
.ptr_elem_ptr => {
const tmp_air = sema.getTmpAir();
@@ -30812,6 +30874,12 @@ fn markMaybeComptimeAllocRuntime(sema: *Sema, block: *Block, alloc_inst: Air.Ins
const mod = sema.mod;
const slice = maybe_comptime_alloc.stores.slice();
for (slice.items(.inst), slice.items(.src_decl), slice.items(.src)) |other_inst, other_src_decl, other_src| {
if (other_src == .unneeded) {
switch (sema.air_instructions.items(.tag)[@intFromEnum(other_inst)]) {
.set_union_tag, .optional_payload_ptr_set, .errunion_payload_ptr_set => continue,
else => unreachable, // assertion failure
}
}
const other_data = sema.air_instructions.items(.data)[@intFromEnum(other_inst)].bin_op;
const other_operand = other_data.rhs;
if (!sema.checkRuntimeValue(other_operand)) {
@@ -30866,748 +30934,46 @@ fn storePtrVal(
operand_val: Value,
operand_ty: Type,
) !void {
const mod = sema.mod;
var mut_kit = try sema.beginComptimePtrMutation(block, src, ptr_val, operand_ty);
switch (mut_kit.root) {
.alloc => |a| try sema.checkComptimeVarStore(block, src, a),
.comptime_field => {},
const zcu = sema.mod;
const ip = &zcu.intern_pool;
// TODO: audit use sites to eliminate this coercion
const coerced_operand_val = try zcu.getCoerced(operand_val, operand_ty);
// TODO: audit use sites to eliminate this coercion
const ptr_ty = try zcu.ptrType(info: {
var info = ptr_val.typeOf(zcu).ptrInfo(zcu);
info.child = operand_ty.toIntern();
break :info info;
});
const coerced_ptr_val = try zcu.getCoerced(ptr_val, ptr_ty);
 
switch (try sema.storeComptimePtr(block, src, coerced_ptr_val, coerced_operand_val)) {
.success => {},
.runtime_store => unreachable, // use sites check this
// TODO use failWithInvalidComptimeFieldStore
.comptime_field_mismatch => return sema.fail(
block,
src,
"value stored in comptime field does not match the default value of the field",
.{},
),
.undef => return sema.failWithUseOfUndef(block, src),
.err_payload => |err_name| return sema.fail(block, src, "attempt to unwrap error: {}", .{err_name.fmt(ip)}),
.null_payload => return sema.fail(block, src, "attempt to use null value", .{}),
.inactive_union_field => return sema.fail(block, src, "access of inactive union field", .{}),
.needed_well_defined => |ty| return sema.fail(
block,
src,
"comptime dereference requires '{}' to have a well-defined layout",
.{ty.fmt(zcu)},
),
.out_of_bounds => |ty| return sema.fail(
block,
src,
"dereference of '{}' exceeds bounds of containing decl of type '{}'",
.{ ptr_ty.fmt(zcu), ty.fmt(zcu) },
),
.exceeds_host_size => return sema.fail(block, src, "bit-pointer target exceeds host size", .{}),
}
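// Illustrative sketch (not from the changeset) of a store served by
// `storeComptimePtr`: a comptime write through a pointer into an aggregate,
// resolved against the pointer's base address and byte offset.
comptime {
    var arr: [4]u8 = .{ 0, 0, 0, 0 };
    const p: *u8 = &arr[2];
    p.* = 7;
    if (arr[2] != 7) @compileError("comptime store failed");
}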
 
try sema.resolveTypeLayout(operand_ty);
switch (mut_kit.pointee) {
.opv => {},
.direct => |val_ptr| {
if (mut_kit.root == .comptime_field) {
val_ptr.* = .{ .interned = try val_ptr.intern(mod, sema.arena) };
if (operand_val.toIntern() != val_ptr.interned) {
// TODO use failWithInvalidComptimeFieldStore
return sema.fail(block, src, "value stored in comptime field does not match the default value of the field", .{});
}
return;
}
val_ptr.* = .{ .interned = operand_val.toIntern() };
},
.reinterpret => |reinterpret| {
try sema.resolveTypeLayout(mut_kit.ty);
const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(mod));
const buffer = try sema.gpa.alloc(u8, abi_size);
defer sema.gpa.free(buffer);
const interned_old = Value.fromInterned(try reinterpret.val_ptr.intern(mod, sema.arena));
interned_old.writeToMemory(mut_kit.ty, mod, buffer) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.ReinterpretDeclRef => unreachable,
error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(mod)}),
};
if (reinterpret.write_packed) {
operand_val.writeToPackedMemory(operand_ty, mod, buffer[reinterpret.byte_offset..], 0) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.ReinterpretDeclRef => unreachable,
};
} else {
operand_val.writeToMemory(operand_ty, mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.ReinterpretDeclRef => unreachable,
error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{operand_ty.fmt(mod)}),
};
}
const val = Value.readFromMemory(mut_kit.ty, mod, buffer, sema.arena) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.IllDefinedMemoryLayout => unreachable,
error.Unimplemented => return sema.fail(block, src, "TODO: implement readFromMemory for type '{}'", .{mut_kit.ty.fmt(mod)}),
};
reinterpret.val_ptr.* = .{ .interned = val.toIntern() };
},
.bad_decl_ty, .bad_ptr_ty => {
// TODO show the decl declaration site in a note and explain whether the decl
// or the pointer is the problematic type
return sema.fail(
block,
src,
"comptime mutation of a reinterpreted pointer requires type '{}' to have a well-defined memory layout",
.{mut_kit.ty.fmt(mod)},
);
},
}
}
 
const ComptimePtrMutationKit = struct {
const Root = union(enum) {
alloc: ComptimeAllocIndex,
comptime_field,
};
root: Root,
pointee: union(enum) {
opv,
/// The pointer type matches the actual comptime Value so a direct
/// modification is possible.
direct: *MutableValue,
/// The largest parent Value containing pointee and having a well-defined memory layout.
/// This is used for bitcasting, if direct dereferencing failed.
reinterpret: struct {
val_ptr: *MutableValue,
byte_offset: usize,
/// If set, write the operand to packed memory
write_packed: bool = false,
},
/// If the root decl could not be used as parent, this means `ty` is the type that
/// caused that by not having a well-defined layout.
/// This one means the Decl that owns the value trying to be modified does not
/// have a well defined memory layout.
bad_decl_ty,
/// If the root decl could not be used as parent, this means `ty` is the type that
/// caused that by not having a well-defined layout.
/// This one means the pointer type that is being stored through does not
/// have a well defined memory layout.
bad_ptr_ty,
},
ty: Type,
};
 
fn beginComptimePtrMutation(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ptr_val: Value,
ptr_elem_ty: Type,
) CompileError!ComptimePtrMutationKit {
const mod = sema.mod;
const ptr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr;
switch (ptr.addr) {
.decl, .anon_decl, .int => unreachable, // isComptimeMutablePtr has been checked already
.comptime_alloc => |alloc_index| {
const alloc = sema.getComptimeAlloc(alloc_index);
return sema.beginComptimePtrMutationInner(block, src, alloc.val.typeOf(mod), &alloc.val, ptr_elem_ty, .{ .alloc = alloc_index });
},
.comptime_field => |comptime_field| {
const duped = try sema.arena.create(MutableValue);
duped.* = .{ .interned = comptime_field };
return sema.beginComptimePtrMutationInner(
block,
src,
duped.typeOf(mod),
duped,
ptr_elem_ty,
.comptime_field,
);
},
.eu_payload => |eu_ptr| {
const eu_ty = Type.fromInterned(mod.intern_pool.typeOf(eu_ptr)).childType(mod);
var parent = try sema.beginComptimePtrMutation(block, src, Value.fromInterned(eu_ptr), eu_ty);
switch (parent.pointee) {
.opv => unreachable,
.direct => |val_ptr| {
const payload_ty = parent.ty.errorUnionPayload(mod);
try val_ptr.unintern(mod, sema.arena, false, false);
if (val_ptr.* == .interned) {
// An error union has been initialized to undefined at comptime and now we
// are for the first time setting the payload. We must change the
// representation of the error union to `eu_payload`.
const child = try sema.arena.create(MutableValue);
child.* = .{ .interned = try mod.intern(.{ .undef = payload_ty.toIntern() }) };
val_ptr.* = .{ .eu_payload = .{
.ty = parent.ty.toIntern(),
.child = child,
} };
}
return .{
.root = parent.root,
.pointee = .{ .direct = val_ptr.eu_payload.child },
.ty = payload_ty,
};
},
.bad_decl_ty, .bad_ptr_ty => return parent,
// Even though the parent value type has well-defined memory layout, our
// pointer type does not.
.reinterpret => return .{
.root = parent.root,
.pointee = .bad_ptr_ty,
.ty = eu_ty,
},
}
},
.opt_payload => |opt_ptr| {
const opt_ty = Type.fromInterned(mod.intern_pool.typeOf(opt_ptr)).childType(mod);
var parent = try sema.beginComptimePtrMutation(block, src, Value.fromInterned(opt_ptr), opt_ty);
switch (parent.pointee) {
.opv => unreachable,
.direct => |val_ptr| {
const payload_ty = parent.ty.optionalChild(mod);
try val_ptr.unintern(mod, sema.arena, false, false);
if (val_ptr.* == .interned) {
// An optional has been initialized to undefined at comptime and now we
// are for the first time setting the payload. We must change the
// representation of the optional to `opt_payload`.
const child = try sema.arena.create(MutableValue);
child.* = .{ .interned = try mod.intern(.{ .undef = payload_ty.toIntern() }) };
val_ptr.* = .{ .opt_payload = .{
.ty = parent.ty.toIntern(),
.child = child,
} };
}
return .{
.root = parent.root,
.pointee = .{ .direct = val_ptr.opt_payload.child },
.ty = payload_ty,
};
},
.bad_decl_ty, .bad_ptr_ty => return parent,
// Even though the parent value type has well-defined memory layout, our
// pointer type does not.
.reinterpret => return .{
.root = parent.root,
.pointee = .bad_ptr_ty,
.ty = opt_ty,
},
}
},
.elem => |elem_ptr| {
const base_elem_ty = Type.fromInterned(mod.intern_pool.typeOf(elem_ptr.base)).elemType2(mod);
var parent = try sema.beginComptimePtrMutation(block, src, Value.fromInterned(elem_ptr.base), base_elem_ty);
 
switch (parent.pointee) {
.opv => unreachable,
.direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) {
.Array, .Vector => {
const elem_ty = parent.ty.childType(mod);
const check_len = parent.ty.arrayLenIncludingSentinel(mod);
if ((try sema.typeHasOnePossibleValue(ptr_elem_ty)) != null) {
if (elem_ptr.index > check_len) {
// TODO have the parent include the decl so we can say "declared here"
return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{
elem_ptr.index, check_len,
});
}
return .{
.root = parent.root,
.pointee = .opv,
.ty = elem_ty,
};
}
if (elem_ptr.index >= check_len) {
// TODO have the parent include the decl so we can say "declared here"
return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{
elem_ptr.index, check_len,
});
}
 
// We might have a pointer to multiple elements of the array (e.g. a pointer
// to a sub-array). In this case, we just have to reinterpret the relevant
// bytes of the whole array rather than any single element.
reinterp_multi_elem: {
if (try sema.typeRequiresComptime(base_elem_ty)) break :reinterp_multi_elem;
if (try sema.typeRequiresComptime(ptr_elem_ty)) break :reinterp_multi_elem;
 
const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty);
if (elem_abi_size_u64 >= try sema.typeAbiSize(ptr_elem_ty)) break :reinterp_multi_elem;
 
const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64);
const elem_idx = try sema.usizeCast(block, src, elem_ptr.index);
return .{
.root = parent.root,
.pointee = .{ .reinterpret = .{
.val_ptr = val_ptr,
.byte_offset = elem_abi_size * elem_idx,
} },
.ty = parent.ty,
};
}
 
try val_ptr.unintern(mod, sema.arena, false, false);
 
const aggregate = switch (val_ptr.*) {
.interned,
.bytes,
.repeated,
.eu_payload,
.opt_payload,
.slice,
.un,
=> unreachable,
.aggregate => |*a| a,
};
 
return sema.beginComptimePtrMutationInner(
block,
src,
elem_ty,
&aggregate.elems[@intCast(elem_ptr.index)],
ptr_elem_ty,
parent.root,
);
},
else => {
if (elem_ptr.index != 0) {
// TODO include a "declared here" note for the decl
return sema.fail(block, src, "out of bounds comptime store of index {d}", .{
elem_ptr.index,
});
}
return beginComptimePtrMutationInner(
sema,
block,
src,
parent.ty,
val_ptr,
ptr_elem_ty,
parent.root,
);
},
},
.reinterpret => |reinterpret| {
if (!base_elem_ty.hasWellDefinedLayout(mod)) {
// Even though the parent value type has well-defined memory layout, our
// pointer type does not.
return .{
.root = parent.root,
.pointee = .bad_ptr_ty,
.ty = base_elem_ty,
};
}
 
const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty);
const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64);
const elem_idx = try sema.usizeCast(block, src, elem_ptr.index);
return .{
.root = parent.root,
.pointee = .{ .reinterpret = .{
.val_ptr = reinterpret.val_ptr,
.byte_offset = reinterpret.byte_offset + elem_abi_size * elem_idx,
} },
.ty = parent.ty,
};
},
.bad_decl_ty, .bad_ptr_ty => return parent,
}
},
.field => |field_ptr| {
const base_child_ty = Type.fromInterned(mod.intern_pool.typeOf(field_ptr.base)).childType(mod);
const field_index: u32 = @intCast(field_ptr.index);
 
var parent = try sema.beginComptimePtrMutation(block, src, Value.fromInterned(field_ptr.base), base_child_ty);
switch (parent.pointee) {
.opv => unreachable,
.direct => |val_ptr| {
try val_ptr.unintern(mod, sema.arena, false, false);
switch (val_ptr.*) {
.interned,
.eu_payload,
.opt_payload,
.repeated,
.bytes,
=> unreachable,
.aggregate => |*a| return sema.beginComptimePtrMutationInner(
block,
src,
parent.ty.structFieldType(field_index, mod),
&a.elems[field_index],
ptr_elem_ty,
parent.root,
),
.slice => |*s| switch (field_index) {
Value.slice_ptr_index => return sema.beginComptimePtrMutationInner(
block,
src,
parent.ty.slicePtrFieldType(mod),
s.ptr,
ptr_elem_ty,
parent.root,
),
Value.slice_len_index => return sema.beginComptimePtrMutationInner(
block,
src,
Type.usize,
s.len,
ptr_elem_ty,
parent.root,
),
else => unreachable,
},
.un => |*un| {
const layout = base_child_ty.containerLayout(mod);
 
const tag_type = base_child_ty.unionTagTypeHypothetical(mod);
const hypothetical_tag = try mod.enumValueFieldIndex(tag_type, field_index);
if (un.tag == .none and un.payload.* == .interned and un.payload.interned == .undef) {
// A union has been initialized to undefined at comptime and now we
// are for the first time setting the payload. We must change the
// tag implicitly.
const payload_ty = parent.ty.structFieldType(field_index, mod);
un.tag = hypothetical_tag.toIntern();
un.payload.* = .{ .interned = try mod.intern(.{ .undef = payload_ty.toIntern() }) };
return beginComptimePtrMutationInner(
sema,
block,
src,
payload_ty,
un.payload,
ptr_elem_ty,
parent.root,
);
}
 
if (layout == .auto or hypothetical_tag.toIntern() == un.tag) {
// We need to set the active field of the union.
un.tag = hypothetical_tag.toIntern();
 
const field_ty = parent.ty.structFieldType(field_index, mod);
return beginComptimePtrMutationInner(
sema,
block,
src,
field_ty,
un.payload,
ptr_elem_ty,
parent.root,
);
} else {
// Writing to a different field (a different or unknown tag is active) requires reinterpreting
// memory of the entire union, which requires knowing its abiSize.
try sema.resolveTypeLayout(parent.ty);
// This union value no longer has a well-defined tag type.
// The reinterpretation will read it back out as .none.
try un.payload.unintern(mod, sema.arena, false, false);
return .{
.root = parent.root,
.pointee = .{ .reinterpret = .{
.val_ptr = val_ptr,
.byte_offset = 0,
.write_packed = layout == .@"packed",
} },
.ty = parent.ty,
};
}
},
}
},
.reinterpret => |reinterpret| {
const field_offset_u64 = base_child_ty.structFieldOffset(field_index, mod);
const field_offset = try sema.usizeCast(block, src, field_offset_u64);
return .{
.root = parent.root,
.pointee = .{ .reinterpret = .{
.val_ptr = reinterpret.val_ptr,
.byte_offset = reinterpret.byte_offset + field_offset,
} },
.ty = parent.ty,
};
},
.bad_decl_ty, .bad_ptr_ty => return parent,
}
},
}
}
 
fn beginComptimePtrMutationInner(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
decl_ty: Type,
decl_val: *MutableValue,
ptr_elem_ty: Type,
root: ComptimePtrMutationKit.Root,
) CompileError!ComptimePtrMutationKit {
const mod = sema.mod;
const target = mod.getTarget();
const coerce_ok = (try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_ty, true, target, src, src)) == .ok;
 
const old_decl_val = decl_val.*;
try decl_val.unintern(mod, sema.arena, false, false);
if (decl_val.* == .un and decl_val.un.tag == .none and decl_val.un.payload.* == .interned and decl_val.un.payload.interned == .undef) {
// HACKHACK: undefined union - re-intern it for now
// `unintern` probably should just leave these as is, but I'm leaving it until I rewrite comptime pointer access.
decl_val.* = old_decl_val;
}
 
if (coerce_ok) {
return ComptimePtrMutationKit{
.root = root,
.pointee = .{ .direct = decl_val },
.ty = decl_ty,
};
}
 
// Handle the case that the decl is an array and we're actually trying to point to an element.
if (decl_ty.isArrayOrVector(mod)) {
const decl_elem_ty = decl_ty.childType(mod);
if ((try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_elem_ty, true, target, src, src)) == .ok) {
return ComptimePtrMutationKit{
.root = root,
.pointee = .{ .direct = decl_val },
.ty = decl_ty,
};
}
}
 
if (!decl_ty.hasWellDefinedLayout(mod)) {
return ComptimePtrMutationKit{
.root = root,
.pointee = .bad_decl_ty,
.ty = decl_ty,
};
}
if (!ptr_elem_ty.hasWellDefinedLayout(mod)) {
return ComptimePtrMutationKit{
.root = root,
.pointee = .bad_ptr_ty,
.ty = ptr_elem_ty,
};
}
return ComptimePtrMutationKit{
.root = root,
.pointee = .{ .reinterpret = .{
.val_ptr = decl_val,
.byte_offset = 0,
} },
.ty = decl_ty,
};
}
 
const ComptimePtrLoadKit = struct {
/// The Value and Type corresponding to the pointee of the provided pointer.
/// If a direct dereference is not possible, this is null.
pointee: ?MutableValue,
/// The largest parent Value containing `pointee` and having a well-defined memory layout.
/// This is used for bitcasting, if direct dereferencing failed (i.e. `pointee` is null).
parent: ?struct {
val: MutableValue,
byte_offset: usize,
},
/// If the root decl could not be used as `parent`, this is the type that
/// caused that by not having a well-defined layout
ty_without_well_defined_layout: ?Type,
};
 
const ComptimePtrLoadError = CompileError || error{
RuntimeLoad,
};
 
/// If `maybe_array_ty` is provided, it will be used to directly dereference an
/// .elem_ptr of type T to a value of [N]T, if necessary.
fn beginComptimePtrLoad(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ptr_val: Value,
maybe_array_ty: ?Type,
) ComptimePtrLoadError!ComptimePtrLoadKit {
const mod = sema.mod;
const ip = &mod.intern_pool;
const target = mod.getTarget();
 
var deref: ComptimePtrLoadKit = switch (ip.indexToKey(ptr_val.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl_index| blk: {
const decl = mod.declPtr(decl_index);
try sema.declareDependency(.{ .decl_val = decl_index });
if (decl.val.getVariable(mod) != null) return error.RuntimeLoad;
const decl_val: MutableValue = .{ .interned = decl.val.toIntern() };
const layout_defined = decl.typeOf(mod).hasWellDefinedLayout(mod);
break :blk ComptimePtrLoadKit{
.parent = if (layout_defined) .{ .val = decl_val, .byte_offset = 0 } else null,
.pointee = decl_val,
.ty_without_well_defined_layout = if (!layout_defined) decl.typeOf(mod) else null,
};
},
.comptime_alloc => |alloc_index| kit: {
const alloc = sema.getComptimeAlloc(alloc_index);
const alloc_ty = alloc.val.typeOf(mod);
const layout_defined = alloc_ty.hasWellDefinedLayout(mod);
break :kit .{
.parent = if (layout_defined) .{ .val = alloc.val, .byte_offset = 0 } else null,
.pointee = alloc.val,
.ty_without_well_defined_layout = if (!layout_defined) alloc_ty else null,
};
},
.anon_decl => |anon_decl| blk: {
const decl_val = anon_decl.val;
if (Value.fromInterned(decl_val).getVariable(mod) != null) return error.RuntimeLoad;
const decl_ty = Type.fromInterned(ip.typeOf(decl_val));
const decl_mv: MutableValue = .{ .interned = decl_val };
const layout_defined = decl_ty.hasWellDefinedLayout(mod);
break :blk ComptimePtrLoadKit{
.parent = if (layout_defined) .{ .val = decl_mv, .byte_offset = 0 } else null,
.pointee = decl_mv,
.ty_without_well_defined_layout = if (!layout_defined) decl_ty else null,
};
},
.int => return error.RuntimeLoad,
.eu_payload, .opt_payload => |container_ptr| blk: {
const container_ty = Type.fromInterned(ip.typeOf(container_ptr)).childType(mod);
var deref = try sema.beginComptimePtrLoad(block, src, Value.fromInterned(container_ptr), container_ty);
 
// eu_payload and opt_payload never have a well-defined layout
if (deref.parent != null) {
deref.parent = null;
deref.ty_without_well_defined_layout = container_ty;
}
 
if (deref.pointee) |pointee| {
const pointee_ty = pointee.typeOf(mod);
const coerce_in_mem_ok =
(try sema.coerceInMemoryAllowed(block, container_ty, pointee_ty, false, target, src, src)) == .ok or
(try sema.coerceInMemoryAllowed(block, pointee_ty, container_ty, false, target, src, src)) == .ok;
if (coerce_in_mem_ok) {
deref.pointee = switch (pointee) {
.interned => |ip_index| .{ .interned = switch (ip.indexToKey(ip_index)) {
.error_union => |error_union| switch (error_union.val) {
.err_name => |err_name| return sema.fail(
block,
src,
"attempt to unwrap error: {}",
.{err_name.fmt(ip)},
),
.payload => |payload| payload,
},
.opt => |opt| switch (opt.val) {
.none => return sema.fail(block, src, "attempt to use null value", .{}),
else => |payload| payload,
},
else => unreachable,
} },
.eu_payload, .opt_payload => |p| p.child.*,
else => unreachable,
};
break :blk deref;
}
}
deref.pointee = null;
break :blk deref;
},
.comptime_field => |field_val| .{
.parent = null,
.pointee = .{ .interned = field_val },
.ty_without_well_defined_layout = Type.fromInterned(ip.typeOf(field_val)),
},
.elem => |elem_ptr| blk: {
const elem_ty = Type.fromInterned(ip.typeOf(elem_ptr.base)).elemType2(mod);
var deref = try sema.beginComptimePtrLoad(block, src, Value.fromInterned(elem_ptr.base), null);
 
// This code assumes that elem_ptrs have been "flattened" in order for direct dereference
// to succeed, meaning that elem ptrs of the same elem_ty are coalesced. Here we check that
// our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened"
switch (ip.indexToKey(elem_ptr.base)) {
.ptr => |base_ptr| switch (base_ptr.addr) {
.elem => |base_elem| assert(!Type.fromInterned(ip.typeOf(base_elem.base)).elemType2(mod).eql(elem_ty, mod)),
else => {},
},
else => {},
}
 
if (elem_ptr.index != 0) {
if (elem_ty.hasWellDefinedLayout(mod)) {
if (deref.parent) |*parent| {
// Update the byte offset (in-place)
const elem_size = try sema.typeAbiSize(elem_ty);
const offset = parent.byte_offset + elem_size * elem_ptr.index;
parent.byte_offset = try sema.usizeCast(block, src, offset);
}
} else {
deref.parent = null;
deref.ty_without_well_defined_layout = elem_ty;
}
}
 
// If we're loading an elem that was derived from a different type
// than the true type of the underlying decl, we cannot deref directly
const ty_matches = if (deref.pointee) |pointee| match: {
const ty = pointee.typeOf(mod);
if (!ty.isArrayOrVector(mod)) break :match false;
const deref_elem_ty = ty.childType(mod);
if ((try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok) break :match true;
if ((try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok) break :match true;
break :match false;
} else false;
if (!ty_matches) {
deref.pointee = null;
break :blk deref;
}
 
var array_val = deref.pointee.?;
const check_len = array_val.typeOf(mod).arrayLenIncludingSentinel(mod);
if (maybe_array_ty) |load_ty| {
// It's possible that we're loading a [N]T, in which case we'd like to slice
// the pointee array directly from our parent array.
if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, mod)) {
const len = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod));
const elem_idx = try sema.usizeCast(block, src, elem_ptr.index);
deref.pointee = if (elem_ptr.index + len <= check_len) switch (array_val) {
.aggregate => |a| .{ .aggregate = .{
.ty = (try mod.arrayType(.{ .len = len, .child = elem_ty.toIntern() })).toIntern(),
.elems = a.elems[elem_idx..][0..len],
} },
else => .{
.interned = (try (Value.fromInterned(
try array_val.intern(mod, sema.arena),
).sliceArray(sema, elem_idx, elem_idx + len))).toIntern(),
},
} else null;
break :blk deref;
}
}
 
if (elem_ptr.index >= check_len) {
deref.pointee = null;
break :blk deref;
}
if (elem_ptr.index == check_len - 1) {
if (array_val.typeOf(mod).sentinel(mod)) |sent| {
deref.pointee = .{ .interned = sent.toIntern() };
break :blk deref;
}
}
deref.pointee = try array_val.getElem(mod, @intCast(elem_ptr.index));
break :blk deref;
},
.field => |field_ptr| blk: {
const field_index: u32 = @intCast(field_ptr.index);
const container_ty = Type.fromInterned(ip.typeOf(field_ptr.base)).childType(mod);
var deref = try sema.beginComptimePtrLoad(block, src, Value.fromInterned(field_ptr.base), container_ty);
 
if (container_ty.hasWellDefinedLayout(mod)) {
const struct_obj = mod.typeToStruct(container_ty);
if (struct_obj != null and struct_obj.?.layout == .@"packed") {
// packed structs are not byte addressable
deref.parent = null;
} else if (deref.parent) |*parent| {
// Update the byte offset (in-place)
try sema.resolveTypeLayout(container_ty);
const field_offset = container_ty.structFieldOffset(field_index, mod);
parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset);
}
} else {
deref.parent = null;
deref.ty_without_well_defined_layout = container_ty;
}
 
const pointee = deref.pointee orelse break :blk deref;
const pointee_ty = pointee.typeOf(mod);
const coerce_in_mem_ok =
(try sema.coerceInMemoryAllowed(block, container_ty, pointee_ty, false, target, src, src)) == .ok or
(try sema.coerceInMemoryAllowed(block, pointee_ty, container_ty, false, target, src, src)) == .ok;
if (!coerce_in_mem_ok) {
deref.pointee = null;
break :blk deref;
}
 
deref.pointee = try pointee.getElem(mod, field_index);
break :blk deref;
},
},
.opt => |opt| switch (opt.val) {
.none => return sema.fail(block, src, "attempt to use null value", .{}),
else => |payload| try sema.beginComptimePtrLoad(block, src, Value.fromInterned(payload), null),
},
else => unreachable,
};
 
if (deref.pointee) |val| {
if (deref.parent == null and val.typeOf(mod).hasWellDefinedLayout(mod)) {
deref.parent = .{ .val = val, .byte_offset = 0 };
}
}
return deref;
}
 
fn bitCast(
@@ -31618,28 +30984,33 @@ fn bitCast(
inst_src: LazySrcLoc,
operand_src: ?LazySrcLoc,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const zcu = sema.mod;
try sema.resolveTypeLayout(dest_ty);
 
const old_ty = sema.typeOf(inst);
try sema.resolveTypeLayout(old_ty);
 
const dest_bits = dest_ty.bitSize(mod);
const old_bits = old_ty.bitSize(mod);
const dest_bits = dest_ty.bitSize(zcu);
const old_bits = old_ty.bitSize(zcu);
 
if (old_bits != dest_bits) {
return sema.fail(block, inst_src, "@bitCast size mismatch: destination type '{}' has {d} bits but source type '{}' has {d} bits", .{
dest_ty.fmt(mod),
dest_ty.fmt(zcu),
dest_bits,
old_ty.fmt(mod),
old_ty.fmt(zcu),
old_bits,
});
}
 
if (try sema.resolveValue(inst)) |val| {
if (val.isUndef(mod))
return mod.undefRef(dest_ty);
if (try sema.bitCastVal(block, inst_src, val, old_ty, dest_ty, 0)) |result_val| {
if (val.isUndef(zcu))
return zcu.undefRef(dest_ty);
if (old_ty.zigTypeTag(zcu) == .ErrorSet and dest_ty.zigTypeTag(zcu) == .ErrorSet) {
// Special case: we sometimes call `bitCast` on error set values, but they
// don't have a well-defined layout, so we can't use `bitCastVal` on them.
return Air.internedToRef((try zcu.getCoerced(val, dest_ty)).toIntern());
}
if (try sema.bitCastVal(val, dest_ty, 0, 0, 0)) |result_val| {
return Air.internedToRef(result_val.toIntern());
}
}
@@ -31648,98 +31019,6 @@ fn bitCast(
return block.addBitCast(dest_ty, inst);
}
 
fn bitCastVal(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
val: Value,
old_ty: Type,
new_ty: Type,
buffer_offset: usize,
) !?Value {
const mod = sema.mod;
if (old_ty.eql(new_ty, mod)) return val;
 
// For types with well-defined memory layouts, we serialize them into a byte buffer,
// then deserialize to the new type.
const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(mod));
 
const buffer = try sema.gpa.alloc(u8, abi_size);
defer sema.gpa.free(buffer);
val.writeToMemory(old_ty, mod, buffer) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.ReinterpretDeclRef => return null,
error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(mod)}),
};
 
return Value.readFromMemory(new_ty, mod, buffer[buffer_offset..], sema.arena) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.IllDefinedMemoryLayout => unreachable,
error.Unimplemented => return sema.fail(block, src, "TODO: implement readFromMemory for type '{}'", .{new_ty.fmt(mod)}),
};
}
 
fn bitCastUnionFieldVal(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
val: Value,
old_ty: Type,
field_ty: Type,
layout: std.builtin.Type.ContainerLayout,
) !?Value {
const mod = sema.mod;
if (old_ty.eql(field_ty, mod)) return val;
 
// Bitcasting a union field value requires that that field's layout be known
try sema.resolveTypeLayout(field_ty);
 
const old_size = try sema.usizeCast(block, src, old_ty.abiSize(mod));
const field_size = try sema.usizeCast(block, src, field_ty.abiSize(mod));
const endian = mod.getTarget().cpu.arch.endian();
 
const buffer = try sema.gpa.alloc(u8, @max(old_size, field_size));
defer sema.gpa.free(buffer);
 
// Reading a larger value means we need to reinterpret from undefined bytes.
const offset = switch (layout) {
.@"extern" => offset: {
if (field_size > old_size) @memset(buffer[old_size..], 0xaa);
val.writeToMemory(old_ty, mod, buffer) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.ReinterpretDeclRef => return null,
error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(mod)}),
};
break :offset 0;
},
.@"packed" => offset: {
if (field_size > old_size) {
const min_size = @max(old_size, 1);
switch (endian) {
.little => @memset(buffer[min_size - 1 ..], 0xaa),
.big => @memset(buffer[0 .. buffer.len - min_size + 1], 0xaa),
}
}
 
val.writeToPackedMemory(old_ty, mod, buffer, 0) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.ReinterpretDeclRef => return null,
};
 
break :offset if (endian == .big) buffer.len - field_size else 0;
},
.auto => unreachable,
};
 
return Value.readFromMemory(field_ty, mod, buffer[offset..], sema.arena) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.IllDefinedMemoryLayout => unreachable,
error.Unimplemented => return sema.fail(block, src, "TODO: implement readFromMemory for type '{}'", .{field_ty.fmt(mod)}),
};
}
 
fn coerceArrayPtrToSlice(
sema: *Sema,
block: *Block,
@@ -31885,7 +31164,7 @@ fn coerceEnumToUnion(
if (try sema.resolveDefinedValue(block, inst_src, enum_tag)) |val| {
const field_index = union_ty.unionTagFieldIndex(val, sema.mod) orelse {
return sema.fail(block, inst_src, "union '{}' has no tag with value '{}'", .{
union_ty.fmt(sema.mod), val.fmtValue(sema.mod),
union_ty.fmt(sema.mod), val.fmtValue(sema.mod, sema),
});
};
 
@@ -32595,7 +31874,7 @@ fn addReferencedBy(
});
}
 
fn ensureDeclAnalyzed(sema: *Sema, decl_index: InternPool.DeclIndex) CompileError!void {
pub fn ensureDeclAnalyzed(sema: *Sema, decl_index: InternPool.DeclIndex) CompileError!void {
const mod = sema.mod;
const ip = &mod.intern_pool;
const decl = mod.declPtr(decl_index);
@@ -32673,7 +31952,8 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: InternPool.DeclIndex, analyze_fn
}
return Air.internedToRef((try mod.intern(.{ .ptr = .{
.ty = ptr_ty.toIntern(),
.addr = .{ .decl = decl_index },
.base_addr = .{ .decl = decl_index },
.byte_offset = 0,
} })));
}
 
@@ -33102,8 +32382,8 @@ fn analyzeSlice(
msg,
"expected '{}', found '{}'",
.{
Value.zero_comptime_int.fmtValue(mod),
start_value.fmtValue(mod),
Value.zero_comptime_int.fmtValue(mod, sema),
start_value.fmtValue(mod, sema),
},
);
break :msg msg;
@@ -33119,8 +32399,8 @@ fn analyzeSlice(
msg,
"expected '{}', found '{}'",
.{
Value.one_comptime_int.fmtValue(mod),
end_value.fmtValue(mod),
Value.one_comptime_int.fmtValue(mod, sema),
end_value.fmtValue(mod, sema),
},
);
break :msg msg;
@@ -33133,7 +32413,7 @@ fn analyzeSlice(
block,
end_src,
"end index {} out of bounds for slice of single-item pointer",
.{end_value.fmtValue(mod)},
.{end_value.fmtValue(mod, sema)},
);
}
}
@@ -33228,8 +32508,8 @@ fn analyzeSlice(
end_src,
"end index {} out of bounds for array of length {}{s}",
.{
end_val.fmtValue(mod),
len_val.fmtValue(mod),
end_val.fmtValue(mod, sema),
len_val.fmtValue(mod, sema),
sentinel_label,
},
);
@@ -33273,7 +32553,7 @@ fn analyzeSlice(
end_src,
"end index {} out of bounds for slice of length {d}{s}",
.{
end_val.fmtValue(mod),
end_val.fmtValue(mod, sema),
try slice_val.sliceLen(sema),
sentinel_label,
},
@@ -33333,8 +32613,8 @@ fn analyzeSlice(
start_src,
"start index {} is larger than end index {}",
.{
start_val.fmtValue(mod),
end_val.fmtValue(mod),
start_val.fmtValue(mod, sema),
end_val.fmtValue(mod, sema),
},
);
}
@@ -33347,16 +32627,15 @@ fn analyzeSlice(
 
const many_ptr_ty = try mod.manyConstPtrType(elem_ty);
const many_ptr_val = try mod.getCoerced(ptr_val, many_ptr_ty);
const elem_ptr_ty = try mod.singleConstPtrType(elem_ty);
const elem_ptr = try many_ptr_val.elemPtr(elem_ptr_ty, sentinel_index, mod);
const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty);
const elem_ptr = try many_ptr_val.ptrElem(sentinel_index, sema);
const res = try sema.pointerDerefExtra(block, src, elem_ptr);
const actual_sentinel = switch (res) {
.runtime_load => break :sentinel_check,
.val => |v| v,
.needed_well_defined => |ty| return sema.fail(
block,
src,
"comptime dereference requires '{}' to have a well-defined layout, but it does not.",
"comptime dereference requires '{}' to have a well-defined layout",
.{ty.fmt(mod)},
),
.out_of_bounds => |ty| return sema.fail(
@@ -33372,8 +32651,8 @@ fn analyzeSlice(
const msg = try sema.errMsg(block, src, "value in memory does not match slice sentinel", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "expected '{}', found '{}'", .{
expected_sentinel.fmtValue(mod),
actual_sentinel.fmtValue(mod),
expected_sentinel.fmtValue(mod, sema),
actual_sentinel.fmtValue(mod, sema),
});
 
break :msg msg;
@@ -35599,8 +34878,8 @@ fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value {
} }));
},
.ptr => |ptr| {
switch (ptr.addr) {
.decl, .comptime_alloc, .anon_decl => return val,
switch (ptr.base_addr) {
.decl, .comptime_alloc, .anon_decl, .int => return val,
.comptime_field => |field_val| {
const resolved_field_val =
(try sema.resolveLazyValue(Value.fromInterned(field_val))).toIntern();
@@ -35609,17 +34888,8 @@ fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value {
else
Value.fromInterned((try mod.intern(.{ .ptr = .{
.ty = ptr.ty,
.addr = .{ .comptime_field = resolved_field_val },
} })));
},
.int => |int| {
const resolved_int = (try sema.resolveLazyValue(Value.fromInterned(int))).toIntern();
return if (resolved_int == int)
val
else
Value.fromInterned((try mod.intern(.{ .ptr = .{
.ty = ptr.ty,
.addr = .{ .int = resolved_int },
.base_addr = .{ .comptime_field = resolved_field_val },
.byte_offset = ptr.byte_offset,
} })));
},
.eu_payload, .opt_payload => |base| {
@@ -35629,22 +34899,23 @@ fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value {
else
Value.fromInterned((try mod.intern(.{ .ptr = .{
.ty = ptr.ty,
.addr = switch (ptr.addr) {
.base_addr = switch (ptr.base_addr) {
.eu_payload => .{ .eu_payload = resolved_base },
.opt_payload => .{ .opt_payload = resolved_base },
else => unreachable,
},
.byte_offset = ptr.byte_offset,
} })));
},
.elem, .field => |base_index| {
.arr_elem, .field => |base_index| {
const resolved_base = (try sema.resolveLazyValue(Value.fromInterned(base_index.base))).toIntern();
return if (resolved_base == base_index.base)
val
else
Value.fromInterned((try mod.intern(.{ .ptr = .{
.ty = ptr.ty,
.addr = switch (ptr.addr) {
.elem => .{ .elem = .{
.base_addr = switch (ptr.base_addr) {
.arr_elem => .{ .arr_elem = .{
.base = resolved_base,
.index = base_index.index,
} },
@@ -35654,6 +34925,7 @@ fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value {
} },
else => unreachable,
},
.byte_offset = ptr.byte_offset,
} })));
},
}
@@ -36166,7 +35438,8 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
var max_align: Alignment = .@"1";
for (0..union_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
if (!(try sema.typeHasRuntimeBits(field_ty))) continue;
 
if (try sema.typeRequiresComptime(field_ty) or field_ty.zigTypeTag(mod) == .NoReturn) continue; // TODO: should this affect alignment?
 
max_size = @max(max_size, sema.typeAbiSize(field_ty) catch |err| switch (err) {
error.AnalysisFail => {
@@ -36496,7 +35769,15 @@ pub fn resolveTypeFieldsStruct(
}
defer struct_type.clearTypesWip(ip);
 
try semaStructFields(mod, sema.arena, struct_type);
semaStructFields(mod, sema.arena, struct_type) catch |err| switch (err) {
error.AnalysisFail => {
if (mod.declPtr(owner_decl).analysis == .complete) {
mod.declPtr(owner_decl).analysis = .dependency_failure;
}
return error.AnalysisFail;
},
else => |e| return e,
};
}
 
pub fn resolveStructFieldInits(sema: *Sema, ty: Type) CompileError!void {
@@ -36521,7 +35802,15 @@ pub fn resolveStructFieldInits(sema: *Sema, ty: Type) CompileError!void {
}
defer struct_type.clearInitsWip(ip);
 
try semaStructFieldInits(mod, sema.arena, struct_type);
semaStructFieldInits(mod, sema.arena, struct_type) catch |err| switch (err) {
error.AnalysisFail => {
if (mod.declPtr(owner_decl).analysis == .complete) {
mod.declPtr(owner_decl).analysis = .dependency_failure;
}
return error.AnalysisFail;
},
else => |e| return e,
};
struct_type.setHaveFieldInits(ip);
}
 
@@ -36560,7 +35849,15 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load
 
union_type.flagsPtr(ip).status = .field_types_wip;
errdefer union_type.flagsPtr(ip).status = .none;
try semaUnionFields(mod, sema.arena, union_type);
semaUnionFields(mod, sema.arena, union_type) catch |err| switch (err) {
error.AnalysisFail => {
if (owner_decl.analysis == .complete) {
owner_decl.analysis = .dependency_failure;
}
return error.AnalysisFail;
},
else => |e| return e,
};
union_type.flagsPtr(ip).status = .have_field_types;
}
 
@@ -37391,7 +36688,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Loaded
const field_src = mod.fieldSrcLoc(union_type.decl, .{ .index = field_i }).lazy;
const other_field_src = mod.fieldSrcLoc(union_type.decl, .{ .index = gop.index }).lazy;
const msg = msg: {
const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{enum_tag_val.fmtValue(mod)});
const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{enum_tag_val.fmtValue(mod, &sema)});
errdefer msg.destroy(gpa);
try sema.errNote(&block_scope, other_field_src, msg, "other occurrence here", .{});
break :msg msg;
@@ -38158,7 +37455,8 @@ fn analyzeComptimeAlloc(
 
return Air.internedToRef((try mod.intern(.{ .ptr = .{
.ty = ptr_type.toIntern(),
.addr = .{ .comptime_alloc = alloc },
.base_addr = .{ .comptime_alloc = alloc },
.byte_offset = 0,
} })));
}
 
@@ -38247,16 +37545,15 @@ pub fn analyzeAsAddressSpace(
/// Asserts the value is a pointer and dereferences it.
/// Returns `null` if the pointer contents cannot be loaded at comptime.
fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value {
const mod = sema.mod;
const load_ty = ptr_ty.childType(mod);
const res = try sema.pointerDerefExtra(block, src, ptr_val, load_ty);
switch (res) {
// TODO: audit use sites to eliminate this coercion
const coerced_ptr_val = try sema.mod.getCoerced(ptr_val, ptr_ty);
switch (try sema.pointerDerefExtra(block, src, coerced_ptr_val)) {
.runtime_load => return null,
.val => |v| return v,
.needed_well_defined => |ty| return sema.fail(
block,
src,
"comptime dereference requires '{}' to have a well-defined layout, but it does not.",
"comptime dereference requires '{}' to have a well-defined layout",
.{ty.fmt(sema.mod)},
),
.out_of_bounds => |ty| return sema.fail(
@@ -38275,68 +37572,19 @@ const DerefResult = union(enum) {
out_of_bounds: Type,
};
 
fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, load_ty: Type) CompileError!DerefResult {
const mod = sema.mod;
const target = mod.getTarget();
const deref = sema.beginComptimePtrLoad(block, src, ptr_val, load_ty) catch |err| switch (err) {
error.RuntimeLoad => return DerefResult{ .runtime_load = {} },
else => |e| return e,
};
 
if (deref.pointee) |pointee| {
const uncoerced_val = Value.fromInterned(try pointee.intern(mod, sema.arena));
const ty = Type.fromInterned(mod.intern_pool.typeOf(uncoerced_val.toIntern()));
const coerce_in_mem_ok =
(try sema.coerceInMemoryAllowed(block, load_ty, ty, false, target, src, src)) == .ok or
(try sema.coerceInMemoryAllowed(block, ty, load_ty, false, target, src, src)) == .ok;
if (coerce_in_mem_ok) {
// We have a Value that lines up in virtual memory exactly with what we want to load,
// and it is in-memory coercible to load_ty. It may be returned without modifications.
// Move mutable decl values to the InternPool and assert other decls are already in
// the InternPool.
const coerced_val = try mod.getCoerced(uncoerced_val, load_ty);
return .{ .val = coerced_val };
}
}
 
// The type is not in-memory coercible or the direct dereference failed, so it must
// be bitcast according to the pointer type we are performing the load through.
if (!load_ty.hasWellDefinedLayout(mod)) {
return DerefResult{ .needed_well_defined = load_ty };
}
 
const load_sz = try sema.typeAbiSize(load_ty);
 
// Try the smaller bit-cast first, since that's more efficient than using the larger `parent`
if (deref.pointee) |pointee| {
const val_ip_index = try pointee.intern(mod, sema.arena);
const val = Value.fromInterned(val_ip_index);
const ty = Type.fromInterned(mod.intern_pool.typeOf(val_ip_index));
if (load_sz <= try sema.typeAbiSize(ty)) {
return .{ .val = (try sema.bitCastVal(block, src, val, ty, load_ty, 0)) orelse return .runtime_load };
}
}
 
// If that fails, try to bit-cast from the largest parent value with a well-defined layout
if (deref.parent) |parent| {
const parent_ip_index = try parent.val.intern(mod, sema.arena);
const parent_val = Value.fromInterned(parent_ip_index);
const parent_ty = Type.fromInterned(mod.intern_pool.typeOf(parent_ip_index));
if (load_sz + parent.byte_offset <= try sema.typeAbiSize(parent_ty)) {
return .{ .val = (try sema.bitCastVal(block, src, parent_val, parent_ty, load_ty, parent.byte_offset)) orelse return .runtime_load };
}
}
 
if (deref.ty_without_well_defined_layout) |bad_ty| {
// We got no parent for bit-casting, or the parent we got was too small. Either way, the problem
// is that some type we encountered when de-referencing does not have a well-defined layout.
return .{ .needed_well_defined = bad_ty };
} else {
// If all encountered types had well-defined layouts, the parent is the root decl and it just
// wasn't big enough for the load.
const parent_ip_index = try deref.parent.?.val.intern(mod, sema.arena);
const parent_ty = Type.fromInterned(mod.intern_pool.typeOf(parent_ip_index));
return .{ .out_of_bounds = parent_ty };
fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value) CompileError!DerefResult {
const zcu = sema.mod;
const ip = &zcu.intern_pool;
switch (try sema.loadComptimePtr(block, src, ptr_val)) {
.success => |mv| return .{ .val = try mv.intern(zcu, sema.arena) },
.runtime_load => return .runtime_load,
.undef => return sema.failWithUseOfUndef(block, src),
.err_payload => |err_name| return sema.fail(block, src, "attempt to unwrap error: {}", .{err_name.fmt(ip)}),
.null_payload => return sema.fail(block, src, "attempt to use null value", .{}),
.inactive_union_field => return sema.fail(block, src, "access of inactive union field", .{}),
.needed_well_defined => |ty| return .{ .needed_well_defined = ty },
.out_of_bounds => |ty| return .{ .out_of_bounds = ty },
.exceeds_host_size => return sema.fail(block, src, "bit-pointer target exceeds host size", .{}),
}
}
 
@@ -38394,18 +37642,18 @@ pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool {
};
}
 
fn typeAbiSize(sema: *Sema, ty: Type) !u64 {
pub fn typeAbiSize(sema: *Sema, ty: Type) !u64 {
try sema.resolveTypeLayout(ty);
return ty.abiSize(sema.mod);
}
 
fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!Alignment {
pub fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!Alignment {
return (try ty.abiAlignmentAdvanced(sema.mod, .{ .sema = sema })).scalar;
}
 
/// Not valid to call for packed unions.
/// Keep implementation in sync with `Module.unionFieldNormalAlignment`.
fn unionFieldAlignment(sema: *Sema, u: InternPool.LoadedUnionType, field_index: u32) !Alignment {
pub fn unionFieldAlignment(sema: *Sema, u: InternPool.LoadedUnionType, field_index: u32) !Alignment {
const mod = sema.mod;
const ip = &mod.intern_pool;
const field_align = u.fieldAlign(ip, field_index);
@@ -38416,7 +37664,7 @@ fn unionFieldAlignment(sema: *Sema, u: InternPool.LoadedUnionType, field_index:
}
 
/// Keep implementation in sync with `Module.structFieldAlignment`.
fn structFieldAlignment(
pub fn structFieldAlignment(
sema: *Sema,
explicit_alignment: InternPool.Alignment,
field_ty: Type,
@@ -38724,6 +37972,13 @@ fn intSubWithOverflowScalar(
const mod = sema.mod;
const info = ty.intInfo(mod);
 
if (lhs.isUndef(mod) or rhs.isUndef(mod)) {
return .{
.overflow_bit = try mod.undefValue(Type.u1),
.wrapped_result = try mod.undefValue(ty),
};
}
 
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
@@ -38808,7 +38063,7 @@ fn intFromFloatScalar(
block,
src,
"fractional component prevents float value '{}' from coercion to type '{}'",
.{ val.fmtValue(mod), int_ty.fmt(mod) },
.{ val.fmtValue(mod, sema), int_ty.fmt(mod) },
);
 
const float = val.toFloat(f128, mod);
@@ -38830,7 +38085,7 @@ fn intFromFloatScalar(
 
if (!(try sema.intFitsInType(cti_result, int_ty, null))) {
return sema.fail(block, src, "float value '{}' cannot be stored in integer type '{}'", .{
val.fmtValue(sema.mod), int_ty.fmt(sema.mod),
val.fmtValue(sema.mod, sema), int_ty.fmt(sema.mod),
});
}
return mod.getCoerced(cti_result, int_ty);
@@ -38975,6 +38230,13 @@ fn intAddWithOverflowScalar(
const mod = sema.mod;
const info = ty.intInfo(mod);
 
if (lhs.isUndef(mod) or rhs.isUndef(mod)) {
return .{
.overflow_bit = try mod.undefValue(Type.u1),
.wrapped_result = try mod.undefValue(ty),
};
}
 
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
@@ -39070,12 +38332,14 @@ fn compareVector(
 
/// Returns the type of a pointer to an element.
/// Asserts that the type is a pointer, and that the element type is indexable.
/// If the element index is comptime-known, it must be passed in `offset`.
/// For *@Vector(n, T), return *align(a:b:h:v) T
/// For *[N]T, return *T
/// For [*]T, returns *T
/// For []T, returns *T
/// Handles const-ness and address spaces in particular.
/// This code is duplicated in `analyzePtrArithmetic`.
fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
pub fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
const mod = sema.mod;
const ptr_info = ptr_ty.ptrInfo(mod);
const elem_ty = ptr_ty.elemType2(mod);
@@ -39180,7 +38444,7 @@ fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool
return sema.typeOf(ref).zigTypeTag(sema.mod) == tag;
}
 
fn ptrType(sema: *Sema, info: InternPool.Key.PtrType) CompileError!Type {
pub fn ptrType(sema: *Sema, info: InternPool.Key.PtrType) CompileError!Type {
if (info.flags.alignment != .none) {
_ = try sema.typeAbiAlignment(Type.fromInterned(info.child));
}
@@ -39210,12 +38474,12 @@ pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void {
fn isComptimeMutablePtr(sema: *Sema, val: Value) bool {
return switch (sema.mod.intern_pool.indexToKey(val.toIntern())) {
.slice => |slice| sema.isComptimeMutablePtr(Value.fromInterned(slice.ptr)),
.ptr => |ptr| switch (ptr.addr) {
.ptr => |ptr| switch (ptr.base_addr) {
.anon_decl, .decl, .int => false,
.comptime_field => true,
.comptime_alloc => |alloc_index| !sema.getComptimeAlloc(alloc_index).is_const,
.eu_payload, .opt_payload => |base| sema.isComptimeMutablePtr(Value.fromInterned(base)),
.elem, .field => |bi| sema.isComptimeMutablePtr(Value.fromInterned(bi.base)),
.arr_elem, .field => |bi| sema.isComptimeMutablePtr(Value.fromInterned(bi.base)),
},
else => false,
};
@@ -39321,3 +38585,11 @@ fn maybeDerefSliceAsArray(
const casted_ptr = try zcu.getCoerced(Value.fromInterned(slice.ptr), ptr_ty);
return sema.pointerDeref(block, src, casted_ptr, ptr_ty);
}
 
pub const bitCastVal = @import("Sema/bitcast.zig").bitCast;
pub const bitCastSpliceVal = @import("Sema/bitcast.zig").bitCastSplice;
 
const loadComptimePtr = @import("Sema/comptime_ptr_access.zig").loadComptimePtr;
const ComptimeLoadResult = @import("Sema/comptime_ptr_access.zig").ComptimeLoadResult;
const storeComptimePtr = @import("Sema/comptime_ptr_access.zig").storeComptimePtr;
const ComptimeStoreResult = @import("Sema/comptime_ptr_access.zig").ComptimeStoreResult;
 
src/Sema/bitcast.zig added: 4888, removed: 2579, total 2309
@@ -0,0 +1,772 @@
//! This file contains logic for bit-casting arbitrary values at comptime, including splicing
//! bits together for comptime stores of bit-pointers. The strategy is to "flatten" values to
//! a sequence of values in *packed* memory, and then unflatten through a combination of special
//! cases (particularly for pointers and `undefined` values) and in-memory buffer reinterprets.
//!
//! This is a little awkward on big-endian targets, as non-packed data structures (e.g. `extern struct`)
//! have their fields reversed when represented as packed memory on such targets.
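//!
//! As an illustrative example: on a little-endian target, an `extern struct { a: u8, b: u16 }`
//! value flattens to the sequence `a`, one undefined padding byte, `b`; on a big-endian target the
//! same value flattens to `b`, one undefined padding byte, `a`.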
 
/// If `host_bits` is `0`, attempts to convert the memory at offset
/// `byte_offset` into `val` to a non-packed value of type `dest_ty`,
/// ignoring `bit_offset`.
///
/// Otherwise, `byte_offset` is an offset in bytes into `val` to a
/// non-packed value consisting of `host_bits` bits. A value of type
/// `dest_ty` will be interpreted at a packed offset of `bit_offset`
/// into this value.
///
/// Returns `null` if the operation must be performed at runtime.
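///
/// For example, the plain `@bitCast` path in `Sema.bitCast` calls this (via `Sema.bitCastVal`)
/// with `byte_offset == 0`, `host_bits == 0`, and `bit_offset == 0`.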
pub fn bitCast(
sema: *Sema,
val: Value,
dest_ty: Type,
byte_offset: u64,
host_bits: u64,
bit_offset: u64,
) CompileError!?Value {
return bitCastInner(sema, val, dest_ty, byte_offset, host_bits, bit_offset) catch |err| switch (err) {
error.ReinterpretDeclRef => return null,
error.IllDefinedMemoryLayout => unreachable,
error.Unimplemented => @panic("unimplemented bitcast"),
else => |e| return e,
};
}
 
/// Uses bitcasting to splice the value `splice_val` into `val`,
/// replacing overlapping bits and returning the modified value.
///
/// If `host_bits` is `0`, splices `splice_val` at an offset
/// `byte_offset` bytes into the virtual memory of `val`, ignoring
/// `bit_offset`.
///
/// Otherwise, `byte_offset` is an offset in bytes into `val` to
/// a non-packed value consisting of `host_bits` bits. The value
/// `splice_val` will be placed at a packed offset of `bit_offset`
/// into this value.
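///
/// For example (illustrative): splicing a `u4` into a `u8` value with `host_bits == 8` and
/// `bit_offset == 4` replaces the high four bits of the byte on a little-endian target while
/// leaving the low four bits unchanged.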
pub fn bitCastSplice(
sema: *Sema,
val: Value,
splice_val: Value,
byte_offset: u64,
host_bits: u64,
bit_offset: u64,
) CompileError!?Value {
return bitCastSpliceInner(sema, val, splice_val, byte_offset, host_bits, bit_offset) catch |err| switch (err) {
error.ReinterpretDeclRef => return null,
error.IllDefinedMemoryLayout => unreachable,
error.Unimplemented => @panic("unimplemented bitcast"),
else => |e| return e,
};
}
 
const BitCastError = CompileError || error{ ReinterpretDeclRef, IllDefinedMemoryLayout, Unimplemented };
 
fn bitCastInner(
sema: *Sema,
val: Value,
dest_ty: Type,
byte_offset: u64,
host_bits: u64,
bit_offset: u64,
) BitCastError!Value {
const zcu = sema.mod;
const endian = zcu.getTarget().cpu.arch.endian();
 
if (dest_ty.toIntern() == val.typeOf(zcu).toIntern() and bit_offset == 0) {
return val;
}
 
const val_ty = val.typeOf(zcu);
 
try sema.resolveTypeLayout(val_ty);
try sema.resolveTypeLayout(dest_ty);
 
assert(val_ty.hasWellDefinedLayout(zcu));
 
const abi_pad_bits, const host_pad_bits = if (host_bits > 0)
.{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) }
else
.{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 };
 
const skip_bits = switch (endian) {
.little => bit_offset + byte_offset * 8,
.big => if (host_bits > 0)
val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset
else
val_ty.abiSize(zcu) * 8 - byte_offset * 8 - dest_ty.bitSize(zcu),
};
 
var unpack: UnpackValueBits = .{
.zcu = zcu,
.arena = sema.arena,
.skip_bits = skip_bits,
.remaining_bits = dest_ty.bitSize(zcu),
.unpacked = std.ArrayList(InternPool.Index).init(sema.arena),
};
switch (endian) {
.little => {
try unpack.add(val);
try unpack.padding(abi_pad_bits);
},
.big => {
try unpack.padding(abi_pad_bits);
try unpack.add(val);
},
}
try unpack.padding(host_pad_bits);
 
var pack: PackValueBits = .{
.zcu = zcu,
.arena = sema.arena,
.unpacked = unpack.unpacked.items,
};
return pack.get(dest_ty);
}
 
fn bitCastSpliceInner(
sema: *Sema,
val: Value,
splice_val: Value,
byte_offset: u64,
host_bits: u64,
bit_offset: u64,
) BitCastError!Value {
const zcu = sema.mod;
const endian = zcu.getTarget().cpu.arch.endian();
const val_ty = val.typeOf(zcu);
const splice_val_ty = splice_val.typeOf(zcu);
 
try sema.resolveTypeLayout(val_ty);
try sema.resolveTypeLayout(splice_val_ty);
 
const splice_bits = splice_val_ty.bitSize(zcu);
 
const splice_offset = switch (endian) {
.little => bit_offset + byte_offset * 8,
.big => if (host_bits > 0)
val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset
else
val_ty.abiSize(zcu) * 8 - byte_offset * 8 - splice_bits,
};
 
assert(splice_offset + splice_bits <= val_ty.abiSize(zcu) * 8);
 
const abi_pad_bits, const host_pad_bits = if (host_bits > 0)
.{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) }
else
.{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 };
 
var unpack: UnpackValueBits = .{
.zcu = zcu,
.arena = sema.arena,
.skip_bits = 0,
.remaining_bits = splice_offset,
.unpacked = std.ArrayList(InternPool.Index).init(sema.arena),
};
switch (endian) {
.little => {
try unpack.add(val);
try unpack.padding(abi_pad_bits);
},
.big => {
try unpack.padding(abi_pad_bits);
try unpack.add(val);
},
}
try unpack.padding(host_pad_bits);
 
unpack.remaining_bits = splice_bits;
try unpack.add(splice_val);
 
unpack.skip_bits = splice_offset + splice_bits;
unpack.remaining_bits = val_ty.abiSize(zcu) * 8 - splice_offset - splice_bits;
switch (endian) {
.little => {
try unpack.add(val);
try unpack.padding(abi_pad_bits);
},
.big => {
try unpack.padding(abi_pad_bits);
try unpack.add(val);
},
}
try unpack.padding(host_pad_bits);
 
var pack: PackValueBits = .{
.zcu = zcu,
.arena = sema.arena,
.unpacked = unpack.unpacked.items,
};
switch (endian) {
.little => {},
.big => try pack.padding(abi_pad_bits),
}
return pack.get(val_ty);
}
 
/// Recurses through struct fields, array elements, etc, to get a sequence of "primitive" values
/// which are bit-packed in memory to represent a single value. `unpacked` represents a series
/// of values in *packed* memory - therefore, on big-endian targets, the first element of this
/// list contains bits from the *final* byte of the value.
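/// Only the window selected by `skip_bits` and `remaining_bits` is materialized. For example
/// (illustrative): with `skip_bits == 8` and `remaining_bits == 16`, adding a `u32` appends only
/// its middle sixteen bits (as a `u16` sub-value) to `unpacked`.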
const UnpackValueBits = struct {
zcu: *Zcu,
arena: Allocator,
skip_bits: u64,
remaining_bits: u64,
extra_bits: u64 = undefined,
unpacked: std.ArrayList(InternPool.Index),
 
fn add(unpack: *UnpackValueBits, val: Value) BitCastError!void {
const zcu = unpack.zcu;
const endian = zcu.getTarget().cpu.arch.endian();
const ip = &zcu.intern_pool;
 
if (unpack.remaining_bits == 0) {
return;
}
 
const ty = val.typeOf(zcu);
const bit_size = ty.bitSize(zcu);
 
if (unpack.skip_bits >= bit_size) {
unpack.skip_bits -= bit_size;
return;
}
 
switch (ip.indexToKey(val.toIntern())) {
.int_type,
.ptr_type,
.array_type,
.vector_type,
.opt_type,
.anyframe_type,
.error_union_type,
.simple_type,
.struct_type,
.anon_struct_type,
.union_type,
.opaque_type,
.enum_type,
.func_type,
.error_set_type,
.inferred_error_set_type,
.variable,
.extern_func,
.func,
.err,
.error_union,
.enum_literal,
.slice,
.memoized_call,
=> unreachable, // ill-defined layout or not real values
 
.undef,
.int,
.enum_tag,
.simple_value,
.empty_enum_value,
.float,
.ptr,
.opt,
=> try unpack.primitive(val),
 
.aggregate => switch (ty.zigTypeTag(zcu)) {
.Vector => {
const len: usize = @intCast(ty.arrayLen(zcu));
for (0..len) |i| {
// We reverse vector elements in packed memory on BE targets.
const real_idx = switch (endian) {
.little => i,
.big => len - i - 1,
};
const elem_val = try val.elemValue(zcu, real_idx);
try unpack.add(elem_val);
}
},
.Array => {
// Each element is padded up to its ABI size. Padding bits are undefined.
// The final element does not have trailing padding.
// Elements are reversed in packed memory on BE targets.
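// For example (illustrative): a `[3]bool` flattens to 17 bits of packed memory: a 1-bit
// element, 7 undefined padding bits, another element, 7 more padding bits, and the final
// element with no trailing padding.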
const elem_ty = ty.childType(zcu);
const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu);
const len = ty.arrayLen(zcu);
const maybe_sent = ty.sentinel(zcu);
 
if (endian == .big) if (maybe_sent) |s| {
try unpack.add(s);
if (len != 0) try unpack.padding(pad_bits);
};
 
for (0..@intCast(len)) |i| {
// We reverse array elements in packed memory on BE targets.
const real_idx = switch (endian) {
.little => i,
.big => len - i - 1,
};
const elem_val = try val.elemValue(zcu, @intCast(real_idx));
try unpack.add(elem_val);
if (i != len - 1) try unpack.padding(pad_bits);
}
 
if (endian == .little) if (maybe_sent) |s| {
if (len != 0) try unpack.padding(pad_bits);
try unpack.add(s);
};
},
.Struct => switch (ty.containerLayout(zcu)) {
.auto => unreachable, // ill-defined layout
.@"extern" => switch (endian) {
.little => {
var cur_bit_off: u64 = 0;
var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip);
while (it.next()) |field_idx| {
const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
const pad_bits = want_bit_off - cur_bit_off;
const field_val = try val.fieldValue(zcu, field_idx);
try unpack.padding(pad_bits);
try unpack.add(field_val);
cur_bit_off = want_bit_off + field_val.typeOf(zcu).bitSize(zcu);
}
// Add trailing padding bits.
try unpack.padding(bit_size - cur_bit_off);
},
.big => {
var cur_bit_off: u64 = bit_size;
var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip);
while (it.next()) |field_idx| {
const field_val = try val.fieldValue(zcu, field_idx);
const field_ty = field_val.typeOf(zcu);
const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
const pad_bits = cur_bit_off - want_bit_off;
try unpack.padding(pad_bits);
try unpack.add(field_val);
cur_bit_off = want_bit_off - field_ty.bitSize(zcu);
}
assert(cur_bit_off == 0);
},
},
.@"packed" => {
// Just add all fields in order. There are no padding bits.
// This is identical between LE and BE targets.
for (0..ty.structFieldCount(zcu)) |i| {
const field_val = try val.fieldValue(zcu, i);
try unpack.add(field_val);
}
},
},
else => unreachable,
},
 
.un => |un| {
// We actually don't care about the tag here!
// Instead, we just need to write the payload value, plus any necessary padding.
// This correctly handles the case where `tag == .none`, since the payload is then
// either an integer or a byte array, both of which we can unpack.
const payload_val = Value.fromInterned(un.val);
const pad_bits = bit_size - payload_val.typeOf(zcu).bitSize(zcu);
if (endian == .little or ty.containerLayout(zcu) == .@"packed") {
try unpack.add(payload_val);
try unpack.padding(pad_bits);
} else {
try unpack.padding(pad_bits);
try unpack.add(payload_val);
}
},
}
}
 
fn padding(unpack: *UnpackValueBits, pad_bits: u64) BitCastError!void {
if (pad_bits == 0) return;
const zcu = unpack.zcu;
// Figure out how many full bytes and leftover bits there are.
const bytes = pad_bits / 8;
const bits = pad_bits % 8;
// Add undef u8 values for the bytes...
const undef_u8 = try zcu.undefValue(Type.u8);
for (0..@intCast(bytes)) |_| {
try unpack.primitive(undef_u8);
}
// ...and an undef int for the leftover bits.
if (bits == 0) return;
const bits_ty = try zcu.intType(.unsigned, @intCast(bits));
const bits_val = try zcu.undefValue(bits_ty);
try unpack.primitive(bits_val);
}
 
fn primitive(unpack: *UnpackValueBits, val: Value) BitCastError!void {
const zcu = unpack.zcu;
 
if (unpack.remaining_bits == 0) {
return;
}
 
const ty = val.typeOf(zcu);
const bit_size = ty.bitSize(zcu);
 
// Note that this skips all zero-bit types.
if (unpack.skip_bits >= bit_size) {
unpack.skip_bits -= bit_size;
return;
}
 
if (unpack.skip_bits > 0) {
const skip = unpack.skip_bits;
unpack.skip_bits = 0;
return unpack.splitPrimitive(val, skip, bit_size - skip);
}
 
if (unpack.remaining_bits < bit_size) {
return unpack.splitPrimitive(val, 0, unpack.remaining_bits);
}
 
unpack.remaining_bits -|= bit_size;
 
try unpack.unpacked.append(val.toIntern());
}
 
fn splitPrimitive(unpack: *UnpackValueBits, val: Value, bit_offset: u64, bit_count: u64) BitCastError!void {
const zcu = unpack.zcu;
const ty = val.typeOf(zcu);
 
const val_bits = ty.bitSize(zcu);
assert(bit_offset + bit_count <= val_bits);
 
switch (zcu.intern_pool.indexToKey(val.toIntern())) {
// In the `ptr` case, this will return `error.ReinterpretDeclRef`
// if we're trying to split a non-integer pointer value.
.int, .float, .enum_tag, .ptr, .opt => {
// This @intCast is okay because no primitive can exceed the size of a u16.
const int_ty = try zcu.intType(.unsigned, @intCast(bit_count));
const buf = try unpack.arena.alloc(u8, @intCast((val_bits + 7) / 8));
try val.writeToPackedMemory(ty, zcu, buf, 0);
const sub_val = try Value.readFromPackedMemory(int_ty, zcu, buf, @intCast(bit_offset), unpack.arena);
try unpack.primitive(sub_val);
},
.undef => try unpack.padding(bit_count),
// The only values here with runtime bits are `true` and `false`.
// These are both 1 bit, so will never need truncating.
.simple_value => unreachable,
.empty_enum_value => unreachable, // zero-bit
else => unreachable, // zero-bit or not primitives
}
}
};
 
/// Given a sequence of bit-packed values in packed memory (see `UnpackValueBits`),
/// reconstructs a value of an arbitrary type, with correct handling of `undefined`
/// values and of pointers which align in virtual memory.
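///
/// For example (illustrative): if the requested bits consist of exactly one pointer value and the
/// requested type is also a pointer type, `get` returns that pointer coerced to the new type
/// rather than failing with `error.ReinterpretDeclRef`.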
const PackValueBits = struct {
zcu: *Zcu,
arena: Allocator,
bit_offset: u64 = 0,
unpacked: []const InternPool.Index,
 
fn get(pack: *PackValueBits, ty: Type) BitCastError!Value {
const zcu = pack.zcu;
const endian = zcu.getTarget().cpu.arch.endian();
const ip = &zcu.intern_pool;
const arena = pack.arena;
switch (ty.zigTypeTag(zcu)) {
.Vector => {
// Elements are bit-packed.
const len = ty.arrayLen(zcu);
const elem_ty = ty.childType(zcu);
const elems = try arena.alloc(InternPool.Index, @intCast(len));
// We reverse vector elements in packed memory on BE targets.
switch (endian) {
.little => for (elems) |*elem| {
elem.* = (try pack.get(elem_ty)).toIntern();
},
.big => {
var i = elems.len;
while (i > 0) {
i -= 1;
elems[i] = (try pack.get(elem_ty)).toIntern();
}
},
}
return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = elems },
} }));
},
.Array => {
// Each element is padded up to its ABI size. The final element does not have trailing padding.
const len = ty.arrayLen(zcu);
const elem_ty = ty.childType(zcu);
const maybe_sent = ty.sentinel(zcu);
const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu);
const elems = try arena.alloc(InternPool.Index, @intCast(len));
 
if (endian == .big and maybe_sent != null) {
// TODO: validate sentinel was preserved!
try pack.padding(elem_ty.bitSize(zcu));
if (len != 0) try pack.padding(pad_bits);
}
 
for (0..elems.len) |i| {
const real_idx = switch (endian) {
.little => i,
.big => len - i - 1,
};
elems[@intCast(real_idx)] = (try pack.get(elem_ty)).toIntern();
if (i != len - 1) try pack.padding(pad_bits);
}
 
if (endian == .little and maybe_sent != null) {
// TODO: validate sentinel was preserved!
if (len != 0) try pack.padding(pad_bits);
try pack.padding(elem_ty.bitSize(zcu));
}
 
return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = elems },
} }));
},
.Struct => switch (ty.containerLayout(zcu)) {
.auto => unreachable, // ill-defined layout
.@"extern" => {
const elems = try arena.alloc(InternPool.Index, ty.structFieldCount(zcu));
@memset(elems, .none);
switch (endian) {
.little => {
var cur_bit_off: u64 = 0;
var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip);
while (it.next()) |field_idx| {
const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
try pack.padding(want_bit_off - cur_bit_off);
const field_ty = ty.structFieldType(field_idx, zcu);
elems[field_idx] = (try pack.get(field_ty)).toIntern();
cur_bit_off = want_bit_off + field_ty.bitSize(zcu);
}
try pack.padding(ty.bitSize(zcu) - cur_bit_off);
},
.big => {
var cur_bit_off: u64 = ty.bitSize(zcu);
var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip);
while (it.next()) |field_idx| {
const field_ty = ty.structFieldType(field_idx, zcu);
const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
try pack.padding(cur_bit_off - want_bit_off);
elems[field_idx] = (try pack.get(field_ty)).toIntern();
cur_bit_off = want_bit_off - field_ty.bitSize(zcu);
}
assert(cur_bit_off == 0);
},
}
// Any fields which do not have runtime bits should be OPV or comptime fields.
// Fill those values now.
for (elems, 0..) |*elem, field_idx| {
if (elem.* != .none) continue;
const val = (try ty.structFieldValueComptime(zcu, field_idx)).?;
elem.* = val.toIntern();
}
return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = elems },
} }));
},
.@"packed" => {
// All fields are in order with no padding.
// This is identical between LE and BE targets.
const elems = try arena.alloc(InternPool.Index, ty.structFieldCount(zcu));
for (elems, 0..) |*elem, i| {
const field_ty = ty.structFieldType(i, zcu);
elem.* = (try pack.get(field_ty)).toIntern();
}
return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = elems },
} }));
},
},
.Union => {
// We will attempt to read as the backing representation. If this emits
// `error.ReinterpretDeclRef`, we will try each union field, preferring larger ones.
// We will also attempt smaller fields when we get `undefined`, as if some bits are
// defined we want to include them.
// TODO: this is very very bad. We need a more sophisticated union representation.
 
const prev_unpacked = pack.unpacked;
const prev_bit_offset = pack.bit_offset;
 
const backing_ty = try ty.unionBackingType(zcu);
 
backing: {
const backing_val = pack.get(backing_ty) catch |err| switch (err) {
error.ReinterpretDeclRef => {
pack.unpacked = prev_unpacked;
pack.bit_offset = prev_bit_offset;
break :backing;
},
else => |e| return e,
};
if (backing_val.isUndef(zcu)) {
pack.unpacked = prev_unpacked;
pack.bit_offset = prev_bit_offset;
break :backing;
}
return Value.fromInterned(try zcu.intern(.{ .un = .{
.ty = ty.toIntern(),
.tag = .none,
.val = backing_val.toIntern(),
} }));
}
 
const field_order = try pack.arena.alloc(u32, ty.unionTagTypeHypothetical(zcu).enumFieldCount(zcu));
for (field_order, 0..) |*f, i| f.* = @intCast(i);
// Sort `field_order` to put the fields with the largest bit sizes first.
const SizeSortCtx = struct {
zcu: *Zcu,
field_types: []const InternPool.Index,
fn lessThan(ctx: @This(), a_idx: u32, b_idx: u32) bool {
const a_ty = Type.fromInterned(ctx.field_types[a_idx]);
const b_ty = Type.fromInterned(ctx.field_types[b_idx]);
return a_ty.bitSize(ctx.zcu) > b_ty.bitSize(ctx.zcu);
}
};
std.mem.sortUnstable(u32, field_order, SizeSortCtx{
.zcu = zcu,
.field_types = zcu.typeToUnion(ty).?.field_types.get(ip),
}, SizeSortCtx.lessThan);
 
const padding_after = endian == .little or ty.containerLayout(zcu) == .@"packed";
 
for (field_order) |field_idx| {
const field_ty = Type.fromInterned(zcu.typeToUnion(ty).?.field_types.get(ip)[field_idx]);
const pad_bits = ty.bitSize(zcu) - field_ty.bitSize(zcu);
if (!padding_after) try pack.padding(pad_bits);
const field_val = pack.get(field_ty) catch |err| switch (err) {
error.ReinterpretDeclRef => {
pack.unpacked = prev_unpacked;
pack.bit_offset = prev_bit_offset;
continue;
},
else => |e| return e,
};
if (padding_after) try pack.padding(pad_bits);
if (field_val.isUndef(zcu)) {
pack.unpacked = prev_unpacked;
pack.bit_offset = prev_bit_offset;
continue;
}
const tag_val = try zcu.enumValueFieldIndex(ty.unionTagTypeHypothetical(zcu), field_idx);
return Value.fromInterned(try zcu.intern(.{ .un = .{
.ty = ty.toIntern(),
.tag = tag_val.toIntern(),
.val = field_val.toIntern(),
} }));
}
 
// No field could represent the value. Just do whatever happens when we try to read
// the backing type - either `undefined` or `error.ReinterpretDeclRef`.
const backing_val = try pack.get(backing_ty);
return Value.fromInterned(try zcu.intern(.{ .un = .{
.ty = ty.toIntern(),
.tag = .none,
.val = backing_val.toIntern(),
} }));
},
else => return pack.primitive(ty),
}
}
 
fn padding(pack: *PackValueBits, pad_bits: u64) BitCastError!void {
_ = pack.prepareBits(pad_bits);
}
 
fn primitive(pack: *PackValueBits, want_ty: Type) BitCastError!Value {
const zcu = pack.zcu;
const vals, const bit_offset = pack.prepareBits(want_ty.bitSize(zcu));
 
for (vals) |val| {
if (!Value.fromInterned(val).isUndef(zcu)) break;
} else {
// All bits of the value are `undefined`.
return zcu.undefValue(want_ty);
}
 
// TODO: we need to decide how to handle partially-undef values here.
// Currently, a value with some undefined bits becomes `0xAA` so that we
// preserve the well-defined bits, because we can't currently represent
// a partially-undefined primitive (e.g. an int with some undef bits).
// In future, we probably want to take one of these two routes:
// * Define that if any bits are `undefined`, the entire value is `undefined`.
// This is a major breaking change, and probably a footgun.
// * Introduce tracking for partially-undef values at comptime.
// This would complicate a lot of operations in Sema, such as basic
// arithmetic.
// This design complexity is tracked by #19634.
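// As an illustrative example of the current behaviour: reading a `u16` whose low byte was
// written as `0x12` but whose high byte is undef yields `0xAA12` on a little-endian target.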
 
ptr_cast: {
if (vals.len != 1) break :ptr_cast;
const val = Value.fromInterned(vals[0]);
if (!val.typeOf(zcu).isPtrAtRuntime(zcu)) break :ptr_cast;
if (!want_ty.isPtrAtRuntime(zcu)) break :ptr_cast;
return zcu.getCoerced(val, want_ty);
}
 
// Reinterpret via an in-memory buffer.
 
var buf_bits: u64 = 0;
for (vals) |ip_val| {
const val = Value.fromInterned(ip_val);
const ty = val.typeOf(zcu);
buf_bits += ty.bitSize(zcu);
}
 
const buf = try pack.arena.alloc(u8, @intCast((buf_bits + 7) / 8));
// We skip writing undefined values below, so the buffer is pre-filled with `0xAA` to stand in for "undefined" bits.
@memset(buf, 0xAA);
var cur_bit_off: usize = 0;
for (vals) |ip_val| {
const val = Value.fromInterned(ip_val);
const ty = val.typeOf(zcu);
if (!val.isUndef(zcu)) {
try val.writeToPackedMemory(ty, zcu, buf, cur_bit_off);
}
cur_bit_off += @intCast(ty.bitSize(zcu));
}
 
return Value.readFromPackedMemory(want_ty, zcu, buf, @intCast(bit_offset), pack.arena);
}
 
fn prepareBits(pack: *PackValueBits, need_bits: u64) struct { []const InternPool.Index, u64 } {
if (need_bits == 0) return .{ &.{}, 0 };
 
const zcu = pack.zcu;
 
var bits: u64 = 0;
var len: usize = 0;
while (bits < pack.bit_offset + need_bits) {
bits += Value.fromInterned(pack.unpacked[len]).typeOf(zcu).bitSize(zcu);
len += 1;
}
 
const result_vals = pack.unpacked[0..len];
const result_offset = pack.bit_offset;
 
const extra_bits = bits - pack.bit_offset - need_bits;
if (extra_bits == 0) {
pack.unpacked = pack.unpacked[len..];
pack.bit_offset = 0;
} else {
pack.unpacked = pack.unpacked[len - 1 ..];
pack.bit_offset = Value.fromInterned(pack.unpacked[0]).typeOf(zcu).bitSize(zcu) - extra_bits;
}
 
return .{ result_vals, result_offset };
}
};
 
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
 
const Sema = @import("../Sema.zig");
const Zcu = @import("../Module.zig");
const InternPool = @import("../InternPool.zig");
const Type = @import("../type.zig").Type;
const Value = @import("../Value.zig");
const CompileError = Zcu.CompileError;
 
src/Sema/comptime_ptr_access.zig added: 4888, removed: 2579, total 2309
@@ -0,0 +1,1059 @@
pub const ComptimeLoadResult = union(enum) {
success: MutableValue,
 
runtime_load,
undef,
err_payload: InternPool.NullTerminatedString,
null_payload,
inactive_union_field,
needed_well_defined: Type,
out_of_bounds: Type,
exceeds_host_size,
};
 
pub fn loadComptimePtr(sema: *Sema, block: *Block, src: LazySrcLoc, ptr: Value) !ComptimeLoadResult {
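// For a bit-pointer (non-zero host size), the packed bit offset is computed here and the rest is
// delegated to `loadComptimePtrInner`. As an illustrative example: for an element pointer into a
// `@Vector(4, u8)` with `vector_index == 1`, `host_bits` is 32; the element sits at `bit_offset` 8
// on a little-endian target and at 16 on a big-endian target, where element order is reversed.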
const zcu = sema.mod;
const ptr_info = ptr.typeOf(zcu).ptrInfo(zcu);
// TODO: host size for vectors is terrible
const host_bits = switch (ptr_info.flags.vector_index) {
.none => ptr_info.packed_offset.host_size * 8,
else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(zcu),
};
const bit_offset = if (host_bits != 0) bit_offset: {
const child_bits = Type.fromInterned(ptr_info.child).bitSize(zcu);
const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
.none => 0,
.runtime => return .runtime_load,
else => |idx| switch (zcu.getTarget().cpu.arch.endian()) {
.little => child_bits * @intFromEnum(idx),
.big => host_bits - child_bits * (@intFromEnum(idx) + 1), // element order reversed on big endian
},
};
if (child_bits + bit_offset > host_bits) {
return .exceeds_host_size;
}
break :bit_offset bit_offset;
} else 0;
return loadComptimePtrInner(sema, block, src, ptr, bit_offset, host_bits, Type.fromInterned(ptr_info.child), 0);
}
 
pub const ComptimeStoreResult = union(enum) {
success,
 
runtime_store,
comptime_field_mismatch: Value,
undef,
err_payload: InternPool.NullTerminatedString,
null_payload,
inactive_union_field,
needed_well_defined: Type,
out_of_bounds: Type,
exceeds_host_size,
};
 
/// Perform a comptime store of value `store_val` to a pointer.
/// The pointer's type is ignored.
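/// A store which targets a comptime field mutates nothing: it succeeds only if the stored value
/// already equals the field's value, and otherwise `.comptime_field_mismatch` is returned along
/// with the expected value.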
pub fn storeComptimePtr(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ptr: Value,
store_val: Value,
) !ComptimeStoreResult {
const zcu = sema.mod;
const ptr_info = ptr.typeOf(zcu).ptrInfo(zcu);
assert(store_val.typeOf(zcu).toIntern() == ptr_info.child);
// TODO: host size for vectors is terrible
const host_bits = switch (ptr_info.flags.vector_index) {
.none => ptr_info.packed_offset.host_size * 8,
else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(zcu),
};
const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
.none => 0,
.runtime => return .runtime_store,
else => |idx| switch (zcu.getTarget().cpu.arch.endian()) {
.little => Type.fromInterned(ptr_info.child).bitSize(zcu) * @intFromEnum(idx),
.big => host_bits - Type.fromInterned(ptr_info.child).bitSize(zcu) * (@intFromEnum(idx) + 1), // element order reversed on big endian
},
};
const pseudo_store_ty = if (host_bits > 0) t: {
const need_bits = Type.fromInterned(ptr_info.child).bitSize(zcu);
if (need_bits + bit_offset > host_bits) {
return .exceeds_host_size;
}
break :t try zcu.intType(.unsigned, @intCast(host_bits));
} else Type.fromInterned(ptr_info.child);
 
const strat = try prepareComptimePtrStore(sema, block, src, ptr, pseudo_store_ty, 0);
 
// Propagate errors and handle comptime fields.
switch (strat) {
.direct, .index, .flat_index, .reinterpret => {},
.comptime_field => {
// To "store" to a comptime field, just perform a load of the field
// and see if the store value matches.
const expected_mv = switch (try loadComptimePtr(sema, block, src, ptr)) {
.success => |mv| mv,
.runtime_load => unreachable, // this is a comptime field
.exceeds_host_size => unreachable, // checked above
.undef => return .undef,
.err_payload => |err| return .{ .err_payload = err },
.null_payload => return .null_payload,
.inactive_union_field => return .inactive_union_field,
.needed_well_defined => |ty| return .{ .needed_well_defined = ty },
.out_of_bounds => |ty| return .{ .out_of_bounds = ty },
};
const expected = try expected_mv.intern(zcu, sema.arena);
if (store_val.toIntern() != expected.toIntern()) {
return .{ .comptime_field_mismatch = expected };
}
return .success;
},
.runtime_store => return .runtime_store,
.undef => return .undef,
.err_payload => |err| return .{ .err_payload = err },
.null_payload => return .null_payload,
.inactive_union_field => return .inactive_union_field,
.needed_well_defined => |ty| return .{ .needed_well_defined = ty },
.out_of_bounds => |ty| return .{ .out_of_bounds = ty },
}
 
// Check the store is not inside a runtime condition
try checkComptimeVarStore(sema, block, src, strat.alloc());
 
if (host_bits == 0) {
// We can attempt a direct store depending on the strategy.
switch (strat) {
.direct => |direct| {
const want_ty = direct.val.typeOf(zcu);
const coerced_store_val = try zcu.getCoerced(store_val, want_ty);
direct.val.* = .{ .interned = coerced_store_val.toIntern() };
return .success;
},
.index => |index| {
const want_ty = index.val.typeOf(zcu).childType(zcu);
const coerced_store_val = try zcu.getCoerced(store_val, want_ty);
try index.val.setElem(zcu, sema.arena, @intCast(index.elem_index), .{ .interned = coerced_store_val.toIntern() });
return .success;
},
.flat_index => |flat| {
const store_elems = store_val.typeOf(zcu).arrayBase(zcu)[1];
const flat_elems = try sema.arena.alloc(InternPool.Index, @intCast(store_elems));
{
var next_idx: u64 = 0;
var skip: u64 = 0;
try flattenArray(sema, .{ .interned = store_val.toIntern() }, &skip, &next_idx, flat_elems);
}
for (flat_elems, 0..) |elem, idx| {
// TODO: recursiveIndex in a loop does a lot of redundant work!
// Better would be to gather all the store targets into an array.
var index: u64 = flat.flat_elem_index + idx;
const val_ptr, const final_idx = (try recursiveIndex(sema, flat.val, &index)).?;
try val_ptr.setElem(zcu, sema.arena, @intCast(final_idx), .{ .interned = elem });
}
return .success;
},
.reinterpret => {},
else => unreachable,
}
}
 
// Either there is a bit offset, or the strategy required reinterpreting.
// Therefore, we must perform a bitcast.
 
const val_ptr: *MutableValue, const byte_offset: u64 = switch (strat) {
.direct => |direct| .{ direct.val, 0 },
.index => |index| .{
index.val,
index.elem_index * index.val.typeOf(zcu).childType(zcu).abiSize(zcu),
},
.flat_index => |flat| .{ flat.val, flat.flat_elem_index * flat.val.typeOf(zcu).arrayBase(zcu)[0].abiSize(zcu) },
.reinterpret => |reinterpret| .{ reinterpret.val, reinterpret.byte_offset },
else => unreachable,
};
 
if (!val_ptr.typeOf(zcu).hasWellDefinedLayout(zcu)) {
return .{ .needed_well_defined = val_ptr.typeOf(zcu) };
}
 
if (!store_val.typeOf(zcu).hasWellDefinedLayout(zcu)) {
return .{ .needed_well_defined = store_val.typeOf(zcu) };
}
 
const new_val = try sema.bitCastSpliceVal(
try val_ptr.intern(zcu, sema.arena),
store_val,
byte_offset,
host_bits,
bit_offset,
) orelse return .runtime_store;
val_ptr.* = .{ .interned = new_val.toIntern() };
return .success;
}
 
/// Perform a comptime load of type `load_ty` from a pointer.
/// The pointer's type is ignored.
fn loadComptimePtrInner(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ptr_val: Value,
bit_offset: u64,
host_bits: u64,
load_ty: Type,
/// If `load_ty` is an array, this is the number of array elements to skip
/// before `load_ty`. Otherwise, it is ignored and may be `undefined`.
array_offset: u64,
) !ComptimeLoadResult {
const zcu = sema.mod;
const ip = &zcu.intern_pool;
 
const ptr = switch (ip.indexToKey(ptr_val.toIntern())) {
.undef => return .undef,
.ptr => |ptr| ptr,
else => unreachable,
};
 
const base_val: MutableValue = switch (ptr.base_addr) {
.decl => |decl_index| val: {
try sema.declareDependency(.{ .decl_val = decl_index });
try sema.ensureDeclAnalyzed(decl_index);
const decl = zcu.declPtr(decl_index);
if (decl.val.getVariable(zcu) != null) return .runtime_load;
break :val .{ .interned = decl.val.toIntern() };
},
.comptime_alloc => |alloc_index| sema.getComptimeAlloc(alloc_index).val,
.anon_decl => |anon_decl| .{ .interned = anon_decl.val },
.comptime_field => |val| .{ .interned = val },
.int => return .runtime_load,
.eu_payload => |base_ptr_ip| val: {
const base_ptr = Value.fromInterned(base_ptr_ip);
const base_ty = base_ptr.typeOf(zcu).childType(zcu);
switch (try loadComptimePtrInner(sema, block, src, base_ptr, 0, 0, base_ty, undefined)) {
.success => |eu_val| switch (eu_val.unpackErrorUnion(zcu)) {
.undef => return .undef,
.err => |err| return .{ .err_payload = err },
.payload => |payload| break :val payload,
},
else => |err| return err,
}
},
.opt_payload => |base_ptr_ip| val: {
const base_ptr = Value.fromInterned(base_ptr_ip);
const base_ty = base_ptr.typeOf(zcu).childType(zcu);
switch (try loadComptimePtrInner(sema, block, src, base_ptr, 0, 0, base_ty, undefined)) {
.success => |eu_val| switch (eu_val.unpackOptional(zcu)) {
.undef => return .undef,
.null => return .null_payload,
.payload => |payload| break :val payload,
},
else => |err| return err,
}
},
.arr_elem => |base_index| val: {
const base_ptr = Value.fromInterned(base_index.base);
const base_ty = base_ptr.typeOf(zcu).childType(zcu);
 
// We have a comptime-only array. This case is a little nasty.
// To avoid loading too much data, we want to figure out how many elements we need.
// If `load_ty` and the array share a base type, we'll load the correct number of elements.
// Otherwise, we'll be reinterpreting (which we can't do, since it's comptime-only); just
// load a single element and let the logic below emit its error.
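// For example, if the base pointer addresses element 1 of an array of `type` and `load_ty`
// is `[2]type`, the base types match, so we recursively load a `[2]type` starting at element 1
// instead of attempting a reinterpret.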
 
const load_one_ty, const load_count = load_ty.arrayBase(zcu);
const count = if (load_one_ty.toIntern() == base_ty.toIntern()) load_count else 1;
 
const want_ty = try zcu.arrayType(.{
.len = count,
.child = base_ty.toIntern(),
});
 
switch (try loadComptimePtrInner(sema, block, src, base_ptr, 0, 0, want_ty, base_index.index)) {
.success => |arr_val| break :val arr_val,
else => |err| return err,
}
},
.field => |base_index| val: {
const base_ptr = Value.fromInterned(base_index.base);
const base_ty = base_ptr.typeOf(zcu).childType(zcu);
 
// Field of a slice, or of an auto-layout struct or union.
const agg_val = switch (try loadComptimePtrInner(sema, block, src, base_ptr, 0, 0, base_ty, undefined)) {
.success => |val| val,
else => |err| return err,
};
 
const agg_ty = agg_val.typeOf(zcu);
switch (agg_ty.zigTypeTag(zcu)) {
.Struct, .Pointer => break :val try agg_val.getElem(zcu, @intCast(base_index.index)),
.Union => {
const tag_val: Value, const payload_mv: MutableValue = switch (agg_val) {
.un => |un| .{ Value.fromInterned(un.tag), un.payload.* },
.interned => |ip_index| switch (ip.indexToKey(ip_index)) {
.undef => return .undef,
.un => |un| .{ Value.fromInterned(un.tag), .{ .interned = un.val } },
else => unreachable,
},
else => unreachable,
};
const tag_ty = agg_ty.unionTagTypeHypothetical(zcu);
if (tag_ty.enumTagFieldIndex(tag_val, zcu).? != base_index.index) {
return .inactive_union_field;
}
break :val payload_mv;
},
else => unreachable,
}
 
},
};
 
if (ptr.byte_offset == 0 and host_bits == 0) {
if (load_ty.zigTypeTag(zcu) != .Array or array_offset == 0) {
if (.ok == try sema.coerceInMemoryAllowed(
block,
load_ty,
base_val.typeOf(zcu),
false,
zcu.getTarget(),
src,
src,
)) {
// We already have a value which is IMC to the desired type.
return .{ .success = base_val };
}
}
}
 
restructure_array: {
if (host_bits != 0) break :restructure_array;
 
// We might also be changing the length of an array, or restructuring it.
// e.g. [1][2][3]T -> [3][2]T.
// This case is important because it's permitted for types with ill-defined layouts.
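// For example, a `[6]u8` can be loaded from a value of type `[2][3]u8`: both have the array
// base type `u8`, so the six scalar elements are flattened from the stored value and
// reassembled into the requested shape below.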
 
const load_one_ty, const load_count = load_ty.arrayBase(zcu);
 
const extra_base_index: u64 = if (ptr.byte_offset == 0) 0 else idx: {
if (try sema.typeRequiresComptime(load_one_ty)) break :restructure_array;
const elem_len = try sema.typeAbiSize(load_one_ty);
if (ptr.byte_offset % elem_len != 0) break :restructure_array;
break :idx @divExact(ptr.byte_offset, elem_len);
};
 
const val_one_ty, const val_count = base_val.typeOf(zcu).arrayBase(zcu);
if (.ok == try sema.coerceInMemoryAllowed(
block,
load_one_ty,
val_one_ty,
false,
zcu.getTarget(),
src,
src,
)) {
// Changing the length of an array.
const skip_base: u64 = extra_base_index + if (load_ty.zigTypeTag(zcu) == .Array) skip: {
break :skip load_ty.childType(zcu).arrayBase(zcu)[1] * array_offset;
} else 0;
if (skip_base + load_count > val_count) return .{ .out_of_bounds = base_val.typeOf(zcu) };
const elems = try sema.arena.alloc(InternPool.Index, @intCast(load_count));
var skip: u64 = skip_base;
var next_idx: u64 = 0;
try flattenArray(sema, base_val, &skip, &next_idx, elems);
next_idx = 0;
const val = try unflattenArray(sema, load_ty, elems, &next_idx);
return .{ .success = .{ .interned = val.toIntern() } };
}
}
 
// We need to reinterpret memory, which is only possible if neither the load
// type nor the type of the base value is comptime-only.
 
if (!load_ty.hasWellDefinedLayout(zcu)) {
return .{ .needed_well_defined = load_ty };
}
 
if (!base_val.typeOf(zcu).hasWellDefinedLayout(zcu)) {
return .{ .needed_well_defined = base_val.typeOf(zcu) };
}
 
var cur_val = base_val;
var cur_offset = ptr.byte_offset;
 
if (load_ty.zigTypeTag(zcu) == .Array and array_offset > 0) {
cur_offset += try sema.typeAbiSize(load_ty.childType(zcu)) * array_offset;
}
 
const need_bytes = if (host_bits > 0) (host_bits + 7) / 8 else try sema.typeAbiSize(load_ty);
 
if (cur_offset + need_bytes > try sema.typeAbiSize(cur_val.typeOf(zcu))) {
return .{ .out_of_bounds = cur_val.typeOf(zcu) };
}
 
// In the worst case, we can reinterpret the entire value - however, that's
// pretty wasteful. If the memory region we're interested in refers to one
// field or array element, let's just look at that.
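// For example, when loading a `u8` from byte offset 12 of a `[4]extern struct { x: u32, y: u8 }`
// (each element is 8 bytes), this loop narrows first to element 1 and then to its `y` field,
// leaving an offset of 0 so the fast path below can return the field value directly.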
while (true) {
const cur_ty = cur_val.typeOf(zcu);
switch (cur_ty.zigTypeTag(zcu)) {
.NoReturn,
.Type,
.ComptimeInt,
.ComptimeFloat,
.Null,
.Undefined,
.EnumLiteral,
.Opaque,
.Fn,
.ErrorUnion,
=> unreachable, // ill-defined layout
.Int,
.Float,
.Bool,
.Void,
.Pointer,
.ErrorSet,
.AnyFrame,
.Frame,
.Enum,
.Vector,
=> break, // terminal types (no sub-values)
.Optional => break, // this can only be a pointer-like optional so is terminal
.Array => {
const elem_ty = cur_ty.childType(zcu);
const elem_size = try sema.typeAbiSize(elem_ty);
const elem_idx = cur_offset / elem_size;
const next_elem_off = elem_size * (elem_idx + 1);
if (cur_offset + need_bytes <= next_elem_off) {
// We can look at a single array element.
cur_val = try cur_val.getElem(zcu, @intCast(elem_idx));
cur_offset -= elem_idx * elem_size;
} else {
break;
}
},
.Struct => switch (cur_ty.containerLayout(zcu)) {
.auto => unreachable, // ill-defined layout
.@"packed" => break, // let the bitcast logic handle this
.@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
const start_off = cur_ty.structFieldOffset(field_idx, zcu);
const end_off = start_off + try sema.typeAbiSize(cur_ty.structFieldType(field_idx, zcu));
if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
cur_val = try cur_val.getElem(zcu, field_idx);
cur_offset -= start_off;
break;
}
} else break, // pointer spans multiple fields
},
.Union => switch (cur_ty.containerLayout(zcu)) {
.auto => unreachable, // ill-defined layout
.@"packed" => break, // let the bitcast logic handle this
.@"extern" => {
// TODO: we have to let bitcast logic handle this for now.
// Otherwise, we might traverse into a union field which doesn't allow pointers.
// Figure out a solution!
if (true) break;
const payload: MutableValue = switch (cur_val) {
.un => |un| un.payload.*,
.interned => |ip_index| switch (ip.indexToKey(ip_index)) {
.un => |un| .{ .interned = un.val },
.undef => return .undef,
else => unreachable,
},
else => unreachable,
};
// The payload always has offset 0. If it's big enough
// to represent the whole load type, we can use it.
if (try sema.typeAbiSize(payload.typeOf(zcu)) >= need_bytes) {
cur_val = payload;
} else {
break;
}
},
},
}
}
 
// Fast path: check again if we're now at the type we want to load.
// If so, just return the loaded value.
if (cur_offset == 0 and host_bits == 0 and cur_val.typeOf(zcu).toIntern() == load_ty.toIntern()) {
return .{ .success = cur_val };
}
 
const result_val = try sema.bitCastVal(
try cur_val.intern(zcu, sema.arena),
load_ty,
cur_offset,
host_bits,
bit_offset,
) orelse return .runtime_load;
return .{ .success = .{ .interned = result_val.toIntern() } };
}
 
const ComptimeStoreStrategy = union(enum) {
/// The store should be performed directly to this value, which `store_ty`
/// is in-memory coercible to.
direct: struct {
alloc: ComptimeAllocIndex,
val: *MutableValue,
},
/// The store should be performed at the index `elem_index` into `val`,
/// which is an array.
/// This strategy exists to avoid the need to convert the parent value
/// to the `aggregate` representation when `repeated` or `bytes` may
/// suffice.
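/// For example, storing one `u8` into a `[256]u8` kept in the `bytes` representation can go
/// through `setElem` without expanding the value into 256 individually interned elements.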
index: struct {
alloc: ComptimeAllocIndex,
val: *MutableValue,
elem_index: u64,
},
/// The store should be performed on this array value, but it is being
/// restructured, e.g. [3][2][1]T -> [2][3]T.
/// This includes the case where it is a sub-array, e.g. [3]T -> [2]T.
/// This is only returned if `store_ty` is an array type, and its array
/// base type is IMC to that of the type of `val`.
flat_index: struct {
alloc: ComptimeAllocIndex,
val: *MutableValue,
flat_elem_index: u64,
},
/// This value should be reinterpreted using bitcast logic to perform the
/// store. Only returned if `store_ty` and the type of `val` both have
/// well-defined layouts.
reinterpret: struct {
alloc: ComptimeAllocIndex,
val: *MutableValue,
byte_offset: u64,
},
 
comptime_field,
runtime_store,
undef,
err_payload: InternPool.NullTerminatedString,
null_payload,
inactive_union_field,
needed_well_defined: Type,
out_of_bounds: Type,
 
fn alloc(strat: ComptimeStoreStrategy) ComptimeAllocIndex {
return switch (strat) {
inline .direct, .index, .flat_index, .reinterpret => |info| info.alloc,
.comptime_field,
.runtime_store,
.undef,
.err_payload,
.null_payload,
.inactive_union_field,
.needed_well_defined,
.out_of_bounds,
=> unreachable,
};
}
};
 
/// Decide the strategy we will use to perform a comptime store of type `store_ty` to a pointer.
/// The pointer's type is ignored.
fn prepareComptimePtrStore(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ptr_val: Value,
store_ty: Type,
/// If `store_ty` is an array, this is the number of array elements to skip
/// before `store_ty`. Otherwise, it is ignored and may be `undefined`.
array_offset: u64,
) !ComptimeStoreStrategy {
const zcu = sema.mod;
const ip = &zcu.intern_pool;
 
const ptr = switch (ip.indexToKey(ptr_val.toIntern())) {
.undef => return .undef,
.ptr => |ptr| ptr,
else => unreachable,
};
 
// `base_strat` will not be an error case.
const base_strat: ComptimeStoreStrategy = switch (ptr.base_addr) {
.decl, .anon_decl, .int => return .runtime_store,
.comptime_field => return .comptime_field,
.comptime_alloc => |alloc_index| .{ .direct = .{
.alloc = alloc_index,
.val = &sema.getComptimeAlloc(alloc_index).val,
} },
.eu_payload => |base_ptr_ip| base_val: {
const base_ptr = Value.fromInterned(base_ptr_ip);
const base_ty = base_ptr.typeOf(zcu).childType(zcu);
const eu_val_ptr, const alloc = switch (try prepareComptimePtrStore(sema, block, src, base_ptr, base_ty, undefined)) {
.direct => |direct| .{ direct.val, direct.alloc },
.index => |index| .{
try index.val.elem(zcu, sema.arena, @intCast(index.elem_index)),
index.alloc,
},
.flat_index => unreachable, // base_ty is not an array
.reinterpret => unreachable, // base_ty has ill-defined layout
else => |err| return err,
};
try eu_val_ptr.unintern(zcu, sema.arena, false, false);
switch (eu_val_ptr.*) {
.interned => |ip_index| switch (ip.indexToKey(ip_index)) {
.undef => return .undef,
.error_union => |eu| return .{ .err_payload = eu.val.err_name },
else => unreachable,
},
.eu_payload => |data| break :base_val .{ .direct = .{
.val = data.child,
.alloc = alloc,
} },
else => unreachable,
}
},
.opt_payload => |base_ptr_ip| base_val: {
const base_ptr = Value.fromInterned(base_ptr_ip);
const base_ty = base_ptr.typeOf(zcu).childType(zcu);
const opt_val_ptr, const alloc = switch (try prepareComptimePtrStore(sema, block, src, base_ptr, base_ty, undefined)) {
.direct => |direct| .{ direct.val, direct.alloc },
.index => |index| .{
try index.val.elem(zcu, sema.arena, @intCast(index.elem_index)),
index.alloc,
},
.flat_index => unreachable, // base_ty is not an array
.reinterpret => unreachable, // base_ty has ill-defined layout
else => |err| return err,
};
try opt_val_ptr.unintern(zcu, sema.arena, false, false);
switch (opt_val_ptr.*) {
.interned => |ip_index| switch (ip.indexToKey(ip_index)) {
.undef => return .undef,
.opt => return .null_payload,
else => unreachable,
},
.opt_payload => |data| break :base_val .{ .direct = .{
.val = data.child,
.alloc = alloc,
} },
else => unreachable,
}
},
.arr_elem => |base_index| base_val: {
const base_ptr = Value.fromInterned(base_index.base);
const base_ty = base_ptr.typeOf(zcu).childType(zcu);
 
// We have a comptime-only array. This case is a little nasty.
// To avoid messing with too much data, we want to figure out how many elements we need to store.
// If `store_ty` and the array share a base type, we'll store the correct number of elements.
// Otherwise, we'll be reinterpreting (which we can't do, since it's comptime-only); just
// use a single element and let the logic below emit its error.
 
const store_one_ty, const store_count = store_ty.arrayBase(zcu);
const count = if (store_one_ty.toIntern() == base_ty.toIntern()) store_count else 1;
 
const want_ty = try zcu.arrayType(.{
.len = count,
.child = base_ty.toIntern(),
});
 
const result = try prepareComptimePtrStore(sema, block, src, base_ptr, want_ty, base_index.index);
switch (result) {
.direct, .index, .flat_index => break :base_val result,
.reinterpret => unreachable, // comptime-only array so ill-defined layout
else => |err| return err,
}
},
.field => |base_index| strat: {
const base_ptr = Value.fromInterned(base_index.base);
const base_ty = base_ptr.typeOf(zcu).childType(zcu);
 
// Field of a slice, or of an auto-layout struct or union.
const agg_val, const alloc = switch (try prepareComptimePtrStore(sema, block, src, base_ptr, base_ty, undefined)) {
.direct => |direct| .{ direct.val, direct.alloc },
.index => |index| .{
try index.val.elem(zcu, sema.arena, @intCast(index.elem_index)),
index.alloc,
},
.flat_index => unreachable, // base_ty is not an array
.reinterpret => unreachable, // base_ty has ill-defined layout
else => |err| return err,
};
 
const agg_ty = agg_val.typeOf(zcu);
switch (agg_ty.zigTypeTag(zcu)) {
.Struct, .Pointer => break :strat .{ .direct = .{
.val = try agg_val.elem(zcu, sema.arena, @intCast(base_index.index)),
.alloc = alloc,
} },
.Union => {
if (agg_val.* == .interned and Value.fromInterned(agg_val.interned).isUndef(zcu)) {
return .undef;
}
try agg_val.unintern(zcu, sema.arena, false, false);
const un = agg_val.un;
const tag_ty = agg_ty.unionTagTypeHypothetical(zcu);
if (tag_ty.enumTagFieldIndex(Value.fromInterned(un.tag), zcu).? != base_index.index) {
return .inactive_union_field;
}
break :strat .{ .direct = .{
.val = un.payload,
.alloc = alloc,
} };
},
else => unreachable,
}
},
};
 
if (ptr.byte_offset == 0) {
if (store_ty.zigTypeTag(zcu) != .Array or array_offset == 0) direct: {
const base_val_ty = switch (base_strat) {
.direct => |direct| direct.val.typeOf(zcu),
.index => |index| index.val.typeOf(zcu).childType(zcu),
.flat_index, .reinterpret => break :direct,
else => unreachable,
};
if (.ok == try sema.coerceInMemoryAllowed(
block,
base_val_ty,
store_ty,
true,
zcu.getTarget(),
src,
src,
)) {
// The base strategy already gets us a value which the desired type is IMC to.
return base_strat;
}
}
}
 
restructure_array: {
// We might also be changing the length of an array, or restructuring it.
// e.g. [1][2][3]T -> [3][2]T.
// This case is important because it's permitted for types with ill-defined layouts.
 
const store_one_ty, const store_count = store_ty.arrayBase(zcu);
const extra_base_index: u64 = if (ptr.byte_offset == 0) 0 else idx: {
if (try sema.typeRequiresComptime(store_one_ty)) break :restructure_array;
const elem_len = try sema.typeAbiSize(store_one_ty);
if (ptr.byte_offset % elem_len != 0) break :restructure_array;
break :idx @divExact(ptr.byte_offset, elem_len);
};
 
const base_val, const base_elem_offset, const oob_ty = switch (base_strat) {
.direct => |direct| .{ direct.val, 0, direct.val.typeOf(zcu) },
.index => |index| restructure_info: {
const elem_ty = index.val.typeOf(zcu).childType(zcu);
const elem_off = elem_ty.arrayBase(zcu)[1] * index.elem_index;
break :restructure_info .{ index.val, elem_off, elem_ty };
},
.flat_index => |flat| .{ flat.val, flat.flat_elem_index, flat.val.typeOf(zcu) },
.reinterpret => break :restructure_array,
else => unreachable,
};
const val_one_ty, const val_count = base_val.typeOf(zcu).arrayBase(zcu);
if (.ok != try sema.coerceInMemoryAllowed(block, val_one_ty, store_one_ty, true, zcu.getTarget(), src, src)) {
break :restructure_array;
}
if (base_elem_offset + extra_base_index + store_count > val_count) return .{ .out_of_bounds = oob_ty };
 
if (store_ty.zigTypeTag(zcu) == .Array) {
const skip = store_ty.childType(zcu).arrayBase(zcu)[1] * array_offset;
return .{ .flat_index = .{
.alloc = base_strat.alloc(),
.val = base_val,
.flat_elem_index = skip + base_elem_offset + extra_base_index,
} };
}
 
// `base_val` must be an array, since otherwise the "direct" logic above would already have handled it.
assert(base_val.typeOf(zcu).zigTypeTag(zcu) == .Array);
 
var index: u64 = base_elem_offset + extra_base_index;
const arr_val, const arr_index = (try recursiveIndex(sema, base_val, &index)).?;
return .{ .index = .{
.alloc = base_strat.alloc(),
.val = arr_val,
.elem_index = arr_index,
} };
}
 
// We need to reinterpret memory, which is only possible if neither the store
// type nor the type of the base value has an ill-defined layout.
 
if (!store_ty.hasWellDefinedLayout(zcu)) {
return .{ .needed_well_defined = store_ty };
}
 
var cur_val: *MutableValue, var cur_offset: u64 = switch (base_strat) {
.direct => |direct| .{ direct.val, 0 },
// It's okay to do `abiSize` - the comptime-only case will be caught below.
.index => |index| .{ index.val, index.elem_index * try sema.typeAbiSize(index.val.typeOf(zcu).childType(zcu)) },
.flat_index => |flat_index| .{
flat_index.val,
// It's okay to do `abiSize` - the comptime-only case will be caught below.
flat_index.flat_elem_index * try sema.typeAbiSize(flat_index.val.typeOf(zcu).arrayBase(zcu)[0]),
},
.reinterpret => |r| .{ r.val, r.byte_offset },
else => unreachable,
};
cur_offset += ptr.byte_offset;
 
if (!cur_val.typeOf(zcu).hasWellDefinedLayout(zcu)) {
return .{ .needed_well_defined = cur_val.typeOf(zcu) };
}
 
if (store_ty.zigTypeTag(zcu) == .Array and array_offset > 0) {
cur_offset += try sema.typeAbiSize(store_ty.childType(zcu)) * array_offset;
}
 
const need_bytes = try sema.typeAbiSize(store_ty);
 
if (cur_offset + need_bytes > try sema.typeAbiSize(cur_val.typeOf(zcu))) {
return .{ .out_of_bounds = cur_val.typeOf(zcu) };
}
 
// In the worst case, we can reinterpret the entire value - however, that's
// pretty wasteful. If the memory region we're interested in refers to one
// field or array element, let's just look at that.
while (true) {
const cur_ty = cur_val.typeOf(zcu);
switch (cur_ty.zigTypeTag(zcu)) {
.NoReturn,
.Type,
.ComptimeInt,
.ComptimeFloat,
.Null,
.Undefined,
.EnumLiteral,
.Opaque,
.Fn,
.ErrorUnion,
=> unreachable, // ill-defined layout
.Int,
.Float,
.Bool,
.Void,
.Pointer,
.ErrorSet,
.AnyFrame,
.Frame,
.Enum,
.Vector,
=> break, // terminal types (no sub-values)
.Optional => break, // this can only be a pointer-like optional so is terminal
.Array => {
const elem_ty = cur_ty.childType(zcu);
const elem_size = try sema.typeAbiSize(elem_ty);
const elem_idx = cur_offset / elem_size;
const next_elem_off = elem_size * (elem_idx + 1);
if (cur_offset + need_bytes <= next_elem_off) {
// We can look at a single array element.
cur_val = try cur_val.elem(zcu, sema.arena, @intCast(elem_idx));
cur_offset -= elem_idx * elem_size;
} else {
break;
}
},
.Struct => switch (cur_ty.containerLayout(zcu)) {
.auto => unreachable, // ill-defined layout
.@"packed" => break, // let the bitcast logic handle this
.@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
const start_off = cur_ty.structFieldOffset(field_idx, zcu);
const end_off = start_off + try sema.typeAbiSize(cur_ty.structFieldType(field_idx, zcu));
if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
cur_val = try cur_val.elem(zcu, sema.arena, field_idx);
cur_offset -= start_off;
break;
}
} else break, // pointer spans multiple fields
},
.Union => switch (cur_ty.containerLayout(zcu)) {
.auto => unreachable, // ill-defined layout
.@"packed" => break, // let the bitcast logic handle this
.@"extern" => {
// TODO: we have to let bitcast logic handle this for now.
// Otherwise, we might traverse into a union field which doesn't allow pointers.
// Figure out a solution!
if (true) break;
try cur_val.unintern(zcu, sema.arena, false, false);
const payload = switch (cur_val.*) {
.un => |un| un.payload,
else => unreachable,
};
// The payload always has offset 0. If it's big enough
// to represent the whole store type, we can use it.
if (try sema.typeAbiSize(payload.typeOf(zcu)) >= need_bytes) {
cur_val = payload;
} else {
break;
}
},
},
}
}
 
// Fast path: check again if we're now at the type we want to store.
// If so, we can use the `direct` strategy.
if (cur_offset == 0 and cur_val.typeOf(zcu).toIntern() == store_ty.toIntern()) {
return .{ .direct = .{
.alloc = base_strat.alloc(),
.val = cur_val,
} };
}
 
return .{ .reinterpret = .{
.alloc = base_strat.alloc(),
.val = cur_val,
.byte_offset = cur_offset,
} };
}
 
/// Given a potentially-nested array value, recursively flatten all of its elements into the given
/// output array. The result can be used by `unflattenArray` to restructure array values.
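/// For example, flattening the `[2][2]u8` value `.{ .{ 1, 2 }, .{ 3, 4 } }` with `skip.* == 0`
/// fills `out` with the interned scalars `1, 2, 3, 4`.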
fn flattenArray(
sema: *Sema,
val: MutableValue,
skip: *u64,
next_idx: *u64,
out: []InternPool.Index,
) Allocator.Error!void {
if (next_idx.* == out.len) return;
 
const zcu = sema.mod;
 
const ty = val.typeOf(zcu);
const base_elem_count = ty.arrayBase(zcu)[1];
if (skip.* >= base_elem_count) {
skip.* -= base_elem_count;
return;
}
 
if (ty.zigTypeTag(zcu) != .Array) {
out[@intCast(next_idx.*)] = (try val.intern(zcu, sema.arena)).toIntern();
next_idx.* += 1;
return;
}
 
const arr_base_elem_count = ty.childType(zcu).arrayBase(zcu)[1];
for (0..@intCast(ty.arrayLen(zcu))) |elem_idx| {
// Optimization: the `getElem` here may be expensive since we might intern an
// element of the `bytes` representation, so avoid doing it unnecessarily.
if (next_idx.* == out.len) return;
if (skip.* >= arr_base_elem_count) {
skip.* -= arr_base_elem_count;
continue;
}
try flattenArray(sema, try val.getElem(zcu, elem_idx), skip, next_idx, out);
}
if (ty.sentinel(zcu)) |s| {
try flattenArray(sema, .{ .interned = s.toIntern() }, skip, next_idx, out);
}
}
 
/// Given a sequence of non-array elements, "unflatten" them into the given array type.
/// Asserts that values of `elems` are in-memory coercible to the array base type of `ty`.
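/// For example, unflattening the scalars `1, 2, 3, 4, 5, 6` into `ty == [2][3]u8` produces
/// `.{ .{ 1, 2, 3 }, .{ 4, 5, 6 } }`.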
fn unflattenArray(
sema: *Sema,
ty: Type,
elems: []const InternPool.Index,
next_idx: *u64,
) Allocator.Error!Value {
const zcu = sema.mod;
const arena = sema.arena;
 
if (ty.zigTypeTag(zcu) != .Array) {
const val = Value.fromInterned(elems[@intCast(next_idx.*)]);
next_idx.* += 1;
return zcu.getCoerced(val, ty);
}
 
const elem_ty = ty.childType(zcu);
const buf = try arena.alloc(InternPool.Index, @intCast(ty.arrayLen(zcu)));
for (buf) |*elem| {
elem.* = (try unflattenArray(sema, elem_ty, elems, next_idx)).toIntern();
}
if (ty.sentinel(zcu) != null) {
// TODO: validate sentinel
_ = try unflattenArray(sema, elem_ty, elems, next_idx);
}
return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = buf },
} }));
}
 
/// Given a `MutableValue` representing a potentially-nested array, treats `index` as an index into
/// the array's base type. For instance, given a [3][3]T, the index 5 represents 'val[1][2]'.
/// The final level of array is not dereferenced. This allows use sites to use `setElem` to prevent
/// unnecessary `MutableValue` representation changes.
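/// Continuing the example above, for a `[3][3]T` and index 5 the return value is the inner
/// `[3]T` at `val[1]` together with the remaining index 2.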
fn recursiveIndex(
sema: *Sema,
mv: *MutableValue,
index: *u64,
) !?struct { *MutableValue, u64 } {
const zcu = sema.mod;
 
const ty = mv.typeOf(zcu);
assert(ty.zigTypeTag(zcu) == .Array);
 
const ty_base_elems = ty.arrayBase(zcu)[1];
if (index.* >= ty_base_elems) {
index.* -= ty_base_elems;
return null;
}
 
const elem_ty = ty.childType(zcu);
if (elem_ty.zigTypeTag(zcu) != .Array) {
assert(index.* < ty.arrayLenIncludingSentinel(zcu)); // should be handled by initial check
return .{ mv, index.* };
}
 
for (0..@intCast(ty.arrayLenIncludingSentinel(zcu))) |elem_index| {
if (try recursiveIndex(sema, try mv.elem(zcu, sema.arena, elem_index), index)) |result| {
return result;
}
}
unreachable; // should be handled by initial check
}
 
/// Emits a compile error if a store to the comptime alloc `alloc_index` would depend on a
/// runtime condition or occur inside a non-inline (runtime) loop; otherwise does nothing.
fn checkComptimeVarStore(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
alloc_index: ComptimeAllocIndex,
) !void {
const runtime_index = sema.getComptimeAlloc(alloc_index).runtime_index;
if (@intFromEnum(runtime_index) < @intFromEnum(block.runtime_index)) {
if (block.runtime_cond) |cond_src| {
const msg = msg: {
const msg = try sema.errMsg(block, src, "store to comptime variable depends on runtime condition", .{});
errdefer msg.destroy(sema.gpa);
try sema.mod.errNoteNonLazy(cond_src, msg, "runtime condition here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
}
if (block.runtime_loop) |loop_src| {
const msg = msg: {
const msg = try sema.errMsg(block, src, "cannot store to comptime variable in non-inline loop", .{});
errdefer msg.destroy(sema.gpa);
try sema.mod.errNoteNonLazy(loop_src, msg, "non-inline loop here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
}
unreachable;
}
}
 
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const LazySrcLoc = std.zig.LazySrcLoc;
 
const InternPool = @import("../InternPool.zig");
const ComptimeAllocIndex = InternPool.ComptimeAllocIndex;
const Sema = @import("../Sema.zig");
const Block = Sema.Block;
const MutableValue = @import("../mutable_value.zig").MutableValue;
const Type = @import("../type.zig").Type;
const Value = @import("../Value.zig");
 
src/Value.zig added: 4888, removed: 2579, total 2309
@@ -39,10 +39,11 @@ pub fn fmtDebug(val: Value) std.fmt.Formatter(dump) {
return .{ .data = val };
}
 
pub fn fmtValue(val: Value, mod: *Module) std.fmt.Formatter(print_value.format) {
pub fn fmtValue(val: Value, mod: *Module, opt_sema: ?*Sema) std.fmt.Formatter(print_value.format) {
return .{ .data = .{
.val = val,
.mod = mod,
.opt_sema = opt_sema,
} };
}
 
@@ -246,18 +247,13 @@ pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64
else
Type.fromInterned(ty).abiSize(mod),
},
.ptr => |ptr| switch (ptr.addr) {
.int => |int| Value.fromInterned(int).getUnsignedIntAdvanced(mod, opt_sema),
.elem => |elem| {
const base_addr = (try Value.fromInterned(elem.base).getUnsignedIntAdvanced(mod, opt_sema)) orelse return null;
const elem_ty = Value.fromInterned(elem.base).typeOf(mod).elemType2(mod);
return base_addr + elem.index * elem_ty.abiSize(mod);
},
.ptr => |ptr| switch (ptr.base_addr) {
.int => ptr.byte_offset,
.field => |field| {
const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(mod, opt_sema)) orelse return null;
const struct_ty = Value.fromInterned(field.base).typeOf(mod).childType(mod);
if (opt_sema) |sema| try sema.resolveTypeLayout(struct_ty);
return base_addr + struct_ty.structFieldOffset(@intCast(field.index), mod);
return base_addr + struct_ty.structFieldOffset(@intCast(field.index), mod) + ptr.byte_offset;
},
else => null,
},
@@ -309,11 +305,11 @@ pub fn toBool(val: Value) bool {
fn ptrHasIntAddr(val: Value, mod: *Module) bool {
var check = val;
while (true) switch (mod.intern_pool.indexToKey(check.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
.ptr => |ptr| switch (ptr.base_addr) {
.decl, .comptime_alloc, .comptime_field, .anon_decl => return false,
.int => return true,
.eu_payload, .opt_payload => |base| check = Value.fromInterned(base),
.elem, .field => |base_index| check = Value.fromInterned(base_index.base),
.arr_elem, .field => |base_index| check = Value.fromInterned(base_index.base),
},
else => unreachable,
};
@@ -473,7 +469,9 @@ pub fn writeToPackedMemory(
const endian = target.cpu.arch.endian();
if (val.isUndef(mod)) {
const bit_size: usize = @intCast(ty.bitSize(mod));
std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian);
if (bit_size != 0) {
std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian);
}
return;
}
switch (ty.zigTypeTag(mod)) {
@@ -731,7 +729,8 @@ pub fn readFromMemory(
const int_val = try readFromMemory(Type.usize, mod, buffer, arena);
return Value.fromInterned((try mod.intern(.{ .ptr = .{
.ty = ty.toIntern(),
.addr = .{ .int = int_val.toIntern() },
.base_addr = .int,
.byte_offset = int_val.toUnsignedInt(mod),
} })));
},
.Optional => {
@@ -869,12 +868,25 @@ pub fn readFromPackedMemory(
},
.Pointer => {
assert(!ty.isSlice(mod)); // No well defined layout.
return readFromPackedMemory(Type.usize, mod, buffer, bit_offset, arena);
const int_val = try readFromPackedMemory(Type.usize, mod, buffer, bit_offset, arena);
return Value.fromInterned(try mod.intern(.{ .ptr = .{
.ty = ty.toIntern(),
.base_addr = .int,
.byte_offset = int_val.toUnsignedInt(mod),
} }));
},
.Optional => {
assert(ty.isPtrLikeOptional(mod));
const child = ty.optionalChild(mod);
return readFromPackedMemory(child, mod, buffer, bit_offset, arena);
const child_ty = ty.optionalChild(mod);
const child_val = try readFromPackedMemory(child_ty, mod, buffer, bit_offset, arena);
return Value.fromInterned(try mod.intern(.{ .opt = .{
.ty = ty.toIntern(),
.val = switch (child_val.orderAgainstZero(mod)) {
.lt => unreachable,
.eq => .none,
.gt => child_val.toIntern(),
},
} }));
},
else => @panic("TODO implement readFromPackedMemory for more types"),
}
@@ -983,16 +995,17 @@ pub fn intBitCountTwosComp(self: Value, mod: *Module) usize {
 
/// Converts an integer or a float to a float. May result in a loss of information.
/// Caller can find out by equality checking the result against the operand.
pub fn floatCast(self: Value, dest_ty: Type, mod: *Module) !Value {
const target = mod.getTarget();
return Value.fromInterned((try mod.intern(.{ .float = .{
pub fn floatCast(val: Value, dest_ty: Type, zcu: *Zcu) !Value {
const target = zcu.getTarget();
if (val.isUndef(zcu)) return zcu.undefValue(dest_ty);
return Value.fromInterned((try zcu.intern(.{ .float = .{
.ty = dest_ty.toIntern(),
.storage = switch (dest_ty.floatBits(target)) {
16 => .{ .f16 = self.toFloat(f16, mod) },
32 => .{ .f32 = self.toFloat(f32, mod) },
64 => .{ .f64 = self.toFloat(f64, mod) },
80 => .{ .f80 = self.toFloat(f80, mod) },
128 => .{ .f128 = self.toFloat(f128, mod) },
16 => .{ .f16 = val.toFloat(f16, zcu) },
32 => .{ .f32 = val.toFloat(f32, zcu) },
64 => .{ .f64 = val.toFloat(f64, zcu) },
80 => .{ .f80 = val.toFloat(f80, zcu) },
128 => .{ .f128 = val.toFloat(f128, zcu) },
else => unreachable,
},
} })));
@@ -1021,14 +1034,9 @@ pub fn orderAgainstZeroAdvanced(
.bool_false => .eq,
.bool_true => .gt,
else => switch (mod.intern_pool.indexToKey(lhs.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
.ptr => |ptr| if (ptr.byte_offset > 0) .gt else switch (ptr.base_addr) {
.decl, .comptime_alloc, .comptime_field => .gt,
.int => |int| Value.fromInterned(int).orderAgainstZeroAdvanced(mod, opt_sema),
.elem => |elem| switch (try Value.fromInterned(elem.base).orderAgainstZeroAdvanced(mod, opt_sema)) {
.lt => unreachable,
.gt => .gt,
.eq => if (elem.index == 0) .eq else .gt,
},
.int => .eq,
else => unreachable,
},
.int => |int| switch (int.storage) {
@@ -1158,6 +1166,7 @@ pub fn compareScalar(
 
/// Asserts the value is comparable.
/// For vectors, returns true if comparison is true for ALL elements.
/// Returns `false` if the value or any vector element is undefined.
///
/// Note that `!compareAllWithZero(.eq, ...) != compareAllWithZero(.neq, ...)`
pub fn compareAllWithZero(lhs: Value, op: std.math.CompareOperator, mod: *Module) bool {
@@ -1200,6 +1209,7 @@ pub fn compareAllWithZeroAdvancedExtra(
} else true,
.repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, opt_sema),
},
.undef => return false,
else => {},
}
return (try orderAgainstZeroAdvanced(lhs, mod, opt_sema)).compare(op);
@@ -1217,14 +1227,14 @@ pub fn canMutateComptimeVarState(val: Value, zcu: *Zcu) bool {
.err_name => false,
.payload => |payload| Value.fromInterned(payload).canMutateComptimeVarState(zcu),
},
.ptr => |ptr| switch (ptr.addr) {
.ptr => |ptr| switch (ptr.base_addr) {
.decl => false, // The value of a Decl can never reference a comptime alloc.
.int => false,
.comptime_alloc => true, // A comptime alloc is either mutable or references comptime-mutable memory.
.comptime_field => true, // Comptime field pointers are comptime-mutable, albeit only to the "correct" value.
.eu_payload, .opt_payload => |base| Value.fromInterned(base).canMutateComptimeVarState(zcu),
.anon_decl => |anon_decl| Value.fromInterned(anon_decl.val).canMutateComptimeVarState(zcu),
.elem, .field => |base_index| Value.fromInterned(base_index.base).canMutateComptimeVarState(zcu),
.arr_elem, .field => |base_index| Value.fromInterned(base_index.base).canMutateComptimeVarState(zcu),
},
.slice => |slice| return Value.fromInterned(slice.ptr).canMutateComptimeVarState(zcu),
.opt => |opt| switch (opt.val) {
@@ -1247,10 +1257,10 @@ pub fn pointerDecl(val: Value, mod: *Module) ?InternPool.DeclIndex {
.variable => |variable| variable.decl,
.extern_func => |extern_func| extern_func.decl,
.func => |func| func.owner_decl,
.ptr => |ptr| switch (ptr.addr) {
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
.decl => |decl| decl,
else => null,
},
} else null,
else => null,
};
}
@@ -1386,44 +1396,6 @@ pub fn unionValue(val: Value, mod: *Module) Value {
};
}
 
/// Returns a pointer to the element value at the index.
pub fn elemPtr(
val: Value,
elem_ptr_ty: Type,
index: usize,
mod: *Module,
) Allocator.Error!Value {
const elem_ty = elem_ptr_ty.childType(mod);
const ptr_val = switch (mod.intern_pool.indexToKey(val.toIntern())) {
.slice => |slice| Value.fromInterned(slice.ptr),
else => val,
};
switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
.elem => |elem| if (Value.fromInterned(elem.base).typeOf(mod).elemType2(mod).eql(elem_ty, mod))
return Value.fromInterned((try mod.intern(.{ .ptr = .{
.ty = elem_ptr_ty.toIntern(),
.addr = .{ .elem = .{
.base = elem.base,
.index = elem.index + index,
} },
} }))),
else => {},
},
else => {},
}
var ptr_ty_key = mod.intern_pool.indexToKey(elem_ptr_ty.toIntern()).ptr_type;
assert(ptr_ty_key.flags.size != .Slice);
ptr_ty_key.flags.size = .Many;
return Value.fromInterned((try mod.intern(.{ .ptr = .{
.ty = elem_ptr_ty.toIntern(),
.addr = .{ .elem = .{
.base = (try mod.getCoerced(ptr_val, try mod.ptrType(ptr_ty_key))).toIntern(),
.index = index,
} },
} })));
}
 
pub fn isUndef(val: Value, mod: *Module) bool {
return mod.intern_pool.isUndef(val.toIntern());
}
@@ -1444,11 +1416,8 @@ pub fn isNull(val: Value, mod: *Module) bool {
.null_value => true,
else => return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.undef => unreachable,
.ptr => |ptr| switch (ptr.addr) {
.int => {
var buf: BigIntSpace = undefined;
return val.toBigInt(&buf, mod).eqlZero();
},
.ptr => |ptr| switch (ptr.base_addr) {
.int => ptr.byte_offset == 0,
else => false,
},
.opt => |opt| opt.val == .none,
@@ -1725,6 +1694,13 @@ pub fn intMulWithOverflowScalar(
) !OverflowArithmeticResult {
const info = ty.intInfo(mod);
 
if (lhs.isUndef(mod) or rhs.isUndef(mod)) {
return .{
.overflow_bit = try mod.undefValue(Type.u1),
.wrapped_result = try mod.undefValue(ty),
};
}
 
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
@@ -1941,16 +1917,29 @@ pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *
}
 
/// operands must be integers; handles undefined.
pub fn bitwiseAndScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() })));
pub fn bitwiseAndScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Allocator, zcu: *Zcu) !Value {
// If one operand is defined, we turn the other into `0xAA` so the bitwise AND can
// still zero out some bits.
// TODO: ideally we'd still like tracking for the undef bits. Related: #19634.
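// For example, with one operand undefined, `0xAA & 0x0F == 0x0A`: bits the defined operand
// forces to zero stay zero, while the remaining bits take the arbitrary-but-defined `0xAA` pattern.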
const lhs: Value, const rhs: Value = make_defined: {
const lhs_undef = orig_lhs.isUndef(zcu);
const rhs_undef = orig_rhs.isUndef(zcu);
break :make_defined switch ((@as(u2, @intFromBool(lhs_undef)) << 1) | @intFromBool(rhs_undef)) {
0b00 => .{ orig_lhs, orig_rhs },
0b01 => .{ orig_lhs, try intValueAa(ty, arena, zcu) },
0b10 => .{ try intValueAa(ty, arena, zcu), orig_rhs },
0b11 => return zcu.undefValue(ty),
};
};
 
if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() and rhs.toBool());
 
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
const limbs = try arena.alloc(
std.math.big.Limb,
// + 1 for negatives
@@ -1958,7 +1947,25 @@ pub fn bitwiseAndScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod:
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitAnd(lhs_bigint, rhs_bigint);
return mod.intValue_big(ty, result_bigint.toConst());
return zcu.intValue_big(ty, result_bigint.toConst());
}
 
/// Given an integer or boolean type, creates a value of that type with the bit pattern 0xAA.
/// This is used to convert undef values into 0xAA when performing e.g. bitwise operations.
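/// For example, for `u16` this returns `0xAAAA`; for `bool` it returns `true`.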
fn intValueAa(ty: Type, arena: Allocator, zcu: *Zcu) !Value {
if (ty.toIntern() == .bool_type) return Value.true;
const info = ty.intInfo(zcu);
 
const buf = try arena.alloc(u8, (info.bits + 7) / 8);
@memset(buf, 0xAA);
 
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.readTwosComplement(buf, info.bits, zcu.getTarget().cpu.arch.endian(), info.signedness);
return zcu.intValue_big(ty, result_bigint.toConst());
}
 
/// operands must be (vectors of) integers; handles undefined scalars.
@@ -2008,23 +2015,36 @@ pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *M
}
 
/// operands must be integers; handles undefined.
pub fn bitwiseOrScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() })));
pub fn bitwiseOrScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Allocator, zcu: *Zcu) !Value {
// If one operand is defined, we turn the other into `0xAA` so the bitwise OR can
// still set some bits.
// TODO: ideally we'd still like tracking for the undef bits. Related: #19634.
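// For example, with one operand undefined, `0xAA | 0x0F == 0xAF`: bits the defined operand
// forces to one stay one, while the remaining bits take the arbitrary-but-defined `0xAA` pattern.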
const lhs: Value, const rhs: Value = make_defined: {
const lhs_undef = orig_lhs.isUndef(zcu);
const rhs_undef = orig_rhs.isUndef(zcu);
break :make_defined switch ((@as(u2, @intFromBool(lhs_undef)) << 1) | @intFromBool(rhs_undef)) {
0b00 => .{ orig_lhs, orig_rhs },
0b01 => .{ orig_lhs, try intValueAa(ty, arena, zcu) },
0b10 => .{ try intValueAa(ty, arena, zcu), orig_rhs },
0b11 => return zcu.undefValue(ty),
};
};
 
if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() or rhs.toBool());
 
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
const limbs = try arena.alloc(
std.math.big.Limb,
@max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitOr(lhs_bigint, rhs_bigint);
return mod.intValue_big(ty, result_bigint.toConst());
return zcu.intValue_big(ty, result_bigint.toConst());
}
 
/// operands must be (vectors of) integers; handles undefined scalars.
@@ -2439,12 +2459,14 @@ pub fn intTruncScalar(
allocator: Allocator,
signedness: std.builtin.Signedness,
bits: u16,
mod: *Module,
zcu: *Zcu,
) !Value {
if (bits == 0) return mod.intValue(ty, 0);
if (bits == 0) return zcu.intValue(ty, 0);
 
if (val.isUndef(zcu)) return zcu.undefValue(ty);
 
var val_space: Value.BigIntSpace = undefined;
const val_bigint = val.toBigInt(&val_space, mod);
const val_bigint = val.toBigInt(&val_space, zcu);
 
const limbs = try allocator.alloc(
std.math.big.Limb,
@@ -2453,7 +2475,7 @@ pub fn intTruncScalar(
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
 
result_bigint.truncate(val_bigint, signedness, bits);
return mod.intValue_big(ty, result_bigint.toConst());
return zcu.intValue_big(ty, result_bigint.toConst());
}
 
pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
@@ -3585,3 +3607,660 @@ pub fn makeBool(x: bool) Value {
}
 
pub const RuntimeIndex = InternPool.RuntimeIndex;
 
/// `parent_ptr` must be a single-pointer to some optional.
/// Returns a pointer to the payload of the optional.
/// This takes a `Sema` because it may need to perform type resolution.
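/// For example, given a `parent_ptr` of type `*?u32`, the result is a `*u32` referring to the
/// optional's payload.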
pub fn ptrOptPayload(parent_ptr: Value, sema: *Sema) !Value {
const zcu = sema.mod;
 
const parent_ptr_ty = parent_ptr.typeOf(zcu);
const opt_ty = parent_ptr_ty.childType(zcu);
 
assert(parent_ptr_ty.ptrSize(zcu) == .One);
assert(opt_ty.zigTypeTag(zcu) == .Optional);
 
const result_ty = try sema.ptrType(info: {
var new = parent_ptr_ty.ptrInfo(zcu);
// We can correctly preserve alignment `.none`, since an optional has the same
// natural alignment as its child type.
new.child = opt_ty.childType(zcu).toIntern();
break :info new;
});
 
if (parent_ptr.isUndef(zcu)) return zcu.undefValue(result_ty);
 
if (opt_ty.isPtrLikeOptional(zcu)) {
// Just reinterpret the pointer, since the layout is well-defined
return zcu.getCoerced(parent_ptr, result_ty);
}
 
const base_ptr = try parent_ptr.canonicalizeBasePtr(.One, opt_ty, zcu);
return Value.fromInterned(try zcu.intern(.{ .ptr = .{
.ty = result_ty.toIntern(),
.base_addr = .{ .opt_payload = base_ptr.toIntern() },
.byte_offset = 0,
} }));
}
 
/// `parent_ptr` must be a single-pointer to some error union.
/// Returns a pointer to the payload of the error union.
/// This takes a `Sema` because it may need to perform type resolution.
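/// For example, given a `parent_ptr` whose child type is `anyerror!u32`, the result is a `*u32`
/// using the `eu_payload` base address representation.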
pub fn ptrEuPayload(parent_ptr: Value, sema: *Sema) !Value {
const zcu = sema.mod;
 
const parent_ptr_ty = parent_ptr.typeOf(zcu);
const eu_ty = parent_ptr_ty.childType(zcu);
 
assert(parent_ptr_ty.ptrSize(zcu) == .One);
assert(eu_ty.zigTypeTag(zcu) == .ErrorUnion);
 
const result_ty = try sema.ptrType(info: {
var new = parent_ptr_ty.ptrInfo(zcu);
// We can correctly preserve alignment `.none`, since an error union has a
// natural alignment greater than or equal to that of its payload type.
new.child = eu_ty.errorUnionPayload(zcu).toIntern();
break :info new;
});
 
if (parent_ptr.isUndef(zcu)) return zcu.undefValue(result_ty);
 
const base_ptr = try parent_ptr.canonicalizeBasePtr(.One, eu_ty, zcu);
return Value.fromInterned(try zcu.intern(.{ .ptr = .{
.ty = result_ty.toIntern(),
.base_addr = .{ .eu_payload = base_ptr.toIntern() },
.byte_offset = 0,
} }));
}
 
/// `parent_ptr` must be a single-pointer to a struct, union, or slice.
/// Returns a pointer to the aggregate field at the specified index.
/// For slices, uses `slice_ptr_index` and `slice_len_index`.
/// This takes a `Sema` because it may need to perform type resolution.
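/// For example, for a `parent_ptr` of type `*struct { a: u8, b: u32 }` (auto layout), passing
/// `field_idx == 1` yields a pointer to the `u32` field using the `field` base address
/// representation, possibly carrying an explicit alignment annotation.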
pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value {
const zcu = sema.mod;
 
const parent_ptr_ty = parent_ptr.typeOf(zcu);
const aggregate_ty = parent_ptr_ty.childType(zcu);
 
const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu);
assert(parent_ptr_info.flags.size == .One);
 
// Exiting this `switch` indicates that the `field` pointer representation should be used.
// `field_align` may be `.none`, representing the natural alignment of `field_ty`, but this is not guaranteed.
const field_ty: Type, const field_align: InternPool.Alignment = switch (aggregate_ty.zigTypeTag(zcu)) {
.Struct => field: {
const field_ty = aggregate_ty.structFieldType(field_idx, zcu);
switch (aggregate_ty.containerLayout(zcu)) {
.auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, sema) },
.@"extern" => {
// Well-defined layout, so just offset the pointer appropriately.
const byte_off = aggregate_ty.structFieldOffset(field_idx, zcu);
const field_align = a: {
const parent_align = if (parent_ptr_info.flags.alignment == .none) pa: {
break :pa try sema.typeAbiAlignment(aggregate_ty);
} else parent_ptr_info.flags.alignment;
break :a InternPool.Alignment.fromLog2Units(@min(parent_align.toLog2Units(), @ctz(byte_off)));
};
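// For instance, a field at byte offset 4 within a parent pointer aligned to 8 bytes gets
// alignment `min(8, 4) == 4`; the computation above works in log2 units via `@ctz`.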
const result_ty = try sema.ptrType(info: {
var new = parent_ptr_info;
new.child = field_ty.toIntern();
new.flags.alignment = field_align;
break :info new;
});
return parent_ptr.getOffsetPtr(byte_off, result_ty, zcu);
},
.@"packed" => switch (aggregate_ty.packedStructFieldPtrInfo(parent_ptr_ty, field_idx, zcu)) {
.bit_ptr => |packed_offset| {
const result_ty = try zcu.ptrType(info: {
var new = parent_ptr_info;
new.packed_offset = packed_offset;
new.child = field_ty.toIntern();
if (new.flags.alignment == .none) {
new.flags.alignment = try sema.typeAbiAlignment(aggregate_ty);
}
break :info new;
});
return zcu.getCoerced(parent_ptr, result_ty);
},
.byte_ptr => |ptr_info| {
const result_ty = try sema.ptrType(info: {
var new = parent_ptr_info;
new.child = field_ty.toIntern();
new.packed_offset = .{
.host_size = 0,
.bit_offset = 0,
};
new.flags.alignment = ptr_info.alignment;
break :info new;
});
return parent_ptr.getOffsetPtr(ptr_info.offset, result_ty, zcu);
},
},
}
},
.Union => field: {
const union_obj = zcu.typeToUnion(aggregate_ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[field_idx]);
switch (aggregate_ty.containerLayout(zcu)) {
.auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, sema) },
.@"extern" => {
// Point to the same address.
const result_ty = try sema.ptrType(info: {
var new = parent_ptr_info;
new.child = field_ty.toIntern();
break :info new;
});
return zcu.getCoerced(parent_ptr, result_ty);
},
.@"packed" => {
// If the field has an ABI size matching its bit size, then we can continue to use a
// non-bit pointer if the parent pointer is also a non-bit pointer.
if (parent_ptr_info.packed_offset.host_size == 0 and try sema.typeAbiSize(field_ty) * 8 == try field_ty.bitSizeAdvanced(zcu, sema)) {
// We must offset the pointer on big-endian targets, since the bits of packed memory don't align nicely.
const byte_offset = switch (zcu.getTarget().cpu.arch.endian()) {
.little => 0,
.big => try sema.typeAbiSize(aggregate_ty) - try sema.typeAbiSize(field_ty),
};
const result_ty = try sema.ptrType(info: {
var new = parent_ptr_info;
new.child = field_ty.toIntern();
new.flags.alignment = InternPool.Alignment.fromLog2Units(
@ctz(byte_offset | (try parent_ptr_ty.ptrAlignmentAdvanced(zcu, sema)).toByteUnits().?),
);
break :info new;
});
return parent_ptr.getOffsetPtr(byte_offset, result_ty, zcu);
} else {
// The result must be a bit-pointer if it is not already.
const result_ty = try sema.ptrType(info: {
var new = parent_ptr_info;
new.child = field_ty.toIntern();
if (new.packed_offset.host_size == 0) {
new.packed_offset.host_size = @intCast(((try aggregate_ty.bitSizeAdvanced(zcu, sema)) + 7) / 8);
assert(new.packed_offset.bit_offset == 0);
}
break :info new;
});
return zcu.getCoerced(parent_ptr, result_ty);
}
},
}
},
.Pointer => field_ty: {
assert(aggregate_ty.isSlice(zcu));
break :field_ty switch (field_idx) {
Value.slice_ptr_index => .{ aggregate_ty.slicePtrFieldType(zcu), Type.usize.abiAlignment(zcu) },
Value.slice_len_index => .{ Type.usize, Type.usize.abiAlignment(zcu) },
else => unreachable,
};
},
else => unreachable,
};
 
const new_align: InternPool.Alignment = if (parent_ptr_info.flags.alignment != .none) a: {
const ty_align = try sema.typeAbiAlignment(field_ty);
const true_field_align = if (field_align == .none) ty_align else field_align;
const new_align = true_field_align.min(parent_ptr_info.flags.alignment);
if (new_align == ty_align) break :a .none;
break :a new_align;
} else field_align;
 
const result_ty = try sema.ptrType(info: {
var new = parent_ptr_info;
new.child = field_ty.toIntern();
new.flags.alignment = new_align;
break :info new;
});
 
if (parent_ptr.isUndef(zcu)) return zcu.undefValue(result_ty);
 
const base_ptr = try parent_ptr.canonicalizeBasePtr(.One, aggregate_ty, zcu);
return Value.fromInterned(try zcu.intern(.{ .ptr = .{
.ty = result_ty.toIntern(),
.base_addr = .{ .field = .{
.base = base_ptr.toIntern(),
.index = field_idx,
} },
.byte_offset = 0,
} }));
}
 
/// `orig_parent_ptr` must be either a single-pointer to an array or vector, or a many-pointer or C-pointer or slice.
/// Returns a pointer to the element at the specified index.
/// This takes a `Sema` because it may need to perform type resolution.
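/// For example, indexing a `*[4]u8` at index 2 yields a `*u8` whose `byte_offset` is two bytes
/// past that of the parent pointer.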
pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, sema: *Sema) !Value {
const zcu = sema.mod;
 
const parent_ptr = switch (orig_parent_ptr.typeOf(zcu).ptrSize(zcu)) {
.One, .Many, .C => orig_parent_ptr,
.Slice => orig_parent_ptr.slicePtr(zcu),
};
 
const parent_ptr_ty = parent_ptr.typeOf(zcu);
const elem_ty = parent_ptr_ty.childType(zcu);
const result_ty = try sema.elemPtrType(parent_ptr_ty, @intCast(field_idx));
 
if (parent_ptr.isUndef(zcu)) return zcu.undefValue(result_ty);
 
if (result_ty.ptrInfo(zcu).packed_offset.host_size != 0) {
// Since we have a bit-pointer, the pointer address should be unchanged.
assert(elem_ty.zigTypeTag(zcu) == .Vector);
return zcu.getCoerced(parent_ptr, result_ty);
}
 
const PtrStrat = union(enum) {
offset: u64,
elem_ptr: Type, // many-ptr elem ty
};
 
const strat: PtrStrat = switch (parent_ptr_ty.ptrSize(zcu)) {
.One => switch (elem_ty.zigTypeTag(zcu)) {
.Vector => .{ .offset = field_idx * @divExact(try elem_ty.childType(zcu).bitSizeAdvanced(zcu, sema), 8) },
.Array => strat: {
const arr_elem_ty = elem_ty.childType(zcu);
if (try sema.typeRequiresComptime(arr_elem_ty)) {
break :strat .{ .elem_ptr = arr_elem_ty };
}
break :strat .{ .offset = field_idx * try sema.typeAbiSize(arr_elem_ty) };
},
else => unreachable,
},
 
.Many, .C => if (try sema.typeRequiresComptime(elem_ty))
.{ .elem_ptr = elem_ty }
else
.{ .offset = field_idx * try sema.typeAbiSize(elem_ty) },
 
.Slice => unreachable,
};
 
switch (strat) {
.offset => |byte_offset| {
return parent_ptr.getOffsetPtr(byte_offset, result_ty, zcu);
},
.elem_ptr => |manyptr_elem_ty| if (field_idx == 0) {
return zcu.getCoerced(parent_ptr, result_ty);
} else {
const arr_base_ty, const arr_base_len = manyptr_elem_ty.arrayBase(zcu);
const base_idx = arr_base_len * field_idx;
const parent_info = zcu.intern_pool.indexToKey(parent_ptr.toIntern()).ptr;
switch (parent_info.base_addr) {
.arr_elem => |arr_elem| {
if (Value.fromInterned(arr_elem.base).typeOf(zcu).childType(zcu).toIntern() == arr_base_ty.toIntern()) {
// We already have a pointer to an element of an array of this type.
// Just modify the index.
return Value.fromInterned(try zcu.intern(.{ .ptr = ptr: {
var new = parent_info;
new.base_addr.arr_elem.index += base_idx;
new.ty = result_ty.toIntern();
break :ptr new;
} }));
}
},
else => {},
}
const base_ptr = try parent_ptr.canonicalizeBasePtr(.Many, arr_base_ty, zcu);
return Value.fromInterned(try zcu.intern(.{ .ptr = .{
.ty = result_ty.toIntern(),
.base_addr = .{ .arr_elem = .{
.base = base_ptr.toIntern(),
.index = base_idx,
} },
.byte_offset = 0,
} }));
},
}
}
 
/// Coerces `base_ptr` to the canonical base pointer form: a pointer of size `want_size` to
/// `want_child` with natural alignment, no sentinel, and no `const`/`volatile`/`allowzero`
/// qualifiers. Returns `base_ptr` unchanged if it already has that form.
fn canonicalizeBasePtr(base_ptr: Value, want_size: std.builtin.Type.Pointer.Size, want_child: Type, zcu: *Zcu) !Value {
const ptr_ty = base_ptr.typeOf(zcu);
const ptr_info = ptr_ty.ptrInfo(zcu);
 
if (ptr_info.flags.size == want_size and
ptr_info.child == want_child.toIntern() and
!ptr_info.flags.is_const and
!ptr_info.flags.is_volatile and
!ptr_info.flags.is_allowzero and
ptr_info.sentinel == .none and
ptr_info.flags.alignment == .none)
{
// Already canonical!
return base_ptr;
}
 
const new_ty = try zcu.ptrType(.{
.child = want_child.toIntern(),
.sentinel = .none,
.flags = .{
.size = want_size,
.alignment = .none,
.is_const = false,
.is_volatile = false,
.is_allowzero = false,
.address_space = ptr_info.flags.address_space,
},
});
return zcu.getCoerced(base_ptr, new_ty);
}
 
/// Returns a copy of `ptr_val` with `byte_off` added to its byte offset and its type replaced
/// by `new_ty`. An undefined pointer is returned unchanged.
pub fn getOffsetPtr(ptr_val: Value, byte_off: u64, new_ty: Type, zcu: *Zcu) !Value {
if (ptr_val.isUndef(zcu)) return ptr_val;
var ptr = zcu.intern_pool.indexToKey(ptr_val.toIntern()).ptr;
ptr.ty = new_ty.toIntern();
ptr.byte_offset += byte_off;
return Value.fromInterned(try zcu.intern(.{ .ptr = ptr }));
}
 
pub const PointerDeriveStep = union(enum) {
int: struct {
addr: u64,
ptr_ty: Type,
},
decl_ptr: InternPool.DeclIndex,
anon_decl_ptr: InternPool.Key.Ptr.BaseAddr.AnonDecl,
comptime_alloc_ptr: struct {
val: Value,
ptr_ty: Type,
},
comptime_field_ptr: Value,
eu_payload_ptr: struct {
parent: *PointerDeriveStep,
/// This type will never be cast: it is provided for convenience.
result_ptr_ty: Type,
},
opt_payload_ptr: struct {
parent: *PointerDeriveStep,
/// This type will never be cast: it is provided for convenience.
result_ptr_ty: Type,
},
field_ptr: struct {
parent: *PointerDeriveStep,
field_idx: u32,
/// This type will never be cast: it is provided for convenience.
result_ptr_ty: Type,
},
elem_ptr: struct {
parent: *PointerDeriveStep,
elem_idx: u64,
/// This type will never be cast: it is provided for convenience.
result_ptr_ty: Type,
},
offset_and_cast: struct {
parent: *PointerDeriveStep,
byte_offset: u64,
new_ptr_ty: Type,
},
 
pub fn ptrType(step: PointerDeriveStep, zcu: *Zcu) !Type {
return switch (step) {
.int => |int| int.ptr_ty,
.decl_ptr => |decl| try zcu.declPtr(decl).declPtrType(zcu),
.anon_decl_ptr => |ad| Type.fromInterned(ad.orig_ty),
.comptime_alloc_ptr => |info| info.ptr_ty,
.comptime_field_ptr => |val| try zcu.singleConstPtrType(val.typeOf(zcu)),
.offset_and_cast => |oac| oac.new_ptr_ty,
inline .eu_payload_ptr, .opt_payload_ptr, .field_ptr, .elem_ptr => |x| x.result_ptr_ty,
};
}
};
 
pub fn pointerDerivation(ptr_val: Value, arena: Allocator, zcu: *Zcu) Allocator.Error!PointerDeriveStep {
return ptr_val.pointerDerivationAdvanced(arena, zcu, null) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.AnalysisFail,
error.NeededSourceLocation,
error.GenericPoison,
error.ComptimeReturn,
error.ComptimeBreak,
=> unreachable,
};
}
 
/// Given a pointer value, get the sequence of steps to derive it, ideally by taking
/// only field and element pointers with no casts. This can be used by codegen backends
/// which prefer field/elem accesses when lowering constant pointer values.
/// It is also used by the Value printing logic for pointers.
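/// For example, a pointer like `&some_decl.field[3]` would ideally derive as a `decl_ptr`
/// followed by a `field_ptr` and an `elem_ptr`, with `offset_and_cast` used only as a fallback.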
pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, opt_sema: ?*Sema) !PointerDeriveStep {
const ptr = zcu.intern_pool.indexToKey(ptr_val.toIntern()).ptr;
const base_derive: PointerDeriveStep = switch (ptr.base_addr) {
.int => return .{ .int = .{
.addr = ptr.byte_offset,
.ptr_ty = Type.fromInterned(ptr.ty),
} },
.decl => |decl| .{ .decl_ptr = decl },
.anon_decl => |ad| base: {
// A slight tweak: `orig_ty` here is sometimes not `const`, but it ought to be.
// TODO: fix this in the sites interning anon decls!
const const_ty = try zcu.ptrType(info: {
var info = Type.fromInterned(ad.orig_ty).ptrInfo(zcu);
info.flags.is_const = true;
break :info info;
});
break :base .{ .anon_decl_ptr = .{
.val = ad.val,
.orig_ty = const_ty.toIntern(),
} };
},
.comptime_alloc => |idx| base: {
const alloc = opt_sema.?.getComptimeAlloc(idx);
const val = try alloc.val.intern(zcu, opt_sema.?.arena);
const ty = val.typeOf(zcu);
break :base .{ .comptime_alloc_ptr = .{
.val = val,
.ptr_ty = try zcu.ptrType(.{
.child = ty.toIntern(),
.flags = .{
.alignment = alloc.alignment,
},
}),
} };
},
.comptime_field => |val| .{ .comptime_field_ptr = Value.fromInterned(val) },
.eu_payload => |eu_ptr| base: {
const base_ptr = Value.fromInterned(eu_ptr);
const base_ptr_ty = base_ptr.typeOf(zcu);
const parent_step = try arena.create(PointerDeriveStep);
parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(eu_ptr), arena, zcu, opt_sema);
break :base .{ .eu_payload_ptr = .{
.parent = parent_step,
.result_ptr_ty = try zcu.adjustPtrTypeChild(base_ptr_ty, base_ptr_ty.childType(zcu).errorUnionPayload(zcu)),
} };
},
.opt_payload => |opt_ptr| base: {
const base_ptr = Value.fromInterned(opt_ptr);
const base_ptr_ty = base_ptr.typeOf(zcu);
const parent_step = try arena.create(PointerDeriveStep);
parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(opt_ptr), arena, zcu, opt_sema);
break :base .{ .opt_payload_ptr = .{
.parent = parent_step,
.result_ptr_ty = try zcu.adjustPtrTypeChild(base_ptr_ty, base_ptr_ty.childType(zcu).optionalChild(zcu)),
} };
},
.field => |field| base: {
const base_ptr = Value.fromInterned(field.base);
const base_ptr_ty = base_ptr.typeOf(zcu);
const agg_ty = base_ptr_ty.childType(zcu);
const field_ty, const field_align = switch (agg_ty.zigTypeTag(zcu)) {
.Struct => .{ agg_ty.structFieldType(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, opt_sema) },
.Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, opt_sema) },
.Pointer => .{ switch (field.index) {
Value.slice_ptr_index => agg_ty.slicePtrFieldType(zcu),
Value.slice_len_index => Type.usize,
else => unreachable,
}, Type.usize.abiAlignment(zcu) },
else => unreachable,
};
const base_align = base_ptr_ty.ptrAlignment(zcu);
const result_align = field_align.minStrict(base_align);
const result_ty = try zcu.ptrType(.{
.child = field_ty.toIntern(),
.flags = flags: {
var flags = base_ptr_ty.ptrInfo(zcu).flags;
if (result_align == field_ty.abiAlignment(zcu)) {
flags.alignment = .none;
} else {
flags.alignment = result_align;
}
break :flags flags;
},
});
const parent_step = try arena.create(PointerDeriveStep);
parent_step.* = try pointerDerivationAdvanced(base_ptr, arena, zcu, opt_sema);
break :base .{ .field_ptr = .{
.parent = parent_step,
.field_idx = @intCast(field.index),
.result_ptr_ty = result_ty,
} };
},
.arr_elem => |arr_elem| base: {
const parent_step = try arena.create(PointerDeriveStep);
parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(arr_elem.base), arena, zcu, opt_sema);
const parent_ptr_info = (try parent_step.ptrType(zcu)).ptrInfo(zcu);
const result_ptr_ty = try zcu.ptrType(.{
.child = parent_ptr_info.child,
.flags = flags: {
var flags = parent_ptr_info.flags;
flags.size = .One;
break :flags flags;
},
});
break :base .{ .elem_ptr = .{
.parent = parent_step,
.elem_idx = arr_elem.index,
.result_ptr_ty = result_ptr_ty,
} };
},
};
 
if (ptr.byte_offset == 0 and ptr.ty == (try base_derive.ptrType(zcu)).toIntern()) {
return base_derive;
}
 
const need_child = Type.fromInterned(ptr.ty).childType(zcu);
if (need_child.comptimeOnly(zcu)) {
// No refinement can happen - this pointer is presumably invalid.
// Just offset it.
const parent = try arena.create(PointerDeriveStep);
parent.* = base_derive;
return .{ .offset_and_cast = .{
.parent = parent,
.byte_offset = ptr.byte_offset,
.new_ptr_ty = Type.fromInterned(ptr.ty),
} };
}
const need_bytes = need_child.abiSize(zcu);
 
var cur_derive = base_derive;
var cur_offset = ptr.byte_offset;
 
// Refine through fields and array elements as much as possible.
 
if (need_bytes > 0) while (true) {
const cur_ty = (try cur_derive.ptrType(zcu)).childType(zcu);
if (cur_ty.toIntern() == need_child.toIntern() and cur_offset == 0) {
break;
}
switch (cur_ty.zigTypeTag(zcu)) {
.NoReturn,
.Type,
.ComptimeInt,
.ComptimeFloat,
.Null,
.Undefined,
.EnumLiteral,
.Opaque,
.Fn,
.ErrorUnion,
.Int,
.Float,
.Bool,
.Void,
.Pointer,
.ErrorSet,
.AnyFrame,
.Frame,
.Enum,
.Vector,
.Optional,
.Union,
=> break,
 
.Array => {
const elem_ty = cur_ty.childType(zcu);
const elem_size = elem_ty.abiSize(zcu);
const start_idx = cur_offset / elem_size;
const end_idx = (cur_offset + need_bytes + elem_size - 1) / elem_size;
if (end_idx == start_idx + 1) {
const parent = try arena.create(PointerDeriveStep);
parent.* = cur_derive;
cur_derive = .{ .elem_ptr = .{
.parent = parent,
.elem_idx = start_idx,
.result_ptr_ty = try zcu.adjustPtrTypeChild(try parent.ptrType(zcu), elem_ty),
} };
cur_offset -= start_idx * elem_size;
} else {
// Go into the first element if needed, but don't go any deeper.
if (start_idx > 0) {
const parent = try arena.create(PointerDeriveStep);
parent.* = cur_derive;
cur_derive = .{ .elem_ptr = .{
.parent = parent,
.elem_idx = start_idx,
.result_ptr_ty = try zcu.adjustPtrTypeChild(try parent.ptrType(zcu), elem_ty),
} };
cur_offset -= start_idx * elem_size;
}
break;
}
},
.Struct => switch (cur_ty.containerLayout(zcu)) {
.auto, .@"packed" => break,
.@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
const field_ty = cur_ty.structFieldType(field_idx, zcu);
const start_off = cur_ty.structFieldOffset(field_idx, zcu);
const end_off = start_off + field_ty.abiSize(zcu);
if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
const old_ptr_ty = try cur_derive.ptrType(zcu);
const parent_align = old_ptr_ty.ptrAlignment(zcu);
const field_align = InternPool.Alignment.fromLog2Units(@min(parent_align.toLog2Units(), @ctz(start_off)));
const parent = try arena.create(PointerDeriveStep);
parent.* = cur_derive;
const new_ptr_ty = try zcu.ptrType(.{
.child = field_ty.toIntern(),
.flags = flags: {
var flags = old_ptr_ty.ptrInfo(zcu).flags;
if (field_align == field_ty.abiAlignment(zcu)) {
flags.alignment = .none;
} else {
flags.alignment = field_align;
}
break :flags flags;
},
});
cur_derive = .{ .field_ptr = .{
.parent = parent,
.field_idx = @intCast(field_idx),
.result_ptr_ty = new_ptr_ty,
} };
cur_offset -= start_off;
break;
}
} else break, // pointer spans multiple fields
},
}
};
 
if (cur_offset == 0 and (try cur_derive.ptrType(zcu)).toIntern() == ptr.ty) {
return cur_derive;
}
 
const parent = try arena.create(PointerDeriveStep);
parent.* = cur_derive;
return .{ .offset_and_cast = .{
.parent = parent,
.byte_offset = cur_offset,
.new_ptr_ty = Type.fromInterned(ptr.ty),
} };
}
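 
Editor's note: a hypothetical example, not part of the diff, of the refinement performed above; `S`, `s`, and `p` are illustrative names.
 
const S = extern struct { a: u32, b: [4]u16 };
var s: S = .{ .a = 1, .b = .{ 2, 3, 4, 5 } };
const p = &s.b[2];
// `p` is interned as roughly `base_addr = decl(s), byte_offset = 8`. The loop above can
// express that offset exactly as accesses, so the derivation becomes
//   decl_ptr(s) -> field_ptr(field index 1) -> elem_ptr(element index 2)
// (offset 8 = field `b` at byte 4 + element 2 at byte 4); at most a cast-only
// `offset_and_cast` remains if the derived pointer type differs from `p`'s exact type.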
 
src/arch/wasm/CodeGen.zig added: 4888, removed: 2579, total 2309
@@ -2206,7 +2206,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
);
break :blk extern_func.decl;
} else switch (mod.intern_pool.indexToKey(func_val.ip_index)) {
.ptr => |ptr| switch (ptr.addr) {
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
.decl => |decl| {
_ = try func.bin_file.getOrCreateAtomForDecl(decl);
break :blk decl;
@@ -3058,72 +3058,59 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
return WValue{ .stack = {} };
}
 
fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue {
const mod = func.bin_file.base.comp.module.?;
const ptr = mod.intern_pool.indexToKey(ptr_val.ip_index).ptr;
switch (ptr.addr) {
.decl => |decl_index| {
return func.lowerParentPtrDecl(ptr_val, decl_index, offset);
},
.anon_decl => |ad| return func.lowerAnonDeclRef(ad, offset),
.eu_payload => |tag| return func.fail("TODO: Implement lowerParentPtr for {}", .{tag}),
.int => |base| return func.lowerConstant(Value.fromInterned(base), Type.usize),
.opt_payload => |base_ptr| return func.lowerParentPtr(Value.fromInterned(base_ptr), offset),
.comptime_field, .comptime_alloc => unreachable,
.elem => |elem| {
const index = elem.index;
const elem_type = Type.fromInterned(mod.intern_pool.typeOf(elem.base)).elemType2(mod);
const elem_offset = index * elem_type.abiSize(mod);
return func.lowerParentPtr(Value.fromInterned(elem.base), @as(u32, @intCast(elem_offset + offset)));
},
fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerError!WValue {
const zcu = func.bin_file.base.comp.module.?;
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
const offset: u64 = prev_offset + ptr.byte_offset;
return switch (ptr.base_addr) {
.decl => |decl| return func.lowerDeclRefValue(decl, @intCast(offset)),
.anon_decl => |ad| return func.lowerAnonDeclRef(ad, @intCast(offset)),
.int => return func.lowerConstant(try zcu.intValue(Type.usize, offset), Type.usize),
.eu_payload => return func.fail("Wasm TODO: lower error union payload pointer", .{}),
.opt_payload => |opt_ptr| return func.lowerPtr(opt_ptr, offset),
.field => |field| {
const parent_ptr_ty = Type.fromInterned(mod.intern_pool.typeOf(field.base));
const parent_ty = parent_ptr_ty.childType(mod);
const field_index: u32 = @intCast(field.index);
 
const field_offset = switch (parent_ty.zigTypeTag(mod)) {
.Struct => blk: {
if (mod.typeToPackedStruct(parent_ty)) |struct_type| {
if (Type.fromInterned(ptr.ty).ptrInfo(mod).packed_offset.host_size == 0)
break :blk @divExact(mod.structPackedFieldBitOffset(struct_type, field_index) + parent_ptr_ty.ptrInfo(mod).packed_offset.bit_offset, 8)
else
break :blk 0;
}
break :blk parent_ty.structFieldOffset(field_index, mod);
},
.Union => switch (parent_ty.containerLayout(mod)) {
.@"packed" => 0,
else => blk: {
const layout: Module.UnionLayout = parent_ty.unionGetLayout(mod);
if (layout.payload_size == 0) break :blk 0;
if (layout.payload_align.compare(.gt, layout.tag_align)) break :blk 0;
 
// tag is stored first so calculate offset from where payload starts
break :blk layout.tag_align.forward(layout.tag_size);
},
},
.Pointer => switch (parent_ty.ptrSize(mod)) {
.Slice => switch (field.index) {
0 => 0,
1 => func.ptrSize(),
const base_ptr = Value.fromInterned(field.base);
const base_ty = base_ptr.typeOf(zcu).childType(zcu);
const field_off: u64 = switch (base_ty.zigTypeTag(zcu)) {
.Pointer => off: {
assert(base_ty.isSlice(zcu));
break :off switch (field.index) {
Value.slice_ptr_index => 0,
Value.slice_len_index => @divExact(zcu.getTarget().ptrBitWidth(), 8),
else => unreachable,
};
},
.Struct => switch (base_ty.containerLayout(zcu)) {
.auto => base_ty.structFieldOffset(@intCast(field.index), zcu),
.@"extern", .@"packed" => unreachable,
},
.Union => switch (base_ty.containerLayout(zcu)) {
.auto => off: {
// Keep in sync with the `un` case of `generateSymbol`.
const layout = base_ty.unionGetLayout(zcu);
if (layout.payload_size == 0) break :off 0;
if (layout.tag_size == 0) break :off 0;
if (layout.tag_align.compare(.gte, layout.payload_align)) {
// Tag first.
break :off layout.tag_size;
} else {
// Payload first.
break :off 0;
}
},
else => unreachable,
.@"extern", .@"packed" => unreachable,
},
else => unreachable,
};
return func.lowerParentPtr(Value.fromInterned(field.base), @as(u32, @intCast(offset + field_offset)));
return func.lowerPtr(field.base, offset + field_off);
},
}
}
 
fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: InternPool.DeclIndex, offset: u32) InnerError!WValue {
return func.lowerDeclRefValue(ptr_val, decl_index, offset);
.arr_elem, .comptime_field, .comptime_alloc => unreachable,
};
}
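 
Editor's note: a hypothetical walk-through, not part of the diff, of the offset accumulation in `lowerPtr` above; `Node` and `node` are illustrative names.
 
const Node = struct { tag: u8, next: ?*Node };
var node: Node = .{ .tag = 0, .next = null };
const next_ptr = &node.next;
// `Node` has automatic layout, so field offsets are not part of the interned value:
// `next_ptr` is roughly `base_addr = field(base = &node, index = 1), byte_offset = 0`.
// Lowering it enters `lowerPtr` with prev_offset = 0, takes the `.field` case, adds the
// target's offset of `next` within `Node` via `structFieldOffset`, and recurses on the
// base pointer, which resolves through the `.decl` case with that accumulated offset.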
 
fn lowerAnonDeclRef(
func: *CodeGen,
anon_decl: InternPool.Key.Ptr.Addr.AnonDecl,
anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
offset: u32,
) InnerError!WValue {
const mod = func.bin_file.base.comp.module.?;
@@ -3153,7 +3140,7 @@ fn lowerAnonDeclRef(
} else return WValue{ .memory_offset = .{ .pointer = target_sym_index, .offset = offset } };
}
 
fn lowerDeclRefValue(func: *CodeGen, val: Value, decl_index: InternPool.DeclIndex, offset: u32) InnerError!WValue {
fn lowerDeclRefValue(func: *CodeGen, decl_index: InternPool.DeclIndex, offset: u32) InnerError!WValue {
const mod = func.bin_file.base.comp.module.?;
 
const decl = mod.declPtr(decl_index);
@@ -3161,11 +3148,11 @@ fn lowerDeclRefValue(func: *CodeGen, val: Value, decl_index: InternPool.DeclInde
// want to lower the actual decl, rather than the alias itself.
if (decl.val.getFunction(mod)) |func_val| {
if (func_val.owner_decl != decl_index) {
return func.lowerDeclRefValue(val, func_val.owner_decl, offset);
return func.lowerDeclRefValue(func_val.owner_decl, offset);
}
} else if (decl.val.getExternFunc(mod)) |func_val| {
if (func_val.decl != decl_index) {
return func.lowerDeclRefValue(val, func_val.decl, offset);
return func.lowerDeclRefValue(func_val.decl, offset);
}
}
const decl_ty = decl.typeOf(mod);
@@ -3309,23 +3296,16 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
},
.slice => |slice| {
var ptr = ip.indexToKey(slice.ptr).ptr;
const owner_decl = while (true) switch (ptr.addr) {
const owner_decl = while (true) switch (ptr.base_addr) {
.decl => |decl| break decl,
.int, .anon_decl => return func.fail("Wasm TODO: lower slice where ptr is not owned by decl", .{}),
.opt_payload, .eu_payload => |base| ptr = ip.indexToKey(base).ptr,
.elem, .field => |base_index| ptr = ip.indexToKey(base_index.base).ptr,
.comptime_field, .comptime_alloc => unreachable,
.field => |base_index| ptr = ip.indexToKey(base_index.base).ptr,
.arr_elem, .comptime_field, .comptime_alloc => unreachable,
};
return .{ .memory = try func.bin_file.lowerUnnamedConst(val, owner_decl) };
},
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl| return func.lowerDeclRefValue(val, decl, 0),
.int => |int| return func.lowerConstant(Value.fromInterned(int), Type.fromInterned(ip.typeOf(int))),
.opt_payload, .elem, .field => return func.lowerParentPtr(val, 0),
.anon_decl => |ad| return func.lowerAnonDeclRef(ad, 0),
.comptime_field, .comptime_alloc => unreachable,
else => return func.fail("Wasm TODO: lowerConstant for other const addr tag {}", .{ptr.addr}),
},
.ptr => return func.lowerPtr(val.toIntern(), 0),
.opt => if (ty.optionalReprIsPayload(mod)) {
const pl_ty = ty.optionalChild(mod);
if (val.optionalValue(mod)) |payload| {
@@ -3435,7 +3415,10 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
else => return switch (mod.intern_pool.indexToKey(val.ip_index)) {
.enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, mod),
.int => |int| intStorageAsI32(int.storage, mod),
.ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int, mod),
.ptr => |ptr| {
assert(ptr.base_addr == .int);
return @intCast(ptr.byte_offset);
},
.err => |err| @as(i32, @bitCast(@as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(err.name).?)))),
else => unreachable,
},
 
src/arch/x86_64/CodeGen.zig added: 4888, removed: 2579, total 2309
@@ -12249,10 +12249,10 @@ fn genCall(self: *Self, info: union(enum) {
const func_key = mod.intern_pool.indexToKey(func_value.ip_index);
switch (switch (func_key) {
else => func_key,
.ptr => |ptr| switch (ptr.addr) {
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
.decl => |decl| mod.intern_pool.indexToKey(mod.declPtr(decl).val.toIntern()),
else => func_key,
},
} else func_key,
}) {
.func => |func| {
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
@@ -17877,8 +17877,8 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
 
break :result null;
}) orelse return self.fail("TODO implement airShuffle from {} and {} to {} with {}", .{
lhs_ty.fmt(mod), rhs_ty.fmt(mod), dst_ty.fmt(mod),
Value.fromInterned(extra.mask).fmtValue(mod),
lhs_ty.fmt(mod), rhs_ty.fmt(mod), dst_ty.fmt(mod),
Value.fromInterned(extra.mask).fmtValue(mod, null),
});
return self.finishAir(inst, result, .{ extra.a, extra.b, .none });
}
 
src/codegen.zig added: 4888, removed: 2579, total 2309
@@ -16,7 +16,8 @@ const Compilation = @import("Compilation.zig");
const ErrorMsg = Module.ErrorMsg;
const InternPool = @import("InternPool.zig");
const Liveness = @import("Liveness.zig");
const Module = @import("Module.zig");
const Zcu = @import("Module.zig");
const Module = Zcu;
const Target = std.Target;
const Type = @import("type.zig").Type;
const Value = @import("Value.zig");
@@ -185,7 +186,7 @@ pub fn generateSymbol(
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
 
log.debug("generateSymbol: val = {}", .{val.fmtValue(mod)});
log.debug("generateSymbol: val = {}", .{val.fmtValue(mod, null)});
 
if (val.isUndefDeep(mod)) {
const abi_size = math.cast(usize, ty.abiSize(mod)) orelse return error.Overflow;
@@ -314,7 +315,7 @@ pub fn generateSymbol(
},
.f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(16)),
},
.ptr => switch (try lowerParentPtr(bin_file, src_loc, val.toIntern(), code, debug_output, reloc_info)) {
.ptr => switch (try lowerPtr(bin_file, src_loc, val.toIntern(), code, debug_output, reloc_info, 0)) {
.ok => {},
.fail => |em| return .{ .fail = em },
},
@@ -614,111 +615,79 @@ pub fn generateSymbol(
return .ok;
}
 
fn lowerParentPtr(
fn lowerPtr(
bin_file: *link.File,
src_loc: Module.SrcLoc,
parent_ptr: InternPool.Index,
ptr_val: InternPool.Index,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
reloc_info: RelocInfo,
prev_offset: u64,
) CodeGenError!Result {
const mod = bin_file.comp.module.?;
const ip = &mod.intern_pool;
const ptr = ip.indexToKey(parent_ptr).ptr;
return switch (ptr.addr) {
.decl => |decl| try lowerDeclRef(bin_file, src_loc, decl, code, debug_output, reloc_info),
.anon_decl => |ad| try lowerAnonDeclRef(bin_file, src_loc, ad, code, debug_output, reloc_info),
.int => |int| try generateSymbol(bin_file, src_loc, Value.fromInterned(int), code, debug_output, reloc_info),
.eu_payload => |eu_payload| try lowerParentPtr(
const zcu = bin_file.comp.module.?;
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
const offset: u64 = prev_offset + ptr.byte_offset;
return switch (ptr.base_addr) {
.decl => |decl| try lowerDeclRef(bin_file, src_loc, decl, code, debug_output, reloc_info, offset),
.anon_decl => |ad| try lowerAnonDeclRef(bin_file, src_loc, ad, code, debug_output, reloc_info, offset),
.int => try generateSymbol(bin_file, src_loc, try zcu.intValue(Type.usize, offset), code, debug_output, reloc_info),
.eu_payload => |eu_ptr| try lowerPtr(
bin_file,
src_loc,
eu_payload,
code,
debug_output,
reloc_info.offset(@intCast(errUnionPayloadOffset(
Type.fromInterned(ip.typeOf(eu_payload)),
mod,
))),
),
.opt_payload => |opt_payload| try lowerParentPtr(
bin_file,
src_loc,
opt_payload,
eu_ptr,
code,
debug_output,
reloc_info,
offset + errUnionPayloadOffset(
Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu).errorUnionPayload(zcu),
zcu,
),
),
.elem => |elem| try lowerParentPtr(
.opt_payload => |opt_ptr| try lowerPtr(
bin_file,
src_loc,
elem.base,
opt_ptr,
code,
debug_output,
reloc_info.offset(@intCast(elem.index *
Type.fromInterned(ip.typeOf(elem.base)).elemType2(mod).abiSize(mod))),
reloc_info,
offset,
),
.field => |field| {
const base_ptr_ty = ip.typeOf(field.base);
const base_ty = ip.indexToKey(base_ptr_ty).ptr_type.child;
return lowerParentPtr(
bin_file,
src_loc,
field.base,
code,
debug_output,
reloc_info.offset(switch (ip.indexToKey(base_ty)) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.One, .Many, .C => unreachable,
.Slice => switch (field.index) {
0 => 0,
1 => @divExact(mod.getTarget().ptrBitWidth(), 8),
else => unreachable,
},
},
.struct_type,
.anon_struct_type,
.union_type,
=> switch (Type.fromInterned(base_ty).containerLayout(mod)) {
.auto, .@"extern" => @intCast(Type.fromInterned(base_ty).structFieldOffset(
@intCast(field.index),
mod,
)),
.@"packed" => if (mod.typeToStruct(Type.fromInterned(base_ty))) |struct_obj|
if (Type.fromInterned(ptr.ty).ptrInfo(mod).packed_offset.host_size == 0)
@divExact(Type.fromInterned(base_ptr_ty).ptrInfo(mod)
.packed_offset.bit_offset + mod.structPackedFieldBitOffset(
struct_obj,
@intCast(field.index),
), 8)
else
0
else
0,
},
else => unreachable,
}),
);
const base_ptr = Value.fromInterned(field.base);
const base_ty = base_ptr.typeOf(zcu).childType(zcu);
const field_off: u64 = switch (base_ty.zigTypeTag(zcu)) {
.Pointer => off: {
assert(base_ty.isSlice(zcu));
break :off switch (field.index) {
Value.slice_ptr_index => 0,
Value.slice_len_index => @divExact(zcu.getTarget().ptrBitWidth(), 8),
else => unreachable,
};
},
.Struct, .Union => switch (base_ty.containerLayout(zcu)) {
.auto => base_ty.structFieldOffset(@intCast(field.index), zcu),
.@"extern", .@"packed" => unreachable,
},
else => unreachable,
};
return lowerPtr(bin_file, src_loc, field.base, code, debug_output, reloc_info, offset + field_off);
},
.comptime_field, .comptime_alloc => unreachable,
.arr_elem, .comptime_field, .comptime_alloc => unreachable,
};
}
 
const RelocInfo = struct {
parent_atom_index: u32,
addend: ?u32 = null,
 
fn offset(ri: RelocInfo, addend: u32) RelocInfo {
return .{ .parent_atom_index = ri.parent_atom_index, .addend = (ri.addend orelse 0) + addend };
}
};
 
fn lowerAnonDeclRef(
lf: *link.File,
src_loc: Module.SrcLoc,
anon_decl: InternPool.Key.Ptr.Addr.AnonDecl,
anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
reloc_info: RelocInfo,
offset: u64,
) CodeGenError!Result {
_ = debug_output;
const zcu = lf.comp.module.?;
@@ -745,7 +714,7 @@ fn lowerAnonDeclRef(
const vaddr = try lf.getAnonDeclVAddr(decl_val, .{
.parent_atom_index = reloc_info.parent_atom_index,
.offset = code.items.len,
.addend = reloc_info.addend orelse 0,
.addend = @intCast(offset),
});
const endian = target.cpu.arch.endian();
switch (ptr_width_bytes) {
@@ -765,6 +734,7 @@ fn lowerDeclRef(
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
reloc_info: RelocInfo,
offset: u64,
) CodeGenError!Result {
_ = src_loc;
_ = debug_output;
@@ -783,7 +753,7 @@ fn lowerDeclRef(
const vaddr = try lf.getDeclVAddr(decl_index, .{
.parent_atom_index = reloc_info.parent_atom_index,
.offset = code.items.len,
.addend = reloc_info.addend orelse 0,
.addend = @intCast(offset),
});
const endian = target.cpu.arch.endian();
switch (ptr_width) {
@@ -861,7 +831,7 @@ fn genDeclRef(
const zcu = lf.comp.module.?;
const ip = &zcu.intern_pool;
const ty = val.typeOf(zcu);
log.debug("genDeclRef: val = {}", .{val.fmtValue(zcu)});
log.debug("genDeclRef: val = {}", .{val.fmtValue(zcu, null)});
 
const ptr_decl = zcu.declPtr(ptr_decl_index);
const namespace = zcu.namespacePtr(ptr_decl.src_namespace);
@@ -966,7 +936,7 @@ fn genUnnamedConst(
) CodeGenError!GenResult {
const zcu = lf.comp.module.?;
const gpa = lf.comp.gpa;
log.debug("genUnnamedConst: val = {}", .{val.fmtValue(zcu)});
log.debug("genUnnamedConst: val = {}", .{val.fmtValue(zcu, null)});
 
const local_sym_index = lf.lowerUnnamedConst(val, owner_decl_index) catch |err| {
return GenResult.fail(gpa, src_loc, "lowering unnamed constant failed: {s}", .{@errorName(err)});
@@ -1007,7 +977,7 @@ pub fn genTypedValue(
const ip = &zcu.intern_pool;
const ty = val.typeOf(zcu);
 
log.debug("genTypedValue: val = {}", .{val.fmtValue(zcu)});
log.debug("genTypedValue: val = {}", .{val.fmtValue(zcu, null)});
 
if (val.isUndef(zcu))
return GenResult.mcv(.undef);
@@ -1018,7 +988,7 @@ pub fn genTypedValue(
const ptr_bits = target.ptrBitWidth();
 
if (!ty.isSlice(zcu)) switch (ip.indexToKey(val.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
.decl => |decl| return genDeclRef(lf, src_loc, val, decl),
else => {},
},
 
src/codegen/c.zig added: 4888, removed: 2579, total 2309
@@ -646,8 +646,7 @@ pub const DeclGen = struct {
fn renderAnonDeclValue(
dg: *DeclGen,
writer: anytype,
ptr_val: Value,
anon_decl: InternPool.Key.Ptr.Addr.AnonDecl,
anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
const zcu = dg.zcu;
@@ -657,16 +656,16 @@ pub const DeclGen = struct {
const decl_ty = decl_val.typeOf(zcu);
 
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
const ptr_ty = ptr_val.typeOf(zcu);
const ptr_ty = Type.fromInterned(anon_decl.orig_ty);
if (ptr_ty.isPtrAtRuntime(zcu) and !decl_ty.isFnOrHasRuntimeBits(zcu)) {
return dg.writeCValue(writer, .{ .undef = ptr_ty });
}
 
// Chase function values in order to be able to reference the original function.
if (decl_val.getFunction(zcu)) |func|
return dg.renderDeclValue(writer, ptr_val, func.owner_decl, location);
return dg.renderDeclValue(writer, func.owner_decl, location);
if (decl_val.getExternFunc(zcu)) |extern_func|
return dg.renderDeclValue(writer, ptr_val, extern_func.decl, location);
return dg.renderDeclValue(writer, extern_func.decl, location);
 
assert(decl_val.getVariable(zcu) == null);
 
@@ -712,7 +711,6 @@ pub const DeclGen = struct {
fn renderDeclValue(
dg: *DeclGen,
writer: anytype,
val: Value,
decl_index: InternPool.DeclIndex,
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
@@ -722,17 +720,17 @@ pub const DeclGen = struct {
assert(decl.has_tv);
 
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
const ty = val.typeOf(zcu);
const decl_ty = decl.typeOf(zcu);
if (ty.isPtrAtRuntime(zcu) and !decl_ty.isFnOrHasRuntimeBits(zcu)) {
return dg.writeCValue(writer, .{ .undef = ty });
const ptr_ty = try decl.declPtrType(zcu);
if (!decl_ty.isFnOrHasRuntimeBits(zcu)) {
return dg.writeCValue(writer, .{ .undef = ptr_ty });
}
 
// Chase function values in order to be able to reference the original function.
if (decl.val.getFunction(zcu)) |func| if (func.owner_decl != decl_index)
return dg.renderDeclValue(writer, val, func.owner_decl, location);
return dg.renderDeclValue(writer, func.owner_decl, location);
if (decl.val.getExternFunc(zcu)) |extern_func| if (extern_func.decl != decl_index)
return dg.renderDeclValue(writer, val, extern_func.decl, location);
return dg.renderDeclValue(writer, extern_func.decl, location);
 
if (decl.val.getVariable(zcu)) |variable| try dg.renderFwdDecl(decl_index, variable, .tentative);
 
@@ -740,7 +738,7 @@ pub const DeclGen = struct {
// them). The analysis until now should ensure that the C function
// pointers are compatible. If they are not, then there is a bug
// somewhere and we should let the C compiler tell us about it.
const ctype = try dg.ctypeFromType(ty, .complete);
const ctype = try dg.ctypeFromType(ptr_ty, .complete);
const elem_ctype = ctype.info(ctype_pool).pointer.elem_ctype;
const decl_ctype = try dg.ctypeFromType(decl_ty, .complete);
const need_cast = !elem_ctype.eql(decl_ctype) and
@@ -755,125 +753,108 @@ pub const DeclGen = struct {
if (need_cast) try writer.writeByte(')');
}
 
/// Renders a "parent" pointer by recursing to the root decl/variable
/// that its contents are defined with respect to.
fn renderParentPtr(
fn renderPointer(
dg: *DeclGen,
writer: anytype,
ptr_val: InternPool.Index,
derivation: Value.PointerDeriveStep,
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
const zcu = dg.zcu;
const ip = &zcu.intern_pool;
const ptr_ty = Type.fromInterned(ip.typeOf(ptr_val));
const ptr_ctype = try dg.ctypeFromType(ptr_ty, .complete);
const ptr_child_ctype = ptr_ctype.info(&dg.ctype_pool).pointer.elem_ctype;
const ptr = ip.indexToKey(ptr_val).ptr;
switch (ptr.addr) {
.decl => |d| try dg.renderDeclValue(writer, Value.fromInterned(ptr_val), d, location),
.anon_decl => |anon_decl| try dg.renderAnonDeclValue(writer, Value.fromInterned(ptr_val), anon_decl, location),
switch (derivation) {
.comptime_alloc_ptr, .comptime_field_ptr => unreachable,
.int => |int| {
const ptr_ctype = try dg.ctypeFromType(int.ptr_ty, .complete);
const addr_val = try zcu.intValue(Type.usize, int.addr);
try writer.writeByte('(');
try dg.renderCType(writer, ptr_ctype);
try writer.print("){x}", .{try dg.fmtIntLiteral(Value.fromInterned(int), .Other)});
try writer.print("){x}", .{try dg.fmtIntLiteral(addr_val, .Other)});
},
.eu_payload, .opt_payload => |base| {
const ptr_base_ty = Type.fromInterned(ip.typeOf(base));
const base_ty = ptr_base_ty.childType(zcu);
// Ensure complete type definition is visible before accessing fields.
_ = try dg.ctypeFromType(base_ty, .complete);
const payload_ty = switch (ptr.addr) {
.eu_payload => base_ty.errorUnionPayload(zcu),
.opt_payload => base_ty.optionalChild(zcu),
else => unreachable,
};
const payload_ctype = try dg.ctypeFromType(payload_ty, .forward);
if (!ptr_child_ctype.eql(payload_ctype)) {
try writer.writeByte('(');
try dg.renderCType(writer, ptr_ctype);
try writer.writeByte(')');
}
 
.decl_ptr => |decl| try dg.renderDeclValue(writer, decl, location),
.anon_decl_ptr => |ad| try dg.renderAnonDeclValue(writer, ad, location),
 
inline .eu_payload_ptr, .opt_payload_ptr => |info| {
try writer.writeAll("&(");
try dg.renderParentPtr(writer, base, location);
try dg.renderPointer(writer, info.parent.*, location);
try writer.writeAll(")->payload");
},
.elem => |elem| {
const ptr_base_ty = Type.fromInterned(ip.typeOf(elem.base));
const elem_ty = ptr_base_ty.elemType2(zcu);
const elem_ctype = try dg.ctypeFromType(elem_ty, .forward);
if (!ptr_child_ctype.eql(elem_ctype)) {
try writer.writeByte('(');
try dg.renderCType(writer, ptr_ctype);
try writer.writeByte(')');
}
try writer.writeAll("&(");
if (ip.indexToKey(ptr_base_ty.toIntern()).ptr_type.flags.size == .One)
try writer.writeByte('*');
try dg.renderParentPtr(writer, elem.base, location);
try writer.print(")[{d}]", .{elem.index});
},
.field => |field| {
const ptr_base_ty = Type.fromInterned(ip.typeOf(field.base));
const base_ty = ptr_base_ty.childType(zcu);
 
.field_ptr => |field| {
const parent_ptr_ty = try field.parent.ptrType(zcu);
 
// Ensure complete type definition is available before accessing fields.
_ = try dg.ctypeFromType(base_ty, .complete);
switch (fieldLocation(ptr_base_ty, ptr_ty, @as(u32, @intCast(field.index)), zcu)) {
_ = try dg.ctypeFromType(parent_ptr_ty.childType(zcu), .complete);
 
switch (fieldLocation(parent_ptr_ty, field.result_ptr_ty, field.field_idx, zcu)) {
.begin => {
const ptr_base_ctype = try dg.ctypeFromType(ptr_base_ty, .complete);
if (!ptr_ctype.eql(ptr_base_ctype)) {
try writer.writeByte('(');
try dg.renderCType(writer, ptr_ctype);
try writer.writeByte(')');
}
try dg.renderParentPtr(writer, field.base, location);
const ptr_ctype = try dg.ctypeFromType(field.result_ptr_ty, .complete);
try writer.writeByte('(');
try dg.renderCType(writer, ptr_ctype);
try writer.writeByte(')');
try dg.renderPointer(writer, field.parent.*, location);
},
.field => |name| {
const field_ty = switch (ip.indexToKey(base_ty.toIntern())) {
.anon_struct_type,
.struct_type,
.union_type,
=> base_ty.structFieldType(@as(usize, @intCast(field.index)), zcu),
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.One, .Many, .C => unreachable,
.Slice => switch (field.index) {
Value.slice_ptr_index => base_ty.slicePtrFieldType(zcu),
Value.slice_len_index => Type.usize,
else => unreachable,
},
},
else => unreachable,
};
const field_ctype = try dg.ctypeFromType(field_ty, .forward);
if (!ptr_child_ctype.eql(field_ctype)) {
try writer.writeByte('(');
try dg.renderCType(writer, ptr_ctype);
try writer.writeByte(')');
}
try writer.writeAll("&(");
try dg.renderParentPtr(writer, field.base, location);
try dg.renderPointer(writer, field.parent.*, location);
try writer.writeAll(")->");
try dg.writeCValue(writer, name);
},
.byte_offset => |byte_offset| {
const u8_ptr_ty = try zcu.adjustPtrTypeChild(ptr_ty, Type.u8);
const u8_ptr_ctype = try dg.ctypeFromType(u8_ptr_ty, .complete);
 
if (!ptr_ctype.eql(u8_ptr_ctype)) {
try writer.writeByte('(');
try dg.renderCType(writer, ptr_ctype);
try writer.writeByte(')');
}
try writer.writeAll("((");
try dg.renderCType(writer, u8_ptr_ctype);
const ptr_ctype = try dg.ctypeFromType(field.result_ptr_ty, .complete);
try writer.writeByte('(');
try dg.renderCType(writer, ptr_ctype);
try writer.writeByte(')');
try dg.renderParentPtr(writer, field.base, location);
try writer.print(" + {})", .{
try dg.fmtIntLiteral(try zcu.intValue(Type.usize, byte_offset), .Other),
});
const offset_val = try zcu.intValue(Type.usize, byte_offset);
try writer.writeAll("((char *)");
try dg.renderPointer(writer, field.parent.*, location);
try writer.print(" + {})", .{try dg.fmtIntLiteral(offset_val, .Other)});
},
}
},
.comptime_field, .comptime_alloc => unreachable,
 
.elem_ptr => |elem| if (!(try elem.parent.ptrType(zcu)).childType(zcu).hasRuntimeBits(zcu)) {
// Element type is zero-bit, so lowers to `void`. The index is irrelevant; just cast the pointer.
const ptr_ctype = try dg.ctypeFromType(elem.result_ptr_ty, .complete);
try writer.writeByte('(');
try dg.renderCType(writer, ptr_ctype);
try writer.writeByte(')');
try dg.renderPointer(writer, elem.parent.*, location);
} else {
const index_val = try zcu.intValue(Type.usize, elem.elem_idx);
// We want to do pointer arithmetic on a pointer to the element type.
// We might have a pointer-to-array. In this case, we must cast first.
const result_ctype = try dg.ctypeFromType(elem.result_ptr_ty, .complete);
const parent_ctype = try dg.ctypeFromType(try elem.parent.ptrType(zcu), .complete);
if (result_ctype.eql(parent_ctype)) {
// The pointer already has an appropriate type - just do the arithmetic.
try writer.writeByte('(');
try dg.renderPointer(writer, elem.parent.*, location);
try writer.print(" + {})", .{try dg.fmtIntLiteral(index_val, .Other)});
} else {
// We probably have an array pointer `T (*)[n]`. Cast to an element pointer,
// and *then* apply the index.
try writer.writeAll("((");
try dg.renderCType(writer, result_ctype);
try writer.writeByte(')');
try dg.renderPointer(writer, elem.parent.*, location);
try writer.print(" + {})", .{try dg.fmtIntLiteral(index_val, .Other)});
}
},
 
.offset_and_cast => |oac| {
const ptr_ctype = try dg.ctypeFromType(oac.new_ptr_ty, .complete);
try writer.writeByte('(');
try dg.renderCType(writer, ptr_ctype);
try writer.writeByte(')');
if (oac.byte_offset == 0) {
try dg.renderPointer(writer, oac.parent.*, location);
} else {
const offset_val = try zcu.intValue(Type.usize, oac.byte_offset);
try writer.writeAll("((char *)");
try dg.renderPointer(writer, oac.parent.*, location);
try writer.print(" + {})", .{try dg.fmtIntLiteral(offset_val, .Other)});
}
},
}
}
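 
Editor's note: for intuition only, not part of the diff, these are the rough shapes of the C expressions printed by the cases above; `PARENT` stands for the rendered parent pointer and `T` for the result pointer's ctype.
 
// .int                                   -> (T)0x<addr>
// .decl_ptr / .anon_decl_ptr             -> delegated to renderDeclValue / renderAnonDeclValue
// .eu_payload_ptr / .opt_payload_ptr     -> &(PARENT)->payload
// .field_ptr, at the start of the parent -> (T)PARENT
// .field_ptr, named field                -> &(PARENT)->field_name
// .field_ptr, byte offset                -> (T)((char *)PARENT + offset)
// .elem_ptr, zero-bit element type       -> (T)PARENT
// .elem_ptr                              -> (PARENT + index) or ((T)PARENT + index)
// .offset_and_cast                       -> (T)PARENT or (T)((char *)PARENT + offset)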
 
@@ -1103,20 +1084,11 @@ pub const DeclGen = struct {
}
try writer.writeByte('}');
},
.ptr => |ptr| switch (ptr.addr) {
.decl => |d| try dg.renderDeclValue(writer, val, d, location),
.anon_decl => |decl_val| try dg.renderAnonDeclValue(writer, val, decl_val, location),
.int => |int| {
try writer.writeAll("((");
try dg.renderCType(writer, ctype);
try writer.print("){x})", .{try dg.fmtIntLiteral(Value.fromInterned(int), location)});
},
.eu_payload,
.opt_payload,
.elem,
.field,
=> try dg.renderParentPtr(writer, val.toIntern(), location),
.comptime_field, .comptime_alloc => unreachable,
.ptr => {
var arena = std.heap.ArenaAllocator.init(zcu.gpa);
defer arena.deinit();
const derivation = try val.pointerDerivation(arena.allocator(), zcu);
try dg.renderPointer(writer, derivation, location);
},
.opt => |opt| switch (ctype.info(ctype_pool)) {
.basic => if (ctype.isBool()) try writer.writeAll(switch (opt.val) {
@@ -4574,10 +4546,10 @@ fn airCall(
break :fn_decl switch (zcu.intern_pool.indexToKey(callee_val.toIntern())) {
.extern_func => |extern_func| extern_func.decl,
.func => |func| func.owner_decl,
.ptr => |ptr| switch (ptr.addr) {
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
.decl => |decl| decl,
else => break :known,
},
} else break :known,
else => break :known,
};
};
@@ -5147,10 +5119,10 @@ fn asmInputNeedsLocal(f: *Function, constraint: []const u8, value: CValue) bool
'I' => !target.cpu.arch.isArmOrThumb(),
else => switch (value) {
.constant => |val| switch (f.object.dg.zcu.intern_pool.indexToKey(val.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
.decl => false,
else => true,
},
} else true,
else => true,
},
else => false,
 
src/codegen/llvm.zig added: 4888, removed: 2579, total 2309
@@ -3262,6 +3262,7 @@ pub const Object = struct {
try o.lowerType(Type.fromInterned(vector_type.child)),
),
.opt_type => |child_ty| {
// Must stay in sync with `opt_payload` logic in `lowerPtr`.
if (!Type.fromInterned(child_ty).hasRuntimeBitsIgnoreComptime(mod)) return .i8;
 
const payload_ty = try o.lowerType(Type.fromInterned(child_ty));
@@ -3281,6 +3282,8 @@ pub const Object = struct {
},
.anyframe_type => @panic("TODO implement lowerType for AnyFrame types"),
.error_union_type => |error_union_type| {
// Must stay in sync with `codegen.errUnionPayloadOffset`.
// See logic in `lowerPtr`.
const error_type = try o.errorIntType();
if (!Type.fromInterned(error_union_type.payload_type).hasRuntimeBitsIgnoreComptime(mod))
return error_type;
@@ -3792,17 +3795,7 @@ pub const Object = struct {
128 => try o.builder.fp128Const(val.toFloat(f128, mod)),
else => unreachable,
},
.ptr => |ptr| return switch (ptr.addr) {
.decl => |decl| try o.lowerDeclRefValue(ty, decl),
.anon_decl => |anon_decl| try o.lowerAnonDeclRef(ty, anon_decl),
.int => |int| try o.lowerIntAsPtr(int),
.eu_payload,
.opt_payload,
.elem,
.field,
=> try o.lowerParentPtr(val),
.comptime_field, .comptime_alloc => unreachable,
},
.ptr => try o.lowerPtr(arg_val, 0),
.slice => |slice| return o.builder.structConst(try o.lowerType(ty), &.{
try o.lowerValue(slice.ptr),
try o.lowerValue(slice.len),
@@ -4223,20 +4216,6 @@ pub const Object = struct {
};
}
 
fn lowerIntAsPtr(o: *Object, val: InternPool.Index) Allocator.Error!Builder.Constant {
const mod = o.module;
switch (mod.intern_pool.indexToKey(val)) {
.undef => return o.builder.undefConst(.ptr),
.int => {
var bigint_space: Value.BigIntSpace = undefined;
const bigint = Value.fromInterned(val).toBigInt(&bigint_space, mod);
const llvm_int = try lowerBigInt(o, Type.usize, bigint);
return o.builder.castConst(.inttoptr, llvm_int, .ptr);
},
else => unreachable,
}
}
 
fn lowerBigInt(
o: *Object,
ty: Type,
@@ -4246,129 +4225,60 @@ pub const Object = struct {
return o.builder.bigIntConst(try o.builder.intType(ty.intInfo(mod).bits), bigint);
}
 
fn lowerParentPtrDecl(o: *Object, decl_index: InternPool.DeclIndex) Allocator.Error!Builder.Constant {
const mod = o.module;
const decl = mod.declPtr(decl_index);
const ptr_ty = try mod.singleMutPtrType(decl.typeOf(mod));
return o.lowerDeclRefValue(ptr_ty, decl_index);
}
 
fn lowerParentPtr(o: *Object, ptr_val: Value) Error!Builder.Constant {
const mod = o.module;
const ip = &mod.intern_pool;
const ptr = ip.indexToKey(ptr_val.toIntern()).ptr;
return switch (ptr.addr) {
.decl => |decl| try o.lowerParentPtrDecl(decl),
.anon_decl => |ad| try o.lowerAnonDeclRef(Type.fromInterned(ad.orig_ty), ad),
.int => |int| try o.lowerIntAsPtr(int),
.eu_payload => |eu_ptr| {
const parent_ptr = try o.lowerParentPtr(Value.fromInterned(eu_ptr));
 
const eu_ty = Type.fromInterned(ip.typeOf(eu_ptr)).childType(mod);
const payload_ty = eu_ty.errorUnionPayload(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// In this case, we represent pointer to error union the same as pointer
// to the payload.
return parent_ptr;
}
 
const err_int_ty = try mod.errorIntType();
const payload_align = payload_ty.abiAlignment(mod);
const err_align = err_int_ty.abiAlignment(mod);
const index: u32 = if (payload_align.compare(.gt, err_align)) 2 else 1;
return o.builder.gepConst(.inbounds, try o.lowerType(eu_ty), parent_ptr, null, &.{
.@"0", try o.builder.intConst(.i32, index),
fn lowerPtr(
o: *Object,
ptr_val: InternPool.Index,
prev_offset: u64,
) Error!Builder.Constant {
const zcu = o.module;
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
const offset: u64 = prev_offset + ptr.byte_offset;
return switch (ptr.base_addr) {
.decl => |decl| {
const base_ptr = try o.lowerDeclRefValue(decl);
return o.builder.gepConst(.inbounds, .i8, base_ptr, null, &.{
try o.builder.intConst(.i64, offset),
});
},
.opt_payload => |opt_ptr| {
const parent_ptr = try o.lowerParentPtr(Value.fromInterned(opt_ptr));
 
const opt_ty = Type.fromInterned(ip.typeOf(opt_ptr)).childType(mod);
const payload_ty = opt_ty.optionalChild(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
payload_ty.optionalReprIsPayload(mod))
{
// In this case, we represent pointer to optional the same as pointer
// to the payload.
return parent_ptr;
}
 
return o.builder.gepConst(.inbounds, try o.lowerType(opt_ty), parent_ptr, null, &.{ .@"0", .@"0" });
},
.comptime_field, .comptime_alloc => unreachable,
.elem => |elem_ptr| {
const parent_ptr = try o.lowerParentPtr(Value.fromInterned(elem_ptr.base));
const elem_ty = Type.fromInterned(ip.typeOf(elem_ptr.base)).elemType2(mod);
 
return o.builder.gepConst(.inbounds, try o.lowerType(elem_ty), parent_ptr, null, &.{
try o.builder.intConst(try o.lowerType(Type.usize), elem_ptr.index),
.anon_decl => |ad| {
const base_ptr = try o.lowerAnonDeclRef(ad);
return o.builder.gepConst(.inbounds, .i8, base_ptr, null, &.{
try o.builder.intConst(.i64, offset),
});
},
.field => |field_ptr| {
const parent_ptr = try o.lowerParentPtr(Value.fromInterned(field_ptr.base));
const parent_ptr_ty = Type.fromInterned(ip.typeOf(field_ptr.base));
const parent_ty = parent_ptr_ty.childType(mod);
const field_index: u32 = @intCast(field_ptr.index);
switch (parent_ty.zigTypeTag(mod)) {
.Union => {
if (parent_ty.containerLayout(mod) == .@"packed") {
return parent_ptr;
}
 
const layout = parent_ty.unionGetLayout(mod);
if (layout.payload_size == 0) {
// In this case a pointer to the union and a pointer to any
// (void) payload is the same.
return parent_ptr;
}
 
const parent_llvm_ty = try o.lowerType(parent_ty);
return o.builder.gepConst(.inbounds, parent_llvm_ty, parent_ptr, null, &.{
.@"0",
try o.builder.intConst(.i32, @intFromBool(
layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align),
)),
});
.int => try o.builder.castConst(
.inttoptr,
try o.builder.intConst(try o.lowerType(Type.usize), offset),
.ptr,
),
.eu_payload => |eu_ptr| try o.lowerPtr(
eu_ptr,
offset + @import("../codegen.zig").errUnionPayloadOffset(
Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu),
zcu,
),
),
.opt_payload => |opt_ptr| try o.lowerPtr(opt_ptr, offset),
.field => |field| {
const agg_ty = Value.fromInterned(field.base).typeOf(zcu).childType(zcu);
const field_off: u64 = switch (agg_ty.zigTypeTag(zcu)) {
.Pointer => off: {
assert(agg_ty.isSlice(zcu));
break :off switch (field.index) {
Value.slice_ptr_index => 0,
Value.slice_len_index => @divExact(zcu.getTarget().ptrBitWidth(), 8),
else => unreachable,
};
},
.Struct => {
if (mod.typeToPackedStruct(parent_ty)) |struct_type| {
const ptr_info = Type.fromInterned(ptr.ty).ptrInfo(mod);
if (ptr_info.packed_offset.host_size != 0) return parent_ptr;
 
const parent_ptr_info = parent_ptr_ty.ptrInfo(mod);
const bit_offset = mod.structPackedFieldBitOffset(struct_type, field_index) + parent_ptr_info.packed_offset.bit_offset;
const llvm_usize = try o.lowerType(Type.usize);
const base_addr = try o.builder.castConst(.ptrtoint, parent_ptr, llvm_usize);
const byte_offset = try o.builder.intConst(llvm_usize, @divExact(bit_offset, 8));
const field_addr = try o.builder.binConst(.add, base_addr, byte_offset);
return o.builder.castConst(.inttoptr, field_addr, .ptr);
}
 
return o.builder.gepConst(
.inbounds,
try o.lowerType(parent_ty),
parent_ptr,
null,
if (o.llvmFieldIndex(parent_ty, field_index)) |llvm_field_index| &.{
.@"0",
try o.builder.intConst(.i32, llvm_field_index),
} else &.{
try o.builder.intConst(.i32, @intFromBool(
parent_ty.hasRuntimeBitsIgnoreComptime(mod),
)),
},
);
},
.Pointer => {
assert(parent_ty.isSlice(mod));
const parent_llvm_ty = try o.lowerType(parent_ty);
return o.builder.gepConst(.inbounds, parent_llvm_ty, parent_ptr, null, &.{
.@"0", try o.builder.intConst(.i32, field_index),
});
.Struct, .Union => switch (agg_ty.containerLayout(zcu)) {
.auto => agg_ty.structFieldOffset(@intCast(field.index), zcu),
.@"extern", .@"packed" => unreachable,
},
else => unreachable,
}
};
return o.lowerPtr(field.base, offset + field_off);
},
.arr_elem, .comptime_field, .comptime_alloc => unreachable,
};
}
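 
Editor's note: a hypothetical example, not part of the diff, of the `.decl` case above; `buf`, `q`, and the symbol `@buf` are illustrative names.
 
const buf = [_]u32{0} ** 10;
const q = &buf[3];
// `q` is interned as roughly `base_addr = decl(buf), byte_offset = 12`, so the `.decl`
// case above emits a single byte-wise GEP, roughly
//   getelementptr inbounds i8, ptr @buf, i64 12
// instead of the typed GEP over the element type that the removed code produced.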
 
@@ -4376,8 +4286,7 @@ pub const Object = struct {
/// Maybe the logic could be unified.
fn lowerAnonDeclRef(
o: *Object,
ptr_ty: Type,
anon_decl: InternPool.Key.Ptr.Addr.AnonDecl,
anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
) Error!Builder.Constant {
const mod = o.module;
const ip = &mod.intern_pool;
@@ -4393,6 +4302,8 @@ pub const Object = struct {
@panic("TODO");
}
 
const ptr_ty = Type.fromInterned(anon_decl.orig_ty);
 
const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn;
if ((!is_fn_body and !decl_ty.hasRuntimeBits(mod)) or
(is_fn_body and mod.typeToFunc(decl_ty).?.is_generic)) return o.lowerPtrToVoid(ptr_ty);
@@ -4400,9 +4311,8 @@ pub const Object = struct {
if (is_fn_body)
@panic("TODO");
 
const orig_ty = Type.fromInterned(anon_decl.orig_ty);
const llvm_addr_space = toLlvmAddressSpace(orig_ty.ptrAddressSpace(mod), target);
const alignment = orig_ty.ptrAlignment(mod);
const llvm_addr_space = toLlvmAddressSpace(ptr_ty.ptrAddressSpace(mod), target);
const alignment = ptr_ty.ptrAlignment(mod);
const llvm_global = (try o.resolveGlobalAnonDecl(decl_val, llvm_addr_space, alignment)).ptrConst(&o.builder).global;
 
const llvm_val = try o.builder.convConst(
@@ -4411,13 +4321,10 @@ pub const Object = struct {
try o.builder.ptrType(llvm_addr_space),
);
 
return o.builder.convConst(if (ptr_ty.isAbiInt(mod)) switch (ptr_ty.intInfo(mod).signedness) {
.signed => .signed,
.unsigned => .unsigned,
} else .unneeded, llvm_val, try o.lowerType(ptr_ty));
return o.builder.convConst(.unneeded, llvm_val, try o.lowerType(ptr_ty));
}
 
fn lowerDeclRefValue(o: *Object, ty: Type, decl_index: InternPool.DeclIndex) Allocator.Error!Builder.Constant {
fn lowerDeclRefValue(o: *Object, decl_index: InternPool.DeclIndex) Allocator.Error!Builder.Constant {
const mod = o.module;
 
// In the case of something like:
@@ -4428,18 +4335,23 @@ pub const Object = struct {
const decl = mod.declPtr(decl_index);
if (decl.val.getFunction(mod)) |func| {
if (func.owner_decl != decl_index) {
return o.lowerDeclRefValue(ty, func.owner_decl);
return o.lowerDeclRefValue(func.owner_decl);
}
} else if (decl.val.getExternFunc(mod)) |func| {
if (func.decl != decl_index) {
return o.lowerDeclRefValue(ty, func.decl);
return o.lowerDeclRefValue(func.decl);
}
}
 
const decl_ty = decl.typeOf(mod);
const ptr_ty = try decl.declPtrType(mod);
 
const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn;
if ((!is_fn_body and !decl_ty.hasRuntimeBits(mod)) or
(is_fn_body and mod.typeToFunc(decl_ty).?.is_generic)) return o.lowerPtrToVoid(ty);
(is_fn_body and mod.typeToFunc(decl_ty).?.is_generic))
{
return o.lowerPtrToVoid(ptr_ty);
}
 
const llvm_global = if (is_fn_body)
(try o.resolveLlvmFunction(decl_index)).ptrConst(&o.builder).global
@@ -4452,10 +4364,7 @@ pub const Object = struct {
try o.builder.ptrType(toLlvmAddressSpace(decl.@"addrspace", mod.getTarget())),
);
 
return o.builder.convConst(if (ty.isAbiInt(mod)) switch (ty.intInfo(mod).signedness) {
.signed => .signed,
.unsigned => .unsigned,
} else .unneeded, llvm_val, try o.lowerType(ty));
return o.builder.convConst(.unneeded, llvm_val, try o.lowerType(ptr_ty));
}
 
fn lowerPtrToVoid(o: *Object, ptr_ty: Type) Allocator.Error!Builder.Constant {
 
src/codegen/spirv.zig added: 4888, removed: 2579, total 2309
@@ -863,7 +863,7 @@ const DeclGen = struct {
const result_ty_id = try self.resolveType(ty, repr);
const ip = &mod.intern_pool;
 
log.debug("lowering constant: ty = {}, val = {}", .{ ty.fmt(mod), val.fmtValue(mod) });
log.debug("lowering constant: ty = {}, val = {}", .{ ty.fmt(mod), val.fmtValue(mod, null) });
if (val.isUndefDeep(mod)) {
return self.spv.constUndef(result_ty_id);
}
@@ -983,10 +983,10 @@ const DeclGen = struct {
const int_ty = ty.intTagType(mod);
break :cache try self.constant(int_ty, int_val, repr);
},
.ptr => return self.constantPtr(ty, val),
.ptr => return self.constantPtr(val),
.slice => |slice| {
const ptr_ty = ty.slicePtrFieldType(mod);
const ptr_id = try self.constantPtr(ptr_ty, Value.fromInterned(slice.ptr));
const ptr_id = try self.constantPtr(Value.fromInterned(slice.ptr));
const len_id = try self.constant(Type.usize, Value.fromInterned(slice.len), .indirect);
return self.constructStruct(
ty,
@@ -1107,62 +1107,86 @@ const DeclGen = struct {
return cacheable_id;
}
 
fn constantPtr(self: *DeclGen, ptr_ty: Type, ptr_val: Value) Error!IdRef {
fn constantPtr(self: *DeclGen, ptr_val: Value) Error!IdRef {
// TODO: Caching??
 
const result_ty_id = try self.resolveType(ptr_ty, .direct);
const mod = self.module;
const zcu = self.module;
 
if (ptr_val.isUndef(mod)) return self.spv.constUndef(result_ty_id);
if (ptr_val.isUndef(zcu)) {
const result_ty = ptr_val.typeOf(zcu);
const result_ty_id = try self.resolveType(result_ty, .direct);
return self.spv.constUndef(result_ty_id);
}
 
switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr) {
.decl => |decl| return try self.constantDeclRef(ptr_ty, decl),
.anon_decl => |anon_decl| return try self.constantAnonDeclRef(ptr_ty, anon_decl),
var arena = std.heap.ArenaAllocator.init(self.gpa);
defer arena.deinit();
 
const derivation = try ptr_val.pointerDerivation(arena.allocator(), zcu);
return self.derivePtr(derivation);
}
 
fn derivePtr(self: *DeclGen, derivation: Value.PointerDeriveStep) Error!IdRef {
const zcu = self.module;
switch (derivation) {
.comptime_alloc_ptr, .comptime_field_ptr => unreachable,
.int => |int| {
const ptr_id = self.spv.allocId();
const result_ty_id = try self.resolveType(int.ptr_ty, .direct);
// TODO: This can probably be an OpSpecConstantOp Bitcast, but
// that is not implemented by Mesa yet. Therefore, just generate it
// as a runtime operation.
const result_ptr_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{
.id_result_type = result_ty_id,
.id_result = ptr_id,
.integer_value = try self.constant(Type.usize, Value.fromInterned(int), .direct),
.id_result = result_ptr_id,
.integer_value = try self.constant(Type.usize, try zcu.intValue(Type.usize, int.addr), .direct),
});
return ptr_id;
return result_ptr_id;
},
.eu_payload => unreachable, // TODO
.opt_payload => unreachable, // TODO
.comptime_field, .comptime_alloc => unreachable,
.elem => |elem_ptr| {
const parent_ptr_ty = Type.fromInterned(mod.intern_pool.typeOf(elem_ptr.base));
const parent_ptr_id = try self.constantPtr(parent_ptr_ty, Value.fromInterned(elem_ptr.base));
const index_id = try self.constInt(Type.usize, elem_ptr.index, .direct);
.decl_ptr => |decl| {
const result_ptr_ty = try zcu.declPtr(decl).declPtrType(zcu);
return self.constantDeclRef(result_ptr_ty, decl);
},
.anon_decl_ptr => |ad| {
const result_ptr_ty = Type.fromInterned(ad.orig_ty);
return self.constantAnonDeclRef(result_ptr_ty, ad);
},
.eu_payload_ptr => @panic("TODO"),
.opt_payload_ptr => @panic("TODO"),
.field_ptr => |field| {
const parent_ptr_id = try self.derivePtr(field.parent.*);
const parent_ptr_ty = try field.parent.ptrType(zcu);
return self.structFieldPtr(field.result_ptr_ty, parent_ptr_ty, parent_ptr_id, field.field_idx);
},
.elem_ptr => |elem| {
const parent_ptr_id = try self.derivePtr(elem.parent.*);
const parent_ptr_ty = try elem.parent.ptrType(zcu);
const index_id = try self.constInt(Type.usize, elem.elem_idx, .direct);
return self.ptrElemPtr(parent_ptr_ty, parent_ptr_id, index_id);
},
.offset_and_cast => |oac| {
const parent_ptr_id = try self.derivePtr(oac.parent.*);
const parent_ptr_ty = try oac.parent.ptrType(zcu);
disallow: {
if (oac.byte_offset != 0) break :disallow;
// Allow changing the pointer type child only to restructure arrays.
// e.g. [3][2]T to T is fine, as is [2]T -> [2][1]T.
const src_base_ty = parent_ptr_ty.arrayBase(zcu)[0];
const dest_base_ty = oac.new_ptr_ty.arrayBase(zcu)[0];
if (self.getTarget().os.tag == .vulkan and src_base_ty.toIntern() != dest_base_ty.toIntern()) break :disallow;
 
const elem_ptr_id = try self.ptrElemPtr(parent_ptr_ty, parent_ptr_id, index_id);
 
// TODO: Can we consolidate this in ptrElemPtr?
const elem_ty = parent_ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T.
const elem_ptr_ty_id = try self.ptrType(elem_ty, self.spvStorageClass(parent_ptr_ty.ptrAddressSpace(mod)));
 
// TODO: Can we remove this ID comparison?
if (elem_ptr_ty_id == result_ty_id) {
return elem_ptr_id;
const result_ty_id = try self.resolveType(oac.new_ptr_ty, .direct);
const result_ptr_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
.id_result_type = result_ty_id,
.id_result = result_ptr_id,
.operand = parent_ptr_id,
});
return result_ptr_id;
}
// This may happen when we have pointer-to-array and the result is
// another pointer-to-array instead of a pointer-to-element.
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
.id_result_type = result_ty_id,
.id_result = result_id,
.operand = elem_ptr_id,
return self.fail("Cannot perform pointer cast: '{}' to '{}'", .{
parent_ptr_ty.fmt(zcu),
oac.new_ptr_ty.fmt(zcu),
});
return result_id;
},
.field => |field| {
const base_ptr_ty = Type.fromInterned(mod.intern_pool.typeOf(field.base));
const base_ptr = try self.constantPtr(base_ptr_ty, Value.fromInterned(field.base));
const field_index: u32 = @intCast(field.index);
return try self.structFieldPtr(ptr_ty, base_ptr_ty, base_ptr, field_index);
},
}
}
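 
Editor's note: hypothetical examples, not part of the diff, of the `offset_and_cast` restriction above, assuming `arrayBase` strips all array layers down to the scalar element type.
 
// *const [3][2]u32 -> *const [6]u32 : allowed; both sides bottom out in u32, lowered as OpBitcast
// *const [2]u32    -> *const u32    : allowed for the same reason
// *const [2]u32    -> *const [4]u16 : rejected on Vulkan targets ("Cannot perform pointer cast"),
//                                     but bitcast as-is elsewhere
// any non-zero byte_offset          : rejected regardless of target
 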
@@ -1170,7 +1194,7 @@ const DeclGen = struct {
fn constantAnonDeclRef(
self: *DeclGen,
ty: Type,
anon_decl: InternPool.Key.Ptr.Addr.AnonDecl,
anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
) !IdRef {
// TODO: Merge this function with constantDeclRef.
 
@@ -4456,16 +4480,20 @@ const DeclGen = struct {
) !IdRef {
const result_ty_id = try self.resolveType(result_ptr_ty, .direct);
 
const mod = self.module;
const object_ty = object_ptr_ty.childType(mod);
switch (object_ty.zigTypeTag(mod)) {
.Struct => switch (object_ty.containerLayout(mod)) {
const zcu = self.module;
const object_ty = object_ptr_ty.childType(zcu);
switch (object_ty.zigTypeTag(zcu)) {
.Pointer => {
assert(object_ty.isSlice(zcu));
return self.accessChain(result_ty_id, object_ptr, &.{field_index});
},
.Struct => switch (object_ty.containerLayout(zcu)) {
.@"packed" => unreachable, // TODO
else => {
return try self.accessChain(result_ty_id, object_ptr, &.{field_index});
},
},
.Union => switch (object_ty.containerLayout(mod)) {
.Union => switch (object_ty.containerLayout(zcu)) {
.@"packed" => unreachable, // TODO
else => {
const layout = self.unionLayout(object_ty);
@@ -4475,7 +4503,7 @@ const DeclGen = struct {
return try self.spv.constUndef(result_ty_id);
}
 
const storage_class = self.spvStorageClass(object_ptr_ty.ptrAddressSpace(mod));
const storage_class = self.spvStorageClass(object_ptr_ty.ptrAddressSpace(zcu));
const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, storage_class);
const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, object_ptr, &.{layout.payload_index});
 
 
src/link/Wasm/ZigObject.zig added: 4888, removed: 2579, total 2309
@@ -539,7 +539,6 @@ fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: V
.none,
.{
.parent_atom_index = @intFromEnum(atom.sym_index),
.addend = null,
},
);
break :code switch (result) {
 
src/mutable_value.zig added: 4888, removed: 2579, total 2309
@@ -54,22 +54,22 @@ pub const MutableValue = union(enum) {
payload: *MutableValue,
};
 
pub fn intern(mv: MutableValue, zcu: *Zcu, arena: Allocator) Allocator.Error!InternPool.Index {
pub fn intern(mv: MutableValue, zcu: *Zcu, arena: Allocator) Allocator.Error!Value {
const ip = &zcu.intern_pool;
const gpa = zcu.gpa;
return switch (mv) {
return Value.fromInterned(switch (mv) {
.interned => |ip_index| ip_index,
.eu_payload => |sv| try ip.get(gpa, .{ .error_union = .{
.ty = sv.ty,
.val = .{ .payload = try sv.child.intern(zcu, arena) },
.val = .{ .payload = (try sv.child.intern(zcu, arena)).toIntern() },
} }),
.opt_payload => |sv| try ip.get(gpa, .{ .opt = .{
.ty = sv.ty,
.val = try sv.child.intern(zcu, arena),
.val = (try sv.child.intern(zcu, arena)).toIntern(),
} }),
.repeated => |sv| try ip.get(gpa, .{ .aggregate = .{
.ty = sv.ty,
.storage = .{ .repeated_elem = try sv.child.intern(zcu, arena) },
.storage = .{ .repeated_elem = (try sv.child.intern(zcu, arena)).toIntern() },
} }),
.bytes => |b| try ip.get(gpa, .{ .aggregate = .{
.ty = b.ty,
@@ -78,24 +78,24 @@ pub const MutableValue = union(enum) {
.aggregate => |a| {
const elems = try arena.alloc(InternPool.Index, a.elems.len);
for (a.elems, elems) |mut_elem, *interned_elem| {
interned_elem.* = try mut_elem.intern(zcu, arena);
interned_elem.* = (try mut_elem.intern(zcu, arena)).toIntern();
}
return ip.get(gpa, .{ .aggregate = .{
return Value.fromInterned(try ip.get(gpa, .{ .aggregate = .{
.ty = a.ty,
.storage = .{ .elems = elems },
} });
} }));
},
.slice => |s| try ip.get(gpa, .{ .slice = .{
.ty = s.ty,
.ptr = try s.ptr.intern(zcu, arena),
.len = try s.len.intern(zcu, arena),
.ptr = (try s.ptr.intern(zcu, arena)).toIntern(),
.len = (try s.len.intern(zcu, arena)).toIntern(),
} }),
.un => |u| try ip.get(gpa, .{ .un = .{
.ty = u.ty,
.tag = u.tag,
.val = try u.payload.intern(zcu, arena),
.val = (try u.payload.intern(zcu, arena)).toIntern(),
} }),
};
});
}
 
/// Un-interns the top level of this `MutableValue`, if applicable.
@@ -248,9 +248,11 @@ pub const MutableValue = union(enum) {
},
.Union => {
const payload = try arena.create(MutableValue);
// HACKHACK: this logic is silly, but Sema detects it and reverts the change where needed.
// See comment at the top of `Sema.beginComptimePtrMutationInner`.
payload.* = .{ .interned = .undef };
const backing_ty = try Type.fromInterned(ty_ip).unionBackingType(zcu);
payload.* = .{ .interned = try ip.get(
gpa,
.{ .undef = backing_ty.toIntern() },
) };
mv.* = .{ .un = .{
.ty = ty_ip,
.tag = .none,
@@ -294,7 +296,6 @@ pub const MutableValue = union(enum) {
/// Get a pointer to the `MutableValue` associated with a field/element.
/// The returned pointer can be safely mutated through to modify the field value.
/// The returned pointer is valid until the representation of `mv` changes.
/// This function does *not* support accessing the ptr/len field of slices.
pub fn elem(
mv: *MutableValue,
zcu: *Zcu,
@@ -304,18 +305,18 @@ pub const MutableValue = union(enum) {
const ip = &zcu.intern_pool;
const gpa = zcu.gpa;
// Convert to the `aggregate` representation.
switch (mv) {
.eu_payload, .opt_payload, .slice, .un => unreachable,
switch (mv.*) {
.eu_payload, .opt_payload, .un => unreachable,
.interned => {
try mv.unintern(zcu, arena, false, false);
},
.bytes => |bytes| {
const elems = try arena.alloc(MutableValue, bytes.data.len);
for (bytes.data, elems) |byte, interned_byte| {
interned_byte.* = try ip.get(gpa, .{ .int = .{
for (bytes.data, elems) |byte, *interned_byte| {
interned_byte.* = .{ .interned = try ip.get(gpa, .{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = byte },
} });
} }) };
}
mv.* = .{ .aggregate = .{
.ty = bytes.ty,
@@ -331,9 +332,17 @@ pub const MutableValue = union(enum) {
.elems = elems,
} };
},
.aggregate => {},
.slice, .aggregate => {},
}
switch (mv.*) {
.aggregate => |*agg| return &agg.elems[field_idx],
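// Slices keep their ptr/len as separate `MutableValue`s; map the special field indices onto them.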
.slice => |*slice| return switch (field_idx) {
Value.slice_ptr_index => slice.ptr,
Value.slice_len_index => slice.len,
else => unreachable,
},
else => unreachable,
}
return &mv.aggregate.elems[field_idx];
}
 
/// Modify a single field of a `MutableValue` which represents an aggregate or slice, leaving others
@@ -349,43 +358,44 @@ pub const MutableValue = union(enum) {
) Allocator.Error!void {
const ip = &zcu.intern_pool;
const is_trivial_int = field_val.isTrivialInt(zcu);
try mv.unintern(arena, is_trivial_int, true);
switch (mv) {
try mv.unintern(zcu, arena, is_trivial_int, true);
switch (mv.*) {
.interned,
.eu_payload,
.opt_payload,
.un,
=> unreachable,
.slice => |*s| switch (field_idx) {
Value.slice_ptr_index => s.ptr = field_val,
Value.slice_len_index => s.len = field_val,
Value.slice_ptr_index => s.ptr.* = field_val,
Value.slice_len_index => s.len.* = field_val,
else => unreachable,
},
.bytes => |b| {
assert(is_trivial_int);
assert(field_val.typeOf() == Type.u8);
b.data[field_idx] = Value.fromInterned(field_val.interned).toUnsignedInt(zcu);
assert(field_val.typeOf(zcu).toIntern() == .u8_type);
b.data[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu));
},
.repeated => |r| {
if (field_val.eqlTrivial(r.child.*)) return;
// We must switch to either the `aggregate` or the `bytes` representation.
const len_inc_sent = ip.aggregateTypeLenIncludingSentinel(r.ty);
if (ip.zigTypeTag(r.ty) != .Struct and
if (Type.fromInterned(r.ty).zigTypeTag(zcu) != .Struct and
is_trivial_int and
Type.fromInterned(r.ty).childType(zcu) == .u8_type and
Type.fromInterned(r.ty).childType(zcu).toIntern() == .u8_type and
r.child.isTrivialInt(zcu))
{
// We can use the `bytes` representation.
const bytes = try arena.alloc(u8, @intCast(len_inc_sent));
const repeated_byte = Value.fromInterned(r.child.interned).getUnsignedInt(zcu);
@memset(bytes, repeated_byte);
bytes[field_idx] = Value.fromInterned(field_val.interned).getUnsignedInt(zcu);
const repeated_byte = Value.fromInterned(r.child.interned).toUnsignedInt(zcu);
@memset(bytes, @intCast(repeated_byte));
bytes[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu));
mv.* = .{ .bytes = .{
.ty = r.ty,
.data = bytes,
} };
} else {
// We must use the `aggregate` representation.
const mut_elems = try arena.alloc(u8, @intCast(len_inc_sent));
const mut_elems = try arena.alloc(MutableValue, @intCast(len_inc_sent));
@memset(mut_elems, r.child.*);
mut_elems[field_idx] = field_val;
mv.* = .{ .aggregate = .{
@@ -396,12 +406,12 @@ pub const MutableValue = union(enum) {
},
.aggregate => |a| {
a.elems[field_idx] = field_val;
const is_struct = ip.zigTypeTag(a.ty) == .Struct;
const is_struct = Type.fromInterned(a.ty).zigTypeTag(zcu) == .Struct;
// Attempt to switch to a more efficient representation.
const is_repeated = for (a.elems) |e| {
if (!e.eqlTrivial(field_val)) break false;
} else true;
if (is_repeated) {
if (!is_struct and is_repeated) {
// Switch to `repeated` repr
const mut_repeated = try arena.create(MutableValue);
mut_repeated.* = field_val;
@@ -425,7 +435,7 @@ pub const MutableValue = union(enum) {
} else {
const bytes = try arena.alloc(u8, a.elems.len);
for (a.elems, bytes) |elem_val, *b| {
b.* = Value.fromInterned(elem_val.interned).toUnsignedInt(zcu);
b.* = @intCast(Value.fromInterned(elem_val.interned).toUnsignedInt(zcu));
}
mv.* = .{ .bytes = .{
.ty = a.ty,
@@ -505,4 +515,67 @@ pub const MutableValue = union(enum) {
inline else => |x| Type.fromInterned(x.ty),
};
}
 
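/// Interprets `mv` as a value of optional type without fully interning it,
/// reporting whether it is `undefined`, `null`, or carries a payload.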
pub fn unpackOptional(mv: MutableValue, zcu: *Zcu) union(enum) {
undef,
null,
payload: MutableValue,
} {
return switch (mv) {
.opt_payload => |pl| return .{ .payload = pl.child.* },
.interned => |ip_index| switch (zcu.intern_pool.indexToKey(ip_index)) {
.undef => return .undef,
.opt => |opt| if (opt.val == .none) .null else .{ .payload = .{ .interned = opt.val } },
else => unreachable,
},
else => unreachable,
};
}
 
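/// Interprets `mv` as a value of error union type without fully interning it,
/// reporting whether it is `undefined`, an error name, or carries a payload.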
pub fn unpackErrorUnion(mv: MutableValue, zcu: *Zcu) union(enum) {
undef,
err: InternPool.NullTerminatedString,
payload: MutableValue,
} {
return switch (mv) {
.eu_payload => |pl| return .{ .payload = pl.child.* },
.interned => |ip_index| switch (zcu.intern_pool.indexToKey(ip_index)) {
.undef => return .undef,
.error_union => |eu| switch (eu.val) {
.err_name => |name| .{ .err = name },
.payload => |pl| .{ .payload = .{ .interned = pl } },
},
else => unreachable,
},
else => unreachable,
};
}
 
/// Fast equality checking which may return false negatives.
/// Used for deciding when to switch aggregate representations without fully
/// interning many values.
fn eqlTrivial(a: MutableValue, b: MutableValue) bool {
const Tag = @typeInfo(MutableValue).Union.tag_type.?;
if (@as(Tag, a) != @as(Tag, b)) return false;
return switch (a) {
.interned => |a_ip| a_ip == b.interned,
.eu_payload => |a_pl| a_pl.ty == b.eu_payload.ty and a_pl.child.eqlTrivial(b.eu_payload.child.*),
.opt_payload => |a_pl| a_pl.ty == b.opt_payload.ty and a_pl.child.eqlTrivial(b.opt_payload.child.*),
.repeated => |a_rep| a_rep.ty == b.repeated.ty and a_rep.child.eqlTrivial(b.repeated.child.*),
.bytes => |a_bytes| a_bytes.ty == b.bytes.ty and std.mem.eql(u8, a_bytes.data, b.bytes.data),
.aggregate => |a_agg| {
const b_agg = b.aggregate;
if (a_agg.ty != b_agg.ty) return false;
if (a_agg.elems.len != b_agg.elems.len) return false;
for (a_agg.elems, b_agg.elems) |a_elem, b_elem| {
if (!a_elem.eqlTrivial(b_elem)) return false;
}
return true;
},
.slice => |a_slice| a_slice.ty == b.slice.ty and
a_slice.ptr.interned == b.slice.ptr.interned and
a_slice.len.interned == b.slice.len.interned,
.un => |a_un| a_un.ty == b.un.ty and a_un.tag == b.un.tag and a_un.payload.eqlTrivial(b.un.payload.*),
};
}
};
 
src/print_air.zig added: 4888, removed: 2579, total 2309
@@ -951,7 +951,7 @@ const Writer = struct {
const ty = Type.fromInterned(mod.intern_pool.indexToKey(ip_index).typeOf());
try s.print("<{}, {}>", .{
ty.fmt(mod),
Value.fromInterned(ip_index).fmtValue(mod),
Value.fromInterned(ip_index).fmtValue(mod, null),
});
} else {
return w.writeInstIndex(s, operand.toIndex().?, dies);
 
src/print_value.zig added: 4888, removed: 2579, total 2309
@@ -17,6 +17,7 @@ const max_string_len = 256;
const FormatContext = struct {
val: Value,
mod: *Module,
opt_sema: ?*Sema,
};
 
pub fn format(
@@ -27,10 +28,10 @@ pub fn format(
) !void {
_ = options;
comptime std.debug.assert(fmt.len == 0);
return print(ctx.val, writer, 3, ctx.mod, null) catch |err| switch (err) {
return print(ctx.val, writer, 3, ctx.mod, ctx.opt_sema) catch |err| switch (err) {
error.OutOfMemory => @panic("OOM"), // We're not allowed to return this from a format function
error.ComptimeBreak, error.ComptimeReturn => unreachable,
error.AnalysisFail, error.NeededSourceLocation => unreachable, // TODO: re-evaluate when we actually pass `opt_sema`
error.AnalysisFail, error.NeededSourceLocation => unreachable, // TODO: re-evaluate when we use `opt_sema` more fully
else => |e| return e,
};
}
@@ -117,7 +118,7 @@ pub fn print(
},
.slice => |slice| {
const print_contents = switch (ip.getBackingAddrTag(slice.ptr).?) {
.field, .elem, .eu_payload, .opt_payload => unreachable,
.field, .arr_elem, .eu_payload, .opt_payload => unreachable,
.anon_decl, .comptime_alloc, .comptime_field => true,
.decl, .int => false,
};
@@ -125,7 +126,7 @@ pub fn print(
// TODO: eventually we want to load the slice as an array with `opt_sema`, but that's
// currently not possible without e.g. triggering compile errors.
}
try printPtr(slice.ptr, writer, false, false, 0, level, mod, opt_sema);
try printPtr(Value.fromInterned(slice.ptr), writer, level, mod, opt_sema);
try writer.writeAll("[0..");
if (level == 0) {
try writer.writeAll("(...)");
@@ -136,7 +137,7 @@ pub fn print(
},
.ptr => {
const print_contents = switch (ip.getBackingAddrTag(val.toIntern()).?) {
.field, .elem, .eu_payload, .opt_payload => unreachable,
.field, .arr_elem, .eu_payload, .opt_payload => unreachable,
.anon_decl, .comptime_alloc, .comptime_field => true,
.decl, .int => false,
};
@@ -144,13 +145,13 @@ pub fn print(
// TODO: eventually we want to load the pointer with `opt_sema`, but that's
// currently not possible without e.g. triggering compile errors.
}
try printPtr(val.toIntern(), writer, false, false, 0, level, mod, opt_sema);
try printPtr(val, writer, level, mod, opt_sema);
},
.opt => |opt| switch (opt.val) {
.none => try writer.writeAll("null"),
else => |payload| try print(Value.fromInterned(payload), writer, level, mod, opt_sema),
},
.aggregate => |aggregate| try printAggregate(val, aggregate, writer, level, false, mod, opt_sema),
.aggregate => |aggregate| try printAggregate(val, aggregate, false, writer, level, mod, opt_sema),
.un => |un| {
if (level == 0) {
try writer.writeAll(".{ ... }");
@@ -176,13 +177,14 @@ pub fn print(
fn printAggregate(
val: Value,
aggregate: InternPool.Key.Aggregate,
is_ref: bool,
writer: anytype,
level: u8,
is_ref: bool,
zcu: *Zcu,
opt_sema: ?*Sema,
) (@TypeOf(writer).Error || Module.CompileError)!void {
if (level == 0) {
if (is_ref) try writer.writeByte('&');
return writer.writeAll(".{ ... }");
}
const ip = &zcu.intern_pool;
@@ -257,101 +259,87 @@ fn printAggregate(
return writer.writeAll(" }");
}
 
fn printPtr(
ptr_val: InternPool.Index,
writer: anytype,
force_type: bool,
force_addrof: bool,
leading_parens: u32,
level: u8,
zcu: *Zcu,
opt_sema: ?*Sema,
) (@TypeOf(writer).Error || Module.CompileError)!void {
const ip = &zcu.intern_pool;
const ptr = switch (ip.indexToKey(ptr_val)) {
.undef => |ptr_ty| {
if (force_addrof) try writer.writeAll("&");
try writer.writeByteNTimes('(', leading_parens);
try writer.print("@as({}, undefined)", .{Type.fromInterned(ptr_ty).fmt(zcu)});
return;
},
fn printPtr(ptr_val: Value, writer: anytype, level: u8, zcu: *Zcu, opt_sema: ?*Sema) (@TypeOf(writer).Error || Module.CompileError)!void {
const ptr = switch (zcu.intern_pool.indexToKey(ptr_val.toIntern())) {
.undef => return writer.writeAll("undefined"),
.ptr => |ptr| ptr,
else => unreachable,
};
if (level == 0) {
return writer.writeAll("&...");
}
switch (ptr.addr) {
.int => |int| {
if (force_addrof) try writer.writeAll("&");
try writer.writeByteNTimes('(', leading_parens);
if (force_type) {
try writer.print("@as({}, @ptrFromInt(", .{Type.fromInterned(ptr.ty).fmt(zcu)});
try print(Value.fromInterned(int), writer, level - 1, zcu, opt_sema);
try writer.writeAll("))");
} else {
try writer.writeAll("@ptrFromInt(");
try print(Value.fromInterned(int), writer, level - 1, zcu, opt_sema);
try writer.writeAll(")");
}
},
.decl => |index| {
try writer.writeAll("&");
try zcu.declPtr(index).renderFullyQualifiedName(zcu, writer);
},
.comptime_alloc => try writer.writeAll("&(comptime alloc)"),
.anon_decl => |anon| switch (ip.indexToKey(anon.val)) {
.aggregate => |aggregate| try printAggregate(
Value.fromInterned(anon.val),
aggregate,
writer,
level - 1,
 
if (ptr.base_addr == .anon_decl) {
// If the value is an aggregate, we can potentially print it more nicely.
switch (zcu.intern_pool.indexToKey(ptr.base_addr.anon_decl.val)) {
.aggregate => |agg| return printAggregate(
Value.fromInterned(ptr.base_addr.anon_decl.val),
agg,
true,
writer,
level,
zcu,
opt_sema,
),
else => {
const ty = Type.fromInterned(ip.typeOf(anon.val));
try writer.print("&@as({}, ", .{ty.fmt(zcu)});
try print(Value.fromInterned(anon.val), writer, level - 1, zcu, opt_sema);
try writer.writeAll(")");
},
else => {},
}
}
 
var arena = std.heap.ArenaAllocator.init(zcu.gpa);
defer arena.deinit();
const derivation = try ptr_val.pointerDerivationAdvanced(arena.allocator(), zcu, opt_sema);
try printPtrDerivation(derivation, writer, level, zcu, opt_sema);
}
 
/// Print `derivation` as an lvalue, i.e. such that writing `&` before this gives the pointer value.
fn printPtrDerivation(derivation: Value.PointerDeriveStep, writer: anytype, level: u8, zcu: *Zcu, opt_sema: ?*Sema) (@TypeOf(writer).Error || Module.CompileError)!void {
const ip = &zcu.intern_pool;
switch (derivation) {
.int => |int| try writer.print("@as({}, @ptrFromInt({x})).*", .{
int.ptr_ty.fmt(zcu),
int.addr,
}),
.decl_ptr => |decl| {
try zcu.declPtr(decl).renderFullyQualifiedName(zcu, writer);
},
.comptime_field => |val| {
const ty = Type.fromInterned(ip.typeOf(val));
try writer.print("&@as({}, ", .{ty.fmt(zcu)});
try print(Value.fromInterned(val), writer, level - 1, zcu, opt_sema);
try writer.writeAll(")");
.anon_decl_ptr => |anon| {
const ty = Value.fromInterned(anon.val).typeOf(zcu);
try writer.print("@as({}, ", .{ty.fmt(zcu)});
try print(Value.fromInterned(anon.val), writer, level - 1, zcu, opt_sema);
try writer.writeByte(')');
},
.eu_payload => |base| {
try printPtr(base, writer, true, true, leading_parens, level, zcu, opt_sema);
.comptime_alloc_ptr => |info| {
try writer.print("@as({}, ", .{info.val.typeOf(zcu).fmt(zcu)});
try print(info.val, writer, level - 1, zcu, opt_sema);
try writer.writeByte(')');
},
.comptime_field_ptr => |val| {
const ty = val.typeOf(zcu);
try writer.print("@as({}, ", .{ty.fmt(zcu)});
try print(val, writer, level - 1, zcu, opt_sema);
try writer.writeByte(')');
},
.eu_payload_ptr => |info| {
try writer.writeByte('(');
try printPtrDerivation(info.parent.*, writer, level, zcu, opt_sema);
try writer.writeAll(" catch unreachable)");
},
.opt_payload_ptr => |info| {
try printPtrDerivation(info.parent.*, writer, level, zcu, opt_sema);
try writer.writeAll(".?");
},
.opt_payload => |base| {
try writer.writeAll("(");
try printPtr(base, writer, true, true, leading_parens + 1, level, zcu, opt_sema);
try writer.writeAll(" catch unreachable");
},
.elem => |elem| {
try printPtr(elem.base, writer, true, true, leading_parens, level, zcu, opt_sema);
try writer.print("[{d}]", .{elem.index});
},
.field => |field| {
try printPtr(field.base, writer, true, true, leading_parens, level, zcu, opt_sema);
const base_ty = Type.fromInterned(ip.typeOf(field.base)).childType(zcu);
switch (base_ty.zigTypeTag(zcu)) {
.Struct => if (base_ty.isTuple(zcu)) {
try writer.print("[{d}]", .{field.index});
} else {
const field_name = base_ty.structFieldName(@intCast(field.index), zcu).unwrap().?;
.field_ptr => |field| {
try printPtrDerivation(field.parent.*, writer, level, zcu, opt_sema);
const agg_ty = (try field.parent.ptrType(zcu)).childType(zcu);
switch (agg_ty.zigTypeTag(zcu)) {
.Struct => if (agg_ty.structFieldName(field.field_idx, zcu).unwrap()) |field_name| {
try writer.print(".{i}", .{field_name.fmt(ip)});
} else {
try writer.print("[{d}]", .{field.field_idx});
},
.Union => {
const tag_ty = base_ty.unionTagTypeHypothetical(zcu);
const field_name = tag_ty.enumFieldName(@intCast(field.index), zcu);
const tag_ty = agg_ty.unionTagTypeHypothetical(zcu);
const field_name = tag_ty.enumFieldName(field.field_idx, zcu);
try writer.print(".{i}", .{field_name.fmt(ip)});
},
.Pointer => switch (field.index) {
.Pointer => switch (field.field_idx) {
Value.slice_ptr_index => try writer.writeAll(".ptr"),
Value.slice_len_index => try writer.writeAll(".len"),
else => unreachable,
@@ -359,5 +347,18 @@ fn printPtr(
else => unreachable,
}
},
.elem_ptr => |elem| {
try printPtrDerivation(elem.parent.*, writer, level, zcu, opt_sema);
try writer.print("[{d}]", .{elem.elem_idx});
},
.offset_and_cast => |oac| if (oac.byte_offset == 0) {
try writer.print("@as({}, @ptrCast(", .{oac.new_ptr_ty.fmt(zcu)});
try printPtrDerivation(oac.parent.*, writer, level, zcu, opt_sema);
try writer.writeAll("))");
} else {
try writer.print("@as({}, @ptrFromInt(@intFromPtr(", .{oac.new_ptr_ty.fmt(zcu)});
try printPtrDerivation(oac.parent.*, writer, level, zcu, opt_sema);
try writer.print(") + {d}))", .{oac.byte_offset});
},
}
}
 
src/type.zig added: 4888, removed: 2579, total 2309
@@ -172,6 +172,7 @@ pub const Type = struct {
}
 
/// Prints a name suitable for `@typeName`.
/// TODO: take an `opt_sema` to pass to `fmtValue` when printing sentinels.
pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void {
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
@@ -187,8 +188,8 @@ pub const Type = struct {
 
if (info.sentinel != .none) switch (info.flags.size) {
.One, .C => unreachable,
.Many => try writer.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod)}),
.Slice => try writer.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod)}),
.Many => try writer.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}),
.Slice => try writer.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}),
} else switch (info.flags.size) {
.One => try writer.writeAll("*"),
.Many => try writer.writeAll("[*]"),
@@ -234,7 +235,7 @@ pub const Type = struct {
} else {
try writer.print("[{d}:{}]", .{
array_type.len,
Value.fromInterned(array_type.sentinel).fmtValue(mod),
Value.fromInterned(array_type.sentinel).fmtValue(mod, null),
});
try print(Type.fromInterned(array_type.child), writer, mod);
}
@@ -352,7 +353,7 @@ pub const Type = struct {
try print(Type.fromInterned(field_ty), writer, mod);
 
if (val != .none) {
try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(mod)});
try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(mod, null)});
}
}
try writer.writeAll("}");
@@ -1965,6 +1966,12 @@ pub const Type = struct {
return Type.fromInterned(union_fields[index]);
}
 
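/// Returns the type of the union field at `index`. Asserts that `ty` is a union type.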
pub fn unionFieldTypeByIndex(ty: Type, index: usize, mod: *Module) Type {
const ip = &mod.intern_pool;
const union_obj = mod.typeToUnion(ty).?;
return Type.fromInterned(union_obj.field_types.get(ip)[index]);
}
 
pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 {
const union_obj = mod.typeToUnion(ty).?;
return mod.unionTagFieldIndex(union_obj, enum_tag);
@@ -3049,22 +3056,34 @@ pub const Type = struct {
};
}
 
pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) Alignment {
const ip = &mod.intern_pool;
pub fn structFieldAlign(ty: Type, index: usize, zcu: *Zcu) Alignment {
return ty.structFieldAlignAdvanced(index, zcu, null) catch unreachable;
}
 
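/// Like `structFieldAlign`, but when `opt_sema` is provided, field alignment may be
/// resolved through `Sema` instead of being assumed to be already resolved.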
pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, opt_sema: ?*Sema) !Alignment {
const ip = &zcu.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.layout != .@"packed");
const explicit_align = struct_type.fieldAlign(ip, index);
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
return mod.structFieldAlignment(explicit_align, field_ty, struct_type.layout);
if (opt_sema) |sema| {
return sema.structFieldAlignment(explicit_align, field_ty, struct_type.layout);
} else {
return zcu.structFieldAlignment(explicit_align, field_ty, struct_type.layout);
}
},
.anon_struct_type => |anon_struct| {
return Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignment(mod);
return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(zcu, if (opt_sema) |sema| .{ .sema = sema } else .eager)).scalar;
},
.union_type => {
const union_obj = ip.loadUnionType(ty.toIntern());
return mod.unionFieldNormalAlignment(union_obj, @intCast(index));
if (opt_sema) |sema| {
return sema.unionFieldAlignment(union_obj, @intCast(index));
} else {
return zcu.unionFieldNormalAlignment(union_obj, @intCast(index));
}
},
else => unreachable,
}
@@ -3301,6 +3320,71 @@ pub const Type = struct {
};
}
 
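/// Strips away all array types from `ty`, returning the scalar base type together with
/// the total number of scalar elements (including sentinels) that `ty` spans.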
pub fn arrayBase(ty: Type, zcu: *const Zcu) struct { Type, u64 } {
var cur_ty: Type = ty;
var cur_len: u64 = 1;
while (cur_ty.zigTypeTag(zcu) == .Array) {
cur_len *= cur_ty.arrayLenIncludingSentinel(zcu);
cur_ty = cur_ty.childType(zcu);
}
return .{ cur_ty, cur_len };
}
 
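/// Given a packed struct type, the type of a pointer to it, and a field index, determines how a
/// pointer to that field should be represented: either as a bit-pointer into the packed backing
/// integer, or, when the field happens to be byte-aligned, as an ordinary pointer described by a
/// byte offset and alignment.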
pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: u32, zcu: *Zcu) union(enum) {
/// The result is a bit-pointer with the same value and a new packed offset.
bit_ptr: InternPool.Key.PtrType.PackedOffset,
/// The result is a standard pointer.
byte_ptr: struct {
/// The byte offset of the field pointer from the parent pointer value.
offset: u64,
/// The alignment of the field pointer type.
alignment: InternPool.Alignment,
},
} {
comptime assert(Type.packed_struct_layout_version == 2);
 
const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu);
const field_ty = struct_ty.structFieldType(field_idx, zcu);
 
var bit_offset: u16 = 0;
var running_bits: u16 = 0;
for (0..struct_ty.structFieldCount(zcu)) |i| {
const f_ty = struct_ty.structFieldType(i, zcu);
if (i == field_idx) {
bit_offset = running_bits;
}
running_bits += @intCast(f_ty.bitSize(zcu));
}
 
const res_host_size: u16, const res_bit_offset: u16 = if (parent_ptr_info.packed_offset.host_size != 0)
.{ parent_ptr_info.packed_offset.host_size, parent_ptr_info.packed_offset.bit_offset + bit_offset }
else
.{ (running_bits + 7) / 8, bit_offset };
 
// If the field happens to be byte-aligned, simplify the pointer type.
// We can only do this if the pointee's bit size matches its ABI byte size,
// so that loads and stores do not interfere with surrounding packed bits.
//
// TODO: we do not attempt this with big-endian targets yet because of nested
// structs and floats. I need to double-check the desired behavior for big endian
// targets before adding the necessary complications to this code. This will not
// cause miscompilations; it only means the field pointer uses bit masking when it
// might not be strictly necessary.
if (res_bit_offset % 8 == 0 and field_ty.bitSize(zcu) == field_ty.abiSize(zcu) * 8 and zcu.getTarget().cpu.arch.endian() == .little) {
const byte_offset = res_bit_offset / 8;
const new_align = Alignment.fromLog2Units(@ctz(byte_offset | parent_ptr_ty.ptrAlignment(zcu).toByteUnits().?));
return .{ .byte_ptr = .{
.offset = byte_offset,
.alignment = new_align,
} };
}
 
return .{ .bit_ptr = .{
.host_size = res_host_size,
.bit_offset = res_bit_offset,
} };
}
 
pub const @"u1": Type = .{ .ip_index = .u1_type };
pub const @"u8": Type = .{ .ip_index = .u8_type };
pub const @"u16": Type = .{ .ip_index = .u16_type };
 
test/behavior/bitcast.zig added: 4888, removed: 2579, total 2309
@@ -517,3 +517,61 @@ test "@bitCast of packed struct of bools all false" {
p.b3 = false;
try expect(@as(u8, @as(u4, @bitCast(p))) == 0);
}
 
test "@bitCast of packed struct containing pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
 
const S = struct {
const A = packed struct {
ptr: *const u32,
};
 
const B = packed struct {
ptr: *const i32,
};
 
fn doTheTest() !void {
const x: u32 = 123;
var a: A = undefined;
a = .{ .ptr = &x };
const b: B = @bitCast(a);
try expect(b.ptr.* == 123);
}
};
 
try S.doTheTest();
try comptime S.doTheTest();
}
 
test "@bitCast of extern struct containing pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
 
const S = struct {
const A = extern struct {
ptr: *const u32,
};
 
const B = extern struct {
ptr: *const i32,
};
 
fn doTheTest() !void {
const x: u32 = 123;
var a: A = undefined;
a = .{ .ptr = &x };
const b: B = @bitCast(a);
try expect(b.ptr.* == 123);
}
};
 
try S.doTheTest();
try comptime S.doTheTest();
}
 
test/behavior/cast_int.zig added: 4888, removed: 2579, total 2309
@@ -139,8 +139,8 @@ const Piece = packed struct {
color: Color,
type: Type,
 
const Type = enum { KING, QUEEN, BISHOP, KNIGHT, ROOK, PAWN };
const Color = enum { WHITE, BLACK };
const Type = enum(u3) { KING, QUEEN, BISHOP, KNIGHT, ROOK, PAWN };
const Color = enum(u1) { WHITE, BLACK };
 
fn charToPiece(c: u8) !@This() {
return .{
 
test/behavior/comptime_memory.zig added: 4888, removed: 2579, total 2309
@@ -32,32 +32,22 @@ test "type pun signed and unsigned as array pointer" {
}
 
test "type pun signed and unsigned as offset many pointer" {
if (true) {
// TODO https://github.com/ziglang/zig/issues/9646
return error.SkipZigTest;
}
 
comptime {
var x: u32 = 0;
var y = @as([*]i32, @ptrCast(&x));
var x: [11]u32 = undefined;
var y: [*]i32 = @ptrCast(&x[10]);
y -= 10;
y[10] = -1;
try testing.expectEqual(@as(u32, 0xFFFFFFFF), x);
try testing.expectEqual(@as(u32, 0xFFFFFFFF), x[10]);
}
}
 
test "type pun signed and unsigned as array pointer with pointer arithemtic" {
if (true) {
// TODO https://github.com/ziglang/zig/issues/9646
return error.SkipZigTest;
}
 
comptime {
var x: u32 = 0;
const y = @as([*]i32, @ptrCast(&x)) - 10;
var x: [11]u32 = undefined;
const y = @as([*]i32, @ptrCast(&x[10])) - 10;
const z: *[15]i32 = y[0..15];
z[10] = -1;
try testing.expectEqual(@as(u32, 0xFFFFFFFF), x);
try testing.expectEqual(@as(u32, 0xFFFFFFFF), x[10]);
}
}
 
@@ -171,10 +161,13 @@ fn doTypePunBitsTest(as_bits: *Bits) !void {
 
test "type pun bits" {
if (true) {
// TODO https://github.com/ziglang/zig/issues/9646
// TODO: currently, marking one bit of `Bits` as `undefined` does
// mark the whole value as `undefined`, since the pointer interpretation
// logic reads it back in as a `u32`, which is partially-undef and thus
// has value `undefined`. We need an improved comptime memory representation
// to make this work.
return error.SkipZigTest;
}
 
comptime {
var v: u32 = undefined;
try doTypePunBitsTest(@as(*Bits, @ptrCast(&v)));
@@ -296,11 +289,6 @@ test "dance on linker values" {
}
 
test "offset array ptr by element size" {
if (true) {
// TODO https://github.com/ziglang/zig/issues/9646
return error.SkipZigTest;
}
 
comptime {
const VirtualStruct = struct { x: u32 };
var arr: [4]VirtualStruct = .{
@@ -310,15 +298,10 @@ test "offset array ptr by element size" {
.{ .x = bigToNativeEndian(u32, 0x03070b0f) },
};
 
const address = @intFromPtr(&arr);
try testing.expectEqual(@intFromPtr(&arr[0]), address);
try testing.expectEqual(@intFromPtr(&arr[0]) + 10, address + 10);
try testing.expectEqual(@intFromPtr(&arr[1]), address + @sizeOf(VirtualStruct));
try testing.expectEqual(@intFromPtr(&arr[2]), address + 2 * @sizeOf(VirtualStruct));
try testing.expectEqual(@intFromPtr(&arr[3]), address + @sizeOf(VirtualStruct) * 3);
const buf: [*]align(@alignOf(VirtualStruct)) u8 = @ptrCast(&arr);
 
const secondElement = @as(*VirtualStruct, @ptrFromInt(@intFromPtr(&arr[0]) + 2 * @sizeOf(VirtualStruct)));
try testing.expectEqual(bigToNativeEndian(u32, 0x02060a0e), secondElement.x);
const second_element: *VirtualStruct = @ptrCast(buf + 2 * @sizeOf(VirtualStruct));
try testing.expectEqual(bigToNativeEndian(u32, 0x02060a0e), second_element.x);
}
}
 
@@ -364,7 +347,7 @@ test "offset field ptr by enclosing array element size" {
 
var i: usize = 0;
while (i < 4) : (i += 1) {
var ptr: [*]u8 = @as([*]u8, @ptrCast(&arr[0]));
var ptr: [*]u8 = @ptrCast(&arr[0]);
ptr += i;
ptr += @offsetOf(VirtualStruct, "x");
var j: usize = 0;
@@ -400,23 +383,18 @@ test "accessing reinterpreted memory of parent object" {
}
 
test "bitcast packed union to integer" {
if (true) {
// https://github.com/ziglang/zig/issues/19384
return error.SkipZigTest;
}
const U = packed union {
x: u1,
x: i2,
y: u2,
};
 
comptime {
const a = U{ .x = 1 };
const b = U{ .y = 2 };
const cast_a = @as(u2, @bitCast(a));
const cast_b = @as(u2, @bitCast(b));
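// `x` is an `i2` so that `.{ .x = -1 }` defines both bits of the union's backing integer.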
const a: U = .{ .x = -1 };
const b: U = .{ .y = 2 };
const cast_a: u2 = @bitCast(a);
const cast_b: u2 = @bitCast(b);
 
// truncated because the upper bit is garbage memory that we don't care about
try testing.expectEqual(@as(u1, 1), @as(u1, @truncate(cast_a)));
try testing.expectEqual(@as(u2, 3), cast_a);
try testing.expectEqual(@as(u2, 2), cast_b);
}
}
 
test/behavior/error.zig added: 4888, removed: 2579, total 2309
@@ -1054,3 +1054,26 @@ test "errorCast from error sets to error unions" {
const err_union: Set1!void = @errorCast(error.A);
try expectError(error.A, err_union);
}
 
test "result location initialization of error union with OPV payload" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
const S = struct {
x: u0,
};
 
const a: anyerror!S = .{ .x = 0 };
comptime assert((a catch unreachable).x == 0);
 
comptime {
var b: anyerror!S = .{ .x = 0 };
_ = &b;
assert((b catch unreachable).x == 0);
}
 
var c: anyerror!S = .{ .x = 0 };
_ = &c;
try expectEqual(0, (c catch return error.TestFailed).x);
}
 
test/behavior/field_parent_ptr.zig added: 4888, removed: 2579, total 2309
@@ -1731,6 +1731,7 @@ test "@fieldParentPtr extern union" {
test "@fieldParentPtr packed union" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.target.cpu.arch.endian() == .big) return error.SkipZigTest; // TODO
 
const C = packed union {
a: bool,
 
test/behavior/optional.zig added: 4888, removed: 2579, total 2309
@@ -92,13 +92,11 @@ test "optional with zero-bit type" {
 
var two: ?struct { ZeroBit, ZeroBit } = undefined;
two = .{ with_runtime.zero_bit, with_runtime.zero_bit };
if (!@inComptime()) {
try expect(two != null);
try expect(two.?[0] == zero_bit);
try expect(two.?[0] == with_runtime.zero_bit);
try expect(two.?[1] == zero_bit);
try expect(two.?[1] == with_runtime.zero_bit);
}
try expect(two != null);
try expect(two.?[0] == zero_bit);
try expect(two.?[0] == with_runtime.zero_bit);
try expect(two.?[1] == zero_bit);
try expect(two.?[1] == with_runtime.zero_bit);
}
};
 
@@ -610,3 +608,27 @@ test "copied optional doesn't alias source" {
 
try expect(x[0] == 0.0);
}
 
test "result location initialization of optional with OPV payload" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
 
const S = struct {
x: u0,
};
 
const a: ?S = .{ .x = 0 };
comptime assert(a.?.x == 0);
 
comptime {
var b: ?S = .{ .x = 0 };
_ = &b;
assert(b.?.x == 0);
}
 
var c: ?S = .{ .x = 0 };
_ = &c;
try expectEqual(0, (c orelse return error.TestFailed).x);
}
 
test/behavior/packed-struct.zig added: 4888, removed: 2579, total 2309
@@ -1025,7 +1025,7 @@ test "modify nested packed struct aligned field" {
pretty_print: packed struct {
enabled: bool = false,
num_spaces: u4 = 4,
space_char: enum { space, tab } = .space,
space_char: enum(u1) { space, tab } = .space,
indent: u8 = 0,
} = .{},
baz: bool = false,
 
test/behavior/packed-union.zig added: 4888, removed: 2579, total 2309
@@ -1,5 +1,6 @@
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const expectEqual = std.testing.expectEqual;
 
test "flags in packed union" {
@@ -106,7 +107,7 @@ test "packed union in packed struct" {
 
fn testPackedUnionInPackedStruct() !void {
const ReadRequest = packed struct { key: i32 };
const RequestType = enum {
const RequestType = enum(u1) {
read,
insert,
};
@@ -169,3 +170,15 @@ test "assigning to non-active field at comptime" {
test_bits.bits = .{};
}
}
 
test "comptime packed union of pointers" {
const U = packed union {
a: *const u32,
b: *const [1]u32,
};
 
const x: u32 = 123;
const u: U = .{ .a = &x };
 
comptime assert(u.b[0] == 123);
}
 
test/behavior/pointers.zig added: 4888, removed: 2579, total 2309
@@ -621,3 +621,39 @@ test "cast pointers with zero sized elements" {
const d: []u8 = c;
_ = d;
}
 
test "comptime pointer equality through distinct fields with well-defined layout" {
const A = extern struct {
x: u32,
z: u16,
};
const B = extern struct {
x: u16,
y: u16,
z: u16,
};
 
const a: A = .{
.x = undefined,
.z = 123,
};
 
const ap: *const A = &a;
const bp: *const B = @ptrCast(ap);
 
comptime assert(&ap.z == &bp.z);
comptime assert(ap.z == 123);
comptime assert(bp.z == 123);
}
 
test "comptime pointer equality through distinct elements with well-defined layout" {
const buf: [2]u32 = .{ 123, 456 };
 
const ptr: *const [2]u32 = &buf;
const byte_ptr: *align(4) const [8]u8 = @ptrCast(ptr);
const second_elem: *const u32 = @ptrCast(byte_ptr[4..8]);
 
comptime assert(&buf[1] == second_elem);
comptime assert(buf[1] == 456);
comptime assert(second_elem.* == 456);
}
 
test/behavior/ptrcast.zig added: 4888, removed: 2579, total 2309
@@ -1,6 +1,7 @@
const std = @import("std");
const builtin = @import("builtin");
const expect = std.testing.expect;
const assert = std.debug.assert;
const native_endian = builtin.target.cpu.arch.endian();
 
test "reinterpret bytes as integer with nonzero offset" {
@@ -277,7 +278,7 @@ test "@ptrCast undefined value at comptime" {
}
};
comptime {
const x = S.transmute([]u8, i32, undefined);
const x = S.transmute(u64, i32, undefined);
_ = x;
}
}
@@ -292,3 +293,60 @@ test "comptime @ptrCast with packed struct leaves value unmodified" {
try expect(p.*[0] == 6);
try expect(st.three == 6);
}
 
test "@ptrCast restructures comptime-only array" {
{
const a3a2: [3][2]comptime_int = .{
.{ 1, 2 },
.{ 3, 4 },
.{ 5, 6 },
};
const a2a3: *const [2][3]comptime_int = @ptrCast(&a3a2);
comptime assert(a2a3[0][0] == 1);
comptime assert(a2a3[0][1] == 2);
comptime assert(a2a3[0][2] == 3);
comptime assert(a2a3[1][0] == 4);
comptime assert(a2a3[1][1] == 5);
comptime assert(a2a3[1][2] == 6);
}
 
{
const a6a1: [6][1]comptime_int = .{
.{1}, .{2}, .{3}, .{4}, .{5}, .{6},
};
const a1a2a3: *const [1][2][3]comptime_int = @ptrCast(&a6a1);
comptime assert(a1a2a3[0][0][0] == 1);
comptime assert(a1a2a3[0][0][1] == 2);
comptime assert(a1a2a3[0][0][2] == 3);
comptime assert(a1a2a3[0][1][0] == 4);
comptime assert(a1a2a3[0][1][1] == 5);
comptime assert(a1a2a3[0][1][2] == 6);
}
 
{
const a1: [1]comptime_int = .{123};
const raw: *const comptime_int = @ptrCast(&a1);
comptime assert(raw.* == 123);
}
 
{
const raw: comptime_int = 123;
const a1: *const [1]comptime_int = @ptrCast(&raw);
comptime assert(a1[0] == 123);
}
}
 
test "@ptrCast restructures sliced comptime-only array" {
const a3a2: [4][2]comptime_int = .{
.{ 1, 2 },
.{ 3, 4 },
.{ 5, 6 },
.{ 7, 8 },
};
 
const sub: *const [4]comptime_int = @ptrCast(a3a2[1..]);
comptime assert(sub[0] == 3);
comptime assert(sub[1] == 4);
comptime assert(sub[2] == 5);
comptime assert(sub[3] == 6);
}
 
test/behavior/type.zig added: 4888, removed: 2579, total 2309
@@ -758,3 +758,24 @@ test "matching captures causes opaque equivalence" {
comptime assert(@TypeOf(a) == @TypeOf(b));
try testing.expect(a == b);
}
 
test "reify enum where fields refers to part of array" {
const fields: [3]std.builtin.Type.EnumField = .{
.{ .name = "foo", .value = 0 },
.{ .name = "bar", .value = 1 },
undefined,
};
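// Only the first two entries are referenced below; the undefined third element must never be analyzed.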
const E = @Type(.{ .Enum = .{
.tag_type = u8,
.fields = fields[0..2],
.decls = &.{},
.is_exhaustive = true,
} });
var a: E = undefined;
var b: E = undefined;
a = .foo;
b = .bar;
try testing.expect(a == .foo);
try testing.expect(b == .bar);
try testing.expect(a != b);
}
 
test/behavior/union.zig added: 4888, removed: 2579, total 2309
@@ -1532,7 +1532,7 @@ test "reinterpreting enum value inside packed union" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
const U = packed union {
tag: enum { a, b },
tag: enum(u8) { a, b },
val: u8,
 
fn doTest() !void {
@@ -1850,9 +1850,8 @@ test "reinterpret packed union" {
 
{
// Union initialization
var u: U = .{
.qux = 0xe2a,
};
var u: U = .{ .baz = 0 }; // ensure all bits are defined
u.qux = 0xe2a;
try expectEqual(@as(u8, 0x2a), u.foo);
try expectEqual(@as(u12, 0xe2a), u.qux);
try expectEqual(@as(u29, 0xe2a), u.bar & 0xfff);
 
filename was Deleted added: 4888, removed: 2579, total 2309
@@ -0,0 +1,31 @@
//! The full test name would be:
//! struct field type resolution marks transitive error from bad usingnamespace in @typeInfo call from non-initial field type
//!
//! This test is rather esoteric. It's ensuring that errors triggered by `@typeInfo` analyzing
//! a bad `usingnamespace` correctly trigger transitive errors when analyzed by struct field type
//! resolution, meaning we don't incorrectly analyze code past the uses of `S`.
 
const S = struct {
ok: u32,
bad: @typeInfo(T),
};
 
const T = struct {
pub usingnamespace @compileError("usingnamespace analyzed");
};
 
comptime {
const a: S = .{ .ok = 123, .bad = undefined };
_ = a;
@compileError("should not be reached");
}
 
comptime {
const b: S = .{ .ok = 123, .bad = undefined };
_ = b;
@compileError("should not be reached");
}
 
// error
//
// :14:24: error: usingnamespace analyzed
 
filename was Deleted added: 4888, removed: 2579, total 2309
@@ -0,0 +1,22 @@
export fn entry1() void {
const S = extern struct { x: u32 };
_ = *align(1:2:8) S;
}
 
export fn entry2() void {
const S = struct { x: u32 };
_ = *align(1:2:@sizeOf(S) * 2) S;
}
 
export fn entry3() void {
const E = enum { implicit, backing, type };
_ = *align(1:2:8) E;
}
 
// error
//
// :3:23: error: bit-pointer cannot refer to value of type 'tmp.entry1.S'
// :3:23: note: only packed structs layout are allowed in packed types
// :8:36: error: bit-pointer cannot refer to value of type 'tmp.entry2.S'
// :8:36: note: only packed structs layout are allowed in packed types
// :13:23: error: bit-pointer cannot refer to value of type 'tmp.entry3.E'
 
filename was Deleted added: 4888, removed: 2579, total 2309
@@ -0,0 +1,12 @@
export fn entry1() void {
const x: i32 = undefined;
const y: u32 = @bitCast(x);
@compileLog(y);
}
 
// error
//
// :4:5: error: found compile log statement
//
// Compile Log Output:
// @as(u32, undefined)
 
test/cases/compile_errors/compile_log_a_pointer_to_an_opaque_value.zig added: 4888, removed: 2579, total 2309
@@ -9,4 +9,4 @@ export fn entry() void {
// :2:5: error: found compile log statement
//
// Compile Log Output:
// @as(*const anyopaque, &tmp.entry)
// @as(*const anyopaque, @as(*const anyopaque, @ptrCast(tmp.entry)))
 
/dev/null added: 4888, removed: 2579, total 2309
@@ -1,13 +0,0 @@
const MyStruct = struct { x: bool = false };
 
comptime {
const x = &[_]MyStruct{ .{}, .{} };
const y = x[0..1] ++ &[_]MyStruct{};
_ = y;
}
 
// error
// backend=stage2
// target=native
//
// :5:16: error: comptime dereference requires '[1]tmp.MyStruct' to have a well-defined layout, but it does not.
 
test/cases/compile_errors/dereferencing_invalid_payload_ptr_at_comptime.zig added: 4888, removed: 2579, total 2309
@@ -6,7 +6,7 @@ comptime {
 
const payload_ptr = &opt_ptr.?;
opt_ptr = null;
_ = payload_ptr.*.*;
_ = payload_ptr.*.*; // TODO: this case was regressed by #19630
}
comptime {
var opt: ?u8 = 15;
@@ -28,6 +28,5 @@ comptime {
// backend=stage2
// target=native
//
// :9:20: error: attempt to use null value
// :16:20: error: attempt to use null value
// :24:20: error: attempt to unwrap error: Foo
 
test/cases/compile_errors/function_call_assigned_to_incorrect_type.zig added: 4888, removed: 2579, total 2309
@@ -11,4 +11,5 @@ fn concat() [16]f32 {
// target=native
//
// :3:17: error: expected type '[4]f32', found '[16]f32'
// :3:17: note: array of length 16 cannot cast into an array of length 4
// :3:17: note: destination has length 4
// :3:17: note: source has length 16
 
test/cases/compile_errors/issue_7810-comptime_slice-len_increment_beyond_bounds.zig added: 4888, removed: 2579, total 2309
@@ -8,7 +8,5 @@ export fn foo_slice_len_increment_beyond_bounds() void {
}
 
// error
// backend=stage2
// target=native
//
// :6:16: error: comptime store of index 8 out of bounds of array length 8
// :6:16: error: dereference of '*u8' exceeds bounds of containing decl of type '[8]u8'
 
filename was Deleted added: 4888, removed: 2579, total 2309
@@ -0,0 +1,26 @@
comptime {
const a: @Vector(3, u8) = .{ 1, 200, undefined };
@compileLog(@addWithOverflow(a, a));
}
 
comptime {
const a: @Vector(3, u8) = .{ 1, 2, undefined };
const b: @Vector(3, u8) = .{ 0, 3, 10 };
@compileLog(@subWithOverflow(a, b));
}
 
comptime {
const a: @Vector(3, u8) = .{ 1, 200, undefined };
@compileLog(@mulWithOverflow(a, a));
}
 
// error
//
// :3:5: error: found compile log statement
// :9:5: note: also here
// :14:5: note: also here
//
// Compile Log Output:
// @as(struct{@Vector(3, u8), @Vector(3, u1)}, .{ .{ 2, 144, undefined }, .{ 0, 1, undefined } })
// @as(struct{@Vector(3, u8), @Vector(3, u1)}, .{ .{ 1, 255, undefined }, .{ 0, 1, undefined } })
// @as(struct{@Vector(3, u8), @Vector(3, u1)}, .{ .{ 1, 64, undefined }, .{ 0, 1, undefined } })
 
test/cases/compile_errors/packed_struct_with_fields_of_not_allowed_types.zig added: 4888, removed: 2579, total 2309
@@ -30,7 +30,7 @@ export fn entry6() void {
}
export fn entry7() void {
_ = @sizeOf(packed struct {
x: enum { A, B },
x: enum(u1) { A, B },
});
}
export fn entry8() void {
@@ -70,6 +70,12 @@ export fn entry13() void {
x: *type,
});
}
export fn entry14() void {
const E = enum { implicit, backing, type };
_ = @sizeOf(packed struct {
x: E,
});
}
 
// error
// backend=llvm
@@ -97,3 +103,5 @@ export fn entry13() void {
// :70:12: error: packed structs cannot contain fields of type '*type'
// :70:12: note: comptime-only pointer has no guaranteed in-memory representation
// :70:12: note: types are not available at runtime
// :76:12: error: packed structs cannot contain fields of type 'tmp.entry14.E'
// :74:15: note: enum declared here
 
filename was Deleted added: 4888, removed: 2579, total 2309
@@ -0,0 +1,19 @@
export fn entry1() void {
const x: u32 = 123;
const ptr: [*]const u32 = @ptrCast(&x);
_ = ptr - 1;
}
 
export fn entry2() void {
const S = extern struct { x: u32, y: u32 };
const y: u32 = 123;
const parent_ptr: *const S = @fieldParentPtr("y", &y);
_ = parent_ptr;
}
 
// error
//
// :4:13: error: pointer computation here causes undefined behavior
// :4:13: note: resulting pointer exceeds bounds of containing value which may trigger overflow
// :10:55: error: pointer computation here causes undefined behavior
// :10:55: note: resulting pointer exceeds bounds of containing value which may trigger overflow
 
test/cases/compile_errors/reading_past_end_of_pointer_casted_array.zig added: 4888, removed: 2579, total 2309
@@ -5,9 +5,17 @@ comptime {
const deref = int_ptr.*;
_ = deref;
}
comptime {
const array: [4]u8 = "aoeu".*;
const sub_array = array[1..];
const int_ptr: *const u32 = @ptrCast(@alignCast(sub_array));
const deref = int_ptr.*;
_ = deref;
}
 
// error
// backend=stage2
// target=native
//
// :5:26: error: dereference of '*const u24' exceeds bounds of containing decl of type '[4]u8'
// :12:26: error: dereference of '*const u32' exceeds bounds of containing decl of type '[4]u8'
 
test/cases/compile_errors/slice_cannot_have_its_bytes_reinterpreted.zig added: 4888, removed: 2579, total 2309
@@ -7,4 +7,4 @@ export fn foo() void {
// backend=stage2
// target=native
//
// :3:49: error: comptime dereference requires '[]const u8' to have a well-defined layout, but it does not.
// :3:49: error: comptime dereference requires '[]const u8' to have a well-defined layout
 
test/cases/comptime_aggregate_print.zig added: 4888, removed: 2579, total 2309
@@ -31,5 +31,5 @@ pub fn main() !void {}
// :20:5: error: found compile log statement
//
// Compile Log Output:
// @as([]i32, &(comptime alloc).buf[0..2])
// @as([]i32, &(comptime alloc).buf[0..2])
// @as([]i32, @as([*]i32, @ptrCast(@as(tmp.UnionContainer, .{ .buf = .{ 1, 2 } }).buf[0]))[0..2])
// @as([]i32, @as([*]i32, @ptrCast(@as(tmp.StructContainer, .{ .buf = .{ 3, 4 } }).buf[0]))[0..2])