
Jacob Young · commit 5d745d94 (parent 6fd09f8d) · +399 −398
x86_64: fix C abi for unions

Closes #19721
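
For context: on x86_64 SysV, extern unions (including unions nested inside structs) were not classified according to the ABI's eightbyte field-merging rules, so aggregates like the one below could be passed incorrectly. A minimal sketch of the shape exercised by the `Struct_u32_Union_u32_u32u32` test case this commit adds (the names `S` and `zig_take_s` are illustrative only):

    const std = @import("std");

    const S = extern struct {
        a: u32,
        b: extern union {
            c: extern struct { d: u32, e: u32 },
        },
    };

    // Under the SysV x86_64 ABI this 12-byte aggregate classifies as two
    // INTEGER eightbytes, so it is expected to travel in registers rather
    // than in memory across an extern call boundary.
    export fn zig_take_s(s: S) void {
        std.debug.assert(s.a == 1);
    }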

src/InternPool.zig
@@ -1921,7 +1921,7 @@ pub const LoadedUnionType = struct {
return self.flagsPtr(ip).layout;
}
 
pub fn fieldAlign(self: LoadedUnionType, ip: *const InternPool, field_index: u32) Alignment {
pub fn fieldAlign(self: LoadedUnionType, ip: *const InternPool, field_index: usize) Alignment {
if (self.field_aligns.len == 0) return .none;
return self.field_aligns.get(ip)[field_index];
}
@@ -2087,41 +2087,41 @@ pub const LoadedStructType = struct {
 
/// Returns the already-existing field with the same name, if any.
pub fn addFieldName(
self: @This(),
self: LoadedStructType,
ip: *InternPool,
name: NullTerminatedString,
) ?u32 {
return ip.addFieldName(self.names_map.unwrap().?, self.field_names.start, name);
}
 
pub fn fieldAlign(s: @This(), ip: *const InternPool, i: usize) Alignment {
pub fn fieldAlign(s: LoadedStructType, ip: *const InternPool, i: usize) Alignment {
if (s.field_aligns.len == 0) return .none;
return s.field_aligns.get(ip)[i];
}
 
pub fn fieldInit(s: @This(), ip: *const InternPool, i: usize) Index {
pub fn fieldInit(s: LoadedStructType, ip: *const InternPool, i: usize) Index {
if (s.field_inits.len == 0) return .none;
assert(s.haveFieldInits(ip));
return s.field_inits.get(ip)[i];
}
 
/// Returns `none` in the case the struct is a tuple.
pub fn fieldName(s: @This(), ip: *const InternPool, i: usize) OptionalNullTerminatedString {
pub fn fieldName(s: LoadedStructType, ip: *const InternPool, i: usize) OptionalNullTerminatedString {
if (s.field_names.len == 0) return .none;
return s.field_names.get(ip)[i].toOptional();
}
 
pub fn fieldIsComptime(s: @This(), ip: *const InternPool, i: usize) bool {
pub fn fieldIsComptime(s: LoadedStructType, ip: *const InternPool, i: usize) bool {
return s.comptime_bits.getBit(ip, i);
}
 
pub fn setFieldComptime(s: @This(), ip: *InternPool, i: usize) void {
pub fn setFieldComptime(s: LoadedStructType, ip: *InternPool, i: usize) void {
s.comptime_bits.setBit(ip, i);
}
 
/// Reads the non-opv flag calculated during AstGen. Used to short-circuit more
/// complicated logic.
pub fn knownNonOpv(s: @This(), ip: *InternPool) bool {
pub fn knownNonOpv(s: LoadedStructType, ip: *InternPool) bool {
return switch (s.layout) {
.@"packed" => false,
.auto, .@"extern" => s.flagsPtr(ip).known_non_opv,
@@ -2130,7 +2130,7 @@ pub const LoadedStructType = struct {
 
/// The returned pointer expires with any addition to the `InternPool`.
/// Asserts the struct is not packed.
pub fn flagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStruct.Flags {
pub fn flagsPtr(self: LoadedStructType, ip: *const InternPool) *Tag.TypeStruct.Flags {
assert(self.layout != .@"packed");
const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?;
return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
@@ -2138,13 +2138,13 @@ pub const LoadedStructType = struct {
 
/// The returned pointer expires with any addition to the `InternPool`.
/// Asserts that the struct is packed.
pub fn packedFlagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStructPacked.Flags {
pub fn packedFlagsPtr(self: LoadedStructType, ip: *const InternPool) *Tag.TypeStructPacked.Flags {
assert(self.layout == .@"packed");
const flags_field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?;
return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
}
 
pub fn assumeRuntimeBitsIfFieldTypesWip(s: @This(), ip: *InternPool) bool {
pub fn assumeRuntimeBitsIfFieldTypesWip(s: LoadedStructType, ip: *InternPool) bool {
if (s.layout == .@"packed") return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.field_types_wip) {
@@ -2154,7 +2154,7 @@ pub const LoadedStructType = struct {
return false;
}
 
pub fn setTypesWip(s: @This(), ip: *InternPool) bool {
pub fn setTypesWip(s: LoadedStructType, ip: *InternPool) bool {
if (s.layout == .@"packed") return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.field_types_wip) return true;
@@ -2162,12 +2162,12 @@ pub const LoadedStructType = struct {
return false;
}
 
pub fn clearTypesWip(s: @This(), ip: *InternPool) void {
pub fn clearTypesWip(s: LoadedStructType, ip: *InternPool) void {
if (s.layout == .@"packed") return;
s.flagsPtr(ip).field_types_wip = false;
}
 
pub fn setLayoutWip(s: @This(), ip: *InternPool) bool {
pub fn setLayoutWip(s: LoadedStructType, ip: *InternPool) bool {
if (s.layout == .@"packed") return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.layout_wip) return true;
@@ -2175,12 +2175,12 @@ pub const LoadedStructType = struct {
return false;
}
 
pub fn clearLayoutWip(s: @This(), ip: *InternPool) void {
pub fn clearLayoutWip(s: LoadedStructType, ip: *InternPool) void {
if (s.layout == .@"packed") return;
s.flagsPtr(ip).layout_wip = false;
}
 
pub fn setAlignmentWip(s: @This(), ip: *InternPool) bool {
pub fn setAlignmentWip(s: LoadedStructType, ip: *InternPool) bool {
if (s.layout == .@"packed") return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.alignment_wip) return true;
@@ -2188,12 +2188,12 @@ pub const LoadedStructType = struct {
return false;
}
 
pub fn clearAlignmentWip(s: @This(), ip: *InternPool) void {
pub fn clearAlignmentWip(s: LoadedStructType, ip: *InternPool) void {
if (s.layout == .@"packed") return;
s.flagsPtr(ip).alignment_wip = false;
}
 
pub fn setInitsWip(s: @This(), ip: *InternPool) bool {
pub fn setInitsWip(s: LoadedStructType, ip: *InternPool) bool {
switch (s.layout) {
.@"packed" => {
const flag = &s.packedFlagsPtr(ip).field_inits_wip;
@@ -2210,14 +2210,14 @@ pub const LoadedStructType = struct {
}
}
 
pub fn clearInitsWip(s: @This(), ip: *InternPool) void {
pub fn clearInitsWip(s: LoadedStructType, ip: *InternPool) void {
switch (s.layout) {
.@"packed" => s.packedFlagsPtr(ip).field_inits_wip = false,
.auto, .@"extern" => s.flagsPtr(ip).field_inits_wip = false,
}
}
 
pub fn setFullyResolved(s: @This(), ip: *InternPool) bool {
pub fn setFullyResolved(s: LoadedStructType, ip: *InternPool) bool {
if (s.layout == .@"packed") return true;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.fully_resolved) return true;
@@ -2225,13 +2225,13 @@ pub const LoadedStructType = struct {
return false;
}
 
pub fn clearFullyResolved(s: @This(), ip: *InternPool) void {
pub fn clearFullyResolved(s: LoadedStructType, ip: *InternPool) void {
s.flagsPtr(ip).fully_resolved = false;
}
 
/// The returned pointer expires with any addition to the `InternPool`.
/// Asserts the struct is not packed.
pub fn size(self: @This(), ip: *InternPool) *u32 {
pub fn size(self: LoadedStructType, ip: *InternPool) *u32 {
assert(self.layout != .@"packed");
const size_field_index = std.meta.fieldIndex(Tag.TypeStruct, "size").?;
return @ptrCast(&ip.extra.items[self.extra_index + size_field_index]);
@@ -2241,50 +2241,50 @@ pub const LoadedStructType = struct {
/// this type or the user specifies it, it is stored here. This will be
/// set to `none` until the layout is resolved.
/// Asserts the struct is packed.
pub fn backingIntType(s: @This(), ip: *const InternPool) *Index {
pub fn backingIntType(s: LoadedStructType, ip: *const InternPool) *Index {
assert(s.layout == .@"packed");
const field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "backing_int_ty").?;
return @ptrCast(&ip.extra.items[s.extra_index + field_index]);
}
 
/// Asserts the struct is not packed.
pub fn setZirIndex(s: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
pub fn setZirIndex(s: LoadedStructType, ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
assert(s.layout != .@"packed");
const field_index = std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?;
ip.extra.items[s.extra_index + field_index] = @intFromEnum(new_zir_index);
}
 
pub fn haveFieldTypes(s: @This(), ip: *const InternPool) bool {
pub fn haveFieldTypes(s: LoadedStructType, ip: *const InternPool) bool {
const types = s.field_types.get(ip);
return types.len == 0 or types[0] != .none;
}
 
pub fn haveFieldInits(s: @This(), ip: *const InternPool) bool {
pub fn haveFieldInits(s: LoadedStructType, ip: *const InternPool) bool {
return switch (s.layout) {
.@"packed" => s.packedFlagsPtr(ip).inits_resolved,
.auto, .@"extern" => s.flagsPtr(ip).inits_resolved,
};
}
 
pub fn setHaveFieldInits(s: @This(), ip: *InternPool) void {
pub fn setHaveFieldInits(s: LoadedStructType, ip: *InternPool) void {
switch (s.layout) {
.@"packed" => s.packedFlagsPtr(ip).inits_resolved = true,
.auto, .@"extern" => s.flagsPtr(ip).inits_resolved = true,
}
}
 
pub fn haveLayout(s: @This(), ip: *InternPool) bool {
pub fn haveLayout(s: LoadedStructType, ip: *InternPool) bool {
return switch (s.layout) {
.@"packed" => s.backingIntType(ip).* != .none,
.auto, .@"extern" => s.flagsPtr(ip).layout_resolved,
};
}
 
pub fn isTuple(s: @This(), ip: *InternPool) bool {
pub fn isTuple(s: LoadedStructType, ip: *InternPool) bool {
return s.layout != .@"packed" and s.flagsPtr(ip).is_tuple;
}
 
pub fn hasReorderedFields(s: @This()) bool {
pub fn hasReorderedFields(s: LoadedStructType) bool {
return s.layout == .auto;
}
 
@@ -2318,7 +2318,7 @@ pub const LoadedStructType = struct {
/// Iterates over non-comptime fields in the order they are laid out in memory at runtime.
/// May or may not include zero-bit fields.
/// Asserts the struct is not packed.
pub fn iterateRuntimeOrder(s: @This(), ip: *InternPool) RuntimeOrderIterator {
pub fn iterateRuntimeOrder(s: LoadedStructType, ip: *InternPool) RuntimeOrderIterator {
assert(s.layout != .@"packed");
return .{
.ip = ip,
@@ -2358,7 +2358,7 @@ pub const LoadedStructType = struct {
}
};
 
pub fn iterateRuntimeOrderReverse(s: @This(), ip: *InternPool) ReverseRuntimeOrderIterator {
pub fn iterateRuntimeOrderReverse(s: LoadedStructType, ip: *InternPool) ReverseRuntimeOrderIterator {
assert(s.layout != .@"packed");
return .{
.ip = ip,
 
src/Module.zig
@@ -6140,18 +6140,18 @@ pub const UnionLayout = struct {
padding: u32,
};
 
pub fn getUnionLayout(mod: *Module, u: InternPool.LoadedUnionType) UnionLayout {
pub fn getUnionLayout(mod: *Module, loaded_union: InternPool.LoadedUnionType) UnionLayout {
const ip = &mod.intern_pool;
assert(u.haveLayout(ip));
assert(loaded_union.haveLayout(ip));
var most_aligned_field: u32 = undefined;
var most_aligned_field_size: u64 = undefined;
var biggest_field: u32 = undefined;
var payload_size: u64 = 0;
var payload_align: Alignment = .@"1";
for (u.field_types.get(ip), 0..) |field_ty, i| {
for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| {
if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;
 
const explicit_align = u.fieldAlign(ip, @intCast(i));
const explicit_align = loaded_union.fieldAlign(ip, field_index);
const field_align = if (explicit_align != .none)
explicit_align
else
@@ -6159,16 +6159,16 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.LoadedUnionType) UnionLayout {
const field_size = Type.fromInterned(field_ty).abiSize(mod);
if (field_size > payload_size) {
payload_size = field_size;
biggest_field = @intCast(i);
biggest_field = @intCast(field_index);
}
if (field_align.compare(.gte, payload_align)) {
payload_align = field_align;
most_aligned_field = @intCast(i);
most_aligned_field = @intCast(field_index);
most_aligned_field_size = field_size;
}
}
const have_tag = u.flagsPtr(ip).runtime_tag.hasTag();
if (!have_tag or !Type.fromInterned(u.enum_tag_ty).hasRuntimeBits(mod)) {
const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag();
if (!have_tag or !Type.fromInterned(loaded_union.enum_tag_ty).hasRuntimeBits(mod)) {
return .{
.abi_size = payload_align.forward(payload_size),
.abi_align = payload_align,
@@ -6183,10 +6183,10 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.LoadedUnionType) UnionLayout {
};
}
 
const tag_size = Type.fromInterned(u.enum_tag_ty).abiSize(mod);
const tag_align = Type.fromInterned(u.enum_tag_ty).abiAlignment(mod).max(.@"1");
const tag_size = Type.fromInterned(loaded_union.enum_tag_ty).abiSize(mod);
const tag_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(mod).max(.@"1");
return .{
.abi_size = u.size(ip).*,
.abi_size = loaded_union.size(ip).*,
.abi_align = tag_align.max(payload_align),
.most_aligned_field = most_aligned_field,
.most_aligned_field_size = most_aligned_field_size,
@@ -6195,24 +6195,24 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.LoadedUnionType) UnionLayout {
.payload_align = payload_align,
.tag_align = tag_align,
.tag_size = tag_size,
.padding = u.padding(ip).*,
.padding = loaded_union.padding(ip).*,
};
}
 
pub fn unionAbiSize(mod: *Module, u: InternPool.LoadedUnionType) u64 {
return mod.getUnionLayout(u).abi_size;
pub fn unionAbiSize(mod: *Module, loaded_union: InternPool.LoadedUnionType) u64 {
return mod.getUnionLayout(loaded_union).abi_size;
}
 
/// Returns 0 if the union is represented with 0 bits at runtime.
pub fn unionAbiAlignment(mod: *Module, u: InternPool.LoadedUnionType) Alignment {
pub fn unionAbiAlignment(mod: *Module, loaded_union: InternPool.LoadedUnionType) Alignment {
const ip = &mod.intern_pool;
const have_tag = u.flagsPtr(ip).runtime_tag.hasTag();
const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag();
var max_align: Alignment = .none;
if (have_tag) max_align = Type.fromInterned(u.enum_tag_ty).abiAlignment(mod);
for (u.field_types.get(ip), 0..) |field_ty, field_index| {
if (have_tag) max_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(mod);
for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| {
if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
 
const field_align = mod.unionFieldNormalAlignment(u, @intCast(field_index));
const field_align = mod.unionFieldNormalAlignment(loaded_union, @intCast(field_index));
max_align = max_align.max(field_align);
}
return max_align;
@@ -6221,20 +6221,20 @@ pub fn unionAbiAlignment(mod: *Module, u: InternPool.LoadedUnionType) Alignment
/// Returns the field alignment, assuming the union is not packed.
/// Keep implementation in sync with `Sema.unionFieldAlignment`.
/// Prefer to call that function instead of this one during Sema.
pub fn unionFieldNormalAlignment(mod: *Module, u: InternPool.LoadedUnionType, field_index: u32) Alignment {
pub fn unionFieldNormalAlignment(mod: *Module, loaded_union: InternPool.LoadedUnionType, field_index: u32) Alignment {
const ip = &mod.intern_pool;
const field_align = u.fieldAlign(ip, field_index);
const field_align = loaded_union.fieldAlign(ip, field_index);
if (field_align != .none) return field_align;
const field_ty = Type.fromInterned(u.field_types.get(ip)[field_index]);
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
return field_ty.abiAlignment(mod);
}
 
/// Returns the index of the active field, given the current tag value
pub fn unionTagFieldIndex(mod: *Module, u: InternPool.LoadedUnionType, enum_tag: Value) ?u32 {
pub fn unionTagFieldIndex(mod: *Module, loaded_union: InternPool.LoadedUnionType, enum_tag: Value) ?u32 {
const ip = &mod.intern_pool;
if (enum_tag.toIntern() == .none) return null;
assert(ip.typeOf(enum_tag.toIntern()) == u.enum_tag_ty);
return u.loadTagType(ip).tagValueIndex(ip, enum_tag.toIntern());
assert(ip.typeOf(enum_tag.toIntern()) == loaded_union.enum_tag_ty);
return loaded_union.loadTagType(ip).tagValueIndex(ip, enum_tag.toIntern());
}
 
/// Returns the field alignment of a non-packed struct in byte units.
 
src/Sema.zig
@@ -35405,7 +35405,7 @@ pub fn resolveUnionAlignment(
const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
if (!(try sema.typeHasRuntimeBits(field_ty))) continue;
 
const explicit_align = union_type.fieldAlign(ip, @intCast(field_index));
const explicit_align = union_type.fieldAlign(ip, field_index);
const field_align = if (explicit_align != .none)
explicit_align
else
@@ -35465,7 +35465,7 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
else => return err,
});
 
const explicit_align = union_type.fieldAlign(ip, @intCast(field_index));
const explicit_align = union_type.fieldAlign(ip, field_index);
const field_align = if (explicit_align != .none)
explicit_align
else
 
src/arch/x86_64/CodeGen.zig
@@ -14316,7 +14316,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
.mmx => {},
.sse => switch (ty.zigTypeTag(mod)) {
else => {
const classes = mem.sliceTo(&abi.classifySystemV(ty, mod, .other), .none);
const classes = mem.sliceTo(&abi.classifySystemV(ty, mod, self.target.*, .other), .none);
assert(std.mem.indexOfNone(abi.Class, classes, &.{
.integer, .sse, .memory, .float, .float_combine,
}) == null);
@@ -18450,7 +18450,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
const overflow_arg_area: MCValue = .{ .indirect = .{ .reg = ptr_arg_list_reg, .off = 8 } };
const reg_save_area: MCValue = .{ .indirect = .{ .reg = ptr_arg_list_reg, .off = 16 } };
 
const classes = mem.sliceTo(&abi.classifySystemV(promote_ty, mod, .arg), .none);
const classes = mem.sliceTo(&abi.classifySystemV(promote_ty, mod, self.target.*, .arg), .none);
switch (classes[0]) {
.integer => {
assert(classes.len == 1);
@@ -18800,7 +18800,7 @@ fn resolveCallingConventionValues(
var ret_tracking_i: usize = 0;
 
const classes = switch (resolved_cc) {
.SysV => mem.sliceTo(&abi.classifySystemV(ret_ty, mod, .ret), .none),
.SysV => mem.sliceTo(&abi.classifySystemV(ret_ty, mod, self.target.*, .ret), .none),
.Win64 => &.{abi.classifyWindows(ret_ty, mod)},
else => unreachable,
};
@@ -18875,7 +18875,7 @@ fn resolveCallingConventionValues(
var arg_mcv_i: usize = 0;
 
const classes = switch (resolved_cc) {
.SysV => mem.sliceTo(&abi.classifySystemV(ty, mod, .arg), .none),
.SysV => mem.sliceTo(&abi.classifySystemV(ty, mod, self.target.*, .arg), .none),
.Win64 => &.{abi.classifyWindows(ty, mod)},
else => unreachable,
};
@@ -19090,7 +19090,7 @@ fn memSize(self: *Self, ty: Type) Memory.Size {
 
fn splitType(self: *Self, ty: Type) ![2]Type {
const mod = self.bin_file.comp.module.?;
const classes = mem.sliceTo(&abi.classifySystemV(ty, mod, .other), .none);
const classes = mem.sliceTo(&abi.classifySystemV(ty, mod, self.target.*, .other), .none);
var parts: [2]Type = undefined;
if (classes.len == 2) for (&parts, classes, 0..) |*part, class, part_i| {
part.* = switch (class) {
 
src/arch/x86_64/abi.zig
@@ -11,6 +11,37 @@ pub const Class = enum {
float,
float_combine,
integer_per_element,
 
fn isX87(class: Class) bool {
return switch (class) {
.x87, .x87up, .complex_x87 => true,
else => false,
};
}
 
/// Combine a field class with the prev one.
fn combineSystemV(prev_class: Class, next_class: Class) Class {
// "If both classes are equal, this is the resulting class."
if (prev_class == next_class)
return if (prev_class == .float) .float_combine else prev_class;
 
// "If one of the classes is NO_CLASS, the resulting class
// is the other class."
if (prev_class == .none) return next_class;
 
// "If one of the classes is MEMORY, the result is the MEMORY class."
if (prev_class == .memory or next_class == .memory) return .memory;
 
// "If one of the classes is INTEGER, the result is the INTEGER."
if (prev_class == .integer or next_class == .integer) return .integer;
 
// "If one of the classes is X87, X87UP, COMPLEX_X87 class,
// MEMORY is used as class."
if (prev_class.isX87() or next_class.isX87()) return .memory;
 
// "Otherwise class SSE is used."
return .sse;
}
};
 
pub fn classifyWindows(ty: Type, zcu: *Zcu) Class {
@@ -69,9 +100,7 @@ pub const Context = enum { ret, arg, field, other };
 
/// There are a maximum of 8 possible return slots. Returned values are in
/// the beginning of the array; unused slots are filled with .none.
pub fn classifySystemV(ty: Type, zcu: *Zcu, ctx: Context) [8]Class {
const ip = &zcu.intern_pool;
const target = zcu.getTarget();
pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8]Class {
const memory_class = [_]Class{
.memory, .none, .none, .none,
.none, .none, .none, .none,
@@ -231,121 +260,30 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, ctx: Context) [8]Class {
}
return memory_class;
},
.Struct => {
.Struct, .Union => {
// "If the size of an object is larger than eight eightbytes, or
// it contains unaligned fields, it has class MEMORY"
// "If the size of the aggregate exceeds a single eightbyte, each is classified
// separately.".
const loaded_struct = ip.loadStructType(ty.toIntern());
const ty_size = ty.abiSize(zcu);
if (loaded_struct.layout == .@"packed") {
assert(ty_size <= 16);
result[0] = .integer;
if (ty_size > 8) result[1] = .integer;
return result;
switch (ty.containerLayout(zcu)) {
.auto, .@"extern" => {},
.@"packed" => {
assert(ty_size <= 16);
result[0] = .integer;
if (ty_size > 8) result[1] = .integer;
return result;
},
}
if (ty_size > 64)
return memory_class;
 
var byte_offset: u64 = 0;
classifySystemVStruct(&result, &byte_offset, loaded_struct, zcu);
 
// Post-merger cleanup
 
// "If one of the classes is MEMORY, the whole argument is passed in memory"
// "If X87UP is not preceded by X87, the whole argument is passed in memory."
var found_sseup = false;
for (result, 0..) |item, i| switch (item) {
.memory => return memory_class,
.x87up => if (i == 0 or result[i - 1] != .x87) return memory_class,
.sseup => found_sseup = true,
else => continue,
};
// "If the size of the aggregate exceeds two eightbytes and the first eight-
// byte isn’t SSE or any other eightbyte isn’t SSEUP, the whole argument
// is passed in memory."
if (ty_size > 16 and (result[0] != .sse or !found_sseup)) return memory_class;
 
// "If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE."
for (&result, 0..) |*item, i| {
if (item.* == .sseup) switch (result[i - 1]) {
.sse, .sseup => continue,
else => item.* = .sse,
};
}
return result;
},
.Union => {
// "If the size of an object is larger than eight eightbytes, or
// it contains unaligned fields, it has class MEMORY"
// "If the size of the aggregate exceeds a single eightbyte, each is classified
// separately.".
const union_obj = zcu.typeToUnion(ty).?;
const ty_size = zcu.unionAbiSize(union_obj);
if (union_obj.getLayout(ip) == .@"packed") {
assert(ty_size <= 16);
result[0] = .integer;
if (ty_size > 8) result[1] = .integer;
return result;
}
if (ty_size > 64)
return memory_class;
 
for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| {
const field_align = union_obj.fieldAlign(ip, @intCast(field_index));
if (field_align != .none and
field_align.compare(.lt, Type.fromInterned(field_ty).abiAlignment(zcu)))
{
return memory_class;
}
// Combine this field with the previous one.
const field_class = classifySystemV(Type.fromInterned(field_ty), zcu, .field);
for (&result, 0..) |*result_item, i| {
const field_item = field_class[i];
// "If both classes are equal, this is the resulting class."
if (result_item.* == field_item) {
continue;
}
 
// "If one of the classes is NO_CLASS, the resulting class
// is the other class."
if (result_item.* == .none) {
result_item.* = field_item;
continue;
}
if (field_item == .none) {
continue;
}
 
// "If one of the classes is MEMORY, the result is the MEMORY class."
if (result_item.* == .memory or field_item == .memory) {
result_item.* = .memory;
continue;
}
 
// "If one of the classes is INTEGER, the result is the INTEGER."
if (result_item.* == .integer or field_item == .integer) {
result_item.* = .integer;
continue;
}
 
// "If one of the classes is X87, X87UP, COMPLEX_X87 class,
// MEMORY is used as class."
if (result_item.* == .x87 or
result_item.* == .x87up or
result_item.* == .complex_x87 or
field_item == .x87 or
field_item == .x87up or
field_item == .complex_x87)
{
result_item.* = .memory;
continue;
}
 
// "Otherwise class SSE is used."
result_item.* = .sse;
}
}
_ = if (zcu.typeToStruct(ty)) |loaded_struct|
classifySystemVStruct(&result, 0, loaded_struct, zcu, target)
else if (zcu.typeToUnion(ty)) |loaded_union|
classifySystemVUnion(&result, 0, loaded_union, zcu, target)
else
unreachable;
 
// Post-merger cleanup
 
@@ -391,78 +329,85 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, ctx: Context) [8]Class {
 
fn classifySystemVStruct(
result: *[8]Class,
byte_offset: *u64,
starting_byte_offset: u64,
loaded_struct: InternPool.LoadedStructType,
zcu: *Zcu,
) void {
target: std.Target,
) u64 {
const ip = &zcu.intern_pool;
var byte_offset = starting_byte_offset;
var field_it = loaded_struct.iterateRuntimeOrder(ip);
while (field_it.next()) |field_index| {
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
const field_align = loaded_struct.fieldAlign(ip, field_index);
byte_offset.* = std.mem.alignForward(
byte_offset = std.mem.alignForward(
u64,
byte_offset.*,
byte_offset,
field_align.toByteUnits() orelse field_ty.abiAlignment(zcu).toByteUnits().?,
);
if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
if (field_loaded_struct.layout != .@"packed") {
classifySystemVStruct(result, byte_offset, field_loaded_struct, zcu);
continue;
switch (field_loaded_struct.layout) {
.auto, .@"extern" => {
byte_offset = classifySystemVStruct(result, byte_offset, field_loaded_struct, zcu, target);
continue;
},
.@"packed" => {},
}
} else if (zcu.typeToUnion(field_ty)) |field_loaded_union| {
switch (field_loaded_union.getLayout(ip)) {
.auto, .@"extern" => {
byte_offset = classifySystemVUnion(result, byte_offset, field_loaded_union, zcu, target);
continue;
},
.@"packed" => {},
}
}
const field_class = std.mem.sliceTo(&classifySystemV(field_ty, zcu, .field), .none);
const field_size = field_ty.abiSize(zcu);
combine: {
// Combine this field with the previous one.
const result_class = &result[@intCast(byte_offset.* / 8)];
// "If both classes are equal, this is the resulting class."
if (result_class.* == field_class[0]) {
if (result_class.* == .float) {
result_class.* = .float_combine;
}
break :combine;
}
 
// "If one of the classes is NO_CLASS, the resulting class
// is the other class."
if (result_class.* == .none) {
result_class.* = field_class[0];
break :combine;
}
assert(field_class[0] != .none);
 
// "If one of the classes is MEMORY, the result is the MEMORY class."
if (result_class.* == .memory or field_class[0] == .memory) {
result_class.* = .memory;
break :combine;
}
 
// "If one of the classes is INTEGER, the result is the INTEGER."
if (result_class.* == .integer or field_class[0] == .integer) {
result_class.* = .integer;
break :combine;
}
 
// "If one of the classes is X87, X87UP, COMPLEX_X87 class,
// MEMORY is used as class."
if (result_class.* == .x87 or
result_class.* == .x87up or
result_class.* == .complex_x87 or
field_class[0] == .x87 or
field_class[0] == .x87up or
field_class[0] == .complex_x87)
{
result_class.* = .memory;
break :combine;
}
 
// "Otherwise class SSE is used."
result_class.* = .sse;
}
@memcpy(result[@intCast(byte_offset.* / 8 + 1)..][0 .. field_class.len - 1], field_class[1..]);
byte_offset.* += field_size;
const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
for (result[@intCast(byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
result_class.* = result_class.combineSystemV(field_class);
byte_offset += field_ty.abiSize(zcu);
}
const final_byte_offset = starting_byte_offset + loaded_struct.size(ip).*;
std.debug.assert(final_byte_offset == std.mem.alignForward(
u64,
byte_offset,
loaded_struct.flagsPtr(ip).alignment.toByteUnits().?,
));
return final_byte_offset;
}
 
fn classifySystemVUnion(
result: *[8]Class,
starting_byte_offset: u64,
loaded_union: InternPool.LoadedUnionType,
zcu: *Zcu,
target: std.Target,
) u64 {
const ip = &zcu.intern_pool;
for (0..loaded_union.field_types.len) |field_index| {
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
switch (field_loaded_struct.layout) {
.auto, .@"extern" => {
_ = classifySystemVStruct(result, starting_byte_offset, field_loaded_struct, zcu, target);
continue;
},
.@"packed" => {},
}
} else if (zcu.typeToUnion(field_ty)) |field_loaded_union| {
switch (field_loaded_union.getLayout(ip)) {
.auto, .@"extern" => {
_ = classifySystemVUnion(result, starting_byte_offset, field_loaded_union, zcu, target);
continue;
},
.@"packed" => {},
}
}
const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
for (result[@intCast(starting_byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
result_class.* = result_class.combineSystemV(field_class);
}
return starting_byte_offset + loaded_union.size(ip).*;
}
 
pub const SysV = struct {
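
(Aside: the merge behavior introduced by `combineSystemV` can be checked in isolation. Below is a minimal standalone sketch of the same rules, omitting the x87 and float_combine special cases; the `Class` enum and `combine` function here are local to this sketch, not the compiler's API.)

    const std = @import("std");

    const Class = enum { none, integer, sse, memory };

    // Simplified restatement of the SysV eightbyte-merging rules quoted above.
    fn combine(prev: Class, next: Class) Class {
        if (prev == next) return prev;
        if (prev == .none) return next;
        if (next == .none) return prev;
        if (prev == .memory or next == .memory) return .memory;
        if (prev == .integer or next == .integer) return .integer;
        return .sse;
    }

    test "merging INTEGER with INTEGER stays INTEGER" {
        // In struct { a: u32, union { struct { d: u32, e: u32 } } },
        // eightbyte 0 covers both `a` and `d`: INTEGER merged with INTEGER.
        try std.testing.expectEqual(Class.integer, combine(.integer, .integer));
        try std.testing.expectEqual(Class.memory, combine(.sse, .memory));
    }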
 
src/codegen/c/Type.zig
@@ -1858,7 +1858,7 @@ pub const Pool = struct {
loaded_tag.names.get(ip)[field_index].toSlice(ip),
);
const field_alignas = AlignAs.fromAlignment(.{
.@"align" = loaded_union.fieldAlign(ip, @intCast(field_index)),
.@"align" = loaded_union.fieldAlign(ip, field_index),
.abi = field_type.abiAlignment(zcu),
});
pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{
 
src/codegen/llvm.zig
@@ -1384,7 +1384,7 @@ pub const Object = struct {
const namespace = zcu.namespacePtr(decl.src_namespace);
const owner_mod = namespace.file_scope.mod;
const fn_info = zcu.typeToFunc(decl.typeOf(zcu)).?;
const target = zcu.getTarget();
const target = owner_mod.resolved_target.result;
const ip = &zcu.intern_pool;
 
var dg: DeclGen = .{
@@ -1456,7 +1456,7 @@ pub const Object = struct {
var llvm_arg_i: u32 = 0;
 
// This gets the LLVM values from the function and stores them in `dg.args`.
const sret = firstParamSRet(fn_info, zcu);
const sret = firstParamSRet(fn_info, zcu, target);
const ret_ptr: Builder.Value = if (sret) param: {
const param = wip.arg(llvm_arg_i);
llvm_arg_i += 1;
@@ -2755,7 +2755,7 @@ pub const Object = struct {
 
// Return type goes first.
if (Type.fromInterned(fn_info.return_type).hasRuntimeBitsIgnoreComptime(mod)) {
const sret = firstParamSRet(fn_info, mod);
const sret = firstParamSRet(fn_info, mod, target);
const ret_ty = if (sret) Type.void else Type.fromInterned(fn_info.return_type);
debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ret_ty));
 
@@ -2881,7 +2881,7 @@ pub const Object = struct {
assert(decl.has_tv);
const fn_info = zcu.typeToFunc(zig_fn_type).?;
const target = owner_mod.resolved_target.result;
const sret = firstParamSRet(fn_info, zcu);
const sret = firstParamSRet(fn_info, zcu, target);
 
const is_extern = decl.isExtern(zcu);
const function_index = try o.builder.addFunction(
@@ -3604,7 +3604,7 @@ pub const Object = struct {
var llvm_params = std.ArrayListUnmanaged(Builder.Type){};
defer llvm_params.deinit(o.gpa);
 
if (firstParamSRet(fn_info, mod)) {
if (firstParamSRet(fn_info, mod, target)) {
try llvm_params.append(o.gpa, .ptr);
}
 
@@ -5130,7 +5130,7 @@ pub const FuncGen = struct {
const return_type = Type.fromInterned(fn_info.return_type);
const llvm_fn = try self.resolveInst(pl_op.operand);
const target = mod.getTarget();
const sret = firstParamSRet(fn_info, mod);
const sret = firstParamSRet(fn_info, mod, target);
 
var llvm_args = std.ArrayList(Builder.Value).init(self.gpa);
defer llvm_args.deinit();
@@ -10865,38 +10865,38 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ
};
}
 
fn firstParamSRet(fn_info: InternPool.Key.FuncType, mod: *Module) bool {
fn firstParamSRet(fn_info: InternPool.Key.FuncType, zcu: *Zcu, target: std.Target) bool {
const return_type = Type.fromInterned(fn_info.return_type);
if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) return false;
if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) return false;
 
const target = mod.getTarget();
switch (fn_info.cc) {
.Unspecified, .Inline => return isByRef(return_type, mod),
return switch (fn_info.cc) {
.Unspecified, .Inline => isByRef(return_type, zcu),
.C => switch (target.cpu.arch) {
.mips, .mipsel => return false,
.mips, .mipsel => false,
.x86 => isByRef(return_type, zcu),
.x86_64 => switch (target.os.tag) {
.windows => return x86_64_abi.classifyWindows(return_type, mod) == .memory,
else => return firstParamSRetSystemV(return_type, mod),
.windows => x86_64_abi.classifyWindows(return_type, zcu) == .memory,
else => firstParamSRetSystemV(return_type, zcu, target),
},
.wasm32 => return wasm_c_abi.classifyType(return_type, mod)[0] == .indirect,
.aarch64, .aarch64_be => return aarch64_c_abi.classifyType(return_type, mod) == .memory,
.arm, .armeb => switch (arm_c_abi.classifyType(return_type, mod, .ret)) {
.memory, .i64_array => return true,
.i32_array => |size| return size != 1,
.byval => return false,
.wasm32 => wasm_c_abi.classifyType(return_type, zcu)[0] == .indirect,
.aarch64, .aarch64_be => aarch64_c_abi.classifyType(return_type, zcu) == .memory,
.arm, .armeb => switch (arm_c_abi.classifyType(return_type, zcu, .ret)) {
.memory, .i64_array => true,
.i32_array => |size| size != 1,
.byval => false,
},
.riscv32, .riscv64 => return riscv_c_abi.classifyType(return_type, mod) == .memory,
else => return false, // TODO investigate C ABI for other architectures
.riscv32, .riscv64 => riscv_c_abi.classifyType(return_type, zcu) == .memory,
else => false, // TODO investigate C ABI for other architectures
},
.SysV => return firstParamSRetSystemV(return_type, mod),
.Win64 => return x86_64_abi.classifyWindows(return_type, mod) == .memory,
.Stdcall => return !isScalar(mod, return_type),
else => return false,
}
.SysV => firstParamSRetSystemV(return_type, zcu, target),
.Win64 => x86_64_abi.classifyWindows(return_type, zcu) == .memory,
.Stdcall => !isScalar(zcu, return_type),
else => false,
};
}
 
fn firstParamSRetSystemV(ty: Type, mod: *Module) bool {
const class = x86_64_abi.classifySystemV(ty, mod, .ret);
fn firstParamSRetSystemV(ty: Type, zcu: *Zcu, target: std.Target) bool {
const class = x86_64_abi.classifySystemV(ty, zcu, target, .ret);
if (class[0] == .memory) return true;
if (class[0] == .x87 and class[2] != .none) return true;
return false;
@@ -10922,6 +10922,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu
.C => {
switch (target.cpu.arch) {
.mips, .mipsel => return o.lowerType(return_type),
.x86 => return if (isByRef(return_type, mod)) .void else o.lowerType(return_type),
.x86_64 => switch (target.os.tag) {
.windows => return lowerWin64FnRetTy(o, fn_info),
else => return lowerSystemVFnRetTy(o, fn_info),
@@ -11014,7 +11015,8 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.E
if (isScalar(mod, return_type)) {
return o.lowerType(return_type);
}
const classes = x86_64_abi.classifySystemV(return_type, mod, .ret);
const target = mod.getTarget();
const classes = x86_64_abi.classifySystemV(return_type, mod, target, .ret);
if (classes[0] == .memory) return .void;
var types_index: u32 = 0;
var types_buffer: [8]Builder.Type = undefined;
@@ -11098,8 +11100,8 @@ const ParamTypeIterator = struct {
 
pub fn next(it: *ParamTypeIterator) Allocator.Error!?Lowering {
if (it.zig_index >= it.fn_info.param_types.len) return null;
const mod = it.object.module;
const ip = &mod.intern_pool;
const zcu = it.object.module;
const ip = &zcu.intern_pool;
const ty = it.fn_info.param_types.get(ip)[it.zig_index];
it.byval_attr = false;
return nextInner(it, Type.fromInterned(ty));
@@ -11107,8 +11109,8 @@ const ParamTypeIterator = struct {
 
/// `airCall` uses this instead of `next` so that it can take into account variadic functions.
pub fn nextCall(it: *ParamTypeIterator, fg: *FuncGen, args: []const Air.Inst.Ref) Allocator.Error!?Lowering {
const mod = it.object.module;
const ip = &mod.intern_pool;
const zcu = it.object.module;
const ip = &zcu.intern_pool;
if (it.zig_index >= it.fn_info.param_types.len) {
if (it.zig_index >= args.len) {
return null;
@@ -11121,10 +11123,10 @@ const ParamTypeIterator = struct {
}
 
fn nextInner(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering {
const mod = it.object.module;
const target = mod.getTarget();
const zcu = it.object.module;
const target = zcu.getTarget();
 
if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) {
it.zig_index += 1;
return .no_bits;
}
@@ -11132,12 +11134,12 @@ const ParamTypeIterator = struct {
.Unspecified, .Inline => {
it.zig_index += 1;
it.llvm_index += 1;
if (ty.isSlice(mod) or
(ty.zigTypeTag(mod) == .Optional and ty.optionalChild(mod).isSlice(mod) and !ty.ptrAllowsZero(mod)))
if (ty.isSlice(zcu) or
(ty.zigTypeTag(zcu) == .Optional and ty.optionalChild(zcu).isSlice(zcu) and !ty.ptrAllowsZero(zcu)))
{
it.llvm_index += 1;
return .slice;
} else if (isByRef(ty, mod)) {
} else if (isByRef(ty, zcu)) {
return .byref;
} else {
return .byval;
@@ -11146,87 +11148,85 @@ const ParamTypeIterator = struct {
.Async => {
@panic("TODO implement async function lowering in the LLVM backend");
},
.C => {
switch (target.cpu.arch) {
.mips, .mipsel => {
it.zig_index += 1;
it.llvm_index += 1;
.C => switch (target.cpu.arch) {
.mips, .mipsel => {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
},
.x86_64 => switch (target.os.tag) {
.windows => return it.nextWin64(ty),
else => return it.nextSystemV(ty),
},
.wasm32 => {
it.zig_index += 1;
it.llvm_index += 1;
if (isScalar(zcu, ty)) {
return .byval;
},
.x86_64 => switch (target.os.tag) {
.windows => return it.nextWin64(ty),
else => return it.nextSystemV(ty),
},
.wasm32 => {
it.zig_index += 1;
it.llvm_index += 1;
if (isScalar(mod, ty)) {
return .byval;
}
const classes = wasm_c_abi.classifyType(ty, mod);
if (classes[0] == .indirect) {
}
const classes = wasm_c_abi.classifyType(ty, zcu);
if (classes[0] == .indirect) {
return .byref;
}
return .abi_sized_int;
},
.aarch64, .aarch64_be => {
it.zig_index += 1;
it.llvm_index += 1;
switch (aarch64_c_abi.classifyType(ty, zcu)) {
.memory => return .byref_mut,
.float_array => |len| return Lowering{ .float_array = len },
.byval => return .byval,
.integer => {
it.types_len = 1;
it.types_buffer[0] = .i64;
return .multiple_llvm_types;
},
.double_integer => return Lowering{ .i64_array = 2 },
}
},
.arm, .armeb => {
it.zig_index += 1;
it.llvm_index += 1;
switch (arm_c_abi.classifyType(ty, zcu, .arg)) {
.memory => {
it.byval_attr = true;
return .byref;
}
return .abi_sized_int;
},
.aarch64, .aarch64_be => {
it.zig_index += 1;
it.llvm_index += 1;
switch (aarch64_c_abi.classifyType(ty, mod)) {
.memory => return .byref_mut,
.float_array => |len| return Lowering{ .float_array = len },
.byval => return .byval,
.integer => {
it.types_len = 1;
it.types_buffer[0] = .i64;
return .multiple_llvm_types;
},
.double_integer => return Lowering{ .i64_array = 2 },
}
},
.arm, .armeb => {
it.zig_index += 1;
it.llvm_index += 1;
switch (arm_c_abi.classifyType(ty, mod, .arg)) {
.memory => {
it.byval_attr = true;
return .byref;
},
.byval => return .byval,
.i32_array => |size| return Lowering{ .i32_array = size },
.i64_array => |size| return Lowering{ .i64_array = size },
}
},
.riscv32, .riscv64 => {
it.zig_index += 1;
it.llvm_index += 1;
if (ty.toIntern() == .f16_type and
!std.Target.riscv.featureSetHas(target.cpu.features, .d)) return .as_u16;
switch (riscv_c_abi.classifyType(ty, mod)) {
.memory => return .byref_mut,
.byval => return .byval,
.integer => return .abi_sized_int,
.double_integer => return Lowering{ .i64_array = 2 },
.fields => {
it.types_len = 0;
for (0..ty.structFieldCount(mod)) |field_index| {
const field_ty = ty.structFieldType(field_index, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
it.types_buffer[it.types_len] = try it.object.lowerType(field_ty);
it.types_len += 1;
}
it.llvm_index += it.types_len - 1;
return .multiple_llvm_types;
},
}
},
// TODO investigate C ABI for other architectures
else => {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
},
}
},
.byval => return .byval,
.i32_array => |size| return Lowering{ .i32_array = size },
.i64_array => |size| return Lowering{ .i64_array = size },
}
},
.riscv32, .riscv64 => {
it.zig_index += 1;
it.llvm_index += 1;
if (ty.toIntern() == .f16_type and
!std.Target.riscv.featureSetHas(target.cpu.features, .d)) return .as_u16;
switch (riscv_c_abi.classifyType(ty, zcu)) {
.memory => return .byref_mut,
.byval => return .byval,
.integer => return .abi_sized_int,
.double_integer => return Lowering{ .i64_array = 2 },
.fields => {
it.types_len = 0;
for (0..ty.structFieldCount(zcu)) |field_index| {
const field_ty = ty.structFieldType(field_index, zcu);
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
it.types_buffer[it.types_len] = try it.object.lowerType(field_ty);
it.types_len += 1;
}
it.llvm_index += it.types_len - 1;
return .multiple_llvm_types;
},
}
},
// TODO investigate C ABI for other architectures
else => {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
},
},
.Win64 => return it.nextWin64(ty),
.SysV => return it.nextSystemV(ty),
@@ -11234,7 +11234,7 @@ const ParamTypeIterator = struct {
it.zig_index += 1;
it.llvm_index += 1;
 
if (isScalar(mod, ty)) {
if (isScalar(zcu, ty)) {
return .byval;
} else {
it.byval_attr = true;
@@ -11250,10 +11250,10 @@ const ParamTypeIterator = struct {
}
 
fn nextWin64(it: *ParamTypeIterator, ty: Type) ?Lowering {
const mod = it.object.module;
switch (x86_64_abi.classifyWindows(ty, mod)) {
const zcu = it.object.module;
switch (x86_64_abi.classifyWindows(ty, zcu)) {
.integer => {
if (isScalar(mod, ty)) {
if (isScalar(zcu, ty)) {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
@@ -11283,16 +11283,17 @@ const ParamTypeIterator = struct {
}
 
fn nextSystemV(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering {
const mod = it.object.module;
const ip = &mod.intern_pool;
const classes = x86_64_abi.classifySystemV(ty, mod, .arg);
const zcu = it.object.module;
const ip = &zcu.intern_pool;
const target = zcu.getTarget();
const classes = x86_64_abi.classifySystemV(ty, zcu, target, .arg);
if (classes[0] == .memory) {
it.zig_index += 1;
it.llvm_index += 1;
it.byval_attr = true;
return .byref;
}
if (isScalar(mod, ty)) {
if (isScalar(zcu, ty)) {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
 
test/c_abi/cfuncs.c
@@ -269,6 +269,33 @@ void c_struct_f32_f32f32(struct Struct_f32_f32f32 s) {
assert_or_panic(s.b.d == 3.0f);
}
 
struct Struct_u32_Union_u32_u32u32 {
uint32_t a;
union {
struct {
uint32_t d, e;
} c;
} b;
};
 
struct Struct_u32_Union_u32_u32u32 zig_ret_struct_u32_union_u32_u32u32(void);
 
void zig_struct_u32_union_u32_u32u32(struct Struct_u32_Union_u32_u32u32);
 
struct Struct_u32_Union_u32_u32u32 c_ret_struct_u32_union_u32_u32u32(void) {
struct Struct_u32_Union_u32_u32u32 s;
s.a = 1;
s.b.c.d = 2;
s.b.c.e = 3;
return s;
}
 
void c_struct_u32_union_u32_u32u32(struct Struct_u32_Union_u32_u32u32 s) {
assert_or_panic(s.a == 1);
assert_or_panic(s.b.c.d == 2);
assert_or_panic(s.b.c.e == 3);
}
 
struct BigStruct {
uint64_t a;
uint64_t b;
@@ -2664,6 +2691,16 @@ void run_c_tests(void) {
}
#endif
 
#if !defined(__powerpc__)
{
struct Struct_u32_Union_u32_u32u32 s = zig_ret_struct_u32_union_u32_u32u32();
assert_or_panic(s.a == 1);
assert_or_panic(s.b.c.d == 2);
assert_or_panic(s.b.c.e == 3);
zig_struct_u32_union_u32_u32u32(s);
}
#endif
 
{
struct BigStruct s = {1, 2, 3, 4, 5};
zig_big_struct(s);
@@ -2678,7 +2715,7 @@ void run_c_tests(void) {
}
#endif
 
#if !defined __i386__ && !defined __arm__ && !defined __aarch64__ && \
#if !defined __arm__ && !defined __aarch64__ && \
!defined __mips__ && !defined __powerpc__ && !defined ZIG_RISCV64
{
struct MedStructInts s = {1, 2, 3};
 
test/c_abi/main.zig
@@ -10,11 +10,11 @@ const builtin = @import("builtin");
const print = std.debug.print;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const has_i128 = builtin.cpu.arch != .x86 and !builtin.cpu.arch.isARM() and
const have_i128 = builtin.cpu.arch != .x86 and !builtin.cpu.arch.isARM() and
!builtin.cpu.arch.isMIPS() and !builtin.cpu.arch.isPPC();
 
const has_f128 = builtin.cpu.arch.isX86() and !builtin.os.tag.isDarwin();
const has_f80 = builtin.cpu.arch.isX86();
const have_f128 = builtin.cpu.arch.isX86() and !builtin.os.tag.isDarwin();
const have_f80 = builtin.cpu.arch.isX86();
 
extern fn run_c_tests() void;
 
@@ -53,13 +53,13 @@ test "C ABI integers" {
c_u16(0xfffe);
c_u32(0xfffffffd);
c_u64(0xfffffffffffffffc);
if (has_i128) c_struct_u128(.{ .value = 0xfffffffffffffffc });
if (have_i128) c_struct_u128(.{ .value = 0xfffffffffffffffc });
 
c_i8(-1);
c_i16(-2);
c_i32(-3);
c_i64(-4);
if (has_i128) c_struct_i128(.{ .value = -6 });
if (have_i128) c_struct_i128(.{ .value = -6 });
c_five_integers(12, 34, 56, 78, 90);
}
 
@@ -186,7 +186,6 @@ const complex_abi_compatible = builtin.cpu.arch != .x86 and !builtin.cpu.arch.is
 
test "C ABI complex float" {
if (!complex_abi_compatible) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .x86_64) return error.SkipZigTest; // See https://github.com/ziglang/zig/issues/8465
 
const a = ComplexFloat{ .real = 1.25, .imag = 2.6 };
const b = ComplexFloat{ .real = 11.3, .imag = -1.5 };
@@ -401,6 +400,42 @@ test "C ABI struct f32 {f32,f32}" {
c_struct_f32_f32f32(.{ .a = 1.0, .b = .{ .c = 2.0, .d = 3.0 } });
}
 
const Struct_u32_Union_u32_u32u32 = extern struct {
a: u32,
b: extern union {
c: extern struct {
d: u32,
e: u32,
},
},
};
 
export fn zig_ret_struct_u32_union_u32_u32u32() Struct_u32_Union_u32_u32u32 {
return .{ .a = 1, .b = .{ .c = .{ .d = 2, .e = 3 } } };
}
 
export fn zig_struct_u32_union_u32_u32u32(s: Struct_u32_Union_u32_u32u32) void {
expect(s.a == 1) catch @panic("test failure");
expect(s.b.c.d == 2) catch @panic("test failure");
expect(s.b.c.e == 3) catch @panic("test failure");
}
 
extern fn c_ret_struct_u32_union_u32_u32u32() Struct_u32_Union_u32_u32u32;
 
extern fn c_struct_u32_union_u32_u32u32(Struct_u32_Union_u32_u32u32) void;
 
test "C ABI struct{u32,union{u32,struct{u32,u32}}}" {
if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
 
const s = c_ret_struct_u32_union_u32_u32u32();
try expect(s.a == 1);
try expect(s.b.c.d == 2);
try expect(s.b.c.e == 3);
c_struct_u32_union_u32_u32u32(.{ .a = 1, .b = .{ .c = .{ .d = 2, .e = 3 } } });
}
 
const BigStruct = extern struct {
a: u64,
b: u64,
@@ -470,7 +505,6 @@ extern fn c_med_struct_mixed(MedStructMixed) void;
extern fn c_ret_med_struct_mixed() MedStructMixed;
 
test "C ABI medium struct of ints and floats" {
if (builtin.cpu.arch == .x86) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
@@ -538,7 +572,6 @@ extern fn c_med_struct_ints(MedStructInts) void;
extern fn c_ret_med_struct_ints() MedStructInts;
 
test "C ABI medium struct of ints" {
if (builtin.cpu.arch == .x86) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
@@ -600,7 +633,7 @@ export fn zig_big_packed_struct(x: BigPackedStruct) void {
}
 
test "C ABI big packed struct" {
if (!has_i128) return error.SkipZigTest;
if (!have_i128) return error.SkipZigTest;
 
const s = BigPackedStruct{ .a = 1, .b = 2 };
c_big_packed_struct(s);
@@ -943,7 +976,6 @@ extern fn c_float_array_struct(FloatArrayStruct) void;
extern fn c_ret_float_array_struct() FloatArrayStruct;
 
test "Float array like struct" {
if (builtin.cpu.arch == .x86 and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
 
@@ -5318,7 +5350,6 @@ extern fn c_ptr_size_float_struct(Vector2) void;
extern fn c_ret_ptr_size_float_struct() Vector2;
 
test "C ABI pointer sized float struct" {
if (builtin.cpu.arch == .x86) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
@@ -5348,7 +5379,6 @@ test "DC: Zig passes to C" {
try expectOk(c_assert_DC(.{ .v1 = -0.25, .v2 = 15 }));
}
test "DC: Zig returns to C" {
if (builtin.cpu.arch == .x86 and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS() and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
@@ -5363,7 +5393,6 @@ test "DC: C passes to Zig" {
try expectOk(c_send_DC());
}
test "DC: C returns to Zig" {
if (builtin.cpu.arch == .x86 and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS() and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
@@ -5397,7 +5426,6 @@ test "CFF: Zig passes to C" {
try expectOk(c_assert_CFF(.{ .v1 = 39, .v2 = 0.875, .v3 = 1.0 }));
}
test "CFF: Zig returns to C" {
if (builtin.cpu.arch == .x86 and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
@@ -5414,7 +5442,6 @@ test "CFF: C passes to Zig" {
try expectOk(c_send_CFF());
}
test "CFF: C returns to Zig" {
if (builtin.cpu.arch == .x86 and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch == .aarch64 and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isRISCV() and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
@@ -5442,28 +5469,24 @@ pub export fn zig_ret_CFF() CFF {
const PD = extern struct { v1: ?*anyopaque, v2: f64 };
 
test "PD: Zig passes to C" {
if (builtin.target.cpu.arch == .x86) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
try expectOk(c_assert_PD(.{ .v1 = null, .v2 = 0.5 }));
}
test "PD: Zig returns to C" {
if (builtin.target.cpu.arch == .x86) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS() and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
try expectOk(c_assert_ret_PD());
}
test "PD: C passes to Zig" {
if (builtin.target.cpu.arch == .x86) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
try expectOk(c_send_PD());
}
test "PD: C returns to Zig" {
if (builtin.target.cpu.arch == .x86) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS() and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
@@ -5519,7 +5542,6 @@ const ByVal = extern struct {
 
extern fn c_func_ptr_byval(*anyopaque, *anyopaque, ByVal, c_ulong, *anyopaque, c_ulong) void;
test "C function that takes byval struct called via function pointer" {
if (builtin.cpu.arch == .x86 and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS() and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
 
@@ -5551,7 +5573,6 @@ const f16_struct = extern struct {
};
extern fn c_f16_struct(f16_struct) f16_struct;
test "f16 struct" {
if (builtin.target.cpu.arch == .x86) return error.SkipZigTest;
if (builtin.target.cpu.arch.isMIPS()) return error.SkipZigTest;
if (builtin.target.cpu.arch.isPPC()) return error.SkipZigTest;
if (builtin.target.cpu.arch.isPPC64()) return error.SkipZigTest;
@@ -5563,7 +5584,7 @@ test "f16 struct" {
 
extern fn c_f80(f80) f80;
test "f80 bare" {
if (!has_f80) return error.SkipZigTest;
if (!have_f80) return error.SkipZigTest;
 
const a = c_f80(12.34);
try expect(@as(f64, @floatCast(a)) == 56.78);
@@ -5574,9 +5595,7 @@ const f80_struct = extern struct {
};
extern fn c_f80_struct(f80_struct) f80_struct;
test "f80 struct" {
if (!has_f80) return error.SkipZigTest;
if (builtin.target.cpu.arch == .x86) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.mode != .Debug) return error.SkipZigTest;
if (!have_f80) return error.SkipZigTest;
 
const a = c_f80_struct(.{ .a = 12.34 });
try expect(@as(f64, @floatCast(a.a)) == 56.78);
@@ -5588,8 +5607,7 @@ const f80_extra_struct = extern struct {
};
extern fn c_f80_extra_struct(f80_extra_struct) f80_extra_struct;
test "f80 extra struct" {
if (!has_f80) return error.SkipZigTest;
if (builtin.target.cpu.arch == .x86) return error.SkipZigTest;
if (!have_f80) return error.SkipZigTest;
 
const a = c_f80_extra_struct(.{ .a = 12.34, .b = 42 });
try expect(@as(f64, @floatCast(a.a)) == 56.78);
@@ -5598,7 +5616,7 @@ test "f80 extra struct" {
 
extern fn c_f128(f128) f128;
test "f128 bare" {
if (!has_f128) return error.SkipZigTest;
if (!have_f128) return error.SkipZigTest;
 
const a = c_f128(12.34);
try expect(@as(f64, @floatCast(a)) == 56.78);
@@ -5609,7 +5627,7 @@ const f128_struct = extern struct {
};
extern fn c_f128_struct(f128_struct) f128_struct;
test "f128 struct" {
if (!has_f128) return error.SkipZigTest;
if (!have_f128) return error.SkipZigTest;
 
const a = c_f128_struct(.{ .a = 12.34 });
try expect(@as(f64, @floatCast(a.a)) == 56.78);