
Andrew Kelley parent 598db831 78902db6
stage2: fix comptime `@bitCast`

Before, Sema for comptime @bitCast would return the same Value but change the Type. This gave invalid results because, for example, an integer Value with a float Type would be interpreted numerically, but @bitCast needs to reinterpret the bits as they would be stored in memory.
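
For illustration only (an assumed example, not a test from this commit): with
the fix, a comptime @bitCast reinterprets the stored bits rather than
converting the number.

    const std = @import("std");
    const expect = std.testing.expect;

    test "comptime @bitCast reinterprets bits" {
        const S = struct {
            fn doTheTest() !void {
                const x: u32 = 0x3f800000; // IEEE-754 bit pattern of 1.0
                try expect(@bitCast(f32, x) == 1.0);
                // A numeric interpretation would instead give 1065353216.0.
                try expect(@intToFloat(f32, x) != @bitCast(f32, x));
            }
        };
        try S.doTheTest();
        comptime try S.doTheTest();
    }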

This requires a mechanism to serialize a Value to a byte buffer and deserialize a Value from a byte buffer.
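
A minimal sketch of that byte-buffer idea in plain Zig (not compiler code;
u32/f32 and the bit pattern are assumptions standing in for the compiler's
Type/Value pair):

    const std = @import("std");

    test "reinterpret through a byte buffer" {
        const old: u32 = 0x40490fdb; // assumed example bit pattern (f32 pi)
        var buffer: [4]u8 = undefined;
        // Serialize the old representation, then deserialize the same bytes
        // as the new type.
        std.mem.writeInt(u32, &buffer, old, .Little);
        const new = @bitCast(f32, std.mem.readInt(u32, &buffer, .Little));
        try std.testing.expect(new > 3.141 and new < 3.142);
    }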

Not done yet, but needs to happen: comptime dereferencing a pointer to a Decl needs to perform a comptime bitcast on the loaded value. Currently the value is silently wrong in the same way that @bitCast was silently wrong before this commit.

The logic in Value for handling readFromMemory for large integers is only correct for small integers. It needs to be fleshed out for proper big integers.

As part of this change:

  * std.math.big.Int: initial implementations of readTwosComplement and
    writeTwosComplement. They only support bit_count <= 128 so far and
    panic otherwise (see the usage sketch after this list).
  * compiler-rt: move the compareXf2 exports over to the stage2 section.
    Even with the improvements in this commit, I'm still seeing test
    failures in the widening behavior tests; more investigation is
    needed.
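
A minimal usage sketch of the new round trip (illustrative only, not a test
from this commit; the value, buffer size, and limb sizing via
calcTwosCompLimbCount are assumptions):

    const std = @import("std");
    const big = std.math.big;

    test "big.int two's complement round trip (<= 128 bits)" {
        // Serialize into a 16-byte buffer as a 128-bit two's complement
        // integer, then read it back and compare.
        var limbs_a: [big.int.calcTwosCompLimbCount(128)]big.Limb = undefined;
        var a = big.int.Mutable.init(&limbs_a, -1234);

        var buffer: [16]u8 = undefined;
        a.toConst().writeTwosComplement(&buffer, 128, .Little);

        var limbs_b: [big.int.calcTwosCompLimbCount(128)]big.Limb = undefined;
        var b = big.int.Mutable.init(&limbs_b, 0);
        b.readTwosComplement(&buffer, 128, .Little, .signed);

        try std.testing.expect(a.toConst().eq(b.toConst()));
    }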

lib/std/math/big/int.zig
@@ -10,6 +10,8 @@ const mem = std.mem;
const maxInt = std.math.maxInt;
const minInt = std.math.minInt;
const assert = std.debug.assert;
const Endian = std.builtin.Endian;
const Signedness = std.builtin.Signedness;
 
const debug_safety = false;
 
@@ -328,7 +330,7 @@ pub const Mutable = struct {
pub fn setTwosCompIntLimit(
r: *Mutable,
limit: TwosCompIntLimit,
signedness: std.builtin.Signedness,
signedness: Signedness,
bit_count: usize,
) void {
// Handle zero-bit types.
@@ -457,7 +459,7 @@ pub const Mutable = struct {
///
/// Asserts the result fits in `r`. An upper bound on the number of limbs needed by
/// r is `calcTwosCompLimbCount(bit_count)`.
pub fn addWrap(r: *Mutable, a: Const, b: Const, signedness: std.builtin.Signedness, bit_count: usize) void {
pub fn addWrap(r: *Mutable, a: Const, b: Const, signedness: Signedness, bit_count: usize) void {
const req_limbs = calcTwosCompLimbCount(bit_count);
 
// Slice of the upper bits if they exist, these will be ignored and allows us to use addCarry to determine
@@ -493,7 +495,7 @@ pub const Mutable = struct {
///
/// Asserts the result fits in `r`. Upper bound on the number of limbs needed by
/// r is `calcTwosCompLimbCount(bit_count)`.
pub fn addSat(r: *Mutable, a: Const, b: Const, signedness: std.builtin.Signedness, bit_count: usize) void {
pub fn addSat(r: *Mutable, a: Const, b: Const, signedness: Signedness, bit_count: usize) void {
const req_limbs = calcTwosCompLimbCount(bit_count);
 
// Slice of the upper bits if they exist, these will be ignored and allows us to use addCarry to determine
@@ -595,7 +597,7 @@ pub const Mutable = struct {
/// r, a and b may be aliases
/// Asserts the result fits in `r`. An upper bound on the number of limbs needed by
/// r is `calcTwosCompLimbCount(bit_count)`.
pub fn subWrap(r: *Mutable, a: Const, b: Const, signedness: std.builtin.Signedness, bit_count: usize) void {
pub fn subWrap(r: *Mutable, a: Const, b: Const, signedness: Signedness, bit_count: usize) void {
r.addWrap(a, b.negate(), signedness, bit_count);
}
 
@@ -604,7 +606,7 @@ pub const Mutable = struct {
///
/// Asserts the result fits in `r`. Upper bound on the number of limbs needed by
/// r is `calcTwosCompLimbCount(bit_count)`.
pub fn subSat(r: *Mutable, a: Const, b: Const, signedness: std.builtin.Signedness, bit_count: usize) void {
pub fn subSat(r: *Mutable, a: Const, b: Const, signedness: Signedness, bit_count: usize) void {
r.addSat(a, b.negate(), signedness, bit_count);
}
 
@@ -680,7 +682,7 @@ pub const Mutable = struct {
rma: *Mutable,
a: Const,
b: Const,
signedness: std.builtin.Signedness,
signedness: Signedness,
bit_count: usize,
limbs_buffer: []Limb,
allocator: ?*Allocator,
@@ -721,7 +723,7 @@ pub const Mutable = struct {
rma: *Mutable,
a: Const,
b: Const,
signedness: std.builtin.Signedness,
signedness: Signedness,
bit_count: usize,
allocator: ?*Allocator,
) void {
@@ -1284,7 +1286,7 @@ pub const Mutable = struct {
///
/// Asserts `r` has enough storage to store the result.
/// The upper bound is `calcTwosCompLimbCount(a.len)`.
pub fn truncate(r: *Mutable, a: Const, signedness: std.builtin.Signedness, bit_count: usize) void {
pub fn truncate(r: *Mutable, a: Const, signedness: Signedness, bit_count: usize) void {
const req_limbs = calcTwosCompLimbCount(bit_count);
 
// Handle 0-bit integers.
@@ -1369,12 +1371,47 @@ pub const Mutable = struct {
///
/// Asserts `r` has enough storage to store the result.
/// The upper bound is `calcTwosCompLimbCount(a.len)`.
pub fn saturate(r: *Mutable, a: Const, signedness: std.builtin.Signedness, bit_count: usize) void {
pub fn saturate(r: *Mutable, a: Const, signedness: Signedness, bit_count: usize) void {
if (!a.fitsInTwosComp(signedness, bit_count)) {
r.setTwosCompIntLimit(if (r.positive) .max else .min, signedness, bit_count);
}
}
 
pub fn readTwosComplement(
x: *Mutable,
buffer: []const u8,
bit_count: usize,
endian: Endian,
signedness: Signedness,
) void {
if (bit_count == 0) {
x.limbs[0] = 0;
x.len = 1;
x.positive = true;
return;
}
// zig fmt: off
switch (signedness) {
.signed => {
if (bit_count <= 8) return x.set(mem.readInt( i8, buffer[0.. 1], endian));
if (bit_count <= 16) return x.set(mem.readInt( i16, buffer[0.. 2], endian));
if (bit_count <= 32) return x.set(mem.readInt( i32, buffer[0.. 4], endian));
if (bit_count <= 64) return x.set(mem.readInt( i64, buffer[0.. 8], endian));
if (bit_count <= 128) return x.set(mem.readInt(i128, buffer[0..16], endian));
},
.unsigned => {
if (bit_count <= 8) return x.set(mem.readInt( u8, buffer[0.. 1], endian));
if (bit_count <= 16) return x.set(mem.readInt( u16, buffer[0.. 2], endian));
if (bit_count <= 32) return x.set(mem.readInt( u32, buffer[0.. 4], endian));
if (bit_count <= 64) return x.set(mem.readInt( u64, buffer[0.. 8], endian));
if (bit_count <= 128) return x.set(mem.readInt(u128, buffer[0..16], endian));
},
}
// zig fmt: on
 
@panic("TODO implement std lib big int readTwosComplement");
}
 
/// Normalize a possible sequence of leading zeros.
///
/// [1, 2, 3, 4, 0] -> [1, 2, 3, 4]
@@ -1485,7 +1522,7 @@ pub const Const = struct {
return bits;
}
 
pub fn fitsInTwosComp(self: Const, signedness: std.builtin.Signedness, bit_count: usize) bool {
pub fn fitsInTwosComp(self: Const, signedness: Signedness, bit_count: usize) bool {
if (self.eqZero()) {
return true;
}
@@ -1731,6 +1768,29 @@ pub const Const = struct {
return s.len;
}
 
/// Asserts that `buffer` and `bit_count` are large enough to store the value.
pub fn writeTwosComplement(x: Const, buffer: []u8, bit_count: usize, endian: Endian) void {
if (bit_count == 0) return;
 
// zig fmt: off
if (x.positive) {
if (bit_count <= 8) return mem.writeInt( u8, buffer[0.. 1], x.to( u8) catch unreachable, endian);
if (bit_count <= 16) return mem.writeInt( u16, buffer[0.. 2], x.to( u16) catch unreachable, endian);
if (bit_count <= 32) return mem.writeInt( u32, buffer[0.. 4], x.to( u32) catch unreachable, endian);
if (bit_count <= 64) return mem.writeInt( u64, buffer[0.. 8], x.to( u64) catch unreachable, endian);
if (bit_count <= 128) return mem.writeInt(u128, buffer[0..16], x.to(u128) catch unreachable, endian);
} else {
if (bit_count <= 8) return mem.writeInt( i8, buffer[0.. 1], x.to( i8) catch unreachable, endian);
if (bit_count <= 16) return mem.writeInt( i16, buffer[0.. 2], x.to( i16) catch unreachable, endian);
if (bit_count <= 32) return mem.writeInt( i32, buffer[0.. 4], x.to( i32) catch unreachable, endian);
if (bit_count <= 64) return mem.writeInt( i64, buffer[0.. 8], x.to( i64) catch unreachable, endian);
if (bit_count <= 128) return mem.writeInt(i128, buffer[0..16], x.to(i128) catch unreachable, endian);
}
// zig fmt: on
 
@panic("TODO implement std lib big int writeTwosComplement for larger than 128 bits");
}
 
/// Returns `math.Order.lt`, `math.Order.eq`, `math.Order.gt` if
/// `|a| < |b|`, `|a| == |b|`, or `|a| > |b|` respectively.
pub fn orderAbs(a: Const, b: Const) math.Order {
@@ -1992,7 +2052,7 @@ pub const Managed = struct {
return self.toConst().bitCountTwosComp();
}
 
pub fn fitsInTwosComp(self: Managed, signedness: std.builtin.Signedness, bit_count: usize) bool {
pub fn fitsInTwosComp(self: Managed, signedness: Signedness, bit_count: usize) bool {
return self.toConst().fitsInTwosComp(signedness, bit_count);
}
 
@@ -2051,7 +2111,7 @@ pub const Managed = struct {
pub fn setTwosCompIntLimit(
r: *Managed,
limit: TwosCompIntLimit,
signedness: std.builtin.Signedness,
signedness: Signedness,
bit_count: usize,
) !void {
try r.ensureCapacity(calcTwosCompLimbCount(bit_count));
@@ -2164,7 +2224,7 @@ pub const Managed = struct {
/// `r.ensureTwosCompCapacity` prior to calling `add`.
///
/// Returns an error if memory could not be allocated.
pub fn addWrap(r: *Managed, a: Const, b: Const, signedness: std.builtin.Signedness, bit_count: usize) Allocator.Error!void {
pub fn addWrap(r: *Managed, a: Const, b: Const, signedness: Signedness, bit_count: usize) Allocator.Error!void {
try r.ensureTwosCompCapacity(bit_count);
var m = r.toMutable();
m.addWrap(a, b, signedness, bit_count);
@@ -2177,7 +2237,7 @@ pub const Managed = struct {
/// `r.ensureTwosCompCapacity` prior to calling `add`.
///
/// Returns an error if memory could not be allocated.
pub fn addSat(r: *Managed, a: Const, b: Const, signedness: std.builtin.Signedness, bit_count: usize) Allocator.Error!void {
pub fn addSat(r: *Managed, a: Const, b: Const, signedness: Signedness, bit_count: usize) Allocator.Error!void {
try r.ensureTwosCompCapacity(bit_count);
var m = r.toMutable();
m.addSat(a, b, signedness, bit_count);
@@ -2202,7 +2262,7 @@ pub const Managed = struct {
/// `r.ensureTwosCompCapacity` prior to calling `add`.
///
/// Returns an error if memory could not be allocated.
pub fn subWrap(r: *Managed, a: Const, b: Const, signedness: std.builtin.Signedness, bit_count: usize) Allocator.Error!void {
pub fn subWrap(r: *Managed, a: Const, b: Const, signedness: Signedness, bit_count: usize) Allocator.Error!void {
try r.ensureTwosCompCapacity(bit_count);
var m = r.toMutable();
m.subWrap(a, b, signedness, bit_count);
@@ -2215,7 +2275,7 @@ pub const Managed = struct {
/// `r.ensureTwosCompCapacity` prior to calling `add`.
///
/// Returns an error if memory could not be allocated.
pub fn subSat(r: *Managed, a: Const, b: Const, signedness: std.builtin.Signedness, bit_count: usize) Allocator.Error!void {
pub fn subSat(r: *Managed, a: Const, b: Const, signedness: Signedness, bit_count: usize) Allocator.Error!void {
try r.ensureTwosCompCapacity(bit_count);
var m = r.toMutable();
m.subSat(a, b, signedness, bit_count);
@@ -2259,7 +2319,7 @@ pub const Managed = struct {
/// Returns an error if memory could not be allocated.
///
/// rma's allocator is used for temporary storage to speed up the multiplication.
pub fn mulWrap(rma: *Managed, a: Const, b: Const, signedness: std.builtin.Signedness, bit_count: usize) !void {
pub fn mulWrap(rma: *Managed, a: Const, b: Const, signedness: Signedness, bit_count: usize) !void {
var alias_count: usize = 0;
if (rma.limbs.ptr == a.limbs.ptr)
alias_count += 1;
@@ -2445,7 +2505,7 @@ pub const Managed = struct {
}
 
/// r = truncate(Int(signedness, bit_count), a)
pub fn truncate(r: *Managed, a: Const, signedness: std.builtin.Signedness, bit_count: usize) !void {
pub fn truncate(r: *Managed, a: Const, signedness: Signedness, bit_count: usize) !void {
try r.ensureCapacity(calcTwosCompLimbCount(bit_count));
var m = r.toMutable();
m.truncate(a, signedness, bit_count);
@@ -2453,7 +2513,7 @@ pub const Managed = struct {
}
 
/// r = saturate(Int(signedness, bit_count), a)
pub fn saturate(r: *Managed, a: Const, signedness: std.builtin.Signedness, bit_count: usize) !void {
pub fn saturate(r: *Managed, a: Const, signedness: Signedness, bit_count: usize) !void {
try r.ensureCapacity(calcTwosCompLimbCount(bit_count));
var m = r.toMutable();
m.saturate(a, signedness, bit_count);
 
lib/std/special/compiler_rt.zig
@@ -28,6 +28,52 @@ comptime {
const __extendhftf2 = @import("compiler_rt/extendXfYf2.zig").__extendhftf2;
@export(__extendhftf2, .{ .name = "__extendhftf2", .linkage = linkage });
 
const __lesf2 = @import("compiler_rt/compareXf2.zig").__lesf2;
@export(__lesf2, .{ .name = "__lesf2", .linkage = linkage });
const __ledf2 = @import("compiler_rt/compareXf2.zig").__ledf2;
@export(__ledf2, .{ .name = "__ledf2", .linkage = linkage });
const __letf2 = @import("compiler_rt/compareXf2.zig").__letf2;
@export(__letf2, .{ .name = "__letf2", .linkage = linkage });
 
const __gesf2 = @import("compiler_rt/compareXf2.zig").__gesf2;
@export(__gesf2, .{ .name = "__gesf2", .linkage = linkage });
const __gedf2 = @import("compiler_rt/compareXf2.zig").__gedf2;
@export(__gedf2, .{ .name = "__gedf2", .linkage = linkage });
const __getf2 = @import("compiler_rt/compareXf2.zig").__getf2;
@export(__getf2, .{ .name = "__getf2", .linkage = linkage });
 
if (!is_test) {
@export(__lesf2, .{ .name = "__cmpsf2", .linkage = linkage });
@export(__ledf2, .{ .name = "__cmpdf2", .linkage = linkage });
@export(__letf2, .{ .name = "__cmptf2", .linkage = linkage });
 
const __eqsf2 = @import("compiler_rt/compareXf2.zig").__eqsf2;
@export(__eqsf2, .{ .name = "__eqsf2", .linkage = linkage });
const __eqdf2 = @import("compiler_rt/compareXf2.zig").__eqdf2;
@export(__eqdf2, .{ .name = "__eqdf2", .linkage = linkage });
@export(__letf2, .{ .name = "__eqtf2", .linkage = linkage });
 
const __ltsf2 = @import("compiler_rt/compareXf2.zig").__ltsf2;
@export(__ltsf2, .{ .name = "__ltsf2", .linkage = linkage });
const __ltdf2 = @import("compiler_rt/compareXf2.zig").__ltdf2;
@export(__ltdf2, .{ .name = "__ltdf2", .linkage = linkage });
@export(__letf2, .{ .name = "__lttf2", .linkage = linkage });
 
const __nesf2 = @import("compiler_rt/compareXf2.zig").__nesf2;
@export(__nesf2, .{ .name = "__nesf2", .linkage = linkage });
const __nedf2 = @import("compiler_rt/compareXf2.zig").__nedf2;
@export(__nedf2, .{ .name = "__nedf2", .linkage = linkage });
@export(__letf2, .{ .name = "__netf2", .linkage = linkage });
 
const __gtsf2 = @import("compiler_rt/compareXf2.zig").__gtsf2;
@export(__gtsf2, .{ .name = "__gtsf2", .linkage = linkage });
const __gtdf2 = @import("compiler_rt/compareXf2.zig").__gtdf2;
@export(__gtdf2, .{ .name = "__gtdf2", .linkage = linkage });
@export(__getf2, .{ .name = "__gttf2", .linkage = linkage });
 
@export(__extendhfsf2, .{ .name = "__gnu_h2f_ieee", .linkage = linkage });
}
 
if (!builtin.zig_is_stage2) {
switch (arch) {
.i386,
@@ -46,59 +92,6 @@ comptime {
// __clear_cache manages its own logic about whether to be exported or not.
_ = @import("compiler_rt/clear_cache.zig").clear_cache;
 
const __lesf2 = @import("compiler_rt/compareXf2.zig").__lesf2;
@export(__lesf2, .{ .name = "__lesf2", .linkage = linkage });
const __ledf2 = @import("compiler_rt/compareXf2.zig").__ledf2;
@export(__ledf2, .{ .name = "__ledf2", .linkage = linkage });
const __letf2 = @import("compiler_rt/compareXf2.zig").__letf2;
@export(__letf2, .{ .name = "__letf2", .linkage = linkage });
 
const __gesf2 = @import("compiler_rt/compareXf2.zig").__gesf2;
@export(__gesf2, .{ .name = "__gesf2", .linkage = linkage });
const __gedf2 = @import("compiler_rt/compareXf2.zig").__gedf2;
@export(__gedf2, .{ .name = "__gedf2", .linkage = linkage });
const __getf2 = @import("compiler_rt/compareXf2.zig").__getf2;
@export(__getf2, .{ .name = "__getf2", .linkage = linkage });
 
if (!is_test) {
@export(__lesf2, .{ .name = "__cmpsf2", .linkage = linkage });
@export(__ledf2, .{ .name = "__cmpdf2", .linkage = linkage });
@export(__letf2, .{ .name = "__cmptf2", .linkage = linkage });
 
const __eqsf2 = @import("compiler_rt/compareXf2.zig").__eqsf2;
@export(__eqsf2, .{ .name = "__eqsf2", .linkage = linkage });
const __eqdf2 = @import("compiler_rt/compareXf2.zig").__eqdf2;
@export(__eqdf2, .{ .name = "__eqdf2", .linkage = linkage });
@export(__letf2, .{ .name = "__eqtf2", .linkage = linkage });
 
const __ltsf2 = @import("compiler_rt/compareXf2.zig").__ltsf2;
@export(__ltsf2, .{ .name = "__ltsf2", .linkage = linkage });
const __ltdf2 = @import("compiler_rt/compareXf2.zig").__ltdf2;
@export(__ltdf2, .{ .name = "__ltdf2", .linkage = linkage });
@export(__letf2, .{ .name = "__lttf2", .linkage = linkage });
 
const __nesf2 = @import("compiler_rt/compareXf2.zig").__nesf2;
@export(__nesf2, .{ .name = "__nesf2", .linkage = linkage });
const __nedf2 = @import("compiler_rt/compareXf2.zig").__nedf2;
@export(__nedf2, .{ .name = "__nedf2", .linkage = linkage });
@export(__letf2, .{ .name = "__netf2", .linkage = linkage });
 
const __gtsf2 = @import("compiler_rt/compareXf2.zig").__gtsf2;
@export(__gtsf2, .{ .name = "__gtsf2", .linkage = linkage });
const __gtdf2 = @import("compiler_rt/compareXf2.zig").__gtdf2;
@export(__gtdf2, .{ .name = "__gtdf2", .linkage = linkage });
@export(__getf2, .{ .name = "__gttf2", .linkage = linkage });
 
@export(@import("compiler_rt/extendXfYf2.zig").__extendhfsf2, .{
.name = "__gnu_h2f_ieee",
.linkage = linkage,
});
@export(@import("compiler_rt/truncXfYf2.zig").__truncsfhf2, .{
.name = "__gnu_f2h_ieee",
.linkage = linkage,
});
}
 
const __unordsf2 = @import("compiler_rt/compareXf2.zig").__unordsf2;
@export(__unordsf2, .{ .name = "__unordsf2", .linkage = linkage });
const __unorddf2 = @import("compiler_rt/compareXf2.zig").__unorddf2;
@@ -189,6 +182,9 @@ comptime {
 
const __truncsfhf2 = @import("compiler_rt/truncXfYf2.zig").__truncsfhf2;
@export(__truncsfhf2, .{ .name = "__truncsfhf2", .linkage = linkage });
if (!is_test) {
@export(__truncsfhf2, .{ .name = "__gnu_f2h_ieee", .linkage = linkage });
}
const __truncdfhf2 = @import("compiler_rt/truncXfYf2.zig").__truncdfhf2;
@export(__truncdfhf2, .{ .name = "__truncdfhf2", .linkage = linkage });
const __trunctfhf2 = @import("compiler_rt/truncXfYf2.zig").__trunctfhf2;
 
lib/std/special/compiler_rt/compareXf2.zig
@@ -21,7 +21,7 @@ const GE = enum(i32) {
const Unordered: GE = .Less;
};
 
pub fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT {
pub inline fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT {
@setRuntimeSafety(builtin.is_test);
 
const bits = @typeInfo(T).Float.bits;
@@ -32,7 +32,8 @@ pub fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT {
const exponentBits = std.math.floatExponentBits(T);
const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
const absMask = signBit - 1;
const infRep = @bitCast(rep_t, std.math.inf(T));
const infT = std.math.inf(T);
const infRep = @bitCast(rep_t, infT);
 
const aInt = @bitCast(srep_t, a);
const bInt = @bitCast(srep_t, b);
@@ -46,20 +47,18 @@ pub fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT {
if ((aAbs | bAbs) == 0) return .Equal;
 
// If at least one of a and b is positive, we get the same result comparing
// a and b as signed integers as we would with a fp_ting-point compare.
// a and b as signed integers as we would with a floating-point compare.
if ((aInt & bInt) >= 0) {
if (aInt < bInt) {
return .Less;
} else if (aInt == bInt) {
return .Equal;
} else return .Greater;
}
 
// Otherwise, both are negative, so we need to flip the sense of the
// comparison to get the correct result. (This assumes a twos- or ones-
// complement integer representation; if integers are represented in a
// sign-magnitude representation, then this flip is incorrect).
else {
} else {
// Otherwise, both are negative, so we need to flip the sense of the
// comparison to get the correct result. (This assumes a twos- or ones-
// complement integer representation; if integers are represented in a
// sign-magnitude representation, then this flip is incorrect).
if (aInt > bInt) {
return .Less;
} else if (aInt == bInt) {
@@ -68,7 +67,7 @@ pub fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT {
}
}
 
pub fn unordcmp(comptime T: type, a: T, b: T) i32 {
pub inline fn unordcmp(comptime T: type, a: T, b: T) i32 {
@setRuntimeSafety(builtin.is_test);
 
const rep_t = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
@@ -89,12 +88,14 @@ pub fn unordcmp(comptime T: type, a: T, b: T) i32 {
 
pub fn __lesf2(a: f32, b: f32) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
return @bitCast(i32, @call(.{ .modifier = .always_inline }, cmp, .{ f32, LE, a, b }));
const float = cmp(f32, LE, a, b);
return @bitCast(i32, float);
}
 
pub fn __gesf2(a: f32, b: f32) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
return @bitCast(i32, @call(.{ .modifier = .always_inline }, cmp, .{ f32, GE, a, b }));
const float = cmp(f32, GE, a, b);
return @bitCast(i32, float);
}
 
pub fn __eqsf2(a: f32, b: f32) callconv(.C) i32 {
@@ -117,12 +118,14 @@ pub fn __gtsf2(a: f32, b: f32) callconv(.C) i32 {
 
pub fn __ledf2(a: f64, b: f64) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
return @bitCast(i32, @call(.{ .modifier = .always_inline }, cmp, .{ f64, LE, a, b }));
const float = cmp(f64, LE, a, b);
return @bitCast(i32, float);
}
 
pub fn __gedf2(a: f64, b: f64) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
return @bitCast(i32, @call(.{ .modifier = .always_inline }, cmp, .{ f64, GE, a, b }));
const float = cmp(f64, GE, a, b);
return @bitCast(i32, float);
}
 
pub fn __eqdf2(a: f64, b: f64) callconv(.C) i32 {
@@ -145,12 +148,14 @@ pub fn __gtdf2(a: f64, b: f64) callconv(.C) i32 {
 
pub fn __letf2(a: f128, b: f128) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
return @bitCast(i32, @call(.{ .modifier = .always_inline }, cmp, .{ f128, LE, a, b }));
const float = cmp(f128, LE, a, b);
return @bitCast(i32, float);
}
 
pub fn __getf2(a: f128, b: f128) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
return @bitCast(i32, @call(.{ .modifier = .always_inline }, cmp, .{ f128, GE, a, b }));
const float = cmp(f128, GE, a, b);
return @bitCast(i32, float);
}
 
pub fn __eqtf2(a: f128, b: f128) callconv(.C) i32 {
@@ -173,17 +178,17 @@ pub fn __gttf2(a: f128, b: f128) callconv(.C) i32 {
 
pub fn __unordsf2(a: f32, b: f32) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
return @call(.{ .modifier = .always_inline }, unordcmp, .{ f32, a, b });
return unordcmp(f32, a, b);
}
 
pub fn __unorddf2(a: f64, b: f64) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
return @call(.{ .modifier = .always_inline }, unordcmp, .{ f64, a, b });
return unordcmp(f64, a, b);
}
 
pub fn __unordtf2(a: f128, b: f128) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
return @call(.{ .modifier = .always_inline }, unordcmp, .{ f128, a, b });
return unordcmp(f128, a, b);
}
 
// ARM EABI intrinsics
 
src/Sema.zig
@@ -5064,7 +5064,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
 
const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = sema.resolveInst(extra.rhs);
return sema.bitcast(block, dest_type, operand, operand_src);
return sema.bitCast(block, dest_type, operand, operand_src);
}
 
fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -11016,7 +11016,7 @@ fn coerce(
 
const in_memory_result = coerceInMemoryAllowed(dest_type, inst_ty, false, target);
if (in_memory_result == .ok) {
return sema.bitcast(block, dest_type, inst, inst_src);
return sema.bitCast(block, dest_type, inst, inst_src);
}
 
// undefined to anything
@@ -11439,18 +11439,20 @@ fn storePtrVal(
}
}
 
fn bitcast(
fn bitCast(
sema: *Sema,
block: *Block,
dest_type: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
// Keep the comptime Value representation; take the new type.
return sema.addConstant(dest_type, val);
}
// TODO validate the type size and other compile errors
if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
const target = sema.mod.getTarget();
const old_ty = sema.typeOf(inst);
const result_val = try val.bitCast(old_ty, dest_type, target, sema.gpa, sema.arena);
return sema.addConstant(dest_type, result_val);
}
try sema.requireRuntimeBlock(block, inst_src);
return block.addTyOp(.bitcast, dest_type, inst);
}
@@ -11482,7 +11484,7 @@ fn coerceArrayPtrToMany(
return sema.addConstant(dest_type, val);
}
try sema.requireRuntimeBlock(block, inst_src);
return sema.bitcast(block, dest_type, inst, inst_src);
return sema.bitCast(block, dest_type, inst, inst_src);
}
 
fn analyzeDeclVal(
@@ -11571,7 +11573,7 @@ fn analyzeRef(
try sema.storePtr(block, src, alloc, operand);
 
// TODO: Replace with sema.coerce when that supports adding pointer constness.
return sema.bitcast(block, ptr_type, alloc, src);
return sema.bitCast(block, ptr_type, alloc, src);
}
 
fn analyzeLoad(
 
src/codegen/llvm.zig
@@ -1,4 +1,5 @@
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Compilation = @import("../Compilation.zig");
@@ -6,6 +7,7 @@ const llvm = @import("llvm/bindings.zig");
const link = @import("../link.zig");
const log = std.log.scoped(.codegen);
const math = std.math;
const native_endian = builtin.cpu.arch.endian();
 
const build_options = @import("build_options");
const Module = @import("../Module.zig");
@@ -958,11 +960,20 @@ pub const DeclGen = struct {
return llvm_int;
},
.Float => {
const llvm_ty = try self.llvmType(tv.ty);
if (tv.ty.floatBits(self.module.getTarget()) <= 64) {
const llvm_ty = try self.llvmType(tv.ty);
return llvm_ty.constReal(tv.val.toFloat(f64));
}
return self.todo("bitcast to f128 from an integer", .{});
 
var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128));
// LLVM seems to require that the lower half of the f128 be placed first
// in the buffer.
if (native_endian == .Big) {
std.mem.swap(u64, &buf[0], &buf[1]);
}
 
const int = self.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf);
return int.constBitCast(llvm_ty);
},
.Pointer => switch (tv.val.tag()) {
.decl_ref => {
 
src/value.zig
@@ -938,30 +938,132 @@ pub const Value = extern union {
 
pub fn toBool(self: Value) bool {
return switch (self.tag()) {
.bool_true => true,
.bool_true, .one => true,
.bool_false, .zero => false,
else => unreachable,
};
}
 
pub fn bitCast(
val: Value,
old_ty: Type,
new_ty: Type,
target: Target,
gpa: *Allocator,
arena: *Allocator,
) !Value {
// For types with well-defined memory layouts, we serialize them to a byte buffer,
// then deserialize to the new type.
const buffer = try gpa.alloc(u8, old_ty.abiSize(target));
defer gpa.free(buffer);
val.writeToMemory(old_ty, target, buffer);
return Value.readFromMemory(new_ty, target, buffer, arena);
}
 
pub fn writeToMemory(val: Value, ty: Type, target: Target, buffer: []u8) void {
switch (ty.zigTypeTag()) {
.Int => {
var bigint_buffer: BigIntSpace = undefined;
const bigint = val.toBigInt(&bigint_buffer);
const bits = ty.intInfo(target).bits;
bigint.writeTwosComplement(buffer, bits, target.cpu.arch.endian());
},
.Float => switch (ty.floatBits(target)) {
16 => return floatWriteToMemory(f16, val.toFloat(f16), target, buffer),
32 => return floatWriteToMemory(f32, val.toFloat(f32), target, buffer),
64 => return floatWriteToMemory(f64, val.toFloat(f64), target, buffer),
128 => return floatWriteToMemory(f128, val.toFloat(f128), target, buffer),
else => unreachable,
},
else => @panic("TODO implement writeToMemory for more types"),
}
}
 
pub fn readFromMemory(ty: Type, target: Target, buffer: []const u8, arena: *Allocator) !Value {
switch (ty.zigTypeTag()) {
.Int => {
const int_info = ty.intInfo(target);
const endian = target.cpu.arch.endian();
// TODO use a correct amount of limbs
const limbs_buffer = try arena.alloc(std.math.big.Limb, 2);
var bigint = BigIntMutable.init(limbs_buffer, 0);
bigint.readTwosComplement(buffer, int_info.bits, endian, int_info.signedness);
// TODO if it fits in 64 bits then use one of those tags
 
const result_limbs = bigint.limbs[0..bigint.len];
if (bigint.positive) {
return Value.Tag.int_big_positive.create(arena, result_limbs);
} else {
return Value.Tag.int_big_negative.create(arena, result_limbs);
}
},
.Float => switch (ty.floatBits(target)) {
16 => return Value.Tag.float_16.create(arena, floatReadFromMemory(f16, target, buffer)),
32 => return Value.Tag.float_32.create(arena, floatReadFromMemory(f32, target, buffer)),
64 => return Value.Tag.float_64.create(arena, floatReadFromMemory(f64, target, buffer)),
128 => return Value.Tag.float_128.create(arena, floatReadFromMemory(f128, target, buffer)),
else => unreachable,
},
else => @panic("TODO implement readFromMemory for more types"),
}
}
 
fn floatWriteToMemory(comptime F: type, f: F, target: Target, buffer: []u8) void {
const Int = @Type(.{ .Int = .{
.signedness = .unsigned,
.bits = @typeInfo(F).Float.bits,
} });
const int = @bitCast(Int, f);
std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], int, target.cpu.arch.endian());
}
 
fn floatReadFromMemory(comptime F: type, target: Target, buffer: []const u8) F {
const Int = @Type(.{ .Int = .{
.signedness = .unsigned,
.bits = @typeInfo(F).Float.bits,
} });
const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], target.cpu.arch.endian());
return @bitCast(F, int);
}
 
/// Asserts that the value is a float or an integer.
pub fn toFloat(self: Value, comptime T: type) T {
return switch (self.tag()) {
.float_16 => @floatCast(T, self.castTag(.float_16).?.data),
.float_32 => @floatCast(T, self.castTag(.float_32).?.data),
.float_64 => @floatCast(T, self.castTag(.float_64).?.data),
.float_128 => @floatCast(T, self.castTag(.float_128).?.data),
pub fn toFloat(val: Value, comptime T: type) T {
return switch (val.tag()) {
.float_16 => @floatCast(T, val.castTag(.float_16).?.data),
.float_32 => @floatCast(T, val.castTag(.float_32).?.data),
.float_64 => @floatCast(T, val.castTag(.float_64).?.data),
.float_128 => @floatCast(T, val.castTag(.float_128).?.data),
 
.zero => 0,
.one => 1,
.int_u64 => @intToFloat(T, self.castTag(.int_u64).?.data),
.int_i64 => @intToFloat(T, self.castTag(.int_i64).?.data),
.int_u64 => @intToFloat(T, val.castTag(.int_u64).?.data),
.int_i64 => @intToFloat(T, val.castTag(.int_i64).?.data),
 
.int_big_positive, .int_big_negative => @panic("big int to f128"),
.int_big_positive => @floatCast(T, bigIntToFloat(val.castTag(.int_big_positive).?.data, true)),
.int_big_negative => @floatCast(T, bigIntToFloat(val.castTag(.int_big_negative).?.data, false)),
else => unreachable,
};
}
 
/// TODO move this to std lib big int code
fn bigIntToFloat(limbs: []const std.math.big.Limb, positive: bool) f128 {
if (limbs.len == 0) return 0;
 
const base = std.math.maxInt(std.math.big.Limb) + 1;
var result: f128 = 0;
var i: usize = limbs.len;
while (i != 0) {
i -= 1;
const limb: f128 = @intToFloat(f128, limbs[i]);
result = @mulAdd(f128, base, limb, result);
}
if (positive) {
return result;
} else {
return -result;
}
}
 
pub fn clz(val: Value, ty: Type, target: Target) u64 {
const ty_bits = ty.intInfo(target).bits;
switch (val.tag()) {
 
test/behavior.zig
@@ -5,6 +5,7 @@ test {
_ = @import("behavior/array.zig");
_ = @import("behavior/atomics.zig");
_ = @import("behavior/basic.zig");
_ = @import("behavior/bitcast.zig");
_ = @import("behavior/bool.zig");
_ = @import("behavior/bugs/655.zig");
_ = @import("behavior/bugs/1277.zig");
@@ -50,7 +51,7 @@ test {
}
_ = @import("behavior/await_struct.zig");
_ = @import("behavior/bit_shifting.zig");
_ = @import("behavior/bitcast.zig");
_ = @import("behavior/bitcast_stage1.zig");
_ = @import("behavior/bitreverse.zig");
_ = @import("behavior/bugs/394.zig");
_ = @import("behavior/bugs/421.zig");
 
test/behavior/bitcast.zig
@@ -22,133 +22,6 @@ fn conv2(x: u32) i32 {
return @bitCast(i32, x);
}
 
test "@bitCast enum to its integer type" {
const SOCK = enum(c_int) {
A,
B,
 
fn testBitCastExternEnum() !void {
var SOCK_DGRAM = @This().B;
var sock_dgram = @bitCast(c_int, SOCK_DGRAM);
try expect(sock_dgram == 1);
}
};
 
try SOCK.testBitCastExternEnum();
comptime try SOCK.testBitCastExternEnum();
}
 
test "@bitCast packed structs at runtime and comptime" {
const Full = packed struct {
number: u16,
};
const Divided = packed struct {
half1: u8,
quarter3: u4,
quarter4: u4,
};
const S = struct {
fn doTheTest() !void {
var full = Full{ .number = 0x1234 };
var two_halves = @bitCast(Divided, full);
switch (native_endian) {
.Big => {
try expect(two_halves.half1 == 0x12);
try expect(two_halves.quarter3 == 0x3);
try expect(two_halves.quarter4 == 0x4);
},
.Little => {
try expect(two_halves.half1 == 0x34);
try expect(two_halves.quarter3 == 0x2);
try expect(two_halves.quarter4 == 0x1);
},
}
}
};
try S.doTheTest();
comptime try S.doTheTest();
}
 
test "@bitCast extern structs at runtime and comptime" {
const Full = extern struct {
number: u16,
};
const TwoHalves = extern struct {
half1: u8,
half2: u8,
};
const S = struct {
fn doTheTest() !void {
var full = Full{ .number = 0x1234 };
var two_halves = @bitCast(TwoHalves, full);
switch (native_endian) {
.Big => {
try expect(two_halves.half1 == 0x12);
try expect(two_halves.half2 == 0x34);
},
.Little => {
try expect(two_halves.half1 == 0x34);
try expect(two_halves.half2 == 0x12);
},
}
}
};
try S.doTheTest();
comptime try S.doTheTest();
}
 
test "bitcast packed struct to integer and back" {
const LevelUpMove = packed struct {
move_id: u9,
level: u7,
};
const S = struct {
fn doTheTest() !void {
var move = LevelUpMove{ .move_id = 1, .level = 2 };
var v = @bitCast(u16, move);
var back_to_a_move = @bitCast(LevelUpMove, v);
try expect(back_to_a_move.move_id == 1);
try expect(back_to_a_move.level == 2);
}
};
try S.doTheTest();
comptime try S.doTheTest();
}
 
test "implicit cast to error union by returning" {
const S = struct {
fn entry() !void {
try expect((func(-1) catch unreachable) == maxInt(u64));
}
pub fn func(sz: i64) anyerror!u64 {
return @bitCast(u64, sz);
}
};
try S.entry();
comptime try S.entry();
}
 
// issue #3010: compiler segfault
test "bitcast literal [4]u8 param to u32" {
const ip = @bitCast(u32, [_]u8{ 255, 255, 255, 255 });
try expect(ip == maxInt(u32));
}
 
test "bitcast packed struct literal to byte" {
const Foo = packed struct {
value: u8,
};
const casted = @bitCast(u8, Foo{ .value = 0xF });
try expect(casted == 0xf);
}
 
test "comptime bitcast used in expression has the correct type" {
const Foo = packed struct {
value: u8,
};
try expect(@bitCast(u8, Foo{ .value = 0xF }) == 0xf);
}
 
test "bitcast result to _" {
_ = @bitCast(u8, @as(i8, 1));
}
@@ -156,7 +29,7 @@ test "bitcast result to _" {
test "nested bitcast" {
const S = struct {
fn moo(x: isize) !void {
try @import("std").testing.expectEqual(@intCast(isize, 42), x);
try expect(@intCast(isize, 42) == x);
}
 
fn foo(x: isize) !void {
@@ -169,29 +42,3 @@ test "nested bitcast" {
try S.foo(42);
comptime try S.foo(42);
}
 
test "bitcast passed as tuple element" {
const S = struct {
fn foo(args: anytype) !void {
comptime try expect(@TypeOf(args[0]) == f32);
try expect(args[0] == 12.34);
}
};
try S.foo(.{@bitCast(f32, @as(u32, 0x414570A4))});
}
 
test "triple level result location with bitcast sandwich passed as tuple element" {
const S = struct {
fn foo(args: anytype) !void {
comptime try expect(@TypeOf(args[0]) == f64);
try expect(args[0] > 12.33 and args[0] < 12.35);
}
};
try S.foo(.{@as(f64, @bitCast(f32, @as(u32, 0x414570A4)))});
}
 
test "bitcast generates a temporary value" {
var y = @as(u16, 0x55AA);
const x = @bitCast(u16, @bitCast([2]u8, y));
try expectEqual(y, x);
}
 
test/behavior/bitcast_stage1.zig (new file)
@@ -0,0 +1,159 @@
const std = @import("std");
const builtin = @import("builtin");
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const maxInt = std.math.maxInt;
const native_endian = builtin.target.cpu.arch.endian();
 
test "@bitCast enum to its integer type" {
const SOCK = enum(c_int) {
A,
B,
 
fn testBitCastExternEnum() !void {
var SOCK_DGRAM = @This().B;
var sock_dgram = @bitCast(c_int, SOCK_DGRAM);
try expect(sock_dgram == 1);
}
};
 
try SOCK.testBitCastExternEnum();
comptime try SOCK.testBitCastExternEnum();
}
 
test "@bitCast packed structs at runtime and comptime" {
const Full = packed struct {
number: u16,
};
const Divided = packed struct {
half1: u8,
quarter3: u4,
quarter4: u4,
};
const S = struct {
fn doTheTest() !void {
var full = Full{ .number = 0x1234 };
var two_halves = @bitCast(Divided, full);
switch (native_endian) {
.Big => {
try expect(two_halves.half1 == 0x12);
try expect(two_halves.quarter3 == 0x3);
try expect(two_halves.quarter4 == 0x4);
},
.Little => {
try expect(two_halves.half1 == 0x34);
try expect(two_halves.quarter3 == 0x2);
try expect(two_halves.quarter4 == 0x1);
},
}
}
};
try S.doTheTest();
comptime try S.doTheTest();
}
 
test "@bitCast extern structs at runtime and comptime" {
const Full = extern struct {
number: u16,
};
const TwoHalves = extern struct {
half1: u8,
half2: u8,
};
const S = struct {
fn doTheTest() !void {
var full = Full{ .number = 0x1234 };
var two_halves = @bitCast(TwoHalves, full);
switch (native_endian) {
.Big => {
try expect(two_halves.half1 == 0x12);
try expect(two_halves.half2 == 0x34);
},
.Little => {
try expect(two_halves.half1 == 0x34);
try expect(two_halves.half2 == 0x12);
},
}
}
};
try S.doTheTest();
comptime try S.doTheTest();
}
 
test "bitcast packed struct to integer and back" {
const LevelUpMove = packed struct {
move_id: u9,
level: u7,
};
const S = struct {
fn doTheTest() !void {
var move = LevelUpMove{ .move_id = 1, .level = 2 };
var v = @bitCast(u16, move);
var back_to_a_move = @bitCast(LevelUpMove, v);
try expect(back_to_a_move.move_id == 1);
try expect(back_to_a_move.level == 2);
}
};
try S.doTheTest();
comptime try S.doTheTest();
}
 
test "implicit cast to error union by returning" {
const S = struct {
fn entry() !void {
try expect((func(-1) catch unreachable) == maxInt(u64));
}
pub fn func(sz: i64) anyerror!u64 {
return @bitCast(u64, sz);
}
};
try S.entry();
comptime try S.entry();
}
 
// issue #3010: compiler segfault
test "bitcast literal [4]u8 param to u32" {
const ip = @bitCast(u32, [_]u8{ 255, 255, 255, 255 });
try expect(ip == maxInt(u32));
}
 
test "bitcast packed struct literal to byte" {
const Foo = packed struct {
value: u8,
};
const casted = @bitCast(u8, Foo{ .value = 0xF });
try expect(casted == 0xf);
}
 
test "comptime bitcast used in expression has the correct type" {
const Foo = packed struct {
value: u8,
};
try expect(@bitCast(u8, Foo{ .value = 0xF }) == 0xf);
}
 
test "bitcast passed as tuple element" {
const S = struct {
fn foo(args: anytype) !void {
comptime try expect(@TypeOf(args[0]) == f32);
try expect(args[0] == 12.34);
}
};
try S.foo(.{@bitCast(f32, @as(u32, 0x414570A4))});
}
 
test "triple level result location with bitcast sandwich passed as tuple element" {
const S = struct {
fn foo(args: anytype) !void {
comptime try expect(@TypeOf(args[0]) == f64);
try expect(args[0] > 12.33 and args[0] < 12.35);
}
};
try S.foo(.{@as(f64, @bitCast(f32, @as(u32, 0x414570A4)))});
}
 
test "bitcast generates a temporary value" {
var y = @as(u16, 0x55AA);
const x = @bitCast(u16, @bitCast([2]u8, y));
try expectEqual(y, x);
}