srctree

Robin Voetter parent 3eeb7054 436f53f5 39420838
Merge pull request #18984 from alichraghi/vector

spirv: implement @divFloor, @floor, @mod and @mulWithOverflow

inlinesplit
src/codegen/spirv.zig added: 252, removed: 86, total 166
@@ -1016,7 +1016,7 @@ const DeclGen = struct {
const elem_ty = Type.fromInterned(array_type.child);
const elem_ty_ref = try self.resolveType(elem_ty, .indirect);
 
const constituents = try self.gpa.alloc(IdRef, @as(u32, @intCast(ty.arrayLenIncludingSentinel(mod))));
const constituents = try self.gpa.alloc(IdRef, @intCast(ty.arrayLenIncludingSentinel(mod)));
defer self.gpa.free(constituents);
 
switch (aggregate.storage) {
@@ -1736,7 +1736,6 @@ const DeclGen = struct {
.EnumLiteral,
.ComptimeFloat,
.ComptimeInt,
.Type,
=> unreachable, // Must be comptime.
 
else => |tag| return self.todo("Implement zig type '{}'", .{tag}),
@@ -2316,21 +2315,23 @@ const DeclGen = struct {
.sub, .sub_wrap, .sub_optimized => try self.airArithOp(inst, .OpFSub, .OpISub, .OpISub),
.mul, .mul_wrap, .mul_optimized => try self.airArithOp(inst, .OpFMul, .OpIMul, .OpIMul),
 
 
.abs => try self.airAbs(inst),
.floor => try self.airFloor(inst),
 
.div_floor => try self.airDivFloor(inst),
 
.div_float,
.div_float_optimized,
// TODO: Check that this is the right operation.
.div_trunc,
.div_trunc_optimized,
=> try self.airArithOp(inst, .OpFDiv, .OpSDiv, .OpUDiv),
// TODO: Check if this is the right operation
.rem,
.rem_optimized,
=> try self.airArithOp(inst, .OpFRem, .OpSRem, .OpSRem),
.div_trunc_optimized => try self.airArithOp(inst, .OpFDiv, .OpSDiv, .OpUDiv),
.rem, .rem_optimized => try self.airArithOp(inst, .OpFRem, .OpSRem, .OpSRem),
.mod, .mod_optimized => try self.airArithOp(inst, .OpFMod, .OpSMod, .OpSMod),
 
 
.add_with_overflow => try self.airAddSubOverflow(inst, .OpIAdd, .OpULessThan, .OpSLessThan),
.sub_with_overflow => try self.airAddSubOverflow(inst, .OpISub, .OpUGreaterThan, .OpSGreaterThan),
.mul_with_overflow => try self.airMulOverflow(inst),
.shl_with_overflow => try self.airShlOverflow(inst),
 
.mul_add => try self.airMulAdd(inst),
@@ -2340,7 +2341,7 @@ const DeclGen = struct {
 
.splat => try self.airSplat(inst),
.reduce, .reduce_optimized => try self.airReduce(inst),
.shuffle => try self.airShuffle(inst),
.shuffle => try self.airShuffle(inst),
 
.ptr_add => try self.airPtrAdd(inst),
.ptr_sub => try self.airPtrSub(inst),
@@ -2661,6 +2662,95 @@ const DeclGen = struct {
}
}
 
/// Lowers AIR `div_floor` (division rounding towards negative infinity).
///
/// Integers: SPIR-V has no floor-division opcode, so we compute both
/// candidates and select:
/// - same sign  => the exact quotient is non-negative, so the truncating
///   division `a / b` already equals the floored quotient;
/// - differing sign => floor(a / b) == -ceil(|a| / |b|)
///                                  == -((|a| + |b| - 1) / |b|).
/// "Same sign" is detected as `(a ^ b) >= 0` (sign bits equal). For unsigned
/// types the comparison is always true, so the plain division is used.
///
/// Floats: simply `floor(a / b)`.
fn airDivFloor(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
    const lhs_id = try self.resolve(bin_op.lhs);
    const rhs_id = try self.resolve(bin_op.rhs);
    const ty = self.typeOfIndex(inst);
    const ty_ref = try self.resolveType(ty, .direct);
    const info = self.arithmeticTypeInfo(ty);
    switch (info.class) {
        .composite_integer => unreachable, // TODO
        .integer, .strange_integer => {
            const zero_id = try self.constInt(ty_ref, 0);
            const one_id = try self.constInt(ty_ref, 1);

            // (a ^ b) >= 0, i.e. the operands have the same sign.
            // NOTE: this must be `.gte`, not `.gt` — when a == b the xor is
            // zero, and a strict `>` would route the equal-operands case into
            // the negated branch, evaluating div_floor(a, a) to -1 instead of 1.
            const bin_bitwise_id = try self.binOpSimple(ty, lhs_id, rhs_id, .OpBitwiseXor);
            const is_positive_id = try self.cmp(.gte, Type.bool, ty, bin_bitwise_id, zero_id);

            // Same sign: truncating division is already the floored quotient.
            const positive_div_id = try self.arithOp(ty, lhs_id, rhs_id, .OpFDiv, .OpSDiv, .OpUDiv);

            // Differing sign: - (abs(a) + abs(b) - 1) / abs(b)
            const lhs_abs = try self.abs(ty, ty, lhs_id);
            const rhs_abs = try self.abs(ty, ty, rhs_id);
            const negative_div_lhs = try self.arithOp(
                ty,
                try self.arithOp(ty, lhs_abs, rhs_abs, .OpFAdd, .OpIAdd, .OpIAdd),
                one_id,
                .OpFSub,
                .OpISub,
                .OpISub,
            );
            const negative_div_id = try self.arithOp(ty, negative_div_lhs, rhs_abs, .OpFDiv, .OpSDiv, .OpUDiv);
            const negated_negative_div_id = self.spv.allocId();
            try self.func.body.emit(self.spv.gpa, .OpSNegate, .{
                .id_result_type = self.typeId(ty_ref),
                .id_result = negated_negative_div_id,
                .operand = negative_div_id,
            });

            // Pick the branch that matches the operands' signs.
            const result_id = self.spv.allocId();
            try self.func.body.emit(self.spv.gpa, .OpSelect, .{
                .id_result_type = self.typeId(ty_ref),
                .id_result = result_id,
                .condition = is_positive_id,
                .object_1 = positive_div_id,
                .object_2 = negated_negative_div_id,
            });
            return result_id;
        },
        .float => {
            const div_id = try self.arithOp(ty, lhs_id, rhs_id, .OpFDiv, .OpSDiv, .OpUDiv);
            return try self.floor(ty, div_id);
        },
        .bool => unreachable,
    }
}
 
/// Lowers AIR `floor` by resolving the single operand and delegating to the
/// shared `floor` helper with the instruction's result type.
fn airFloor(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
    const ty = self.typeOfIndex(inst);
    const operand = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
    return try self.floor(ty, try self.resolve(operand));
}
 
/// Emits an extended-instruction-set `floor` on `operand_id` and returns the
/// id of the result. The instruction set used depends on the target OS:
/// OpenCL.std for OpenCL kernels, GLSL.std.450 for Vulkan shaders.
fn floor(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef {
    const target = self.getTarget();
    const ty_ref = try self.resolveType(ty, .direct);
    // Opcode of the floor instruction *within* the chosen extended set:
    // 25 = OpenCL.std `floor`, 8 = GLSL.std.450 `Floor`.
    const ext_inst: Word = switch (target.os.tag) {
        .opencl => 25,
        .vulkan => 8,
        else => unreachable,
    };
    const set_id = switch (target.os.tag) {
        .opencl => try self.spv.importInstructionSet(.@"OpenCL.std"),
        .vulkan => try self.spv.importInstructionSet(.@"GLSL.std.450"),
        else => unreachable,
    };

    const result_id = self.spv.allocId();
    try self.func.body.emit(self.spv.gpa, .OpExtInst, .{
        .id_result_type = self.typeId(ty_ref),
        .id_result = result_id,
        .set = set_id,
        .instruction = .{ .inst = ext_inst },
        .id_ref_4 = &.{operand_id},
    });
    return result_id;
}
 
fn airArithOp(
self: *DeclGen,
inst: Air.Inst.Index,
@@ -2668,7 +2758,6 @@ const DeclGen = struct {
comptime sop: Opcode,
comptime uop: Opcode,
) !?IdRef {
 
// LHS and RHS are guaranteed to have the same type, and AIR guarantees
// the result to be the same as the LHS and RHS, which matches SPIR-V.
const ty = self.typeOfIndex(inst);
@@ -2700,8 +2789,8 @@ const DeclGen = struct {
return self.todo("binary operations for composite integers", .{});
},
.integer, .strange_integer => switch (info.signedness) {
.signed => @as(usize, 1),
.unsigned => @as(usize, 2),
.signed => 1,
.unsigned => 2,
},
.float => 0,
.bool => unreachable,
@@ -2737,12 +2826,16 @@ const DeclGen = struct {
}
 
/// Lowers AIR `abs` by delegating to the `abs` helper.
/// Note: `operand_ty` may be signed, while the result type is always unsigned!
fn airAbs(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
    // `target` is not needed here (the `abs` helper fetches it itself);
    // keeping an unused local constant would be a compile error.
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const operand_id = try self.resolve(ty_op.operand);
    const operand_ty = self.typeOf(ty_op.operand);
    const result_ty = self.typeOfIndex(inst);
    return try self.abs(result_ty, operand_ty, operand_id);
}
 
fn abs(self: *DeclGen, result_ty: Type, operand_ty: Type, operand_id: IdRef) !IdRef {
const target = self.getTarget();
const operand_info = self.arithmeticTypeInfo(operand_ty);
 
var wip = try self.elementWise(result_ty, false);
@@ -2907,6 +3000,61 @@ const DeclGen = struct {
);
}
 
/// Lowers AIR `mul_with_overflow`. Produces a struct whose field 0 is the
/// (wrapping) product and whose field 1 is the overflow flag, computed
/// element-wise so vectors are supported as well as scalars.
fn airMulOverflow(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
    const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
    const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
    const lhs = try self.resolve(extra.lhs);
    const rhs = try self.resolve(extra.rhs);

    // Result struct: field 0 = product (operand type), field 1 = overflow bits.
    const result_ty = self.typeOfIndex(inst);
    const operand_ty = self.typeOf(extra.lhs);
    const ov_ty = result_ty.structFieldType(1, self.module);

    const info = self.arithmeticTypeInfo(operand_ty);
    switch (info.class) {
        .composite_integer => return self.todo("overflow ops for composite integers", .{}),
        .strange_integer, .integer => {},
        .float, .bool => unreachable,
    }

    // Element-wise work-in-progress builders for the product and the flag.
    var wip_result = try self.elementWise(operand_ty, true);
    defer wip_result.deinit();
    var wip_ov = try self.elementWise(ov_ty, true);
    defer wip_ov.deinit();

    const zero_id = try self.constInt(wip_result.ty_ref, 0);
    const zero_ov_id = try self.constInt(wip_ov.ty_ref, 0);
    const one_ov_id = try self.constInt(wip_ov.ty_ref, 1);

    for (wip_result.results, wip_ov.results, 0..) |*result_id, *ov_id, i| {
        const lhs_elem_id = try wip_result.elementAt(operand_ty, lhs, i);
        const rhs_elem_id = try wip_result.elementAt(operand_ty, rhs, i);

        // x = a * b (wrapping multiply).
        result_id.* = try self.arithOp(wip_result.ty, lhs_elem_id, rhs_elem_id, .OpFMul, .OpIMul, .OpIMul);

        // Overflow detection: (a != 0) and (x / a != b) — if the product
        // wrapped, dividing it back by `a` no longer recovers `b`.
        const not_zero_id = try self.cmp(.neq, Type.bool, wip_result.ty, lhs_elem_id, zero_id);
        const res_rhs_id = try self.arithOp(wip_result.ty, result_id.*, lhs_elem_id, .OpFDiv, .OpSDiv, .OpUDiv);
        const res_rhs_not_rhs_id = try self.cmp(.neq, Type.bool, wip_result.ty, res_rhs_id, rhs_elem_id);
        const cond_id = try self.binOpSimple(Type.bool, not_zero_id, res_rhs_not_rhs_id, .OpLogicalAnd);

        // Materialize the boolean condition as a 0/1 value of the flag type.
        ov_id.* = self.spv.allocId();
        try self.func.body.emit(self.spv.gpa, .OpSelect, .{
            .id_result_type = wip_ov.ty_id,
            .id_result = ov_id.*,
            .condition = cond_id,
            .object_1 = one_ov_id,
            .object_2 = zero_ov_id,
        });
    }

    return try self.constructStruct(
        result_ty,
        &.{ operand_ty, ov_ty },
        &.{ try wip_result.finalize(), try wip_ov.finalize() },
    );
}
 
fn airShlOverflow(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
const mod = self.module;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -3692,19 +3840,22 @@ const DeclGen = struct {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_ty = self.typeOf(ty_op.operand);
const operand_id = try self.resolve(ty_op.operand);
const operand_info = self.arithmeticTypeInfo(operand_ty);
const dest_ty = self.typeOfIndex(inst);
const dest_ty_id = try self.resolveTypeId(dest_ty);
const result_ty = self.typeOfIndex(inst);
const result_ty_ref = try self.resolveType(result_ty, .direct);
return try self.floatFromInt(result_ty_ref, operand_ty, operand_id);
}
 
fn floatFromInt(self: *DeclGen, result_ty_ref: CacheRef, operand_ty: Type, operand_id: IdRef) !IdRef {
const operand_info = self.arithmeticTypeInfo(operand_ty);
const result_id = self.spv.allocId();
switch (operand_info.signedness) {
.signed => try self.func.body.emit(self.spv.gpa, .OpConvertSToF, .{
.id_result_type = dest_ty_id,
.id_result_type = self.typeId(result_ty_ref),
.id_result = result_id,
.signed_value = operand_id,
}),
.unsigned => try self.func.body.emit(self.spv.gpa, .OpConvertUToF, .{
.id_result_type = dest_ty_id,
.id_result_type = self.typeId(result_ty_ref),
.id_result = result_id,
.unsigned_value = operand_id,
}),
@@ -3715,19 +3866,22 @@ const DeclGen = struct {
fn airIntFromFloat(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const dest_ty = self.typeOfIndex(inst);
const dest_info = self.arithmeticTypeInfo(dest_ty);
const dest_ty_id = try self.resolveTypeId(dest_ty);
const result_ty = self.typeOfIndex(inst);
return try self.intFromFloat(result_ty, operand_id);
}
 
fn intFromFloat(self: *DeclGen, result_ty: Type, operand_id: IdRef) !IdRef {
const result_info = self.arithmeticTypeInfo(result_ty);
const result_ty_ref = try self.resolveType(result_ty, .direct);
const result_id = self.spv.allocId();
switch (dest_info.signedness) {
switch (result_info.signedness) {
.signed => try self.func.body.emit(self.spv.gpa, .OpConvertFToS, .{
.id_result_type = dest_ty_id,
.id_result_type = self.typeId(result_ty_ref),
.id_result = result_id,
.float_value = operand_id,
}),
.unsigned => try self.func.body.emit(self.spv.gpa, .OpConvertFToU, .{
.id_result_type = dest_ty_id,
.id_result_type = self.typeId(result_ty_ref),
.id_result = result_id,
.float_value = operand_id,
}),
@@ -5237,20 +5391,21 @@ const DeclGen = struct {
 
fn airSwitchBr(self: *DeclGen, inst: Air.Inst.Index) !void {
const mod = self.module;
const target = self.getTarget();
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const cond_ty = self.typeOf(pl_op.operand);
const cond = try self.resolve(pl_op.operand);
const cond_indirect = try self.convertToIndirect(cond_ty, cond);
var cond_indirect = try self.convertToIndirect(cond_ty, cond);
const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
 
const cond_words: u32 = switch (cond_ty.zigTypeTag(mod)) {
.Bool => 1,
.Bool, .ErrorSet => 1,
.Int => blk: {
const bits = cond_ty.intInfo(mod).bits;
const backing_bits = self.backingIntBits(bits) orelse {
return self.todo("implement composite int switch", .{});
};
break :blk if (backing_bits <= 32) @as(u32, 1) else 2;
break :blk if (backing_bits <= 32) 1 else 2;
},
.Enum => blk: {
const int_ty = cond_ty.intTagType(mod);
@@ -5258,10 +5413,14 @@ const DeclGen = struct {
const backing_bits = self.backingIntBits(int_info.bits) orelse {
return self.todo("implement composite int switch", .{});
};
break :blk if (backing_bits <= 32) @as(u32, 1) else 2;
break :blk if (backing_bits <= 32) 1 else 2;
},
.ErrorSet => 1,
else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag(mod))}), // TODO: Figure out which types apply here, and work around them as we can only do integers.
.Pointer => blk: {
cond_indirect = try self.intFromPtr(cond_indirect);
break :blk target.ptrBitWidth() / 32;
},
// TODO: Figure out which types apply here, and work around them as we can only do integers.
else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag(mod))}),
};
 
const num_cases = switch_br.data.cases_len;
@@ -5308,7 +5467,7 @@ const DeclGen = struct {
for (0..num_cases) |case_i| {
// SPIR-V needs a literal here, whose width depends on the case condition.
const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len]));
const items: []const Air.Inst.Ref = @ptrCast(self.air.extra[case.end..][0..case.data.items_len]);
const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
extra_index = case.end + case.data.items_len + case_body.len;
 
@@ -5316,13 +5475,14 @@ const DeclGen = struct {
 
for (items) |item| {
const value = (try self.air.value(item, mod)) orelse unreachable;
const int_val = switch (cond_ty.zigTypeTag(mod)) {
.Bool, .Int => if (cond_ty.isSignedInt(mod)) @as(u64, @bitCast(value.toSignedInt(mod))) else value.toUnsignedInt(mod),
const int_val: u64 = switch (cond_ty.zigTypeTag(mod)) {
.Bool, .Int => if (cond_ty.isSignedInt(mod)) @bitCast(value.toSignedInt(mod)) else value.toUnsignedInt(mod),
.Enum => blk: {
// TODO: figure out if cond_ty is correct (something with enum literals)
break :blk (try value.intFromEnum(cond_ty, mod)).toUnsignedInt(mod); // TODO: composite integer constants
},
.ErrorSet => value.getErrorInt(mod),
.Pointer => value.toUnsignedInt(mod),
else => unreachable,
};
const int_lit: spec.LiteralContextDependentNumber = switch (cond_words) {
@@ -5438,14 +5598,14 @@ const DeclGen = struct {
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
 
const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
const clobbers_len = @as(u31, @truncate(extra.data.flags));
const clobbers_len: u31 = @truncate(extra.data.flags);
 
if (!is_volatile and self.liveness.isUnused(inst)) return null;
 
var extra_i: usize = extra.end;
const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]));
const outputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]);
extra_i += outputs.len;
const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]));
const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
 
if (outputs.len > 1) {
@@ -5567,7 +5727,7 @@ const DeclGen = struct {
const mod = self.module;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]));
const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]);
const callee_ty = self.typeOf(pl_op.operand);
const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
.Fn => callee_ty,
 
src/codegen/spirv/Assembler.zig added: 252, removed: 86, total 166
@@ -256,10 +256,18 @@ fn todo(self: *Assembler, comptime fmt: []const u8, args: anytype) Error {
/// If this function returns `error.AssembleFail`, an explanatory
/// error message has already been emitted into `self.errors`.
fn processInstruction(self: *Assembler) !void {
const result = switch (self.inst.opcode) {
const result: AsmValue = switch (self.inst.opcode) {
.OpEntryPoint => {
return self.fail(0, "cannot export entry points via OpEntryPoint, export the kernel using callconv(.Kernel)", .{});
},
.OpExtInstImport => blk: {
const set_name_offset = self.inst.operands.items[1].string;
const set_name = std.mem.sliceTo(self.inst.string_bytes.items[set_name_offset..], 0);
const set_tag = std.meta.stringToEnum(spec.InstructionSet, set_name) orelse {
return self.fail(set_name_offset, "unknown instruction set: {s}", .{set_name});
};
break :blk .{ .value = try self.spv.importInstructionSet(set_tag) };
},
else => switch (self.inst.opcode.class()) {
.TypeDeclaration => try self.processTypeInstruction(),
else => if (try self.processGenericInstruction()) |result|
@@ -309,7 +317,7 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
return self.fail(0, "{} is not a valid bit count for floats (expected 16, 32 or 64)", .{bits});
},
}
break :blk try self.spv.resolve(.{ .float_type = .{ .bits = @as(u16, @intCast(bits)) } });
break :blk try self.spv.resolve(.{ .float_type = .{ .bits = @intCast(bits) } });
},
.OpTypeVector => try self.spv.resolve(.{ .vector_type = .{
.component_type = try self.resolveTypeRef(operands[1].ref_id),
@@ -364,6 +372,7 @@ fn processGenericInstruction(self: *Assembler) !?AsmValue {
.OpExecutionMode, .OpExecutionModeId => &self.spv.sections.execution_modes,
.OpVariable => switch (@as(spec.StorageClass, @enumFromInt(operands[2].value))) {
.Function => &self.func.prologue,
.UniformConstant => &self.spv.sections.types_globals_constants,
else => {
// This is currently disabled because global variables are required to be
// emitted in the proper order, and this should be honored in inline assembly
@@ -473,14 +482,14 @@ fn parseInstruction(self: *Assembler) !void {
self.inst.string_bytes.shrinkRetainingCapacity(0);
 
const lhs_result_tok = self.currentToken();
const maybe_lhs_result = if (self.eatToken(.result_id_assign)) blk: {
const maybe_lhs_result: ?AsmValue.Ref = if (self.eatToken(.result_id_assign)) blk: {
const name = self.tokenText(lhs_result_tok)[1..];
const entry = try self.value_map.getOrPut(self.gpa, name);
try self.expectToken(.equals);
if (!entry.found_existing) {
entry.value_ptr.* = .just_declared;
}
break :blk @as(AsmValue.Ref, @intCast(entry.index));
break :blk @intCast(entry.index);
} else null;
 
const opcode_tok = self.currentToken();
@@ -550,6 +559,7 @@ fn parseOperand(self: *Assembler, kind: spec.OperandKind) Error!void {
.LiteralInteger => try self.parseLiteralInteger(),
.LiteralString => try self.parseString(),
.LiteralContextDependentNumber => try self.parseContextDependentNumber(),
.LiteralExtInstInteger => try self.parseLiteralExtInstInteger(),
.PairIdRefIdRef => try self.parsePhiSource(),
else => return self.todo("parse operand of type {s}", .{@tagName(kind)}),
},
@@ -641,7 +651,7 @@ fn parseRefId(self: *Assembler) !void {
entry.value_ptr.* = .unresolved_forward_reference;
}
 
const index = @as(AsmValue.Ref, @intCast(entry.index));
const index: AsmValue.Ref = @intCast(entry.index);
try self.inst.operands.append(self.gpa, .{ .ref_id = index });
}
 
@@ -660,6 +670,16 @@ fn parseLiteralInteger(self: *Assembler) !void {
try self.inst.operands.append(self.gpa, .{ .literal32 = value });
}
 
/// Parses a LiteralExtInstInteger operand: a plain 32-bit integer literal
/// selecting an instruction within an extended instruction set.
fn parseLiteralExtInstInteger(self: *Assembler) !void {
    const token = self.currentToken();
    try self.expectToken(.value);
    const literal_text = self.tokenText(token);
    const parsed = std.fmt.parseInt(u32, literal_text, 0) catch
        return self.fail(token.start, "'{s}' is not a valid 32-bit integer literal", .{literal_text});
    try self.inst.operands.append(self.gpa, .{ .literal32 = parsed });
}
 
fn parseString(self: *Assembler) !void {
const tok = self.currentToken();
try self.expectToken(.string);
@@ -673,7 +693,7 @@ fn parseString(self: *Assembler) !void {
else
text[1..];
 
const string_offset = @as(u32, @intCast(self.inst.string_bytes.items.len));
const string_offset: u32 = @intCast(self.inst.string_bytes.items.len);
try self.inst.string_bytes.ensureUnusedCapacity(self.gpa, literal.len + 1);
self.inst.string_bytes.appendSliceAssumeCapacity(literal);
self.inst.string_bytes.appendAssumeCapacity(0);
@@ -730,9 +750,9 @@ fn parseContextDependentInt(self: *Assembler, signedness: std.builtin.Signedness
 
// Note, we store the sign-extended version here.
if (width <= @bitSizeOf(spec.Word)) {
try self.inst.operands.append(self.gpa, .{ .literal32 = @as(u32, @truncate(@as(u128, @bitCast(int)))) });
try self.inst.operands.append(self.gpa, .{ .literal32 = @truncate(@as(u128, @bitCast(int))) });
} else {
try self.inst.operands.append(self.gpa, .{ .literal64 = @as(u64, @truncate(@as(u128, @bitCast(int)))) });
try self.inst.operands.append(self.gpa, .{ .literal64 = @truncate(@as(u128, @bitCast(int))) });
}
return;
}
@@ -753,7 +773,7 @@ fn parseContextDependentFloat(self: *Assembler, comptime width: u16) !void {
return self.fail(tok.start, "'{s}' is not a valid {}-bit float literal", .{ text, width });
};
 
const float_bits = @as(Int, @bitCast(value));
const float_bits: Int = @bitCast(value);
if (width <= @bitSizeOf(spec.Word)) {
try self.inst.operands.append(self.gpa, .{ .literal32 = float_bits });
} else {
 
src/codegen/spirv/Module.zig added: 252, removed: 86, total 166
@@ -429,8 +429,8 @@ pub fn constInt(self: *Module, ty_ref: CacheRef, value: anytype) !IdRef {
return try self.resolveId(.{ .int = .{
.ty = ty_ref,
.value = switch (ty.signedness) {
.signed => Value{ .int64 = @as(i64, @intCast(value)) },
.unsigned => Value{ .uint64 = @as(u64, @intCast(value)) },
.signed => Value{ .int64 = @intCast(value) },
.unsigned => Value{ .uint64 = @intCast(value) },
},
} });
}
@@ -500,9 +500,9 @@ pub fn declPtr(self: *Module, index: Decl.Index) *Decl {
 
/// Declare ALL dependencies for a decl.
pub fn declareDeclDeps(self: *Module, decl_index: Decl.Index, deps: []const Decl.Index) !void {
const begin_dep = @as(u32, @intCast(self.decl_deps.items.len));
const begin_dep: u32 = @intCast(self.decl_deps.items.len);
try self.decl_deps.appendSlice(self.gpa, deps);
const end_dep = @as(u32, @intCast(self.decl_deps.items.len));
const end_dep: u32 = @intCast(self.decl_deps.items.len);
 
const decl = self.declPtr(decl_index);
decl.begin_dep = begin_dep;
 
src/codegen/spirv/Section.zig added: 252, removed: 86, total 166
@@ -115,8 +115,8 @@ pub fn writeWords(section: *Section, words: []const Word) void {
 
/// Writes a 64-bit value into the section as exactly two words,
/// least-significant word first.
pub fn writeDoubleWord(section: *Section, dword: DoubleWord) void {
    section.writeWords(&.{
        @truncate(dword),
        @truncate(dword >> @bitSizeOf(Word)),
    });
}
 
@@ -196,12 +196,12 @@ fn writeString(section: *Section, str: []const u8) void {
 
/// Serializes a context-dependent literal number into the section: one word
/// for the 32-bit variants, two words for the 64-bit variants. Each union
/// tag must appear exactly once — duplicate switch prongs do not compile.
fn writeContextDependentNumber(section: *Section, operand: spec.LiteralContextDependentNumber) void {
    switch (operand) {
        .int32 => |int| section.writeWord(@bitCast(int)),
        .uint32 => |int| section.writeWord(@bitCast(int)),
        .int64 => |int| section.writeDoubleWord(@bitCast(int)),
        .uint64 => |int| section.writeDoubleWord(@bitCast(int)),
        .float32 => |float| section.writeWord(@bitCast(float)),
        .float64 => |float| section.writeDoubleWord(@bitCast(float)),
    }
}
 
@@ -274,8 +274,8 @@ fn operandSize(comptime Operand: type, operand: Operand) usize {
spec.LiteralString => std.math.divCeil(usize, operand.len + 1, @sizeOf(Word)) catch unreachable, // Add one for zero-terminator
 
spec.LiteralContextDependentNumber => switch (operand) {
.int32, .uint32, .float32 => @as(usize, 1),
.int64, .uint64, .float64 => @as(usize, 2),
.int32, .uint32, .float32 => 1,
.int64, .uint64, .float64 => 2,
},
 
// TODO: Where this type is used (OpSpecConstantOp) is currently not correct in the spec
 
test/behavior/floatop.zig added: 252, removed: 86, total 166
@@ -1089,7 +1089,6 @@ test "@floor f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
try testFloor(f16);
try comptime testFloor(f16);
@@ -1100,7 +1099,6 @@ test "@floor f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
try testFloor(f32);
try comptime testFloor(f32);
@@ -1162,7 +1160,6 @@ fn testFloor(comptime T: type) !void {
test "@floor with vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
 
test/behavior/for.zig added: 252, removed: 86, total 166
@@ -226,7 +226,6 @@ test "else continue outer for" {
 
test "for loop with else branch" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
{
var x = [_]u32{ 1, 2 };
 
test/behavior/hasdecl.zig added: 252, removed: 86, total 166
@@ -12,8 +12,6 @@ const Bar = struct {
};
 
test "@hasDecl" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
try expect(@hasDecl(Foo, "public_thing"));
try expect(!@hasDecl(Foo, "private_thing"));
try expect(!@hasDecl(Foo, "no_thing"));
@@ -24,8 +22,6 @@ test "@hasDecl" {
}
 
test "@hasDecl using a sliced string literal" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
try expect(@hasDecl(@This(), "std") == true);
try expect(@hasDecl(@This(), "std"[0..0]) == false);
try expect(@hasDecl(@This(), "std"[0..1]) == false);
 
test/behavior/int_div.zig added: 252, removed: 86, total 166
@@ -6,7 +6,6 @@ test "integer division" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
try testDivision();
try comptime testDivision();
 
test/behavior/math.zig added: 252, removed: 86, total 166
@@ -788,7 +788,6 @@ test "small int addition" {
test "basic @mulWithOverflow" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
{
var a: u8 = 86;
@@ -821,7 +820,6 @@ test "basic @mulWithOverflow" {
test "extensive @mulWithOverflow" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
{
var a: u5 = 3;
@@ -998,7 +996,6 @@ test "@mulWithOverflow bitsize > 32" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
{
var a: u62 = 3;
 
test/behavior/switch.zig added: 252, removed: 86, total 166
@@ -640,7 +640,6 @@ test "switch prong pointer capture alignment" {
test "switch on pointer type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
const S = struct {
const X = struct {
 
test/behavior/vector.zig added: 252, removed: 86, total 166
@@ -1136,7 +1136,6 @@ test "@mulWithOverflow" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
const S = struct {
fn doTheTest() !void {