
Jacob Young parent 8d651f51 edb6486b b344ff01
Merge pull request #19031 from antlilja/llvm-bc

Emit LLVM bitcode without using LLVM
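This merge replaces libLLVM-driven module construction in the self-hosted LLVM backend with a pure-Zig bitcode writer: `Builder.toBitcode` serializes the module into 32-bit bitcode words, which are written directly to the bitcode outputs (`pre_bc_path`/`post_bc_path`) and only parsed back into an LLVM module via `parseBitcodeInContext2` when assembly or object emission is requested. A minimal sketch of the file-writing step, assuming only a `[]const u32` bitcode buffer (helper name is illustrative, not the PR's API):

const std = @import("std");

/// Illustrative helper: the self-hosted writer produces bitcode as 32-bit
/// words, and the diff below dumps it to disk byte-wise, hence the
/// `bitcode.len * 4` writes.
fn writeBitcodeFile(path: []const u8, bitcode: []const u32) !void {
    var file = try std.fs.cwd().createFile(path, .{});
    defer file.close();
    // Reinterpret the words as raw bytes; each u32 contributes 4 bytes.
    const bytes: [*]const u8 = @ptrCast(bitcode.ptr);
    try file.writeAll(bytes[0 .. bitcode.len * 4]);
}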

lib/std/meta.zig
@@ -460,13 +460,12 @@ test "std.meta.FieldType" {
try testing.expect(FieldType(U, .d) == *const u8);
}
 
pub fn fieldNames(comptime T: type) *const [fields(T).len][]const u8 {
pub fn fieldNames(comptime T: type) *const [fields(T).len][:0]const u8 {
return comptime blk: {
const fieldInfos = fields(T);
var names: [fieldInfos.len][]const u8 = undefined;
for (fieldInfos, 0..) |field, i| {
names[i] = field.name;
}
var names: [fieldInfos.len][:0]const u8 = undefined;
// This concat can be removed with the next zig1 update.
for (&names, fieldInfos) |*name, field| name.* = field.name ++ "";
break :blk &names;
};
}
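
With the change above, each entry returned by `std.meta.fieldNames` is sentinel-terminated (`[:0]const u8`) rather than a plain slice. A small usage sketch with a hypothetical struct:

const std = @import("std");

const Point = struct { x: i32, y: i32 }; // hypothetical example type

test "fieldNames entries are [:0]const u8" {
    const names = std.meta.fieldNames(Point);
    try std.testing.expect(names.len == 2);
    // Sentinel termination lets a name be handed to C-style APIs without copying.
    try std.testing.expectEqualStrings("x", names[0]);
    try std.testing.expectEqualStrings("y", names[1]);
}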
 
src/arch/x86_64/CodeGen.zig
@@ -16683,36 +16683,44 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
else => null,
};
defer if (elem_lock) |lock| self.register_manager.unlockReg(lock);
const elem_reg = registerAlias(
try self.copyToTmpRegister(elem_ty, mat_elem_mcv),
elem_abi_size,
);
 
const elem_extra_bits = self.regExtraBits(elem_ty);
if (elem_bit_off < elem_extra_bits) {
try self.truncateRegister(elem_ty, elem_reg);
{
const temp_reg = try self.copyToTmpRegister(elem_ty, mat_elem_mcv);
const temp_alias = registerAlias(temp_reg, elem_abi_size);
const temp_lock = self.register_manager.lockRegAssumeUnused(temp_reg);
defer self.register_manager.unlockReg(temp_lock);
 
if (elem_bit_off < elem_extra_bits) {
try self.truncateRegister(elem_ty, temp_alias);
}
if (elem_bit_off > 0) try self.genShiftBinOpMir(
.{ ._l, .sh },
elem_ty,
.{ .register = temp_alias },
Type.u8,
.{ .immediate = elem_bit_off },
);
try self.genBinOpMir(
.{ ._, .@"or" },
elem_ty,
.{ .load_frame = .{ .index = frame_index, .off = elem_byte_off } },
.{ .register = temp_alias },
);
}
if (elem_bit_off > 0) try self.genShiftBinOpMir(
.{ ._l, .sh },
elem_ty,
.{ .register = elem_reg },
Type.u8,
.{ .immediate = elem_bit_off },
);
try self.genBinOpMir(
.{ ._, .@"or" },
elem_ty,
.{ .load_frame = .{ .index = frame_index, .off = elem_byte_off } },
.{ .register = elem_reg },
);
if (elem_bit_off > elem_extra_bits) {
const reg = try self.copyToTmpRegister(elem_ty, mat_elem_mcv);
const temp_reg = try self.copyToTmpRegister(elem_ty, mat_elem_mcv);
const temp_alias = registerAlias(temp_reg, elem_abi_size);
const temp_lock = self.register_manager.lockRegAssumeUnused(temp_reg);
defer self.register_manager.unlockReg(temp_lock);
 
if (elem_extra_bits > 0) {
try self.truncateRegister(elem_ty, registerAlias(reg, elem_abi_size));
try self.truncateRegister(elem_ty, temp_alias);
}
try self.genShiftBinOpMir(
.{ ._r, .sh },
elem_ty,
.{ .register = reg },
.{ .register = temp_reg },
Type.u8,
.{ .immediate = elem_abi_bits - elem_bit_off },
);
@@ -16723,7 +16731,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
.index = frame_index,
.off = elem_byte_off + @as(i32, @intCast(elem_abi_size)),
} },
.{ .register = reg },
.{ .register = temp_alias },
);
}
}
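
The hunks above stage each packed element in a locked temporary register before merging it into the aggregate: truncate to the element's bit width, shift left by the bit offset, OR into the destination, and, when the element straddles the slot boundary, OR the shifted-out high bits into the following slot. A value-level sketch of that pattern (illustrative only, not the backend code; 64-bit slots assumed):

fn packElem(slots: []u64, slot: usize, bit_off: u6, bits: u7, elem: u64) void {
    // truncateRegister: mask the element down to its declared bit width.
    const mask = if (bits >= 64) ~@as(u64, 0) else (@as(u64, 1) << @intCast(bits)) - 1;
    const value = elem & mask;
    // genShiftBinOpMir(.shl) + genBinOpMir(.or): place it at its bit offset.
    slots[slot] |= value << bit_off;
    if (@as(u8, bit_off) + bits > 64) {
        // The spill case (elem_bit_off > elem_extra_bits in the diff):
        // shift right and OR the remaining high bits into the next slot.
        slots[slot + 1] |= value >> @intCast(@as(u8, 64) - bit_off);
    }
}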
 
src/codegen/llvm.zig
@@ -770,37 +770,20 @@ pub const Object = struct {
builder: Builder,
 
module: *Module,
di_builder: ?if (build_options.have_llvm) *llvm.DIBuilder else noreturn,
/// One of these mappings:
/// - *Module.File => *DIFile
/// - *Module.Decl (Fn) => *DISubprogram
/// - *Module.Decl (Non-Fn) => *DIGlobalVariable
di_map: if (build_options.have_llvm) std.AutoHashMapUnmanaged(*const anyopaque, *llvm.DINode) else struct {
const K = *const anyopaque;
const V = noreturn;
 
const Self = @This();
debug_compile_unit: Builder.Metadata,
 
metadata: ?noreturn = null,
size: Size = 0,
available: Size = 0,
debug_enums_fwd_ref: Builder.Metadata,
debug_globals_fwd_ref: Builder.Metadata,
 
pub const Size = u0;
debug_enums: std.ArrayListUnmanaged(Builder.Metadata),
debug_globals: std.ArrayListUnmanaged(Builder.Metadata),
 
pub fn deinit(self: *Self, allocator: Allocator) void {
_ = allocator;
self.* = undefined;
}
debug_file_map: std.AutoHashMapUnmanaged(*const Module.File, Builder.Metadata),
debug_type_map: std.AutoHashMapUnmanaged(Type, Builder.Metadata),
 
debug_unresolved_namespace_scopes: std.AutoArrayHashMapUnmanaged(InternPool.NamespaceIndex, Builder.Metadata),
 
pub fn get(self: Self, key: K) ?V {
_ = self;
_ = key;
return null;
}
},
di_compile_unit: ?if (build_options.have_llvm) *llvm.DICompileUnit else noreturn,
target_machine: if (build_options.have_llvm) *llvm.TargetMachine else void,
target_data: if (build_options.have_llvm) *llvm.TargetData else void,
target: std.Target,
/// Ideally we would use `llvm_module.getNamedFunction` to go from *Decl to LLVM function,
/// but that has some downsides:
@@ -820,7 +803,6 @@ pub const Object = struct {
/// TODO when InternPool garbage collection is implemented, this map needs
/// to be garbage collected as well.
type_map: TypeMap,
di_type_map: DITypeMap,
/// The LLVM global table which holds the names corresponding to Zig errors.
/// Note that the values are not added until `emit`, when all errors in
/// the compilation are known.
@@ -850,164 +832,144 @@ pub const Object = struct {
 
pub const TypeMap = std.AutoHashMapUnmanaged(InternPool.Index, Builder.Type);
 
/// This is an ArrayHashMap as opposed to a HashMap because in `emit` we
/// want to iterate over it while adding entries to it.
pub const DITypeMap = std.AutoArrayHashMapUnmanaged(InternPool.Index, AnnotatedDITypePtr);
 
pub fn create(arena: Allocator, comp: *Compilation) !*Object {
if (build_options.only_c) unreachable;
const gpa = comp.gpa;
const target = comp.root_mod.resolved_target.result;
const llvm_target_triple = try targetTriple(arena, target);
const strip = comp.root_mod.strip;
const optimize_mode = comp.root_mod.optimize_mode;
const pic = comp.root_mod.pic;
 
var builder = try Builder.init(.{
.allocator = gpa,
.use_lib_llvm = comp.config.use_lib_llvm,
.strip = strip or !comp.config.use_lib_llvm, // TODO
.strip = strip,
.name = comp.root_name,
.target = target,
.triple = llvm_target_triple,
});
errdefer builder.deinit();
 
var target_machine: if (build_options.have_llvm) *llvm.TargetMachine else void = undefined;
var target_data: if (build_options.have_llvm) *llvm.TargetData else void = undefined;
if (builder.useLibLlvm()) {
debug_info: {
switch (comp.config.debug_format) {
.strip => break :debug_info,
.code_view => builder.llvm.module.?.addModuleCodeViewFlag(),
.dwarf => |f| builder.llvm.module.?.addModuleDebugInfoFlag(f == .@"64"),
}
builder.llvm.di_builder = builder.llvm.module.?.createDIBuilder(true);
builder.data_layout = try builder.fmt("{}", .{DataLayoutBuilder{ .target = target }});
 
const debug_compile_unit, const debug_enums_fwd_ref, const debug_globals_fwd_ref =
if (!builder.strip)
debug_info: {
// We fully resolve all paths at this point to avoid lack of
// source line info in stack traces or lack of debugging
// information which, if relative paths were used, would be
// very location dependent.
// TODO: the only concern I have with this is WASI as either host or target, should
// we leave the paths as relative then?
// TODO: This is totally wrong. In dwarf, paths are encoded as relative to
// a particular directory, and then the directory path is specified elsewhere.
// In the compiler frontend we have it stored correctly in this
// way already, but here we throw all that sweet information
// into the garbage can by converting into absolute paths. What
// a terrible tragedy.
const compile_unit_dir = blk: {
if (comp.module) |zcu| m: {
const d = try zcu.root_mod.root.joinString(arena, "");
if (d.len == 0) break :m;
if (std.fs.path.isAbsolute(d)) break :blk d;
break :blk std.fs.realpathAlloc(arena, d) catch break :blk d;
}
break :blk try std.process.getCwdAlloc(arena);
};
 
const debug_file = try builder.debugFile(
try builder.metadataString(comp.root_name),
try builder.metadataString(compile_unit_dir),
);
 
const debug_enums_fwd_ref = try builder.debugForwardReference();
const debug_globals_fwd_ref = try builder.debugForwardReference();
 
const debug_compile_unit = try builder.debugCompileUnit(
debug_file,
// Don't use the version string here; LLVM misparses it when it
// includes the git revision.
const producer = try builder.fmt("zig {d}.{d}.{d}", .{
try builder.metadataStringFmt("zig {d}.{d}.{d}", .{
build_options.semver.major,
build_options.semver.minor,
build_options.semver.patch,
});
 
// We fully resolve all paths at this point to avoid lack of
// source line info in stack traces or lack of debugging
// information which, if relative paths were used, would be
// very location dependent.
// TODO: the only concern I have with this is WASI as either host or target, should
// we leave the paths as relative then?
// TODO: This is totally wrong. In dwarf, paths are encoded as relative to
// a particular directory, and then the directory path is specified elsewhere.
// In the compiler frontend we have it stored correctly in this
// way already, but here we throw all that sweet information
// into the garbage can by converting into absolute paths. What
// a terrible tragedy.
const compile_unit_dir_z = blk: {
if (comp.module) |zcu| m: {
const d = try zcu.root_mod.root.joinStringZ(arena, "");
if (d.len == 0) break :m;
if (std.fs.path.isAbsolute(d)) break :blk d;
const realpath = std.fs.realpathAlloc(arena, d) catch break :blk d;
break :blk try arena.dupeZ(u8, realpath);
}
const cwd = try std.process.getCwdAlloc(arena);
break :blk try arena.dupeZ(u8, cwd);
};
 
builder.llvm.di_compile_unit = builder.llvm.di_builder.?.createCompileUnit(
DW.LANG.C99,
builder.llvm.di_builder.?.createFile(comp.root_name, compile_unit_dir_z),
producer.slice(&builder).?,
optimize_mode != .Debug,
"", // flags
0, // runtime version
"", // split name
0, // dwo id
true, // emit debug info
);
}
 
const opt_level: llvm.CodeGenOptLevel = if (optimize_mode == .Debug)
.None
else
.Aggressive;
 
const reloc_mode: llvm.RelocMode = if (pic)
.PIC
else if (comp.config.link_mode == .Dynamic)
llvm.RelocMode.DynamicNoPIC
else
.Static;
 
const code_model: llvm.CodeModel = switch (comp.root_mod.code_model) {
.default => .Default,
.tiny => .Tiny,
.small => .Small,
.kernel => .Kernel,
.medium => .Medium,
.large => .Large,
};
 
// TODO handle float ABI better- it should depend on the ABI portion of std.Target
const float_abi: llvm.ABIType = .Default;
 
target_machine = llvm.TargetMachine.create(
builder.llvm.target.?,
builder.target_triple.slice(&builder).?,
if (target.cpu.model.llvm_name) |s| s.ptr else null,
comp.root_mod.resolved_target.llvm_cpu_features.?,
opt_level,
reloc_mode,
code_model,
comp.function_sections,
comp.data_sections,
float_abi,
if (target_util.llvmMachineAbi(target)) |s| s.ptr else null,
}),
debug_enums_fwd_ref,
debug_globals_fwd_ref,
.{ .optimized = comp.root_mod.optimize_mode != .Debug },
);
errdefer target_machine.dispose();
 
target_data = target_machine.createTargetDataLayout();
errdefer target_data.dispose();
const i32_2 = try builder.intConst(.i32, 2);
const i32_3 = try builder.intConst(.i32, 3);
const debug_info_version = try builder.debugModuleFlag(
try builder.debugConstant(i32_2),
try builder.metadataString("Debug Info Version"),
try builder.debugConstant(i32_3),
);
 
builder.llvm.module.?.setModuleDataLayout(target_data);
 
if (pic) builder.llvm.module.?.setModulePICLevel();
if (comp.config.pie) builder.llvm.module.?.setModulePIELevel();
if (code_model != .Default) builder.llvm.module.?.setModuleCodeModel(code_model);
 
if (comp.llvm_opt_bisect_limit >= 0) {
builder.llvm.context.setOptBisectLimit(comp.llvm_opt_bisect_limit);
switch (comp.config.debug_format) {
.strip => unreachable,
.dwarf => |f| {
const i32_4 = try builder.intConst(.i32, 4);
const dwarf_version = try builder.debugModuleFlag(
try builder.debugConstant(i32_2),
try builder.metadataString("Dwarf Version"),
try builder.debugConstant(i32_4),
);
switch (f) {
.@"32" => {
try builder.debugNamed(try builder.metadataString("llvm.module.flags"), &.{
debug_info_version,
dwarf_version,
});
},
.@"64" => {
const dwarf64 = try builder.debugModuleFlag(
try builder.debugConstant(i32_2),
try builder.metadataString("DWARF64"),
try builder.debugConstant(.@"1"),
);
try builder.debugNamed(try builder.metadataString("llvm.module.flags"), &.{
debug_info_version,
dwarf_version,
dwarf64,
});
},
}
},
.code_view => {
const code_view = try builder.debugModuleFlag(
try builder.debugConstant(i32_2),
try builder.metadataString("CodeView"),
try builder.debugConstant(.@"1"),
);
try builder.debugNamed(try builder.metadataString("llvm.module.flags"), &.{
debug_info_version,
code_view,
});
},
}
 
builder.data_layout = try builder.fmt("{}", .{DataLayoutBuilder{ .target = target }});
if (std.debug.runtime_safety) {
const rep = target_data.stringRep();
defer llvm.disposeMessage(rep);
std.testing.expectEqualStrings(
std.mem.span(rep),
builder.data_layout.slice(&builder).?,
) catch unreachable;
}
}
try builder.debugNamed(try builder.metadataString("llvm.dbg.cu"), &.{debug_compile_unit});
break :debug_info .{ debug_compile_unit, debug_enums_fwd_ref, debug_globals_fwd_ref };
} else .{.none} ** 3;
 
const obj = try arena.create(Object);
obj.* = .{
.gpa = gpa,
.builder = builder,
.module = comp.module.?,
.di_map = .{},
.di_builder = if (builder.useLibLlvm()) builder.llvm.di_builder else null, // TODO
.di_compile_unit = if (builder.useLibLlvm()) builder.llvm.di_compile_unit else null,
.target_machine = target_machine,
.target_data = target_data,
.debug_compile_unit = debug_compile_unit,
.debug_enums_fwd_ref = debug_enums_fwd_ref,
.debug_globals_fwd_ref = debug_globals_fwd_ref,
.debug_enums = .{},
.debug_globals = .{},
.debug_file_map = .{},
.debug_type_map = .{},
.debug_unresolved_namespace_scopes = .{},
.target = target,
.decl_map = .{},
.anon_decl_map = .{},
.named_enum_map = .{},
.type_map = .{},
.di_type_map = .{},
.error_name_table = .none,
.extern_collisions = .{},
.null_opt_usize = .no_init,
@@ -1018,12 +980,11 @@ pub const Object = struct {
 
pub fn deinit(self: *Object) void {
const gpa = self.gpa;
self.di_map.deinit(gpa);
self.di_type_map.deinit(gpa);
if (self.builder.useLibLlvm()) {
self.target_data.dispose();
self.target_machine.dispose();
}
self.debug_enums.deinit(gpa);
self.debug_globals.deinit(gpa);
self.debug_file_map.deinit(gpa);
self.debug_type_map.deinit(gpa);
self.debug_unresolved_namespace_scopes.deinit(gpa);
self.decl_map.deinit(gpa);
self.anon_decl_map.deinit(gpa);
self.named_enum_map.deinit(gpa);
@@ -1052,8 +1013,8 @@ pub const Object = struct {
 
llvm_errors[0] = try o.builder.undefConst(llvm_slice_ty);
for (llvm_errors[1..], error_name_list[1..]) |*llvm_error, name| {
const name_string = try o.builder.string(mod.intern_pool.stringToSlice(name));
const name_init = try o.builder.stringNullConst(name_string);
const name_string = try o.builder.stringNull(mod.intern_pool.stringToSlice(name));
const name_init = try o.builder.stringConst(name_string);
const name_variable_index =
try o.builder.addVariable(.empty, name_init.typeOf(&o.builder), .default);
try name_variable_index.setInitializer(name_init, &o.builder);
@@ -1064,7 +1025,7 @@ pub const Object = struct {
 
llvm_error.* = try o.builder.structConst(llvm_slice_ty, &.{
name_variable_index.toConst(&o.builder),
try o.builder.intConst(llvm_usize_ty, name_string.slice(&o.builder).?.len),
try o.builder.intConst(llvm_usize_ty, name_string.slice(&o.builder).?.len - 1),
});
}
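
The two hunks above rebuild the error-name table with `stringNull`/`stringConst`: the builder string now stores the trailing 0 byte, so its reported length includes the terminator and the `{ ptr, len }` slice constant subtracts one to get the Zig-visible length. A tiny sketch of that bookkeeping (hypothetical error name):

const std = @import("std");

test "error name slice length excludes the stored NUL" {
    const name = "OutOfMemory"; // hypothetical error name
    const stored_len = name.len + 1; // stringNull keeps the name plus a 0 terminator
    const slice_len = stored_len - 1; // length emitted into the { ptr, len } constant
    try std.testing.expect(slice_len == name.len);
}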
 
@@ -1193,24 +1154,29 @@ pub const Object = struct {
try self.genCmpLtErrorsLenFunction();
try self.genModuleLevelAssembly();
 
if (self.di_builder) |dib| {
// When lowering debug info for pointers, we emitted the element types as
// forward decls. Now we must go flesh those out.
// Here we iterate over a hash map while modifying it but it is OK because
// we never add or remove entries during this loop.
var i: usize = 0;
while (i < self.di_type_map.count()) : (i += 1) {
const value_ptr = &self.di_type_map.values()[i];
const annotated = value_ptr.*;
if (!annotated.isFwdOnly()) continue;
const entry: Object.DITypeMap.Entry = .{
.key_ptr = &self.di_type_map.keys()[i],
.value_ptr = value_ptr,
};
_ = try self.lowerDebugTypeImpl(entry, .full, annotated.toDIType());
if (!self.builder.strip) {
{
var i: usize = 0;
while (i < self.debug_unresolved_namespace_scopes.count()) : (i += 1) {
const namespace_index = self.debug_unresolved_namespace_scopes.keys()[i];
const fwd_ref = self.debug_unresolved_namespace_scopes.values()[i];
 
const namespace = self.module.namespacePtr(namespace_index);
const debug_type = try self.lowerDebugType(namespace.ty);
 
self.builder.debugForwardReferenceSetType(fwd_ref, debug_type);
}
}
 
dib.finalize();
self.builder.debugForwardReferenceSetType(
self.debug_enums_fwd_ref,
try self.builder.debugTuple(self.debug_enums.items),
);
 
self.builder.debugForwardReferenceSetType(
self.debug_globals_fwd_ref,
try self.builder.debugTuple(self.debug_globals.items),
);
}
 
if (options.pre_ir_path) |path| {
@@ -1221,10 +1187,21 @@ pub const Object = struct {
}
}
 
if (options.pre_bc_path) |path| _ = try self.builder.writeBitcodeToFile(path);
var bitcode_arena_allocator = std.heap.ArenaAllocator.init(
std.heap.page_allocator,
);
errdefer bitcode_arena_allocator.deinit();
 
if (std.debug.runtime_safety and !try self.builder.verify()) {
@panic("LLVM module verification failed");
const bitcode = try self.builder.toBitcode(
bitcode_arena_allocator.allocator(),
);
 
if (options.pre_bc_path) |path| {
var file = try std.fs.cwd().createFile(path, .{});
defer file.close();
 
const ptr: [*]const u8 = @ptrCast(bitcode.ptr);
try file.writeAll(ptr[0..(bitcode.len * 4)]);
}
 
const emit_asm_msg = options.asm_path orelse "(none)";
@@ -1238,16 +1215,116 @@ pub const Object = struct {
if (options.asm_path == null and options.bin_path == null and
options.post_ir_path == null and options.post_bc_path == null) return;
 
if (!self.builder.useLibLlvm()) unreachable; // caught in Compilation.Config.resolve
if (options.post_bc_path) |path| {
var file = try std.fs.cwd().createFileZ(path, .{});
defer file.close();
 
const ptr: [*]const u8 = @ptrCast(bitcode.ptr);
try file.writeAll(ptr[0..(bitcode.len * 4)]);
}
 
if (!build_options.have_llvm or !self.module.comp.config.use_lib_llvm) {
log.err("emitting without libllvm not implemented", .{});
return error.FailedToEmit;
}
 
initializeLLVMTarget(self.module.comp.root_mod.resolved_target.result.cpu.arch);
 
const context: *llvm.Context = llvm.Context.create();
defer context.dispose();
 
const module = blk: {
const bitcode_memory_buffer = llvm.MemoryBuffer.createMemoryBufferWithMemoryRange(
@ptrCast(bitcode.ptr),
bitcode.len * 4,
"BitcodeBuffer",
llvm.Bool.False,
);
defer bitcode_memory_buffer.dispose();
 
var module: *llvm.Module = undefined;
if (context.parseBitcodeInContext2(bitcode_memory_buffer, &module).toBool()) {
std.debug.print("Failed to parse bitcode\n", .{});
return error.FailedToEmit;
}
 
break :blk module;
};
bitcode_arena_allocator.deinit();
 
const target_triple_sentinel =
try self.gpa.dupeZ(u8, self.builder.target_triple.slice(&self.builder).?);
defer self.gpa.free(target_triple_sentinel);
var target: *llvm.Target = undefined;
var error_message: [*:0]const u8 = undefined;
if (llvm.Target.getFromTriple(target_triple_sentinel, &target, &error_message).toBool()) {
defer llvm.disposeMessage(error_message);
 
log.err("LLVM failed to parse '{s}': {s}", .{
self.builder.target_triple.slice(&self.builder).?,
error_message,
});
@panic("Invalid LLVM triple");
}
 
const optimize_mode = self.module.comp.root_mod.optimize_mode;
const pic = self.module.comp.root_mod.pic;
 
const opt_level: llvm.CodeGenOptLevel = if (optimize_mode == .Debug)
.None
else
.Aggressive;
 
const reloc_mode: llvm.RelocMode = if (pic)
.PIC
else if (self.module.comp.config.link_mode == .Dynamic)
llvm.RelocMode.DynamicNoPIC
else
.Static;
 
const code_model: llvm.CodeModel = switch (self.module.comp.root_mod.code_model) {
.default => .Default,
.tiny => .Tiny,
.small => .Small,
.kernel => .Kernel,
.medium => .Medium,
.large => .Large,
};
 
// TODO handle float ABI better- it should depend on the ABI portion of std.Target
const float_abi: llvm.ABIType = .Default;
 
var target_machine = llvm.TargetMachine.create(
target,
target_triple_sentinel,
if (self.module.comp.root_mod.resolved_target.result.cpu.model.llvm_name) |s| s.ptr else null,
self.module.comp.root_mod.resolved_target.llvm_cpu_features.?,
opt_level,
reloc_mode,
code_model,
self.module.comp.function_sections,
self.module.comp.data_sections,
float_abi,
if (target_util.llvmMachineAbi(self.module.comp.root_mod.resolved_target.result)) |s| s.ptr else null,
);
errdefer target_machine.dispose();
 
if (pic) module.setModulePICLevel();
if (self.module.comp.config.pie) module.setModulePIELevel();
if (code_model != .Default) module.setModuleCodeModel(code_model);
 
if (self.module.comp.llvm_opt_bisect_limit >= 0) {
context.setOptBisectLimit(self.module.comp.llvm_opt_bisect_limit);
}
 
// Unfortunately, LLVM shits the bed when we ask for both binary and assembly.
// So we call the entire pipeline multiple times if this is requested.
var error_message: [*:0]const u8 = undefined;
// var error_message: [*:0]const u8 = undefined;
var emit_bin_path = options.bin_path;
var post_ir_path = options.post_ir_path;
if (options.asm_path != null and options.bin_path != null) {
if (self.target_machine.emitToFile(
self.builder.llvm.module.?,
if (target_machine.emitToFile(
module,
&error_message,
options.is_debug,
options.is_small,
@@ -1270,8 +1347,8 @@ pub const Object = struct {
post_ir_path = null;
}
 
if (self.target_machine.emitToFile(
self.builder.llvm.module.?,
if (target_machine.emitToFile(
module,
&error_message,
options.is_debug,
options.is_small,
@@ -1281,7 +1358,7 @@ pub const Object = struct {
options.asm_path,
emit_bin_path,
post_ir_path,
options.post_bc_path,
null,
)) {
defer llvm.disposeMessage(error_message);
 
@@ -1421,7 +1498,7 @@ pub const Object = struct {
if (isByRef(param_ty, zcu)) {
const alignment = param_ty.abiAlignment(zcu).toLlvm();
const param_llvm_ty = param.typeOfWip(&wip);
const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target);
const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target);
_ = try wip.store(.normal, param, arg_ptr, alignment);
args.appendAssumeCapacity(arg_ptr);
} else {
@@ -1469,7 +1546,7 @@ pub const Object = struct {
 
const param_llvm_ty = try o.lowerType(param_ty);
const alignment = param_ty.abiAlignment(zcu).toLlvm();
const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target);
const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target);
_ = try wip.store(.normal, param, arg_ptr, alignment);
 
args.appendAssumeCapacity(if (isByRef(param_ty, zcu))
@@ -1514,7 +1591,7 @@ pub const Object = struct {
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
const param_llvm_ty = try o.lowerType(param_ty);
const param_alignment = param_ty.abiAlignment(zcu).toLlvm();
const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, param_alignment, target);
const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, param_alignment, target);
const llvm_ty = try o.builder.structType(.normal, field_types);
for (0..field_types.len) |field_i| {
const param = wip.arg(llvm_arg_i);
@@ -1544,7 +1621,7 @@ pub const Object = struct {
llvm_arg_i += 1;
 
const alignment = param_ty.abiAlignment(zcu).toLlvm();
const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target);
const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target);
_ = try wip.store(.normal, param, arg_ptr, alignment);
 
args.appendAssumeCapacity(if (isByRef(param_ty, zcu))
@@ -1559,7 +1636,7 @@ pub const Object = struct {
llvm_arg_i += 1;
 
const alignment = param_ty.abiAlignment(zcu).toLlvm();
const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target);
const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target);
_ = try wip.store(.normal, param, arg_ptr, alignment);
 
args.appendAssumeCapacity(if (isByRef(param_ty, zcu))
@@ -1573,40 +1650,37 @@ pub const Object = struct {
 
function_index.setAttributes(try attributes.finish(&o.builder), &o.builder);
 
var di_file: ?if (build_options.have_llvm) *llvm.DIFile else noreturn = null;
var di_scope: ?if (build_options.have_llvm) *llvm.DIScope else noreturn = null;
 
if (o.di_builder) |dib| {
di_file = try o.getDIFile(gpa, namespace.file_scope);
const file, const subprogram = if (!o.builder.strip) debug_info: {
const file = try o.getDebugFile(namespace.file_scope);
 
const line_number = decl.src_line + 1;
const is_internal_linkage = decl.val.getExternFunc(zcu) == null and
!zcu.decl_exports.contains(decl_index);
const noret_bit: c_uint = if (fn_info.return_type == .noreturn_type)
llvm.DIFlags.NoReturn
else
0;
const decl_di_ty = try o.lowerDebugType(decl.ty, .full);
const subprogram = dib.createFunction(
di_file.?.toScope(),
ip.stringToSlice(decl.name),
function_index.name(&o.builder).slice(&o.builder).?,
di_file.?,
const debug_decl_type = try o.lowerDebugType(decl.ty);
 
const subprogram = try o.builder.debugSubprogram(
file,
try o.builder.metadataString(ip.stringToSlice(decl.name)),
try o.builder.metadataStringFromString(function_index.name(&o.builder)),
line_number,
decl_di_ty,
is_internal_linkage,
true, // is definition
line_number + func.lbrace_line, // scope line
llvm.DIFlags.StaticMember | noret_bit,
owner_mod.optimize_mode != .Debug,
null, // decl_subprogram
line_number + func.lbrace_line,
debug_decl_type,
.{
.di_flags = .{
.StaticMember = true,
.NoReturn = fn_info.return_type == .noreturn_type,
},
.sp_flags = .{
.Optimized = owner_mod.optimize_mode != .Debug,
.Definition = true,
.LocalToUnit = is_internal_linkage,
},
},
o.debug_compile_unit,
);
try o.di_map.put(gpa, decl, subprogram.toNode());
 
function_index.toLlvm(&o.builder).fnSetSubprogram(subprogram);
 
di_scope = subprogram.toScope();
}
function_index.setSubprogram(subprogram, &o.builder);
break :debug_info .{ file, subprogram };
} else .{.none} ** 2;
 
var fg: FuncGen = .{
.gpa = gpa,
@@ -1620,8 +1694,8 @@ pub const Object = struct {
.func_inst_table = .{},
.blocks = .{},
.sync_scope = if (owner_mod.single_threaded) .singlethread else .system,
.di_scope = di_scope,
.di_file = di_file,
.file = file,
.scope = subprogram,
.base_line = dg.decl.src_line,
.prev_dbg_line = 0,
.prev_dbg_column = 0,
@@ -1707,26 +1781,7 @@ pub const Object = struct {
global_index.setUnnamedAddr(.default, &self.builder);
if (comp.config.dll_export_fns)
global_index.setDllStorageClass(.default, &self.builder);
if (self.di_map.get(decl)) |di_node| {
const decl_name_slice = decl_name.slice(&self.builder).?;
if (try decl.isFunction(mod)) {
const di_func: *llvm.DISubprogram = @ptrCast(di_node);
const linkage_name = llvm.MDString.get(
self.builder.llvm.context,
decl_name_slice.ptr,
decl_name_slice.len,
);
di_func.replaceLinkageName(linkage_name);
} else {
const di_global: *llvm.DIGlobalVariable = @ptrCast(di_node);
const linkage_name = llvm.MDString.get(
self.builder.llvm.context,
decl_name_slice.ptr,
decl_name_slice.len,
);
di_global.replaceLinkageName(linkage_name);
}
}
 
if (decl.val.getVariable(mod)) |decl_var| {
global_index.ptrConst(&self.builder).kind.variable.setThreadLocal(
if (decl_var.is_threadlocal) .generaldynamic else .default,
@@ -1740,27 +1795,6 @@ pub const Object = struct {
);
try global_index.rename(main_exp_name, &self.builder);
 
if (self.di_map.get(decl)) |di_node| {
const main_exp_name_slice = main_exp_name.slice(&self.builder).?;
if (try decl.isFunction(mod)) {
const di_func: *llvm.DISubprogram = @ptrCast(di_node);
const linkage_name = llvm.MDString.get(
self.builder.llvm.context,
main_exp_name_slice.ptr,
main_exp_name_slice.len,
);
di_func.replaceLinkageName(linkage_name);
} else {
const di_global: *llvm.DIGlobalVariable = @ptrCast(di_node);
const linkage_name = llvm.MDString.get(
self.builder.llvm.context,
main_exp_name_slice.ptr,
main_exp_name_slice.len,
);
di_global.replaceLinkageName(linkage_name);
}
}
 
if (decl.val.getVariable(mod)) |decl_var| if (decl_var.is_threadlocal)
global_index.ptrConst(&self.builder).kind
.variable.setThreadLocal(.generaldynamic, &self.builder);
@@ -1890,119 +1924,79 @@ pub const Object = struct {
global.delete(&self.builder);
}
 
fn getDIFile(o: *Object, gpa: Allocator, file: *const Module.File) !*llvm.DIFile {
const gop = try o.di_map.getOrPut(gpa, file);
errdefer assert(o.di_map.remove(file));
if (gop.found_existing) {
return @ptrCast(gop.value_ptr.*);
}
const dir_path_z = d: {
var buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
const sub_path = std.fs.path.dirname(file.sub_file_path) orelse "";
const dir_path = try file.mod.root.joinStringZ(gpa, sub_path);
if (std.fs.path.isAbsolute(dir_path)) break :d dir_path;
const abs = std.fs.realpath(dir_path, &buffer) catch break :d dir_path;
gpa.free(dir_path);
break :d try gpa.dupeZ(u8, abs);
};
defer gpa.free(dir_path_z);
const sub_file_path_z = try gpa.dupeZ(u8, std.fs.path.basename(file.sub_file_path));
defer gpa.free(sub_file_path_z);
const di_file = o.di_builder.?.createFile(sub_file_path_z, dir_path_z);
gop.value_ptr.* = di_file.toNode();
return di_file;
fn getDebugFile(o: *Object, file: *const Module.File) Allocator.Error!Builder.Metadata {
const gpa = o.gpa;
const gop = try o.debug_file_map.getOrPut(gpa, file);
errdefer assert(o.debug_file_map.remove(file));
if (gop.found_existing) return gop.value_ptr.*;
gop.value_ptr.* = try o.builder.debugFile(
try o.builder.metadataString(std.fs.path.basename(file.sub_file_path)),
dir_path: {
const sub_path = std.fs.path.dirname(file.sub_file_path) orelse "";
const dir_path = try file.mod.root.joinString(gpa, sub_path);
defer gpa.free(dir_path);
if (std.fs.path.isAbsolute(dir_path))
break :dir_path try o.builder.metadataString(dir_path);
var abs_buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
const abs_path = std.fs.realpath(dir_path, &abs_buffer) catch
break :dir_path try o.builder.metadataString(dir_path);
break :dir_path try o.builder.metadataString(abs_path);
},
);
return gop.value_ptr.*;
}
 
const DebugResolveStatus = enum { fwd, full };
 
/// In the implementation of this function, it is required to store a forward decl
/// into `gop` before making any recursive calls (even directly).
fn lowerDebugType(
pub fn lowerDebugType(
o: *Object,
ty: Type,
resolve: DebugResolveStatus,
) Allocator.Error!*llvm.DIType {
const gpa = o.gpa;
// Be careful not to reference this `gop` variable after any recursive calls
// to `lowerDebugType`.
const gop = try o.di_type_map.getOrPut(gpa, ty.toIntern());
if (gop.found_existing) {
const annotated = gop.value_ptr.*;
switch (annotated) {
// This type is currently attempting to be resolved fully, so make
// sure a second recursion through the types uses forward resolution.
.null => assert(resolve == .fwd),
// This type already has at least forward resolution, only resolve
// fully during full resolution.
_ => {
const di_type = annotated.toDIType();
if (!annotated.isFwdOnly() or resolve == .fwd) {
return di_type;
}
const entry: Object.DITypeMap.Entry = .{
.key_ptr = gop.key_ptr,
.value_ptr = gop.value_ptr,
};
return o.lowerDebugTypeImpl(entry, resolve, di_type);
},
}
} else gop.value_ptr.* = .null;
errdefer if (!gop.found_existing) assert(o.di_type_map.orderedRemove(ty.toIntern()));
const entry: Object.DITypeMap.Entry = .{
.key_ptr = gop.key_ptr,
.value_ptr = gop.value_ptr,
};
return o.lowerDebugTypeImpl(entry, resolve, null);
}
) Allocator.Error!Builder.Metadata {
assert(!o.builder.strip);
 
/// This is a helper function used by `lowerDebugType`.
fn lowerDebugTypeImpl(
o: *Object,
gop: Object.DITypeMap.Entry,
resolve: DebugResolveStatus,
opt_fwd_decl: ?*llvm.DIType,
) Allocator.Error!*llvm.DIType {
const ty = Type.fromInterned(gop.key_ptr.*);
const gpa = o.gpa;
const target = o.target;
const dib = o.di_builder.?;
const mod = o.module;
const ip = &mod.intern_pool;
 
if (o.debug_type_map.get(ty)) |debug_type| return debug_type;
 
switch (ty.zigTypeTag(mod)) {
.Void, .NoReturn => {
const di_type = dib.createBasicType("void", 0, DW.ATE.signed);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type);
return di_type;
.Void,
.NoReturn,
=> {
const debug_void_type = try o.builder.debugSignedType(
try o.builder.metadataString("void"),
0,
);
try o.debug_type_map.put(gpa, ty, debug_void_type);
return debug_void_type;
},
.Int => {
const info = ty.intInfo(mod);
assert(info.bits != 0);
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const dwarf_encoding: c_uint = switch (info.signedness) {
.signed => DW.ATE.signed,
.unsigned => DW.ATE.unsigned,
const builder_name = try o.builder.metadataString(name);
const debug_bits = ty.abiSize(mod) * 8; // lldb cannot handle non-byte sized types
const debug_int_type = switch (info.signedness) {
.signed => try o.builder.debugSignedType(builder_name, debug_bits),
.unsigned => try o.builder.debugUnsignedType(builder_name, debug_bits),
};
const di_bits = ty.abiSize(mod) * 8; // lldb cannot handle non-byte sized types
const di_type = dib.createBasicType(name, di_bits, dwarf_encoding);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type);
return di_type;
try o.debug_type_map.put(gpa, ty, debug_int_type);
return debug_int_type;
},
.Enum => {
const owner_decl_index = ty.getOwnerDecl(mod);
const owner_decl = o.module.declPtr(owner_decl_index);
 
if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
const enum_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(enum_di_ty));
return enum_di_ty;
const debug_enum_type = try o.makeEmptyNamespaceDebugType(owner_decl_index);
try o.debug_type_map.put(gpa, ty, debug_enum_type);
return debug_enum_type;
}
 
const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
 
const enumerators = try gpa.alloc(*llvm.DIEnumerator, enum_type.names.len);
const enumerators = try gpa.alloc(Builder.Metadata, enum_type.names.len);
defer gpa.free(enumerators);
 
const int_ty = Type.fromInterned(enum_type.tag_ty);
@@ -2010,66 +2004,59 @@ pub const Object = struct {
assert(int_info.bits != 0);
 
for (enum_type.names.get(ip), 0..) |field_name_ip, i| {
const field_name_z = ip.stringToSlice(field_name_ip);
 
var bigint_space: Value.BigIntSpace = undefined;
const bigint = if (enum_type.values.len != 0)
Value.fromInterned(enum_type.values.get(ip)[i]).toBigInt(&bigint_space, mod)
else
std.math.big.int.Mutable.init(&bigint_space.limbs, i).toConst();
 
if (bigint.limbs.len == 1) {
enumerators[i] = dib.createEnumerator(field_name_z, bigint.limbs[0], int_info.signedness == .unsigned);
continue;
}
if (@sizeOf(usize) == @sizeOf(u64)) {
enumerators[i] = dib.createEnumerator2(
field_name_z,
@intCast(bigint.limbs.len),
bigint.limbs.ptr,
int_info.bits,
int_info.signedness == .unsigned,
);
continue;
}
@panic("TODO implement bigint debug enumerators to llvm int for 32-bit compiler builds");
enumerators[i] = try o.builder.debugEnumerator(
try o.builder.metadataString(ip.stringToSlice(field_name_ip)),
int_ty.isUnsignedInt(mod),
int_info.bits,
bigint,
);
}
 
const di_file = try o.getDIFile(gpa, mod.namespacePtr(owner_decl.src_namespace).file_scope);
const di_scope = try o.namespaceToDebugScope(owner_decl.src_namespace);
const file = try o.getDebugFile(mod.namespacePtr(owner_decl.src_namespace).file_scope);
const scope = try o.namespaceToDebugScope(owner_decl.src_namespace);
 
const name = try o.allocTypeName(ty);
defer gpa.free(name);
 
const enum_di_ty = dib.createEnumerationType(
di_scope,
name,
di_file,
owner_decl.src_node + 1,
const debug_enum_type = try o.builder.debugEnumerationType(
try o.builder.metadataString(name),
file,
scope,
owner_decl.src_node + 1, // Line
try o.lowerDebugType(int_ty),
ty.abiSize(mod) * 8,
ty.abiAlignment(mod).toByteUnits(0) * 8,
enumerators.ptr,
@intCast(enumerators.len),
try o.lowerDebugType(int_ty, resolve),
"",
try o.builder.debugTuple(enumerators),
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(enum_di_ty));
return enum_di_ty;
 
try o.debug_type_map.put(gpa, ty, debug_enum_type);
try o.debug_enums.append(gpa, debug_enum_type);
return debug_enum_type;
},
.Float => {
const bits = ty.floatBits(target);
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const di_type = dib.createBasicType(name, bits, DW.ATE.float);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type);
return di_type;
const debug_float_type = try o.builder.debugFloatType(
try o.builder.metadataString(name),
bits,
);
try o.debug_type_map.put(gpa, ty, debug_float_type);
return debug_float_type;
},
.Bool => {
const di_bits = 8; // lldb cannot handle non-byte sized types
const di_type = dib.createBasicType("bool", di_bits, DW.ATE.boolean);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type);
return di_type;
const debug_bool_type = try o.builder.debugBoolType(
try o.builder.metadataString("bool"),
8, // lldb cannot handle non-byte sized types
);
try o.debug_type_map.put(gpa, ty, debug_bool_type);
return debug_bool_type;
},
.Pointer => {
// Normalize everything that the debug info does not represent.
@@ -2099,136 +2086,145 @@ pub const Object = struct {
},
},
});
const ptr_di_ty = try o.lowerDebugType(bland_ptr_ty, resolve);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.init(ptr_di_ty, resolve));
return ptr_di_ty;
const debug_ptr_type = try o.lowerDebugType(bland_ptr_ty);
try o.debug_type_map.put(gpa, ty, debug_ptr_type);
return debug_ptr_type;
}
 
const debug_fwd_ref = try o.builder.debugForwardReference();
 
// Set as forward reference while the type is lowered in case it references itself
try o.debug_type_map.put(gpa, ty, debug_fwd_ref);
 
if (ty.isSlice(mod)) {
const ptr_ty = ty.slicePtrFieldType(mod);
const len_ty = Type.usize;
 
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const di_file: ?*llvm.DIFile = null;
const line = 0;
const compile_unit_scope = o.di_compile_unit.?.toScope();
 
const fwd_decl = opt_fwd_decl orelse blk: {
const fwd_decl = dib.createReplaceableCompositeType(
DW.TAG.structure_type,
name.ptr,
compile_unit_scope,
di_file,
line,
);
gop.value_ptr.* = AnnotatedDITypePtr.initFwd(fwd_decl);
if (resolve == .fwd) return fwd_decl;
break :blk fwd_decl;
};
 
const ptr_size = ptr_ty.abiSize(mod);
const ptr_align = ptr_ty.abiAlignment(mod);
const len_size = len_ty.abiSize(mod);
const len_align = len_ty.abiAlignment(mod);
 
var offset: u64 = 0;
offset += ptr_size;
offset = len_align.forward(offset);
const len_offset = offset;
const len_offset = len_align.forward(ptr_size);
 
const fields: [2]*llvm.DIType = .{
dib.createMemberType(
fwd_decl.toScope(),
"ptr",
di_file,
line,
ptr_size * 8, // size in bits
ptr_align.toByteUnits(0) * 8, // align in bits
0, // offset in bits
0, // flags
try o.lowerDebugType(ptr_ty, resolve),
),
dib.createMemberType(
fwd_decl.toScope(),
"len",
di_file,
line,
len_size * 8, // size in bits
len_align.toByteUnits(0) * 8, // align in bits
len_offset * 8, // offset in bits
0, // flags
try o.lowerDebugType(len_ty, resolve),
),
};
 
const full_di_ty = dib.createStructType(
compile_unit_scope,
name.ptr,
di_file,
line,
ty.abiSize(mod) * 8, // size in bits
ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
0, // flags
null, // derived from
&fields,
fields.len,
0, // run time lang
null, // vtable holder
"", // unique id
const debug_ptr_type = try o.builder.debugMemberType(
try o.builder.metadataString("ptr"),
.none, // File
debug_fwd_ref,
0, // Line
try o.lowerDebugType(ptr_ty),
ptr_size * 8,
ptr_align.toByteUnits(0) * 8,
0, // Offset
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
 
const debug_len_type = try o.builder.debugMemberType(
try o.builder.metadataString("len"),
.none, // File
debug_fwd_ref,
0, // Line
try o.lowerDebugType(len_ty),
len_size * 8,
len_align.toByteUnits(0) * 8,
len_offset * 8,
);
 
const debug_slice_type = try o.builder.debugStructType(
try o.builder.metadataString(name),
.none, // File
o.debug_compile_unit, // Scope
line,
.none, // Underlying type
ty.abiSize(mod) * 8,
ty.abiAlignment(mod).toByteUnits(0) * 8,
try o.builder.debugTuple(&.{
debug_ptr_type,
debug_len_type,
}),
);
 
o.builder.debugForwardReferenceSetType(debug_fwd_ref, debug_slice_type);
 
// Set to real type now that it has been lowered fully
const map_ptr = o.debug_type_map.getPtr(ty) orelse unreachable;
map_ptr.* = debug_slice_type;
 
return debug_slice_type;
}
 
const elem_di_ty = try o.lowerDebugType(Type.fromInterned(ptr_info.child), .fwd);
const debug_elem_ty = try o.lowerDebugType(Type.fromInterned(ptr_info.child));
 
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const ptr_di_ty = dib.createPointerType(
elem_di_ty,
 
const debug_ptr_type = try o.builder.debugPointerType(
try o.builder.metadataString(name),
.none, // File
.none, // Scope
0, // Line
debug_elem_ty,
target.ptrBitWidth(),
ty.ptrAlignment(mod).toByteUnits(0) * 8,
name,
0, // Offset
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(ptr_di_ty));
return ptr_di_ty;
 
o.builder.debugForwardReferenceSetType(debug_fwd_ref, debug_ptr_type);
 
// Set to real type now that it has been lowered fully
const map_ptr = o.debug_type_map.getPtr(ty) orelse unreachable;
map_ptr.* = debug_ptr_type;
 
return debug_ptr_type;
},
.Opaque => {
if (ty.toIntern() == .anyopaque_type) {
const di_ty = dib.createBasicType("anyopaque", 0, DW.ATE.signed);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty);
return di_ty;
const debug_opaque_type = try o.builder.debugSignedType(
try o.builder.metadataString("anyopaque"),
0,
);
try o.debug_type_map.put(gpa, ty, debug_opaque_type);
return debug_opaque_type;
}
 
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const owner_decl_index = ty.getOwnerDecl(mod);
const owner_decl = o.module.declPtr(owner_decl_index);
const opaque_di_ty = dib.createForwardDeclType(
DW.TAG.structure_type,
name,
const debug_opaque_type = try o.builder.debugStructType(
try o.builder.metadataString(name),
try o.getDebugFile(mod.namespacePtr(owner_decl.src_namespace).file_scope),
try o.namespaceToDebugScope(owner_decl.src_namespace),
try o.getDIFile(gpa, mod.namespacePtr(owner_decl.src_namespace).file_scope),
owner_decl.src_node + 1,
owner_decl.src_node + 1, // Line
.none, // Underlying type
0, // Size
0, // Align
.none, // Fields
);
// The recursive call to `lowerDebugType` via `namespaceToDebugScope`
// means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(opaque_di_ty));
return opaque_di_ty;
try o.debug_type_map.put(gpa, ty, debug_opaque_type);
return debug_opaque_type;
},
.Array => {
const array_di_ty = dib.createArrayType(
const debug_array_type = try o.builder.debugArrayType(
.none, // Name
.none, // File
.none, // Scope
0, // Line
try o.lowerDebugType(ty.childType(mod)),
ty.abiSize(mod) * 8,
ty.abiAlignment(mod).toByteUnits(0) * 8,
try o.lowerDebugType(ty.childType(mod), resolve),
@intCast(ty.arrayLen(mod)),
try o.builder.debugTuple(&.{
try o.builder.debugSubrange(
try o.builder.debugConstant(try o.builder.intConst(.i64, 0)),
try o.builder.debugConstant(try o.builder.intConst(.i64, ty.arrayLen(mod))),
),
}),
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(array_di_ty));
return array_di_ty;
try o.debug_type_map.put(gpa, ty, debug_array_type);
return debug_array_type;
},
.Vector => {
const elem_ty = ty.elemType2(mod);
@@ -2236,146 +2232,136 @@ pub const Object = struct {
// @bitSizeOf(elem) * len > @bitSizeOf(vec).
// Neither gdb nor lldb seem to be able to display non-byte sized
// vectors properly.
const elem_di_type = switch (elem_ty.zigTypeTag(mod)) {
const debug_elem_type = switch (elem_ty.zigTypeTag(mod)) {
.Int => blk: {
const info = elem_ty.intInfo(mod);
assert(info.bits != 0);
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const dwarf_encoding: c_uint = switch (info.signedness) {
.signed => DW.ATE.signed,
.unsigned => DW.ATE.unsigned,
const builder_name = try o.builder.metadataString(name);
break :blk switch (info.signedness) {
.signed => try o.builder.debugSignedType(builder_name, info.bits),
.unsigned => try o.builder.debugUnsignedType(builder_name, info.bits),
};
break :blk dib.createBasicType(name, info.bits, dwarf_encoding);
},
.Bool => dib.createBasicType("bool", 1, DW.ATE.boolean),
else => try o.lowerDebugType(ty.childType(mod), resolve),
.Bool => try o.builder.debugBoolType(
try o.builder.metadataString("bool"),
1,
),
else => try o.lowerDebugType(ty.childType(mod)),
};
 
const vector_di_ty = dib.createVectorType(
const debug_vector_type = try o.builder.debugVectorType(
.none, // Name
.none, // File
.none, // Scope
0, // Line
debug_elem_type,
ty.abiSize(mod) * 8,
@intCast(ty.abiAlignment(mod).toByteUnits(0) * 8),
elem_di_type,
ty.vectorLen(mod),
ty.abiAlignment(mod).toByteUnits(0) * 8,
try o.builder.debugTuple(&.{
try o.builder.debugSubrange(
try o.builder.debugConstant(try o.builder.intConst(.i64, 0)),
try o.builder.debugConstant(try o.builder.intConst(.i64, ty.vectorLen(mod))),
),
}),
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(vector_di_ty));
return vector_di_ty;
 
try o.debug_type_map.put(gpa, ty, debug_vector_type);
return debug_vector_type;
},
.Optional => {
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const child_ty = ty.optionalChild(mod);
if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const di_bits = 8; // lldb cannot handle non-byte sized types
const di_ty = dib.createBasicType(name, di_bits, DW.ATE.boolean);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty);
return di_ty;
}
if (ty.optionalReprIsPayload(mod)) {
const ptr_di_ty = try o.lowerDebugType(child_ty, resolve);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.init(ptr_di_ty, resolve));
return ptr_di_ty;
const debug_bool_type = try o.builder.debugBoolType(
try o.builder.metadataString(name),
8,
);
try o.debug_type_map.put(gpa, ty, debug_bool_type);
return debug_bool_type;
}
 
const di_file: ?*llvm.DIFile = null;
const line = 0;
const compile_unit_scope = o.di_compile_unit.?.toScope();
const fwd_decl = opt_fwd_decl orelse blk: {
const fwd_decl = dib.createReplaceableCompositeType(
DW.TAG.structure_type,
name.ptr,
compile_unit_scope,
di_file,
line,
);
gop.value_ptr.* = AnnotatedDITypePtr.initFwd(fwd_decl);
if (resolve == .fwd) return fwd_decl;
break :blk fwd_decl;
};
const debug_fwd_ref = try o.builder.debugForwardReference();
 
// Set as forward reference while the type is lowered in case it references itself
try o.debug_type_map.put(gpa, ty, debug_fwd_ref);
 
if (ty.optionalReprIsPayload(mod)) {
const debug_optional_type = try o.lowerDebugType(child_ty);
 
o.builder.debugForwardReferenceSetType(debug_fwd_ref, debug_optional_type);
 
// Set to real type now that it has been lowered fully
const map_ptr = o.debug_type_map.getPtr(ty) orelse unreachable;
map_ptr.* = debug_optional_type;
 
return debug_optional_type;
}
 
const non_null_ty = Type.u8;
const payload_size = child_ty.abiSize(mod);
const payload_align = child_ty.abiAlignment(mod);
const non_null_size = non_null_ty.abiSize(mod);
const non_null_align = non_null_ty.abiAlignment(mod);
const non_null_offset = non_null_align.forward(payload_size);
 
var offset: u64 = 0;
offset += payload_size;
offset = non_null_align.forward(offset);
const non_null_offset = offset;
 
const fields: [2]*llvm.DIType = .{
dib.createMemberType(
fwd_decl.toScope(),
"data",
di_file,
line,
payload_size * 8, // size in bits
payload_align.toByteUnits(0) * 8, // align in bits
0, // offset in bits
0, // flags
try o.lowerDebugType(child_ty, resolve),
),
dib.createMemberType(
fwd_decl.toScope(),
"some",
di_file,
line,
non_null_size * 8, // size in bits
non_null_align.toByteUnits(0) * 8, // align in bits
non_null_offset * 8, // offset in bits
0, // flags
try o.lowerDebugType(non_null_ty, resolve),
),
};
 
const full_di_ty = dib.createStructType(
compile_unit_scope,
name.ptr,
di_file,
line,
ty.abiSize(mod) * 8, // size in bits
ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
0, // flags
null, // derived from
&fields,
fields.len,
0, // run time lang
null, // vtable holder
"", // unique id
const debug_data_type = try o.builder.debugMemberType(
try o.builder.metadataString("data"),
.none, // File
debug_fwd_ref,
0, // Line
try o.lowerDebugType(child_ty),
payload_size * 8,
payload_align.toByteUnits(0) * 8,
0, // Offset
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
 
const debug_some_type = try o.builder.debugMemberType(
try o.builder.metadataString("some"),
.none,
debug_fwd_ref,
0,
try o.lowerDebugType(non_null_ty),
non_null_size * 8,
non_null_align.toByteUnits(0) * 8,
non_null_offset * 8,
);
 
const debug_optional_type = try o.builder.debugStructType(
try o.builder.metadataString(name),
.none, // File
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
ty.abiAlignment(mod).toByteUnits(0) * 8,
try o.builder.debugTuple(&.{
debug_data_type,
debug_some_type,
}),
);
 
o.builder.debugForwardReferenceSetType(debug_fwd_ref, debug_optional_type);
 
// Set to real type now that it has been lowered fully
const map_ptr = o.debug_type_map.getPtr(ty) orelse unreachable;
map_ptr.* = debug_optional_type;
 
return debug_optional_type;
},
.ErrorUnion => {
const payload_ty = ty.errorUnionPayload(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const err_set_di_ty = try o.lowerDebugType(Type.anyerror, resolve);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(err_set_di_ty));
return err_set_di_ty;
// TODO: Maybe remove?
const debug_error_union_type = try o.lowerDebugType(Type.anyerror);
try o.debug_type_map.put(gpa, ty, debug_error_union_type);
return debug_error_union_type;
}
 
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const di_file: ?*llvm.DIFile = null;
const line = 0;
const compile_unit_scope = o.di_compile_unit.?.toScope();
const fwd_decl = opt_fwd_decl orelse blk: {
const fwd_decl = dib.createReplaceableCompositeType(
DW.TAG.structure_type,
name.ptr,
compile_unit_scope,
di_file,
line,
);
gop.value_ptr.* = AnnotatedDITypePtr.initFwd(fwd_decl);
if (resolve == .fwd) return fwd_decl;
break :blk fwd_decl;
};
 
const error_size = Type.anyerror.abiSize(mod);
const error_align = Type.anyerror.abiAlignment(mod);
@@ -2398,59 +2384,55 @@ pub const Object = struct {
error_offset = error_align.forward(payload_size);
}
 
var fields: [2]*llvm.DIType = undefined;
fields[error_index] = dib.createMemberType(
fwd_decl.toScope(),
"tag",
di_file,
line,
error_size * 8, // size in bits
error_align.toByteUnits(0) * 8, // align in bits
error_offset * 8, // offset in bits
0, // flags
try o.lowerDebugType(Type.anyerror, resolve),
const debug_fwd_ref = try o.builder.debugForwardReference();
 
var fields: [2]Builder.Metadata = undefined;
fields[error_index] = try o.builder.debugMemberType(
try o.builder.metadataString("tag"),
.none, // File
debug_fwd_ref,
0, // Line
try o.lowerDebugType(Type.anyerror),
error_size * 8,
error_align.toByteUnits(0) * 8,
error_offset * 8,
);
fields[payload_index] = dib.createMemberType(
fwd_decl.toScope(),
"value",
di_file,
line,
payload_size * 8, // size in bits
payload_align.toByteUnits(0) * 8, // align in bits
payload_offset * 8, // offset in bits
0, // flags
try o.lowerDebugType(payload_ty, resolve),
fields[payload_index] = try o.builder.debugMemberType(
try o.builder.metadataString("value"),
.none, // File
debug_fwd_ref,
0, // Line
try o.lowerDebugType(payload_ty),
payload_size * 8,
payload_align.toByteUnits(0) * 8,
payload_offset * 8,
);
 
const full_di_ty = dib.createStructType(
compile_unit_scope,
name.ptr,
di_file,
line,
ty.abiSize(mod) * 8, // size in bits
ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
0, // flags
null, // derived from
&fields,
fields.len,
0, // run time lang
null, // vtable holder
"", // unique id
const debug_error_union_type = try o.builder.debugStructType(
try o.builder.metadataString(name),
.none, // File
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
ty.abiAlignment(mod).toByteUnits(0) * 8,
try o.builder.debugTuple(&fields),
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
 
o.builder.debugForwardReferenceSetType(debug_fwd_ref, debug_error_union_type);
 
try o.debug_type_map.put(gpa, ty, debug_error_union_type);
return debug_error_union_type;
},
.ErrorSet => {
// TODO make this a proper enum with all the error codes in it.
// will need to consider how to take incremental compilation into account.
const di_ty = dib.createBasicType("anyerror", 16, DW.ATE.unsigned);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty);
return di_ty;
const debug_error_set = try o.builder.debugUnsignedType(
try o.builder.metadataString("anyerror"),
16,
);
try o.debug_type_map.put(gpa, ty, debug_error_set);
return debug_error_set;
},
.Struct => {
const compile_unit_scope = o.di_compile_unit.?.toScope();
const name = try o.allocTypeName(ty);
defer gpa.free(name);
 
@@ -2458,40 +2440,28 @@ pub const Object = struct {
const backing_int_ty = struct_type.backingIntType(ip).*;
if (backing_int_ty != .none) {
const info = Type.fromInterned(backing_int_ty).intInfo(mod);
const dwarf_encoding: c_uint = switch (info.signedness) {
.signed => DW.ATE.signed,
.unsigned => DW.ATE.unsigned,
const builder_name = try o.builder.metadataString(name);
const debug_int_type = switch (info.signedness) {
.signed => try o.builder.debugSignedType(builder_name, ty.abiSize(mod) * 8),
.unsigned => try o.builder.debugUnsignedType(builder_name, ty.abiSize(mod) * 8),
};
const di_bits = ty.abiSize(mod) * 8; // lldb cannot handle non-byte sized types
const di_ty = dib.createBasicType(name, di_bits, dwarf_encoding);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty);
return di_ty;
try o.debug_type_map.put(gpa, ty, debug_int_type);
return debug_int_type;
}
}
 
const fwd_decl = opt_fwd_decl orelse blk: {
const fwd_decl = dib.createReplaceableCompositeType(
DW.TAG.structure_type,
name.ptr,
compile_unit_scope,
null, // file
0, // line
);
gop.value_ptr.* = AnnotatedDITypePtr.initFwd(fwd_decl);
if (resolve == .fwd) return fwd_decl;
break :blk fwd_decl;
};
 
switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |tuple| {
var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{};
defer di_fields.deinit(gpa);
var fields: std.ArrayListUnmanaged(Builder.Metadata) = .{};
defer fields.deinit(gpa);
 
try di_fields.ensureUnusedCapacity(gpa, tuple.types.len);
try fields.ensureUnusedCapacity(gpa, tuple.types.len);
 
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
 
const debug_fwd_ref = try o.builder.debugForwardReference();
 
for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, field_val, i| {
if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
 
@@ -2506,38 +2476,33 @@ pub const Object = struct {
try std.fmt.allocPrintZ(gpa, "{d}", .{i});
defer if (tuple.names.len == 0) gpa.free(field_name);
 
try di_fields.append(gpa, dib.createMemberType(
fwd_decl.toScope(),
field_name,
null, // file
0, // line
field_size * 8, // size in bits
field_align.toByteUnits(0) * 8, // align in bits
field_offset * 8, // offset in bits
0, // flags
try o.lowerDebugType(Type.fromInterned(field_ty), resolve),
fields.appendAssumeCapacity(try o.builder.debugMemberType(
try o.builder.metadataString(field_name),
.none, // File
debug_fwd_ref,
0,
try o.lowerDebugType(Type.fromInterned(field_ty)),
field_size * 8,
field_align.toByteUnits(0) * 8,
field_offset * 8,
));
}
 
const full_di_ty = dib.createStructType(
compile_unit_scope,
name.ptr,
null, // file
0, // line
ty.abiSize(mod) * 8, // size in bits
ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
0, // flags
null, // derived from
di_fields.items.ptr,
@intCast(di_fields.items.len),
0, // run time lang
null, // vtable holder
"", // unique id
const debug_struct_type = try o.builder.debugStructType(
try o.builder.metadataString(name),
.none, // File
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
ty.abiAlignment(mod).toByteUnits(0) * 8,
try o.builder.debugTuple(fields.items),
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
 
o.builder.debugForwardReferenceSetType(debug_fwd_ref, debug_struct_type);
 
try o.debug_type_map.put(gpa, ty, debug_struct_type);
return debug_struct_type;
},
.struct_type => |struct_type| {
if (!struct_type.haveFieldTypes(ip)) {
@@ -2549,12 +2514,9 @@ pub const Object = struct {
// rather than changing the frontend to unnecessarily resolve the
// struct field types.
const owner_decl_index = ty.getOwnerDecl(mod);
const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
dib.replaceTemporary(fwd_decl, struct_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(struct_di_ty));
return struct_di_ty;
const debug_struct_type = try o.makeEmptyNamespaceDebugType(owner_decl_index);
try o.debug_type_map.put(gpa, ty, debug_struct_type);
return debug_struct_type;
}
},
else => {},
@@ -2562,20 +2524,22 @@ pub const Object = struct {
 
if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
const owner_decl_index = ty.getOwnerDecl(mod);
const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
dib.replaceTemporary(fwd_decl, struct_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(struct_di_ty));
return struct_di_ty;
const debug_struct_type = try o.makeEmptyNamespaceDebugType(owner_decl_index);
try o.debug_type_map.put(gpa, ty, debug_struct_type);
return debug_struct_type;
}
 
const struct_type = mod.typeToStruct(ty).?;
 
var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{};
defer di_fields.deinit(gpa);
var fields: std.ArrayListUnmanaged(Builder.Metadata) = .{};
defer fields.deinit(gpa);
 
try di_fields.ensureUnusedCapacity(gpa, struct_type.field_types.len);
try fields.ensureUnusedCapacity(gpa, struct_type.field_types.len);
 
const debug_fwd_ref = try o.builder.debugForwardReference();
 
// Set as forward reference while the type is lowered in case it references itself
try o.debug_type_map.put(gpa, ty, debug_fwd_ref);
 
comptime assert(struct_layout_version == 2);
var it = struct_type.iterateRuntimeOrder(ip);
@@ -2593,103 +2557,88 @@ pub const Object = struct {
const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
try ip.getOrPutStringFmt(gpa, "{d}", .{field_index});
 
const field_di_ty = try o.lowerDebugType(field_ty, resolve);
 
try di_fields.append(gpa, dib.createMemberType(
fwd_decl.toScope(),
ip.stringToSlice(field_name),
null, // file
0, // line
field_size * 8, // size in bits
field_align.toByteUnits(0) * 8, // align in bits
field_offset * 8, // offset in bits
0, // flags
field_di_ty,
fields.appendAssumeCapacity(try o.builder.debugMemberType(
try o.builder.metadataString(ip.stringToSlice(field_name)),
.none, // File
debug_fwd_ref,
0, // Line
try o.lowerDebugType(field_ty),
field_size * 8,
field_align.toByteUnits(0) * 8,
field_offset * 8,
));
}
 
const full_di_ty = dib.createStructType(
compile_unit_scope,
name.ptr,
null, // file
0, // line
ty.abiSize(mod) * 8, // size in bits
ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
0, // flags
null, // derived from
di_fields.items.ptr,
@intCast(di_fields.items.len),
0, // run time lang
null, // vtable holder
"", // unique id
const debug_struct_type = try o.builder.debugStructType(
try o.builder.metadataString(name),
.none, // File
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
ty.abiAlignment(mod).toByteUnits(0) * 8,
try o.builder.debugTuple(fields.items),
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
 
o.builder.debugForwardReferenceSetType(debug_fwd_ref, debug_struct_type);
 
// Set to real type now that it has been lowered fully
const map_ptr = o.debug_type_map.getPtr(ty) orelse unreachable;
map_ptr.* = debug_struct_type;
 
return debug_struct_type;
},
.Union => {
const compile_unit_scope = o.di_compile_unit.?.toScope();
const owner_decl_index = ty.getOwnerDecl(mod);
 
const name = try o.allocTypeName(ty);
defer gpa.free(name);
 
const fwd_decl = opt_fwd_decl orelse blk: {
const fwd_decl = dib.createReplaceableCompositeType(
DW.TAG.structure_type,
name.ptr,
o.di_compile_unit.?.toScope(),
null, // file
0, // line
);
gop.value_ptr.* = AnnotatedDITypePtr.initFwd(fwd_decl);
if (resolve == .fwd) return fwd_decl;
break :blk fwd_decl;
};
 
const union_type = ip.indexToKey(ty.toIntern()).union_type;
if (!union_type.haveFieldTypes(ip) or !ty.hasRuntimeBitsIgnoreComptime(mod)) {
const union_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
dib.replaceTemporary(fwd_decl, union_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(union_di_ty));
return union_di_ty;
const debug_union_type = try o.makeEmptyNamespaceDebugType(owner_decl_index);
try o.debug_type_map.put(gpa, ty, debug_union_type);
return debug_union_type;
}
 
const union_obj = ip.loadUnionType(union_type);
const layout = mod.getUnionLayout(union_obj);
 
const debug_fwd_ref = try o.builder.debugForwardReference();
 
// Set as forward reference while the type is lowered in case it references itself
try o.debug_type_map.put(gpa, ty, debug_fwd_ref);
 
if (layout.payload_size == 0) {
const tag_di_ty = try o.lowerDebugType(Type.fromInterned(union_obj.enum_tag_ty), resolve);
const di_fields = [_]*llvm.DIType{tag_di_ty};
const full_di_ty = dib.createStructType(
compile_unit_scope,
name.ptr,
null, // file
0, // line
ty.abiSize(mod) * 8, // size in bits
ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
0, // flags
null, // derived from
&di_fields,
di_fields.len,
0, // run time lang
null, // vtable holder
"", // unique id
const debug_union_type = try o.builder.debugStructType(
try o.builder.metadataString(name),
.none, // File
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
ty.abiAlignment(mod).toByteUnits(0) * 8,
try o.builder.debugTuple(
&.{try o.lowerDebugType(Type.fromInterned(union_obj.enum_tag_ty))},
),
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
 
// Set to real type now that it has been lowered fully
const map_ptr = o.debug_type_map.getPtr(ty) orelse unreachable;
map_ptr.* = debug_union_type;
 
return debug_union_type;
}
 
var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{};
defer di_fields.deinit(gpa);
var fields: std.ArrayListUnmanaged(Builder.Metadata) = .{};
defer fields.deinit(gpa);
 
try di_fields.ensureUnusedCapacity(gpa, union_obj.field_names.len);
try fields.ensureUnusedCapacity(gpa, union_obj.field_names.len);
 
const debug_union_fwd_ref = if (layout.tag_size == 0)
debug_fwd_ref
else
try o.builder.debugForwardReference();
 
for (0..union_obj.field_names.len) |field_index| {
const field_ty = union_obj.field_types.get(ip)[field_index];
@@ -2698,18 +2647,16 @@ pub const Object = struct {
const field_size = Type.fromInterned(field_ty).abiSize(mod);
const field_align = mod.unionFieldNormalAlignment(union_obj, @intCast(field_index));
 
const field_di_ty = try o.lowerDebugType(Type.fromInterned(field_ty), resolve);
const field_name = union_obj.field_names.get(ip)[field_index];
di_fields.appendAssumeCapacity(dib.createMemberType(
fwd_decl.toScope(),
ip.stringToSlice(field_name),
null, // file
0, // line
field_size * 8, // size in bits
field_align.toByteUnits(0) * 8, // align in bits
0, // offset in bits
0, // flags
field_di_ty,
fields.appendAssumeCapacity(try o.builder.debugMemberType(
try o.builder.metadataString(ip.stringToSlice(field_name)),
.none, // File
debug_union_fwd_ref,
0, // Line
try o.lowerDebugType(Type.fromInterned(field_ty)),
field_size * 8,
field_align.toByteUnits(0) * 8,
0, // Offset
));
}
 
@@ -2720,25 +2667,25 @@ pub const Object = struct {
break :name union_name_buf.?;
};
 
const union_di_ty = dib.createUnionType(
compile_unit_scope,
union_name.ptr,
null, // file
0, // line
ty.abiSize(mod) * 8, // size in bits
ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
0, // flags
di_fields.items.ptr,
@intCast(di_fields.items.len),
0, // run time lang
"", // unique id
const debug_union_type = try o.builder.debugUnionType(
try o.builder.metadataString(union_name),
.none, // File
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
ty.abiAlignment(mod).toByteUnits(0) * 8,
try o.builder.debugTuple(fields.items),
);
 
o.builder.debugForwardReferenceSetType(debug_union_fwd_ref, debug_union_type);
 
if (layout.tag_size == 0) {
dib.replaceTemporary(fwd_decl, union_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(union_di_ty));
return union_di_ty;
// Set to real type now that it has been lowered fully
const map_ptr = o.debug_type_map.getPtr(ty) orelse unreachable;
map_ptr.* = debug_union_type;
 
return debug_union_type;
}
 
var tag_offset: u64 = undefined;
@@ -2751,81 +2698,80 @@ pub const Object = struct {
tag_offset = layout.tag_align.forward(layout.payload_size);
}
 
const tag_di = dib.createMemberType(
fwd_decl.toScope(),
"tag",
null, // file
0, // line
const debug_tag_type = try o.builder.debugMemberType(
try o.builder.metadataString("tag"),
.none, // File
debug_fwd_ref,
0, // Line
try o.lowerDebugType(Type.fromInterned(union_obj.enum_tag_ty)),
layout.tag_size * 8,
layout.tag_align.toByteUnits(0) * 8,
tag_offset * 8, // offset in bits
0, // flags
try o.lowerDebugType(Type.fromInterned(union_obj.enum_tag_ty), resolve),
tag_offset * 8,
);
 
const payload_di = dib.createMemberType(
fwd_decl.toScope(),
"payload",
null, // file
0, // line
layout.payload_size * 8, // size in bits
const debug_payload_type = try o.builder.debugMemberType(
try o.builder.metadataString("payload"),
.none, // File
debug_fwd_ref,
0, // Line
debug_union_type,
layout.payload_size * 8,
layout.payload_align.toByteUnits(0) * 8,
payload_offset * 8, // offset in bits
0, // flags
union_di_ty,
payload_offset * 8,
);
 
const full_di_fields: [2]*llvm.DIType =
const full_fields: [2]Builder.Metadata =
if (layout.tag_align.compare(.gte, layout.payload_align))
.{ tag_di, payload_di }
.{ debug_tag_type, debug_payload_type }
else
.{ payload_di, tag_di };
.{ debug_payload_type, debug_tag_type };
 
const full_di_ty = dib.createStructType(
compile_unit_scope,
name.ptr,
null, // file
0, // line
ty.abiSize(mod) * 8, // size in bits
ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
0, // flags
null, // derived from
&full_di_fields,
full_di_fields.len,
0, // run time lang
null, // vtable holder
"", // unique id
const debug_tagged_union_type = try o.builder.debugStructType(
try o.builder.metadataString(name),
.none, // File
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
ty.abiAlignment(mod).toByteUnits(0) * 8,
try o.builder.debugTuple(&full_fields),
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
 
o.builder.debugForwardReferenceSetType(debug_fwd_ref, debug_tagged_union_type);
 
// Set to real type now that it has been lowered fully
const map_ptr = o.debug_type_map.getPtr(ty) orelse unreachable;
map_ptr.* = debug_tagged_union_type;
 
return debug_tagged_union_type;
},
.Fn => {
const fn_info = mod.typeToFunc(ty).?;
 
var param_di_types = std.ArrayList(*llvm.DIType).init(gpa);
defer param_di_types.deinit();
var debug_param_types = std.ArrayList(Builder.Metadata).init(gpa);
defer debug_param_types.deinit();
 
try debug_param_types.ensureUnusedCapacity(3 + fn_info.param_types.len);
 
// Return type goes first.
if (Type.fromInterned(fn_info.return_type).hasRuntimeBitsIgnoreComptime(mod)) {
const sret = firstParamSRet(fn_info, mod);
const di_ret_ty = if (sret) Type.void else Type.fromInterned(fn_info.return_type);
try param_di_types.append(try o.lowerDebugType(di_ret_ty, resolve));
const ret_ty = if (sret) Type.void else Type.fromInterned(fn_info.return_type);
debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ret_ty));
 
if (sret) {
const ptr_ty = try mod.singleMutPtrType(Type.fromInterned(fn_info.return_type));
try param_di_types.append(try o.lowerDebugType(ptr_ty, resolve));
debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty));
}
} else {
try param_di_types.append(try o.lowerDebugType(Type.void, resolve));
debug_param_types.appendAssumeCapacity(try o.lowerDebugType(Type.void));
}
 
if (Type.fromInterned(fn_info.return_type).isError(mod) and
o.module.comp.config.any_error_tracing)
{
const ptr_ty = try mod.singleMutPtrType(try o.getStackTraceType());
try param_di_types.append(try o.lowerDebugType(ptr_ty, resolve));
debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty));
}
 
for (0..fn_info.param_types.len) |i| {
@@ -2834,20 +2780,18 @@ pub const Object = struct {
 
if (isByRef(param_ty, mod)) {
const ptr_ty = try mod.singleMutPtrType(param_ty);
try param_di_types.append(try o.lowerDebugType(ptr_ty, resolve));
debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty));
} else {
try param_di_types.append(try o.lowerDebugType(param_ty, resolve));
debug_param_types.appendAssumeCapacity(try o.lowerDebugType(param_ty));
}
}
 
const fn_di_ty = dib.createSubroutineType(
param_di_types.items.ptr,
@intCast(param_di_types.items.len),
0,
const debug_function_type = try o.builder.debugSubroutineType(
try o.builder.debugTuple(debug_param_types.items),
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(fn_di_ty));
return fn_di_ty;
 
try o.debug_type_map.put(gpa, ty, debug_function_type);
return debug_function_type;
},
.ComptimeInt => unreachable,
.ComptimeFloat => unreachable,
@@ -2861,39 +2805,30 @@ pub const Object = struct {
}
}
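// The struct and union cases above share one pattern for self-referential
// types: reserve a forward reference, map the type to it before lowering the
// fields, then point the forward reference at the finished node and replace
// the map entry. A minimal sketch of that pattern, assuming a simplified
// `lowerFieldTuple` helper and placeholder name/size values (these helpers
// and literals are illustrative, not part of this change):
fn lowerRecursiveDebugType(o: *Object, ty: Type) !Builder.Metadata {
    if (o.debug_type_map.get(ty)) |cached| return cached;

    const fwd_ref = try o.builder.debugForwardReference();
    // Any field that refers back to `ty` resolves to the forward reference.
    try o.debug_type_map.put(o.gpa, ty, fwd_ref);

    const field_tuple = try lowerFieldTuple(o, ty, fwd_ref); // assumed helper
    const full_type = try o.builder.debugStructType(
        try o.builder.metadataString("T"), // Name (placeholder)
        .none, // File
        o.debug_compile_unit, // Scope
        0, // Line
        .none, // Underlying type
        0, // Size in bits (placeholder)
        0, // Align in bits (placeholder)
        field_tuple,
    );
    o.builder.debugForwardReferenceSetType(fwd_ref, full_type);

    // Swap the placeholder for the real node now that lowering is done.
    o.debug_type_map.getPtr(ty).?.* = full_type;
    return full_type;
}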
 
fn namespaceToDebugScope(o: *Object, namespace_index: InternPool.NamespaceIndex) !*llvm.DIScope {
fn namespaceToDebugScope(o: *Object, namespace_index: InternPool.NamespaceIndex) !Builder.Metadata {
const mod = o.module;
const namespace = mod.namespacePtr(namespace_index);
if (namespace.parent == .none) {
const di_file = try o.getDIFile(o.gpa, namespace.file_scope);
return di_file.toScope();
}
const di_type = try o.lowerDebugType(namespace.ty, .fwd);
return di_type.toScope();
if (namespace.parent == .none) return try o.getDebugFile(namespace.file_scope);
 
const gop = try o.debug_unresolved_namespace_scopes.getOrPut(o.gpa, namespace_index);
 
if (!gop.found_existing) gop.value_ptr.* = try o.builder.debugForwardReference();
 
return gop.value_ptr.*;
}
 
/// This is to be used instead of void for debug info types, to avoid tripping
/// Assertion `!isa<DIType>(Scope) && "shouldn't make a namespace scope for a type"'
/// when targeting CodeView (Windows).
fn makeEmptyNamespaceDIType(o: *Object, decl_index: InternPool.DeclIndex) !*llvm.DIType {
fn makeEmptyNamespaceDebugType(o: *Object, decl_index: InternPool.DeclIndex) !Builder.Metadata {
const mod = o.module;
const decl = mod.declPtr(decl_index);
const fields: [0]*llvm.DIType = .{};
const di_scope = try o.namespaceToDebugScope(decl.src_namespace);
return o.di_builder.?.createStructType(
di_scope,
mod.intern_pool.stringToSlice(decl.name), // TODO use fully qualified name
try o.getDIFile(o.gpa, mod.namespacePtr(decl.src_namespace).file_scope),
return o.builder.debugStructType(
try o.builder.metadataString(mod.intern_pool.stringToSlice(decl.name)), // TODO use fully qualified name
try o.getDebugFile(mod.namespacePtr(decl.src_namespace).file_scope),
try o.namespaceToDebugScope(decl.src_namespace),
decl.src_line + 1,
0, // size in bits
0, // align in bits
0, // flags
null, // derived from
undefined, // TODO should be able to pass &fields,
fields.len,
0, // run time lang
null, // vtable holder
"", // unique id
.none,
0,
0,
.none,
);
}
 
@@ -3202,26 +3137,6 @@ pub const Object = struct {
}
 
fn lowerType(o: *Object, t: Type) Allocator.Error!Builder.Type {
const ty = try o.lowerTypeInner(t);
const mod = o.module;
if (std.debug.runtime_safety and o.builder.useLibLlvm() and false) check: {
const llvm_ty = ty.toLlvm(&o.builder);
if (t.zigTypeTag(mod) == .Opaque) break :check;
if (!t.hasRuntimeBits(mod)) break :check;
if (!try ty.isSized(&o.builder)) break :check;
 
const zig_size = t.abiSize(mod);
const llvm_size = o.target_data.abiSizeOfType(llvm_ty);
if (llvm_size != zig_size) {
log.err("when lowering {}, Zig ABI size = {d} but LLVM ABI size = {d}", .{
t.fmt(o.module), zig_size, llvm_size,
});
}
}
return ty;
}
 
fn lowerTypeInner(o: *Object, t: Type) Allocator.Error!Builder.Type {
const mod = o.module;
const target = mod.getTarget();
const ip = &mod.intern_pool;
@@ -3406,20 +3321,17 @@ pub const Object = struct {
},
.simple_type => unreachable,
.struct_type => |struct_type| {
const gop = try o.type_map.getOrPut(o.gpa, t.toIntern());
if (gop.found_existing) return gop.value_ptr.*;
if (o.type_map.get(t.toIntern())) |value| return value;
 
if (struct_type.layout == .Packed) {
const int_ty = try o.lowerType(Type.fromInterned(struct_type.backingIntType(ip).*));
gop.value_ptr.* = int_ty;
try o.type_map.put(o.gpa, t.toIntern(), int_ty);
return int_ty;
}
 
const name = try o.builder.string(ip.stringToSlice(
try mod.declPtr(struct_type.decl.unwrap().?).getFullyQualifiedName(mod),
));
const ty = try o.builder.opaqueType(name);
gop.value_ptr.* = ty; // must be done before any recursive calls
 
var llvm_field_types = std.ArrayListUnmanaged(Builder.Type){};
defer llvm_field_types.deinit(o.gpa);
@@ -3484,7 +3396,10 @@ pub const Object = struct {
);
}
 
try o.builder.namedTypeSetBody(
const ty = try o.builder.opaqueType(name);
try o.type_map.put(o.gpa, t.toIntern(), ty);
 
o.builder.namedTypeSetBody(
ty,
try o.builder.structType(struct_kind, llvm_field_types.items),
);
@@ -3553,29 +3468,26 @@ pub const Object = struct {
return o.builder.structType(.normal, llvm_field_types.items);
},
.union_type => |union_type| {
const gop = try o.type_map.getOrPut(o.gpa, t.toIntern());
if (gop.found_existing) return gop.value_ptr.*;
if (o.type_map.get(t.toIntern())) |value| return value;
 
const union_obj = ip.loadUnionType(union_type);
const layout = mod.getUnionLayout(union_obj);
 
if (union_obj.flagsPtr(ip).layout == .Packed) {
const int_ty = try o.builder.intType(@intCast(t.bitSize(mod)));
gop.value_ptr.* = int_ty;
try o.type_map.put(o.gpa, t.toIntern(), int_ty);
return int_ty;
}
 
if (layout.payload_size == 0) {
const enum_tag_ty = try o.lowerType(Type.fromInterned(union_obj.enum_tag_ty));
gop.value_ptr.* = enum_tag_ty;
try o.type_map.put(o.gpa, t.toIntern(), enum_tag_ty);
return enum_tag_ty;
}
 
const name = try o.builder.string(ip.stringToSlice(
try mod.declPtr(union_obj.decl).getFullyQualifiedName(mod),
));
const ty = try o.builder.opaqueType(name);
gop.value_ptr.* = ty; // must be done before any recursive calls
 
const aligned_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[layout.most_aligned_field]);
const aligned_field_llvm_ty = try o.lowerType(aligned_field_ty);
@@ -3595,7 +3507,10 @@ pub const Object = struct {
};
 
if (layout.tag_size == 0) {
try o.builder.namedTypeSetBody(
const ty = try o.builder.opaqueType(name);
try o.type_map.put(o.gpa, t.toIntern(), ty);
 
o.builder.namedTypeSetBody(
ty,
try o.builder.structType(.normal, &.{payload_ty}),
);
@@ -3620,7 +3535,10 @@ pub const Object = struct {
llvm_fields_len += 1;
}
 
try o.builder.namedTypeSetBody(
const ty = try o.builder.opaqueType(name);
try o.type_map.put(o.gpa, t.toIntern(), ty);
 
o.builder.namedTypeSetBody(
ty,
try o.builder.structType(.normal, llvm_fields[0..llvm_fields_len]),
);
@@ -4368,7 +4286,7 @@ pub const Object = struct {
const err_align = err_int_ty.abiAlignment(mod);
const index: u32 = if (payload_align.compare(.gt, err_align)) 2 else 1;
return o.builder.gepConst(.inbounds, try o.lowerType(eu_ty), parent_ptr, null, &.{
try o.builder.intConst(.i32, 0), try o.builder.intConst(.i32, index),
.@"0", try o.builder.intConst(.i32, index),
});
},
.opt_payload => |opt_ptr| {
@@ -4384,9 +4302,7 @@ pub const Object = struct {
return parent_ptr;
}
 
return o.builder.gepConst(.inbounds, try o.lowerType(opt_ty), parent_ptr, null, &.{
try o.builder.intConst(.i32, 0), try o.builder.intConst(.i32, 0),
});
return o.builder.gepConst(.inbounds, try o.lowerType(opt_ty), parent_ptr, null, &.{ .@"0", .@"0" });
},
.comptime_field => unreachable,
.elem => |elem_ptr| {
@@ -4417,7 +4333,7 @@ pub const Object = struct {
 
const parent_llvm_ty = try o.lowerType(parent_ty);
return o.builder.gepConst(.inbounds, parent_llvm_ty, parent_ptr, null, &.{
try o.builder.intConst(.i32, 0),
.@"0",
try o.builder.intConst(.i32, @intFromBool(
layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align),
)),
@@ -4443,7 +4359,7 @@ pub const Object = struct {
parent_ptr,
null,
if (o.llvmFieldIndex(parent_ty, field_index)) |llvm_field_index| &.{
try o.builder.intConst(.i32, 0),
.@"0",
try o.builder.intConst(.i32, llvm_field_index),
} else &.{
try o.builder.intConst(.i32, @intFromBool(
@@ -4456,7 +4372,7 @@ pub const Object = struct {
assert(parent_ty.isSlice(mod));
const parent_llvm_ty = try o.lowerType(parent_ty);
return o.builder.gepConst(.inbounds, parent_llvm_ty, parent_ptr, null, &.{
try o.builder.intConst(.i32, 0), try o.builder.intConst(.i32, field_index),
.@"0", try o.builder.intConst(.i32, field_index),
});
},
else => unreachable,
@@ -4716,8 +4632,8 @@ pub const Object = struct {
defer wip_switch.finish(&wip);
 
for (0..enum_type.names.len) |field_index| {
const name = try o.builder.string(ip.stringToSlice(enum_type.names.get(ip)[field_index]));
const name_init = try o.builder.stringNullConst(name);
const name = try o.builder.stringNull(ip.stringToSlice(enum_type.names.get(ip)[field_index]));
const name_init = try o.builder.stringConst(name);
const name_variable_index =
try o.builder.addVariable(.empty, name_init.typeOf(&o.builder), .default);
try name_variable_index.setInitializer(name_init, &o.builder);
@@ -4728,7 +4644,7 @@ pub const Object = struct {
 
const name_val = try o.builder.structValue(ret_ty, &.{
name_variable_index.toConst(&o.builder),
try o.builder.intConst(usize_ty, name.slice(&o.builder).?.len),
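// `name` is now interned with its null terminator via `stringNull`, so the
// reported slice length drops the trailing zero.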
try o.builder.intConst(usize_ty, name.slice(&o.builder).?.len - 1),
});
 
const return_block = try wip.block(1, "Name");
@@ -4800,26 +4716,33 @@ pub const DeclGen = struct {
else => try o.lowerValue(init_val),
}, &o.builder);
 
if (o.di_builder) |dib| {
const di_file =
try o.getDIFile(o.gpa, mod.namespacePtr(decl.src_namespace).file_scope);
const line_number = decl.src_line + 1;
const is_internal_linkage = !o.module.decl_exports.contains(decl_index);
 
const line_number = decl.src_line + 1;
const is_internal_linkage = !o.module.decl_exports.contains(decl_index);
const di_global = dib.createGlobalVariableExpression(
di_file.toScope(),
mod.intern_pool.stringToSlice(decl.name),
variable_index.name(&o.builder).slice(&o.builder).?,
di_file,
line_number,
try o.lowerDebugType(decl.ty, .full),
is_internal_linkage,
);
if (dg.object.builder.strip) return;
 
try o.di_map.put(o.gpa, dg.decl, di_global.getVariable().toNode());
if (!is_internal_linkage or decl.isExtern(mod))
variable_index.toLlvm(&o.builder).attachMetaData(di_global);
}
const debug_file = try o.getDebugFile(mod.namespacePtr(decl.src_namespace).file_scope);
 
const debug_global_var = try o.builder.debugGlobalVar(
try o.builder.metadataString(mod.intern_pool.stringToSlice(decl.name)), // Name
try o.builder.metadataStringFromString(variable_index.name(&o.builder)), // Linkage name
debug_file, // File
debug_file, // Scope
line_number,
try o.lowerDebugType(decl.ty),
variable_index,
.{ .local = is_internal_linkage },
);
 
const debug_expression = try o.builder.debugExpression(&.{});
 
const debug_global_var_expression = try o.builder.debugGlobalVarExpression(
debug_global_var,
debug_expression,
);
if (!is_internal_linkage or decl.isExtern(mod))
variable_index.setGlobalVariableExpression(debug_global_var_expression, &o.builder);
try o.debug_globals.append(o.gpa, debug_global_var_expression);
}
}
};
@@ -4830,19 +4753,22 @@ pub const FuncGen = struct {
air: Air,
liveness: Liveness,
wip: Builder.WipFunction,
di_scope: ?if (build_options.have_llvm) *llvm.DIScope else noreturn,
di_file: ?if (build_options.have_llvm) *llvm.DIFile else noreturn,
 
file: Builder.Metadata,
scope: Builder.Metadata,
 
inlined: std.ArrayListUnmanaged(struct {
base_line: u32,
location: Builder.Metadata,
scope: Builder.Metadata,
}) = .{},
 
scope_stack: std.ArrayListUnmanaged(Builder.Metadata) = .{},
 
base_line: u32,
prev_dbg_line: c_uint,
prev_dbg_column: c_uint,
 
/// Stack of locations where a call was inlined.
dbg_inlined: std.ArrayListUnmanaged(if (build_options.have_llvm) DbgState else void) = .{},
 
/// Stack of `DILexicalBlock`s. dbg_block instructions cannot happen across
dbg_inline instructions, so no special handling is required there.
dbg_block_stack: std.ArrayListUnmanaged(if (build_options.have_llvm) *llvm.DIScope else void) = .{},
 
/// This stores the LLVM values used in a function, such that they can be referred to
/// in other instructions. This table is cleared before every function is generated.
func_inst_table: std.AutoHashMapUnmanaged(Air.Inst.Ref, Builder.Value),
@@ -4872,7 +4798,6 @@ pub const FuncGen = struct {
 
sync_scope: Builder.SyncScope,
 
const DbgState = if (build_options.have_llvm) struct { loc: *llvm.DILocation, scope: *llvm.DIScope, base_line: u32 } else struct {};
const BreakList = union {
list: std.MultiArrayList(struct {
bb: Builder.Function.Block.Index,
@@ -4883,8 +4808,8 @@ pub const FuncGen = struct {
 
fn deinit(self: *FuncGen) void {
self.wip.deinit();
self.dbg_inlined.deinit(self.gpa);
self.dbg_block_stack.deinit(self.gpa);
self.scope_stack.deinit(self.gpa);
self.inlined.deinit(self.gpa);
self.func_inst_table.deinit(self.gpa);
self.blocks.deinit(self.gpa);
}
@@ -5493,9 +5418,6 @@ pub const FuncGen = struct {
// a different LLVM type than the usual one. We solve this here at the callsite
// by using our canonical type, then loading it if necessary.
const alignment = return_type.abiAlignment(mod).toLlvm();
if (o.builder.useLibLlvm())
assert(o.target_data.abiSizeOfType(abi_ret_ty.toLlvm(&o.builder)) >=
o.target_data.abiSizeOfType(llvm_ret_ty.toLlvm(&o.builder)));
const rp = try self.buildAlloca(abi_ret_ty, alignment);
_ = try self.wip.store(.normal, call, rp, alignment);
return if (isByRef(return_type, mod))
@@ -5862,7 +5784,7 @@ pub const FuncGen = struct {
};
 
const phi = try self.wip.phi(.i1, "");
try phi.finish(
phi.finish(
&incoming_values,
&.{ both_null_block, mixed_block, both_pl_block_end },
&self.wip,
@@ -5929,7 +5851,7 @@ pub const FuncGen = struct {
 
parent_bb.ptr(&self.wip).incoming = @intCast(breaks.list.len);
const phi = try self.wip.phi(llvm_ty, "");
try phi.finish(breaks.list.items(.val), breaks.list.items(.bb), &self.wip);
phi.finish(breaks.list.items(.val), breaks.list.items(.bb), &self.wip);
return phi.toValue();
} else {
parent_bb.ptr(&self.wip).incoming = @intCast(breaks.len);
@@ -6653,42 +6575,43 @@ pub const FuncGen = struct {
}
 
fn airDbgStmt(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const di_scope = self.di_scope orelse return .none;
if (self.wip.builder.strip) return .none;
const dbg_stmt = self.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt;
self.prev_dbg_line = @intCast(self.base_line + dbg_stmt.line + 1);
self.prev_dbg_column = @intCast(dbg_stmt.column + 1);
const inlined_at = if (self.dbg_inlined.items.len > 0)
self.dbg_inlined.items[self.dbg_inlined.items.len - 1].loc
const inlined_at = if (self.inlined.items.len > 0)
self.inlined.items[self.inlined.items.len - 1].location
else
null;
self.wip.llvm.builder.setCurrentDebugLocation(
.none;
 
self.wip.current_debug_location = try self.wip.builder.debugLocation(
self.prev_dbg_line,
self.prev_dbg_column,
di_scope,
self.scope,
inlined_at,
);
 
return .none;
}
 
fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
if (self.wip.builder.strip) return .none;
const o = self.dg.object;
const dib = o.di_builder orelse return .none;
const ty_fn = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_fn;
 
const zcu = o.module;
 
const ty_fn = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_fn;
const func = zcu.funcInfo(ty_fn.func);
const decl_index = func.owner_decl;
const decl = zcu.declPtr(decl_index);
const namespace = zcu.namespacePtr(decl.src_namespace);
const owner_mod = namespace.file_scope.mod;
const di_file = try o.getDIFile(self.gpa, zcu.namespacePtr(decl.src_namespace).file_scope);
self.di_file = di_file;
const line_number = decl.src_line + 1;
const cur_debug_location = self.wip.llvm.builder.getCurrentDebugLocation2();
 
try self.dbg_inlined.append(self.gpa, .{
.loc = @ptrCast(cur_debug_location),
.scope = self.di_scope.?,
self.file = try o.getDebugFile(namespace.file_scope);
 
const line_number = decl.src_line + 1;
try self.inlined.append(self.gpa, .{
.location = self.wip.current_debug_location,
.scope = self.scope,
.base_line = self.base_line,
});
 
@@ -6699,91 +6622,118 @@ pub const FuncGen = struct {
.param_types = &.{},
.return_type = .void_type,
});
const fn_di_ty = try o.lowerDebugType(fn_ty, .full);
const subprogram = dib.createFunction(
di_file.toScope(),
zcu.intern_pool.stringToSlice(decl.name),
zcu.intern_pool.stringToSlice(fqn),
di_file,
 
const subprogram = try o.builder.debugSubprogram(
self.file,
try o.builder.metadataString(zcu.intern_pool.stringToSlice(decl.name)),
try o.builder.metadataString(zcu.intern_pool.stringToSlice(fqn)),
line_number,
fn_di_ty,
is_internal_linkage,
true, // is definition
line_number + func.lbrace_line, // scope line
llvm.DIFlags.StaticMember,
owner_mod.optimize_mode != .Debug,
null, // decl_subprogram
line_number + func.lbrace_line,
try o.lowerDebugType(fn_ty),
.{
.di_flags = .{ .StaticMember = true },
.sp_flags = .{
.Optimized = owner_mod.optimize_mode != .Debug,
.Definition = true,
.LocalToUnit = is_internal_linkage,
},
},
o.debug_compile_unit,
);
 
const lexical_block = dib.createLexicalBlock(subprogram.toScope(), di_file, line_number, 1);
self.di_scope = lexical_block.toScope();
const lexical_block = try o.builder.debugLexicalBlock(
subprogram,
self.file,
line_number,
1,
);
self.scope = lexical_block;
self.base_line = decl.src_line;
const inlined_at = self.wip.current_debug_location;
self.wip.current_debug_location = try o.builder.debugLocation(
line_number,
0,
self.scope,
inlined_at,
);
return .none;
}
 
fn airDbgInlineEnd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
fn airDbgInlineEnd(self: *FuncGen, inst: Air.Inst.Index) Allocator.Error!Builder.Value {
if (self.wip.builder.strip) return .none;
const o = self.dg.object;
if (o.di_builder == null) return .none;
 
const ty_fn = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_fn;
 
const mod = o.module;
const decl = mod.funcOwnerDeclPtr(ty_fn.func);
const di_file = try o.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope);
self.di_file = di_file;
const old = self.dbg_inlined.pop();
self.di_scope = old.scope;
self.file = try o.getDebugFile(mod.namespacePtr(decl.src_namespace).file_scope);
 
const old = self.inlined.pop();
self.scope = old.scope;
self.base_line = old.base_line;
self.wip.current_debug_location = old.location;
return .none;
}
 
fn airDbgBlockBegin(self: *FuncGen) !Builder.Value {
fn airDbgBlockBegin(self: *FuncGen) Allocator.Error!Builder.Value {
if (self.wip.builder.strip) return .none;
const o = self.dg.object;
const dib = o.di_builder orelse return .none;
const old_scope = self.di_scope.?;
try self.dbg_block_stack.append(self.gpa, old_scope);
const lexical_block = dib.createLexicalBlock(old_scope, self.di_file.?, self.prev_dbg_line, self.prev_dbg_column);
self.di_scope = lexical_block.toScope();
 
try self.scope_stack.append(self.gpa, self.scope);
 
const old = self.scope;
self.scope = try o.builder.debugLexicalBlock(
old,
self.file,
self.prev_dbg_line,
self.prev_dbg_column,
);
return .none;
}
 
fn airDbgBlockEnd(self: *FuncGen) !Builder.Value {
const o = self.dg.object;
if (o.di_builder == null) return .none;
self.di_scope = self.dbg_block_stack.pop();
if (self.wip.builder.strip) return .none;
self.scope = self.scope_stack.pop();
return .none;
}
 
fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
if (self.wip.builder.strip) return .none;
const o = self.dg.object;
const mod = o.module;
const dib = o.di_builder orelse return .none;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const operand = try self.resolveInst(pl_op.operand);
const name = self.air.nullTerminatedString(pl_op.payload);
const ptr_ty = self.typeOf(pl_op.operand);
 
const di_local_var = dib.createAutoVariable(
self.di_scope.?,
name.ptr,
self.di_file.?,
const debug_local_var = try o.builder.debugLocalVar(
try o.builder.metadataString(name),
self.file,
self.scope,
self.prev_dbg_line,
try o.lowerDebugType(ptr_ty.childType(mod), .full),
true, // always preserve
0, // flags
try o.lowerDebugType(ptr_ty.childType(mod)),
);
const inlined_at = if (self.dbg_inlined.items.len > 0)
self.dbg_inlined.items[self.dbg_inlined.items.len - 1].loc
else
null;
const debug_loc = llvm.getDebugLoc(self.prev_dbg_line, self.prev_dbg_column, self.di_scope.?, inlined_at);
const insert_block = self.wip.cursor.block.toLlvm(&self.wip);
_ = dib.insertDeclareAtEnd(operand.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
 
_ = try self.wip.callIntrinsic(
.normal,
.none,
.@"dbg.declare",
&.{},
&.{
(try self.wip.debugValue(operand)).toValue(),
debug_local_var.toValue(),
(try o.builder.debugExpression(&.{})).toValue(),
},
"",
);
 
return .none;
}
 
fn airDbgVarVal(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
if (self.wip.builder.strip) return .none;
const o = self.dg.object;
const dib = o.di_builder orelse return .none;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const operand = try self.resolveInst(pl_op.operand);
const operand_ty = self.typeOf(pl_op.operand);
@@ -6791,32 +6741,58 @@ pub const FuncGen = struct {
 
if (needDbgVarWorkaround(o)) return .none;
 
const di_local_var = dib.createAutoVariable(
self.di_scope.?,
name.ptr,
self.di_file.?,
const debug_local_var = try o.builder.debugLocalVar(
try o.builder.metadataString(name),
self.file,
self.scope,
self.prev_dbg_line,
try o.lowerDebugType(operand_ty, .full),
true, // always preserve
0, // flags
try o.lowerDebugType(operand_ty),
);
const inlined_at = if (self.dbg_inlined.items.len > 0)
self.dbg_inlined.items[self.dbg_inlined.items.len - 1].loc
else
null;
const debug_loc = llvm.getDebugLoc(self.prev_dbg_line, self.prev_dbg_column, self.di_scope.?, inlined_at);
const insert_block = self.wip.cursor.block.toLlvm(&self.wip);
 
const zcu = o.module;
const owner_mod = self.dg.ownerModule();
if (isByRef(operand_ty, zcu)) {
_ = dib.insertDeclareAtEnd(operand.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
_ = try self.wip.callIntrinsic(
.normal,
.none,
.@"dbg.declare",
&.{},
&.{
(try self.wip.debugValue(operand)).toValue(),
debug_local_var.toValue(),
(try o.builder.debugExpression(&.{})).toValue(),
},
"",
);
} else if (owner_mod.optimize_mode == .Debug) {
const alignment = operand_ty.abiAlignment(zcu).toLlvm();
const alloca = try self.buildAlloca(operand.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, operand, alloca, alignment);
_ = dib.insertDeclareAtEnd(alloca.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
_ = try self.wip.callIntrinsic(
.normal,
.none,
.@"dbg.declare",
&.{},
&.{
(try self.wip.debugValue(alloca)).toValue(),
debug_local_var.toValue(),
(try o.builder.debugExpression(&.{})).toValue(),
},
"",
);
} else {
_ = dib.insertDbgValueIntrinsicAtEnd(operand.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
_ = try self.wip.callIntrinsic(
.normal,
.none,
.@"dbg.value",
&.{},
&.{
(try self.wip.debugValue(operand)).toValue(),
debug_local_var.toValue(),
(try o.builder.debugExpression(&.{})).toValue(),
},
"",
);
}
return .none;
}
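// The by-ref, Debug-mode, and by-value paths above all pass the same operand
// triple (storage value, local variable, empty expression) to the debug
// intrinsics. A hypothetical helper that factors this out; the name and
// signature are assumptions for illustration, not part of this change:
fn emitDebugIntrinsic(
    self: *FuncGen,
    intrinsic: enum { declare, value },
    storage: Builder.Value,
    local_var: Builder.Metadata,
) !void {
    const o = self.dg.object;
    _ = try self.wip.callIntrinsic(
        .normal,
        .none,
        switch (intrinsic) {
            .declare => .@"dbg.declare",
            .value => .@"dbg.value",
        },
        &.{},
        &.{
            (try self.wip.debugValue(storage)).toValue(),
            local_var.toValue(),
            (try o.builder.debugExpression(&.{})).toValue(),
        },
        "",
    );
}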
@@ -7885,7 +7861,7 @@ pub const FuncGen = struct {
.none,
if (scalar_ty.isSignedInt(mod)) .@"smul.fix.sat" else .@"umul.fix.sat",
&.{try o.lowerType(inst_ty)},
&.{ lhs, rhs, try o.builder.intValue(.i32, 0) },
&.{ lhs, rhs, .@"0" },
"",
);
}
@@ -8208,7 +8184,6 @@ pub const FuncGen = struct {
 
const libc_fn = try self.getLibcFunction(fn_name, &.{ scalar_llvm_ty, scalar_llvm_ty }, .i32);
 
const zero = try o.builder.intConst(.i32, 0);
const int_cond: Builder.IntegerCondition = switch (pred) {
.eq => .eq,
.neq => .ne,
@@ -8225,7 +8200,7 @@ pub const FuncGen = struct {
const init = try o.builder.poisonValue(vector_result_ty);
const result = try self.buildElementwiseCall(libc_fn, &params, init, vec_len);
 
const zero_vector = try o.builder.splatValue(vector_result_ty, zero);
const zero_vector = try o.builder.splatValue(vector_result_ty, .@"0");
return self.wip.icmp(int_cond, result, zero_vector, "");
}
 
@@ -8238,7 +8213,7 @@ pub const FuncGen = struct {
&params,
"",
);
return self.wip.icmp(int_cond, result, zero.toValue(), "");
return self.wip.icmp(int_cond, result, .@"0", "");
}
 
const FloatOp = enum {
@@ -8838,41 +8813,80 @@ pub const FuncGen = struct {
const arg_val = self.args[self.arg_index];
self.arg_index += 1;
 
if (self.wip.builder.strip) return arg_val;
 
const inst_ty = self.typeOfIndex(inst);
if (o.di_builder) |dib| {
if (needDbgVarWorkaround(o)) return arg_val;
if (needDbgVarWorkaround(o)) return arg_val;
 
const src_index = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.src_index;
const func_index = self.dg.decl.getOwnedFunctionIndex();
const func = mod.funcInfo(func_index);
const lbrace_line = mod.declPtr(func.owner_decl).src_line + func.lbrace_line + 1;
const lbrace_col = func.lbrace_column + 1;
const di_local_var = dib.createParameterVariable(
self.di_scope.?,
mod.getParamName(func_index, src_index).ptr, // TODO test 0 bit args
self.di_file.?,
lbrace_line,
try o.lowerDebugType(inst_ty, .full),
true, // always preserve
0, // flags
@intCast(self.arg_index), // includes +1 because 0 is return type
const src_index = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.src_index;
const func_index = self.dg.decl.getOwnedFunctionIndex();
const func = mod.funcInfo(func_index);
const lbrace_line = mod.declPtr(func.owner_decl).src_line + func.lbrace_line + 1;
const lbrace_col = func.lbrace_column + 1;
 
const debug_parameter = try o.builder.debugParameter(
try o.builder.metadataString(mod.getParamName(func_index, src_index)),
self.file,
self.scope,
lbrace_line,
try o.lowerDebugType(inst_ty),
@intCast(self.arg_index),
);
 
const old_location = self.wip.current_debug_location;
self.wip.current_debug_location = try o.builder.debugLocation(
lbrace_line,
lbrace_col,
self.scope,
.none,
);
 
const owner_mod = self.dg.ownerModule();
if (isByRef(inst_ty, mod)) {
_ = try self.wip.callIntrinsic(
.normal,
.none,
.@"dbg.declare",
&.{},
&.{
(try self.wip.debugValue(arg_val)).toValue(),
debug_parameter.toValue(),
(try o.builder.debugExpression(&.{})).toValue(),
},
"",
);
} else if (owner_mod.optimize_mode == .Debug) {
const alignment = inst_ty.abiAlignment(mod).toLlvm();
const alloca = try self.buildAlloca(arg_val.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, arg_val, alloca, alignment);
_ = try self.wip.callIntrinsic(
.normal,
.none,
.@"dbg.declare",
&.{},
&.{
(try self.wip.debugValue(alloca)).toValue(),
debug_parameter.toValue(),
(try o.builder.debugExpression(&.{})).toValue(),
},
"",
);
} else {
_ = try self.wip.callIntrinsic(
.normal,
.none,
.@"dbg.value",
&.{},
&.{
(try self.wip.debugValue(arg_val)).toValue(),
debug_parameter.toValue(),
(try o.builder.debugExpression(&.{})).toValue(),
},
"",
);
 
const debug_loc = llvm.getDebugLoc(lbrace_line, lbrace_col, self.di_scope.?, null);
const insert_block = self.wip.cursor.block.toLlvm(&self.wip);
const owner_mod = self.dg.ownerModule();
if (isByRef(inst_ty, mod)) {
_ = dib.insertDeclareAtEnd(arg_val.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
} else if (owner_mod.optimize_mode == .Debug) {
const alignment = inst_ty.abiAlignment(mod).toLlvm();
const alloca = try self.buildAlloca(arg_val.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, arg_val, alloca, alignment);
_ = dib.insertDeclareAtEnd(alloca.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
} else {
_ = dib.insertDbgValueIntrinsicAtEnd(arg_val.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
}
}
 
self.wip.current_debug_location = old_location;
return arg_val;
}
 
@@ -8910,7 +8924,7 @@ pub const FuncGen = struct {
alignment: Builder.Alignment,
) Allocator.Error!Builder.Value {
const target = self.dg.object.module.getTarget();
return buildAllocaInner(&self.wip, self.di_scope != null, llvm_ty, alignment, target);
return buildAllocaInner(&self.wip, llvm_ty, alignment, target);
}
 
// Workaround for https://github.com/ziglang/zig/issues/16392
@@ -9025,18 +9039,14 @@ pub const FuncGen = struct {
// https://github.com/ziglang/zig/issues/11946
return o.builder.intValue(llvm_usize, 0);
}
const result = try self.wip.callIntrinsic(.normal, .none, .returnaddress, &.{}, &.{
try o.builder.intValue(.i32, 0),
}, "");
const result = try self.wip.callIntrinsic(.normal, .none, .returnaddress, &.{}, &.{.@"0"}, "");
return self.wip.cast(.ptrtoint, result, llvm_usize, "");
}
 
fn airFrameAddress(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
_ = inst;
const o = self.dg.object;
const result = try self.wip.callIntrinsic(.normal, .none, .frameaddress, &.{.ptr}, &.{
try o.builder.intValue(.i32, 0),
}, "");
const result = try self.wip.callIntrinsic(.normal, .none, .frameaddress, &.{.ptr}, &.{.@"0"}, "");
return self.wip.cast(.ptrtoint, result, try o.lowerType(Type.usize), "");
}
 
@@ -9364,7 +9374,7 @@ pub const FuncGen = struct {
_ = try self.wip.br(loop_block);
 
self.wip.cursor = .{ .block = end_block };
try it_ptr.finish(&.{ next_ptr, dest_ptr }, &.{ body_block, entry_block }, &self.wip);
it_ptr.finish(&.{ next_ptr, dest_ptr }, &.{ body_block, entry_block }, &self.wip);
return .none;
}
 
@@ -9599,7 +9609,7 @@ pub const FuncGen = struct {
 
self.wip.cursor = .{ .block = end_block };
const phi = try self.wip.phi(.i1, "");
try phi.finish(&.{ .true, .false }, &.{ valid_block, invalid_block }, &self.wip);
phi.finish(&.{ .true, .false }, &.{ valid_block, invalid_block }, &self.wip);
return phi.toValue();
}
 
@@ -10120,7 +10130,6 @@ pub const FuncGen = struct {
const field_align = mod.unionFieldNormalAlignment(union_obj, extra.field_index);
const llvm_usize = try o.lowerType(Type.usize);
const usize_zero = try o.builder.intValue(llvm_usize, 0);
const i32_zero = try o.builder.intValue(.i32, 0);
 
const llvm_union_ty = t: {
const payload_ty = p: {
@@ -10159,7 +10168,7 @@ pub const FuncGen = struct {
.flags = .{ .alignment = field_align },
});
if (layout.tag_size == 0) {
const indices = [3]Builder.Value{ usize_zero, i32_zero, i32_zero };
const indices = [3]Builder.Value{ usize_zero, .@"0", .@"0" };
const len: usize = if (field_size == layout.payload_size) 2 else 3;
const field_ptr =
try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, indices[0..len], "");
@@ -10169,11 +10178,9 @@ pub const FuncGen = struct {
 
{
const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align));
const indices: [3]Builder.Value =
.{ usize_zero, try o.builder.intValue(.i32, payload_index), i32_zero };
const indices: [3]Builder.Value = .{ usize_zero, try o.builder.intValue(.i32, payload_index), .@"0" };
const len: usize = if (field_size == layout.payload_size) 2 else 3;
const field_ptr =
try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, indices[0..len], "");
const field_ptr = try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, indices[0..len], "");
try self.store(field_ptr, field_ptr_ty, llvm_payload, .none);
}
{
@@ -10279,7 +10286,7 @@ pub const FuncGen = struct {
 
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const dimension = pl_op.payload;
if (dimension >= 3) return o.builder.intValue(.i32, 1);
if (dimension >= 3) return .@"1";
 
// Fetch the dispatch pointer, which points to this structure:
// https://github.com/RadeonOpenCompute/ROCR-Runtime/blob/adae6c61e10d371f7cbc3d0e94ae2c070cab18a4/src/inc/hsa.h#L2913
@@ -11694,45 +11701,6 @@ const struct_layout_version = 2;
// https://github.com/llvm/llvm-project/issues/56585/ is fixed
const optional_layout_version = 3;
 
/// We use the least significant bit of the pointer address to tell us
/// whether the type is fully resolved. Types that are only fwd declared
/// have the LSB flipped to a 1.
const AnnotatedDITypePtr = enum(usize) {
null,
_,
 
fn initFwd(di_type: *llvm.DIType) AnnotatedDITypePtr {
const addr = @intFromPtr(di_type);
assert(@as(u1, @truncate(addr)) == 0);
return @enumFromInt(addr | 1);
}
 
fn initFull(di_type: *llvm.DIType) AnnotatedDITypePtr {
const addr = @intFromPtr(di_type);
return @enumFromInt(addr);
}
 
fn init(di_type: *llvm.DIType, resolve: Object.DebugResolveStatus) AnnotatedDITypePtr {
const addr = @intFromPtr(di_type);
const bit = @intFromBool(resolve == .fwd);
return @enumFromInt(addr | bit);
}
 
fn toDIType(self: AnnotatedDITypePtr) *llvm.DIType {
switch (self) {
.null => unreachable,
_ => return @ptrFromInt(@intFromEnum(self) & ~@as(usize, 1)),
}
}
 
fn isFwdOnly(self: AnnotatedDITypePtr) bool {
switch (self) {
.null => unreachable,
_ => return @as(u1, @truncate(@intFromEnum(self))) != 0,
}
}
};
 
const lt_errors_fn_name = "__zig_lt_errors_len";
 
/// Without this workaround, LLVM crashes with "unknown codeview register H1"
@@ -11756,7 +11724,6 @@ fn compilerRtIntBits(bits: u16) u16 {
 
fn buildAllocaInner(
wip: *Builder.WipFunction,
di_scope_non_null: bool,
llvm_ty: Builder.Type,
alignment: Builder.Alignment,
target: std.Target,
@@ -11765,19 +11732,15 @@ fn buildAllocaInner(
 
const alloca = blk: {
const prev_cursor = wip.cursor;
const prev_debug_location = if (wip.builder.useLibLlvm())
wip.llvm.builder.getCurrentDebugLocation2()
else
undefined;
const prev_debug_location = wip.current_debug_location;
defer {
wip.cursor = prev_cursor;
if (wip.cursor.block == .entry) wip.cursor.instruction += 1;
if (wip.builder.useLibLlvm() and di_scope_non_null)
wip.llvm.builder.setCurrentDebugLocation2(prev_debug_location);
wip.current_debug_location = prev_debug_location;
}
 
wip.cursor = .{ .block = .entry };
if (wip.builder.useLibLlvm()) wip.llvm.builder.clearCurrentDebugLocation();
wip.current_debug_location = .none;
break :blk try wip.alloca(.normal, llvm_ty, .none, alignment, address_space, "");
};
 
@@ -11823,3 +11786,195 @@ fn constraintAllowsRegister(constraint: []const u8) bool {
}
} else return false;
}
 
pub fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
switch (arch) {
.aarch64, .aarch64_be, .aarch64_32 => {
llvm.LLVMInitializeAArch64Target();
llvm.LLVMInitializeAArch64TargetInfo();
llvm.LLVMInitializeAArch64TargetMC();
llvm.LLVMInitializeAArch64AsmPrinter();
llvm.LLVMInitializeAArch64AsmParser();
},
.amdgcn => {
llvm.LLVMInitializeAMDGPUTarget();
llvm.LLVMInitializeAMDGPUTargetInfo();
llvm.LLVMInitializeAMDGPUTargetMC();
llvm.LLVMInitializeAMDGPUAsmPrinter();
llvm.LLVMInitializeAMDGPUAsmParser();
},
.thumb, .thumbeb, .arm, .armeb => {
llvm.LLVMInitializeARMTarget();
llvm.LLVMInitializeARMTargetInfo();
llvm.LLVMInitializeARMTargetMC();
llvm.LLVMInitializeARMAsmPrinter();
llvm.LLVMInitializeARMAsmParser();
},
.avr => {
llvm.LLVMInitializeAVRTarget();
llvm.LLVMInitializeAVRTargetInfo();
llvm.LLVMInitializeAVRTargetMC();
llvm.LLVMInitializeAVRAsmPrinter();
llvm.LLVMInitializeAVRAsmParser();
},
.bpfel, .bpfeb => {
llvm.LLVMInitializeBPFTarget();
llvm.LLVMInitializeBPFTargetInfo();
llvm.LLVMInitializeBPFTargetMC();
llvm.LLVMInitializeBPFAsmPrinter();
llvm.LLVMInitializeBPFAsmParser();
},
.hexagon => {
llvm.LLVMInitializeHexagonTarget();
llvm.LLVMInitializeHexagonTargetInfo();
llvm.LLVMInitializeHexagonTargetMC();
llvm.LLVMInitializeHexagonAsmPrinter();
llvm.LLVMInitializeHexagonAsmParser();
},
.lanai => {
llvm.LLVMInitializeLanaiTarget();
llvm.LLVMInitializeLanaiTargetInfo();
llvm.LLVMInitializeLanaiTargetMC();
llvm.LLVMInitializeLanaiAsmPrinter();
llvm.LLVMInitializeLanaiAsmParser();
},
.mips, .mipsel, .mips64, .mips64el => {
llvm.LLVMInitializeMipsTarget();
llvm.LLVMInitializeMipsTargetInfo();
llvm.LLVMInitializeMipsTargetMC();
llvm.LLVMInitializeMipsAsmPrinter();
llvm.LLVMInitializeMipsAsmParser();
},
.msp430 => {
llvm.LLVMInitializeMSP430Target();
llvm.LLVMInitializeMSP430TargetInfo();
llvm.LLVMInitializeMSP430TargetMC();
llvm.LLVMInitializeMSP430AsmPrinter();
llvm.LLVMInitializeMSP430AsmParser();
},
.nvptx, .nvptx64 => {
llvm.LLVMInitializeNVPTXTarget();
llvm.LLVMInitializeNVPTXTargetInfo();
llvm.LLVMInitializeNVPTXTargetMC();
llvm.LLVMInitializeNVPTXAsmPrinter();
// There is no LLVMInitializeNVPTXAsmParser function available.
},
.powerpc, .powerpcle, .powerpc64, .powerpc64le => {
llvm.LLVMInitializePowerPCTarget();
llvm.LLVMInitializePowerPCTargetInfo();
llvm.LLVMInitializePowerPCTargetMC();
llvm.LLVMInitializePowerPCAsmPrinter();
llvm.LLVMInitializePowerPCAsmParser();
},
.riscv32, .riscv64 => {
llvm.LLVMInitializeRISCVTarget();
llvm.LLVMInitializeRISCVTargetInfo();
llvm.LLVMInitializeRISCVTargetMC();
llvm.LLVMInitializeRISCVAsmPrinter();
llvm.LLVMInitializeRISCVAsmParser();
},
.sparc, .sparc64, .sparcel => {
llvm.LLVMInitializeSparcTarget();
llvm.LLVMInitializeSparcTargetInfo();
llvm.LLVMInitializeSparcTargetMC();
llvm.LLVMInitializeSparcAsmPrinter();
llvm.LLVMInitializeSparcAsmParser();
},
.s390x => {
llvm.LLVMInitializeSystemZTarget();
llvm.LLVMInitializeSystemZTargetInfo();
llvm.LLVMInitializeSystemZTargetMC();
llvm.LLVMInitializeSystemZAsmPrinter();
llvm.LLVMInitializeSystemZAsmParser();
},
.wasm32, .wasm64 => {
llvm.LLVMInitializeWebAssemblyTarget();
llvm.LLVMInitializeWebAssemblyTargetInfo();
llvm.LLVMInitializeWebAssemblyTargetMC();
llvm.LLVMInitializeWebAssemblyAsmPrinter();
llvm.LLVMInitializeWebAssemblyAsmParser();
},
.x86, .x86_64 => {
llvm.LLVMInitializeX86Target();
llvm.LLVMInitializeX86TargetInfo();
llvm.LLVMInitializeX86TargetMC();
llvm.LLVMInitializeX86AsmPrinter();
llvm.LLVMInitializeX86AsmParser();
},
.xtensa => {
if (build_options.llvm_has_xtensa) {
llvm.LLVMInitializeXtensaTarget();
llvm.LLVMInitializeXtensaTargetInfo();
llvm.LLVMInitializeXtensaTargetMC();
// There is no LLVMInitializeXtensaAsmPrinter function.
llvm.LLVMInitializeXtensaAsmParser();
}
},
.xcore => {
llvm.LLVMInitializeXCoreTarget();
llvm.LLVMInitializeXCoreTargetInfo();
llvm.LLVMInitializeXCoreTargetMC();
llvm.LLVMInitializeXCoreAsmPrinter();
// There is no LLVMInitializeXCoreAsmParser function.
},
.m68k => {
if (build_options.llvm_has_m68k) {
llvm.LLVMInitializeM68kTarget();
llvm.LLVMInitializeM68kTargetInfo();
llvm.LLVMInitializeM68kTargetMC();
llvm.LLVMInitializeM68kAsmPrinter();
llvm.LLVMInitializeM68kAsmParser();
}
},
.csky => {
if (build_options.llvm_has_csky) {
llvm.LLVMInitializeCSKYTarget();
llvm.LLVMInitializeCSKYTargetInfo();
llvm.LLVMInitializeCSKYTargetMC();
// There is no LLVMInitializeCSKYAsmPrinter function.
llvm.LLVMInitializeCSKYAsmParser();
}
},
.ve => {
llvm.LLVMInitializeVETarget();
llvm.LLVMInitializeVETargetInfo();
llvm.LLVMInitializeVETargetMC();
llvm.LLVMInitializeVEAsmPrinter();
llvm.LLVMInitializeVEAsmParser();
},
.arc => {
if (build_options.llvm_has_arc) {
llvm.LLVMInitializeARCTarget();
llvm.LLVMInitializeARCTargetInfo();
llvm.LLVMInitializeARCTargetMC();
llvm.LLVMInitializeARCAsmPrinter();
// There is no LLVMInitializeARCAsmParser function.
}
},
 
// LLVM backends that have no initialization functions.
.tce,
.tcele,
.r600,
.le32,
.le64,
.amdil,
.amdil64,
.hsail,
.hsail64,
.shave,
.spir,
.spir64,
.kalimba,
.renderscript32,
.renderscript64,
.dxil,
.loongarch32,
.loongarch64,
=> {},
 
.spu_2 => unreachable, // LLVM does not support this backend
.spirv32 => unreachable, // LLVM does not support this backend
.spirv64 => unreachable, // LLVM does not support this backend
}
}
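// Example call site (the import path and enclosing program are assumptions,
// shown only to illustrate usage): register the host architecture so LLVM
// can parse assembly and emit machine code for it.
const builtin = @import("builtin");
const codegen_llvm = @import("codegen/llvm.zig");

pub fn main() void {
    codegen_llvm.initializeLLVMTarget(builtin.cpu.arch);
}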
 
src/codegen/llvm/Builder.zig added: 8538, removed: 5358, total 3180
@@ -1,21 +1,6 @@
gpa: Allocator,
use_lib_llvm: bool,
strip: bool,
 
llvm: if (build_options.have_llvm) struct {
context: *llvm.Context,
module: ?*llvm.Module,
target: ?*llvm.Target,
di_builder: ?*llvm.DIBuilder,
di_compile_unit: ?*llvm.DICompileUnit,
attribute_kind_ids: ?*[Attribute.Kind.len]c_uint,
attributes: std.ArrayListUnmanaged(*llvm.Attribute),
types: std.ArrayListUnmanaged(*llvm.Type),
globals: std.ArrayListUnmanaged(*llvm.Value),
constants: std.ArrayListUnmanaged(*llvm.Value),
replacements: std.AutoHashMapUnmanaged(*llvm.Value, Global.Index),
} else void,
 
source_filename: String,
data_layout: String,
target_triple: String,
@@ -37,6 +22,8 @@ attributes_map: std.AutoArrayHashMapUnmanaged(void, void),
attributes_indices: std.ArrayListUnmanaged(u32),
attributes_extra: std.ArrayListUnmanaged(u32),
 
function_attributes_set: std.AutoArrayHashMapUnmanaged(FunctionAttributes, void),
 
globals: std.AutoArrayHashMapUnmanaged(String, Global),
next_unnamed_global: String,
next_replaced_global: String,
@@ -50,17 +37,29 @@ constant_items: std.MultiArrayList(Constant.Item),
constant_extra: std.ArrayListUnmanaged(u32),
constant_limbs: std.ArrayListUnmanaged(std.math.big.Limb),
 
metadata_map: std.AutoArrayHashMapUnmanaged(void, void),
metadata_items: std.MultiArrayList(Metadata.Item),
metadata_extra: std.ArrayListUnmanaged(u32),
metadata_limbs: std.ArrayListUnmanaged(std.math.big.Limb),
metadata_forward_references: std.ArrayListUnmanaged(Metadata),
metadata_named: std.AutoArrayHashMapUnmanaged(MetadataString, struct {
len: u32,
index: Metadata.Item.ExtraIndex,
}),
 
metadata_string_map: std.AutoArrayHashMapUnmanaged(void, void),
metadata_string_indices: std.ArrayListUnmanaged(u32),
metadata_string_bytes: std.ArrayListUnmanaged(u8),
 
pub const expected_args_len = 16;
pub const expected_attrs_len = 16;
pub const expected_fields_len = 32;
pub const expected_gep_indices_len = 8;
pub const expected_cases_len = 8;
pub const expected_incoming_len = 8;
pub const expected_intrinsic_name_len = 64;
 
pub const Options = struct {
allocator: Allocator,
use_lib_llvm: bool = false,
strip: bool = true,
name: []const u8 = &.{},
target: std.Target = builtin.target,
@@ -77,11 +76,11 @@ pub const String = enum(u32) {
return self.toIndex() == null;
}
 
pub fn slice(self: String, b: *const Builder) ?[:0]const u8 {
pub fn slice(self: String, builder: *const Builder) ?[]const u8 {
const index = self.toIndex() orelse return null;
const start = b.string_indices.items[index];
const end = b.string_indices.items[index + 1];
return b.string_bytes.items[start .. end - 1 :0];
const start = builder.string_indices.items[index];
const end = builder.string_indices.items[index + 1];
return builder.string_bytes.items[start..end];
}
 
const FormatData = struct {
@@ -94,17 +93,21 @@ pub const String = enum(u32) {
_: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
if (comptime std.mem.indexOfNone(u8, fmt_str, "@\"")) |_|
if (comptime std.mem.indexOfNone(u8, fmt_str, "\"r")) |_|
@compileError("invalid format string: '" ++ fmt_str ++ "'");
assert(data.string != .none);
const sentinel_slice = data.string.slice(data.builder) orelse
const string_slice = data.string.slice(data.builder) orelse
return writer.print("{d}", .{@intFromEnum(data.string)});
try printEscapedString(sentinel_slice[0 .. sentinel_slice.len + comptime @intFromBool(
std.mem.indexOfScalar(u8, fmt_str, '@') != null,
)], if (comptime std.mem.indexOfScalar(u8, fmt_str, '"')) |_|
.always_quote
else
.quote_unless_valid_identifier, writer);
if (comptime std.mem.indexOfScalar(u8, fmt_str, 'r')) |_|
return writer.writeAll(string_slice);
try printEscapedString(
string_slice,
if (comptime std.mem.indexOfScalar(u8, fmt_str, '"')) |_|
.always_quote
else
.quote_unless_valid_identifier,
writer,
);
}
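
The specifier set for String.format changes here: '"' still forces quoting, and the new 'r' specifier writes the raw bytes with no escaping or quoting, which the function-finishing code further down relies on when it builds unique names via "{r}{s}{r}". A standalone sketch of the comptime specifier check, with invented names and not part of this change:

const std = @import("std");

// Sketch only: reject unknown format specifiers at compile time, the same way
// String.format above restricts itself to '"' and 'r'.
fn checkSpecifiers(comptime fmt_str: []const u8, comptime allowed: []const u8) void {
    if (comptime std.mem.indexOfNone(u8, fmt_str, allowed)) |_|
        @compileError("invalid format string: '" ++ fmt_str ++ "'");
}

test "only the allowed specifiers pass" {
    checkSpecifiers("r", "\"r");
    checkSpecifiers("\"", "\"r");
    checkSpecifiers("", "\"r");
}
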
pub fn fmt(self: String, builder: *const Builder) std.fmt.Formatter(format) {
return .{ .data = .{ .string = self, .builder = builder } };
@@ -130,6 +133,72 @@ pub const String = enum(u32) {
};
};
 
pub const BinaryOpcode = enum(u4) {
add = 0,
sub = 1,
mul = 2,
udiv = 3,
sdiv = 4,
urem = 5,
srem = 6,
shl = 7,
lshr = 8,
ashr = 9,
@"and" = 10,
@"or" = 11,
xor = 12,
};
 
pub const CastOpcode = enum(u4) {
trunc = 0,
zext = 1,
sext = 2,
fptoui = 3,
fptosi = 4,
uitofp = 5,
sitofp = 6,
fptrunc = 7,
fpext = 8,
ptrtoint = 9,
inttoptr = 10,
bitcast = 11,
addrspacecast = 12,
};
 
pub const CmpPredicate = enum(u6) {
fcmp_false = 0,
fcmp_oeq = 1,
fcmp_ogt = 2,
fcmp_oge = 3,
fcmp_olt = 4,
fcmp_ole = 5,
fcmp_one = 6,
fcmp_ord = 7,
fcmp_uno = 8,
fcmp_ueq = 9,
fcmp_ugt = 10,
fcmp_uge = 11,
fcmp_ult = 12,
fcmp_ule = 13,
fcmp_une = 14,
fcmp_true = 15,
icmp_eq = 32,
icmp_ne = 33,
icmp_ugt = 34,
icmp_uge = 35,
icmp_ult = 36,
icmp_ule = 37,
icmp_sgt = 38,
icmp_sge = 39,
icmp_slt = 40,
icmp_sle = 41,
};
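
These new opcode and predicate enums carry explicit values so they can be written directly into bitcode records. Note that the icmp predicates start at 32 while the fcmp predicates stop at 15, so a single predicate operand can hold either kind without ambiguity. A tiny self-contained illustration of that property, not the diff's code:

const std = @import("std");

// Sketch only: with disjoint ranges, one integer field distinguishes the two
// comparison families.
fn isIcmpPredicate(code: u6) bool {
    return code >= 32 and code <= 41;
}

test "fcmp and icmp predicate codes do not overlap" {
    try std.testing.expect(!isIcmpPredicate(15)); // fcmp_true
    try std.testing.expect(isIcmpPredicate(32)); // icmp_eq
    try std.testing.expect(isIcmpPredicate(41)); // icmp_sle
}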
 
pub const StrtabString = struct {
offset: usize,
size: usize,
};
 
pub const Type = enum(u32) {
void,
half,
@@ -178,20 +247,20 @@ pub const Type = enum(u32) {
named_structure,
};
 
pub const Simple = enum {
void,
half,
bfloat,
float,
double,
fp128,
x86_fp80,
ppc_fp128,
x86_amx,
x86_mmx,
label,
token,
metadata,
pub const Simple = enum(u5) {
void = 2,
half = 10,
bfloat = 23,
float = 3,
double = 4,
fp128 = 14,
x86_fp80 = 13,
ppc_fp128 = 15,
x86_amx = 24,
x86_mmx = 17,
label = 5,
token = 22,
metadata = 16,
};
 
pub const Function = struct {
@@ -579,7 +648,6 @@ pub const Type = enum(u32) {
var visited: IsSizedVisited = .{};
defer visited.deinit(builder.gpa);
const result = try self.isSizedVisited(&visited, builder);
if (builder.useLibLlvm()) assert(result == self.toLlvm(builder).isSized().toBool());
return result;
}
 
@@ -766,11 +834,6 @@ pub const Type = enum(u32) {
return .{ .data = .{ .type = self, .builder = builder } };
}
 
pub fn toLlvm(self: Type, builder: *const Builder) *llvm.Type {
assert(builder.useLibLlvm());
return builder.llvm.types.items[@intFromEnum(self)];
}
 
const IsSizedVisited = std.AutoHashMapUnmanaged(Type, void);
fn isSizedVisited(
self: Type,
@@ -1051,14 +1114,21 @@ pub const Attribute = union(Kind) {
.no_sanitize_hwaddress,
.sanitize_address_dyninit,
=> |kind| {
const field = @typeInfo(Attribute).Union.fields[@intFromEnum(kind)];
const field = comptime blk: {
@setEvalBranchQuota(10_000);
for (@typeInfo(Attribute).Union.fields) |field| {
if (std.mem.eql(u8, field.name, @tagName(kind))) break :blk field;
}
unreachable;
};
comptime assert(std.mem.eql(u8, @tagName(kind), field.name));
return @unionInit(Attribute, field.name, switch (field.type) {
void => {},
u32 => storage.value,
Alignment, String, Type, UwTable => @enumFromInt(storage.value),
AllocKind, AllocSize, FpClass, Memory, VScaleRange => @bitCast(storage.value),
else => @compileError("bad payload type: " ++ @typeName(field.type)),
else => @compileError("bad payload type: " ++ field.name ++ ": " ++
@typeName(field.type)),
});
},
.string, .none => unreachable,
@@ -1246,109 +1316,104 @@ pub const Attribute = union(Kind) {
fn toStorage(self: Index, builder: *const Builder) Storage {
return builder.attributes.keys()[@intFromEnum(self)];
}
 
fn toLlvm(self: Index, builder: *const Builder) *llvm.Attribute {
assert(builder.useLibLlvm());
return builder.llvm.attributes.items[@intFromEnum(self)];
}
};
 
pub const Kind = enum(u32) {
// Parameter Attributes
zeroext,
signext,
inreg,
byval,
byref,
preallocated,
inalloca,
sret,
elementtype,
@"align",
@"noalias",
nocapture,
nofree,
nest,
returned,
nonnull,
dereferenceable,
dereferenceable_or_null,
swiftself,
swiftasync,
swifterror,
immarg,
noundef,
nofpclass,
alignstack,
allocalign,
allocptr,
readnone,
readonly,
writeonly,
zeroext = 34,
signext = 24,
inreg = 5,
byval = 3,
byref = 69,
preallocated = 65,
inalloca = 38,
sret = 29, // TODO: ?
elementtype = 77,
@"align" = 1,
@"noalias" = 9,
nocapture = 11,
nofree = 62,
nest = 8,
returned = 22,
nonnull = 39,
dereferenceable = 41,
dereferenceable_or_null = 42,
swiftself = 46,
swiftasync = 75,
swifterror = 47,
immarg = 60,
noundef = 68,
nofpclass = 87,
alignstack = 25,
allocalign = 80,
allocptr = 81,
readnone = 20,
readonly = 21,
writeonly = 52,
 
// Function Attributes
//alignstack,
allockind,
allocsize,
alwaysinline,
builtin,
cold,
convergent,
disable_sanitizer_information,
fn_ret_thunk_extern,
hot,
inlinehint,
jumptable,
memory,
minsize,
naked,
nobuiltin,
nocallback,
noduplicate,
allockind = 82,
allocsize = 51,
alwaysinline = 2,
builtin = 35,
cold = 36,
convergent = 43,
disable_sanitizer_information = 78,
fn_ret_thunk_extern = 84,
hot = 72,
inlinehint = 4,
jumptable = 40,
memory = 86,
minsize = 6,
naked = 7,
nobuiltin = 10,
nocallback = 71,
noduplicate = 12,
//nofree,
noimplicitfloat,
@"noinline",
nomerge,
nonlazybind,
noprofile,
skipprofile,
noredzone,
noreturn,
norecurse,
willreturn,
nosync,
nounwind,
nosanitize_bounds,
nosanitize_coverage,
null_pointer_is_valid,
optforfuzzing,
optnone,
optsize,
noimplicitfloat = 13,
@"noinline" = 14,
nomerge = 66,
nonlazybind = 15,
noprofile = 73,
skipprofile = 85,
noredzone = 16,
noreturn = 17,
norecurse = 48,
willreturn = 61,
nosync = 63,
nounwind = 18,
nosanitize_bounds = 79,
nosanitize_coverage = 76,
null_pointer_is_valid = 67,
optforfuzzing = 57,
optnone = 37,
optsize = 19,
//preallocated,
returns_twice,
safestack,
sanitize_address,
sanitize_memory,
sanitize_thread,
sanitize_hwaddress,
sanitize_memtag,
speculative_load_hardening,
speculatable,
ssp,
sspstrong,
sspreq,
strictfp,
uwtable,
nocf_check,
shadowcallstack,
mustprogress,
vscale_range,
returns_twice = 23,
safestack = 44,
sanitize_address = 30,
sanitize_memory = 32,
sanitize_thread = 31,
sanitize_hwaddress = 55,
sanitize_memtag = 64,
speculative_load_hardening = 59,
speculatable = 53,
ssp = 26,
sspstrong = 28,
sspreq = 27,
strictfp = 54,
uwtable = 33,
nocf_check = 56,
shadowcallstack = 58,
mustprogress = 70,
vscale_range = 74,
 
// Global Attributes
no_sanitize_address,
no_sanitize_hwaddress,
no_sanitize_address = 100,
no_sanitize_hwaddress = 101,
//sanitize_memtag,
sanitize_address_dyninit,
sanitize_address_dyninit = 102,
 
string = std.math.maxInt(u31),
none = std.math.maxInt(u32),
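
The attribute kinds also pick up explicit values, which appear to be the attribute-kind codes used by the bitcode format (hence the gaps and the commented-out duplicates). With the code stored in the enum, serializing an attribute needs no translation table. A rough sketch under that assumption, with invented helper names:

const std = @import("std");

// Assumption: an integer attribute is emitted roughly as (1, kind, value)
// inside a paramattr group record; the exact framing may differ.
fn appendIntAttribute(record: *std.ArrayList(u64), kind: u32, value: u64) !void {
    try record.append(1); // entry tag for "integer attribute"
    try record.append(kind);
    try record.append(value);
}

test "align 8 becomes its kind code followed by the value" {
    var record = std.ArrayList(u64).init(std.testing.allocator);
    defer record.deinit();
    try appendIntAttribute(&record, 1, 8); // 1 is the @"align" code above
    try std.testing.expectEqualSlices(u64, &.{ 1, 1, 8 }, record.items);
}
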
@@ -1368,11 +1433,6 @@ pub const Attribute = union(Kind) {
const str: String = @enumFromInt(@intFromEnum(self));
return if (str.isAnon()) null else str;
}
 
fn toLlvm(self: Kind, builder: *const Builder) *c_uint {
assert(builder.useLibLlvm());
return &builder.llvm.attribute_kind_ids.?[@intFromEnum(self)];
}
};
 
pub const FpClass = packed struct(u32) {
@@ -1494,12 +1554,12 @@ pub const Attribute = union(Kind) {
 
fn toStorage(self: Attribute) Storage {
return switch (self) {
inline else => |value| .{ .kind = @as(Kind, self), .value = switch (@TypeOf(value)) {
inline else => |value, tag| .{ .kind = @as(Kind, self), .value = switch (@TypeOf(value)) {
void => 0,
u32 => value,
Alignment, String, Type, UwTable => @intFromEnum(value),
AllocKind, AllocSize, FpClass, Memory, VScaleRange => @bitCast(value),
else => @compileError("bad payload type: " ++ @typeName(@TypeOf(value))),
else => @compileError("bad payload type: " ++ @tagName(tag) ++ @typeName(@TypeOf(value))),
} },
.string => |string_attr| .{
.kind = Kind.fromString(string_attr.kind),
@@ -1709,18 +1769,18 @@ pub const FunctionAttributes = enum(u32) {
}
};
 
pub const Linkage = enum {
private,
internal,
weak,
weak_odr,
linkonce,
linkonce_odr,
available_externally,
appending,
common,
extern_weak,
external,
pub const Linkage = enum(u4) {
private = 9,
internal = 3,
weak = 1,
weak_odr = 10,
linkonce = 4,
linkonce_odr = 11,
available_externally = 12,
appending = 2,
common = 8,
extern_weak = 7,
external = 0,
 
pub fn format(
self: Linkage,
@@ -1731,20 +1791,16 @@ pub const Linkage = enum {
if (self != .external) try writer.print(" {s}", .{@tagName(self)});
}
 
fn toLlvm(self: Linkage) llvm.Linkage {
return switch (self) {
.private => .Private,
.internal => .Internal,
.weak => .WeakAny,
.weak_odr => .WeakODR,
.linkonce => .LinkOnceAny,
.linkonce_odr => .LinkOnceODR,
.available_externally => .AvailableExternally,
.appending => .Appending,
.common => .Common,
.extern_weak => .ExternalWeak,
.external => .External,
};
fn formatOptional(
data: ?Linkage,
comptime _: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
if (data) |linkage| try writer.print(" {s}", .{@tagName(linkage)});
}
pub fn fmtOptional(self: ?Linkage) std.fmt.Formatter(formatOptional) {
return .{ .data = self };
}
};
 
@@ -1763,10 +1819,10 @@ pub const Preemption = enum {
}
};
 
pub const Visibility = enum {
default,
hidden,
protected,
pub const Visibility = enum(u2) {
default = 0,
hidden = 1,
protected = 2,
 
pub fn format(
self: Visibility,
@@ -1776,20 +1832,12 @@ pub const Visibility = enum {
) @TypeOf(writer).Error!void {
if (self != .default) try writer.print(" {s}", .{@tagName(self)});
}
 
fn toLlvm(self: Visibility) llvm.Visibility {
return switch (self) {
.default => .Default,
.hidden => .Hidden,
.protected => .Protected,
};
}
};
 
pub const DllStorageClass = enum {
default,
dllimport,
dllexport,
pub const DllStorageClass = enum(u2) {
default = 0,
dllimport = 1,
dllexport = 2,
 
pub fn format(
self: DllStorageClass,
@@ -1799,22 +1847,14 @@ pub const DllStorageClass = enum {
) @TypeOf(writer).Error!void {
if (self != .default) try writer.print(" {s}", .{@tagName(self)});
}
 
fn toLlvm(self: DllStorageClass) llvm.DLLStorageClass {
return switch (self) {
.default => .Default,
.dllimport => .DLLImport,
.dllexport => .DLLExport,
};
}
};
 
pub const ThreadLocal = enum {
default,
generaldynamic,
localdynamic,
initialexec,
localexec,
pub const ThreadLocal = enum(u3) {
default = 0,
generaldynamic = 1,
localdynamic = 2,
initialexec = 3,
localexec = 4,
 
pub fn format(
self: ThreadLocal,
@@ -1826,24 +1866,14 @@ pub const ThreadLocal = enum {
try writer.print("{s}thread_local", .{prefix});
if (self != .generaldynamic) try writer.print("({s})", .{@tagName(self)});
}
 
fn toLlvm(self: ThreadLocal) llvm.ThreadLocalMode {
return switch (self) {
.default => .NotThreadLocal,
.generaldynamic => .GeneralDynamicTLSModel,
.localdynamic => .LocalDynamicTLSModel,
.initialexec => .InitialExecTLSModel,
.localexec => .LocalExecTLSModel,
};
}
};
 
pub const Mutability = enum { global, constant };
 
pub const UnnamedAddr = enum {
default,
unnamed_addr,
local_unnamed_addr,
pub const UnnamedAddr = enum(u2) {
default = 0,
unnamed_addr = 1,
local_unnamed_addr = 2,
 
pub fn format(
self: UnnamedAddr,
@@ -1971,6 +2001,10 @@ pub const Alignment = enum(u6) {
return if (self == .default) null else @as(u64, 1) << @intFromEnum(self);
}
 
pub fn toLlvm(self: Alignment) u6 {
return if (self == .default) 0 else (@intFromEnum(self) + 1);
}
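
The new Alignment.toLlvm is the usual "zero means default, otherwise log2 plus one" alignment encoding; since Alignment already stores the log2, the method is a single add. A minimal standalone sketch of the same scheme:

const std = @import("std");

// Sketch (not part of the change): 0 encodes "default", otherwise log2 + 1.
fn encodeAlignment(byte_units: ?u64) u6 {
    const bytes = byte_units orelse return 0;
    return @intCast(std.math.log2_int(u64, bytes) + 1);
}

test "alignment encoding" {
    try std.testing.expectEqual(@as(u6, 0), encodeAlignment(null));
    try std.testing.expectEqual(@as(u6, 1), encodeAlignment(1));
    try std.testing.expectEqual(@as(u6, 4), encodeAlignment(8));
}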
 
pub fn format(
self: Alignment,
comptime prefix: []const u8,
@@ -2100,11 +2134,6 @@ pub const CallConv = enum(u10) {
_ => try writer.print(" cc{d}", .{@intFromEnum(self)}),
}
}
 
fn toLlvm(self: CallConv) llvm.CallConv {
// These enum values appear in LLVM IR, and so are guaranteed to be stable.
return @enumFromInt(@intFromEnum(self));
}
};
 
pub const Global = struct {
@@ -2117,6 +2146,7 @@ pub const Global = struct {
externally_initialized: ExternallyInitialized = .default,
type: Type,
partition: String = .none,
dbg: Metadata = .none,
kind: union(enum) {
alias: Alias.Index,
variable: Variable.Index,
@@ -2153,6 +2183,18 @@ pub const Global = struct {
return builder.globals.keys()[@intFromEnum(self.unwrap(builder))];
}
 
pub fn strtab(self: Index, builder: *const Builder) StrtabString {
const name_index = self.name(builder).toIndex() orelse return .{
.offset = 0,
.size = 0,
};
 
return .{
.offset = builder.string_indices.items[name_index],
.size = builder.string_indices.items[name_index + 1] - builder.string_indices.items[name_index],
};
}
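
strtab exposes a global's name as an (offset, size) pair into the builder's shared string bytes, presumably so the bitcode writer can point at names in one string-table blob instead of storing them inline per symbol. A self-contained sketch of that idea, names invented:

const std = @import("std");

const Entry = struct { offset: usize, size: usize };

// Sketch only: append every name to one buffer and hand out (offset, size).
fn internName(bytes: *std.ArrayList(u8), name: []const u8) !Entry {
    const offset = bytes.items.len;
    try bytes.appendSlice(name);
    return .{ .offset = offset, .size = name.len };
}

test "entries index into the shared buffer" {
    var bytes = std.ArrayList(u8).init(std.testing.allocator);
    defer bytes.deinit();
    _ = try internName(&bytes, "memcpy");
    const entry = try internName(&bytes, "main");
    try std.testing.expectEqualStrings("main", bytes.items[entry.offset..][0..entry.size]);
}
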
 
pub fn typeOf(self: Index, builder: *const Builder) Type {
return self.ptrConst(builder).type;
}
@@ -2162,32 +2204,25 @@ pub const Global = struct {
}
 
pub fn setLinkage(self: Index, linkage: Linkage, builder: *Builder) void {
if (builder.useLibLlvm()) self.toLlvm(builder).setLinkage(linkage.toLlvm());
self.ptr(builder).linkage = linkage;
self.updateDsoLocal(builder);
}
 
pub fn setVisibility(self: Index, visibility: Visibility, builder: *Builder) void {
if (builder.useLibLlvm()) self.toLlvm(builder).setVisibility(visibility.toLlvm());
self.ptr(builder).visibility = visibility;
self.updateDsoLocal(builder);
}
 
pub fn setDllStorageClass(self: Index, class: DllStorageClass, builder: *Builder) void {
if (builder.useLibLlvm()) self.toLlvm(builder).setDLLStorageClass(class.toLlvm());
self.ptr(builder).dll_storage_class = class;
}
 
pub fn setUnnamedAddr(self: Index, unnamed_addr: UnnamedAddr, builder: *Builder) void {
if (builder.useLibLlvm()) self.toLlvm(builder).setUnnamedAddr(
llvm.Bool.fromBool(unnamed_addr != .default),
);
self.ptr(builder).unnamed_addr = unnamed_addr;
}
 
pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value {
assert(builder.useLibLlvm());
return builder.llvm.globals.items[@intFromEnum(self.unwrap(builder))];
pub fn setDebugMetadata(self: Index, dbg: Metadata, builder: *Builder) void {
self.ptr(builder).dbg = dbg;
}
 
const FormatData = struct {
@@ -2220,13 +2255,10 @@ pub const Global = struct {
 
pub fn replace(self: Index, other: Index, builder: *Builder) Allocator.Error!void {
try builder.ensureUnusedGlobalCapacity(.empty);
if (builder.useLibLlvm())
try builder.llvm.replacements.ensureUnusedCapacity(builder.gpa, 1);
self.replaceAssumeCapacity(other, builder);
}
 
pub fn delete(self: Index, builder: *Builder) void {
if (builder.useLibLlvm()) self.toLlvm(builder).eraseGlobalValue();
self.ptr(builder).kind = .{ .replaced = .none };
}
 
@@ -2254,12 +2286,8 @@ pub const Global = struct {
const old_name = self.name(builder);
if (new_name == old_name) return;
const index = @intFromEnum(self.unwrap(builder));
if (builder.useLibLlvm())
builder.llvm.globals.appendAssumeCapacity(builder.llvm.globals.items[index]);
_ = builder.addGlobalAssumeCapacity(new_name, builder.globals.values()[index]);
if (builder.useLibLlvm()) _ = builder.llvm.globals.pop();
builder.globals.swapRemoveAt(index);
self.updateName(builder);
if (!old_name.isAnon()) return;
builder.next_unnamed_global = @enumFromInt(@intFromEnum(builder.next_unnamed_global) - 1);
if (builder.next_unnamed_global == old_name) return;
@@ -2272,23 +2300,10 @@ pub const Global = struct {
self.renameAssumeCapacity(other_name, builder);
}
 
fn updateName(self: Index, builder: *const Builder) void {
if (!builder.useLibLlvm()) return;
const index = @intFromEnum(self.unwrap(builder));
const name_slice = self.name(builder).slice(builder) orelse "";
builder.llvm.globals.items[index].setValueName(name_slice.ptr, name_slice.len);
}
 
fn replaceAssumeCapacity(self: Index, other: Index, builder: *Builder) void {
if (self.eql(other, builder)) return;
builder.next_replaced_global = @enumFromInt(@intFromEnum(builder.next_replaced_global) - 1);
self.renameAssumeCapacity(builder.next_replaced_global, builder);
if (builder.useLibLlvm()) {
const self_llvm = self.toLlvm(builder);
self_llvm.replaceAllUsesWith(other.toLlvm(builder));
self_llvm.removeGlobalValue();
builder.llvm.replacements.putAssumeCapacityNoClobber(self_llvm, other);
}
self.ptr(builder).kind = .{ .replaced = other.unwrap(builder) };
}
 
@@ -2345,13 +2360,8 @@ pub const Alias = struct {
}
 
pub fn setAliasee(self: Index, aliasee: Constant, builder: *Builder) void {
if (builder.useLibLlvm()) self.toLlvm(builder).setAliasee(aliasee.toLlvm(builder));
self.ptr(builder).aliasee = aliasee;
}
 
fn toLlvm(self: Index, builder: *const Builder) *llvm.Value {
return self.ptrConst(builder).global.toLlvm(builder);
}
};
};
 
@@ -2404,14 +2414,10 @@ pub const Variable = struct {
}
 
pub fn setThreadLocal(self: Index, thread_local: ThreadLocal, builder: *Builder) void {
if (builder.useLibLlvm()) self.toLlvm(builder).setThreadLocalMode(thread_local.toLlvm());
self.ptr(builder).thread_local = thread_local;
}
 
pub fn setMutability(self: Index, mutability: Mutability, builder: *Builder) void {
if (builder.useLibLlvm()) self.toLlvm(builder).setGlobalConstant(
llvm.Bool.fromBool(mutability == .constant),
);
self.ptr(builder).mutability = mutability;
}
 
@@ -2424,67 +2430,25 @@ pub const Variable = struct {
const variable = self.ptrConst(builder);
const global = variable.global.ptr(builder);
const initializer_type = initializer.typeOf(builder);
if (builder.useLibLlvm() and global.type != initializer_type) {
try builder.llvm.replacements.ensureUnusedCapacity(builder.gpa, 1);
// LLVM does not allow us to change the type of globals. So we must
// create a new global with the correct type, copy all its attributes,
// and then update all references to point to the new global,
// delete the original, and rename the new one to the old one's name.
// This is necessary because LLVM does not support const bitcasting
// a struct with padding bytes, which is needed to lower a const union value
// to LLVM, when a field other than the most-aligned is active. Instead,
// we must lower to an unnamed struct, and pointer cast at usage sites
// of the global. Such an unnamed struct is the cause of the global type
// mismatch, because we don't have the LLVM type until the *value* is created,
// whereas the global needs to be created based on the type alone, because
// lowering the value may reference the global as a pointer.
// Related: https://github.com/ziglang/zig/issues/13265
const old_global = &builder.llvm.globals.items[@intFromEnum(variable.global)];
const new_global = builder.llvm.module.?.addGlobalInAddressSpace(
initializer_type.toLlvm(builder),
"",
@intFromEnum(global.addr_space),
);
new_global.setLinkage(global.linkage.toLlvm());
new_global.setUnnamedAddr(llvm.Bool.fromBool(global.unnamed_addr != .default));
new_global.setAlignment(@intCast(variable.alignment.toByteUnits() orelse 0));
if (variable.section != .none)
new_global.setSection(variable.section.slice(builder).?);
old_global.*.replaceAllUsesWith(new_global);
builder.llvm.replacements.putAssumeCapacityNoClobber(old_global.*, variable.global);
new_global.takeName(old_global.*);
old_global.*.removeGlobalValue();
old_global.* = new_global;
self.ptr(builder).mutability = .global;
}
global.type = initializer_type;
}
if (builder.useLibLlvm()) self.toLlvm(builder).setInitializer(switch (initializer) {
.no_init => null,
else => initializer.toLlvm(builder),
});
self.ptr(builder).init = initializer;
}
 
pub fn setSection(self: Index, section: String, builder: *Builder) void {
if (builder.useLibLlvm()) self.toLlvm(builder).setSection(section.slice(builder).?);
self.ptr(builder).section = section;
}
 
pub fn setAlignment(self: Index, alignment: Alignment, builder: *Builder) void {
if (builder.useLibLlvm())
self.toLlvm(builder).setAlignment(@intCast(alignment.toByteUnits() orelse 0));
self.ptr(builder).alignment = alignment;
}
 
pub fn getAlignment(self: Index, builder: *Builder) Alignment {
if (builder.useLibLlvm())
return Alignment.fromByteUnits(self.toLlvm(builder).getAlignment());
return self.ptr(builder).alignment;
}
 
pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value {
return self.ptrConst(builder).global.toLlvm(builder);
pub fn setGlobalVariableExpression(self: Index, expression: Metadata, builder: *Builder) void {
self.ptrConst(builder).global.setDebugMetadata(expression, builder);
}
};
};
@@ -2633,6 +2597,10 @@ pub const Intrinsic = enum {
@"threadlocal.address",
vscale,
 
// Debug
@"dbg.declare",
@"dbg.value",
 
// AMDGPU
@"amdgcn.workitem.id.x",
@"amdgcn.workitem.id.y",
@@ -3727,6 +3695,25 @@ pub const Intrinsic = enum {
.attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } },
},
 
.@"dbg.declare" = .{
.ret_len = 0,
.params = &.{
.{ .kind = .{ .type = .metadata } },
.{ .kind = .{ .type = .metadata } },
.{ .kind = .{ .type = .metadata } },
},
.attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } },
},
.@"dbg.value" = .{
.ret_len = 0,
.params = &.{
.{ .kind = .{ .type = .metadata } },
.{ .kind = .{ .type = .metadata } },
.{ .kind = .{ .type = .metadata } },
},
.attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } },
},
 
.@"amdgcn.workitem.id.x" = .{
.ret_len = 1,
.params = &.{
@@ -3809,7 +3796,9 @@ pub const Function = struct {
blocks: []const Block = &.{},
instructions: std.MultiArrayList(Instruction) = .{},
names: [*]const String = &[0]String{},
metadata: ?[*]const Metadata = null,
value_indices: [*]const u32 = &[0]u32{},
debug_locations: std.AutoHashMapUnmanaged(Instruction.Index, Metadata) = .{},
debug_values: []const Instruction.Index = &.{},
extra: []const u32 = &.{},
 
pub const Index = enum(u32) {
@@ -3853,7 +3842,6 @@ pub const Function = struct {
}
 
pub fn setCallConv(self: Index, call_conv: CallConv, builder: *Builder) void {
if (builder.useLibLlvm()) self.toLlvm(builder).setFunctionCallConv(call_conv.toLlvm());
self.ptr(builder).call_conv = call_conv;
}
 
@@ -3862,94 +3850,19 @@ pub const Function = struct {
new_function_attributes: FunctionAttributes,
builder: *Builder,
) void {
if (builder.useLibLlvm()) {
const llvm_function = self.toLlvm(builder);
const old_function_attributes = self.ptrConst(builder).attributes;
for (0..@max(
old_function_attributes.slice(builder).len,
new_function_attributes.slice(builder).len,
)) |function_attribute_index| {
const llvm_attribute_index =
@as(llvm.AttributeIndex, @intCast(function_attribute_index)) -% 1;
const old_attributes_slice =
old_function_attributes.get(function_attribute_index, builder).slice(builder);
const new_attributes_slice =
new_function_attributes.get(function_attribute_index, builder).slice(builder);
var old_attribute_index: usize = 0;
var new_attribute_index: usize = 0;
while (true) {
const old_attribute_kind = if (old_attribute_index < old_attributes_slice.len)
old_attributes_slice[old_attribute_index].getKind(builder)
else
.none;
const new_attribute_kind = if (new_attribute_index < new_attributes_slice.len)
new_attributes_slice[new_attribute_index].getKind(builder)
else
.none;
switch (std.math.order(
@intFromEnum(old_attribute_kind),
@intFromEnum(new_attribute_kind),
)) {
.lt => {
// Removed
if (old_attribute_kind.toString()) |attribute_name| {
const attribute_name_slice = attribute_name.slice(builder).?;
llvm_function.removeStringAttributeAtIndex(
llvm_attribute_index,
attribute_name_slice.ptr,
@intCast(attribute_name_slice.len),
);
} else {
const llvm_kind_id = old_attribute_kind.toLlvm(builder).*;
assert(llvm_kind_id != 0);
llvm_function.removeEnumAttributeAtIndex(
llvm_attribute_index,
llvm_kind_id,
);
}
old_attribute_index += 1;
continue;
},
.eq => {
// Iteration finished
if (old_attribute_kind == .none) break;
// No change
if (old_attributes_slice[old_attribute_index] ==
new_attributes_slice[new_attribute_index])
{
old_attribute_index += 1;
new_attribute_index += 1;
continue;
}
old_attribute_index += 1;
},
.gt => {},
}
// New or changed
llvm_function.addAttributeAtIndex(
llvm_attribute_index,
new_attributes_slice[new_attribute_index].toLlvm(builder),
);
new_attribute_index += 1;
}
}
}
self.ptr(builder).attributes = new_function_attributes;
}
 
pub fn setSection(self: Index, section: String, builder: *Builder) void {
if (builder.useLibLlvm()) self.toLlvm(builder).setSection(section.slice(builder).?);
self.ptr(builder).section = section;
}
 
pub fn setAlignment(self: Index, alignment: Alignment, builder: *Builder) void {
if (builder.useLibLlvm())
self.toLlvm(builder).setAlignment(@intCast(alignment.toByteUnits() orelse 0));
self.ptr(builder).alignment = alignment;
}
 
pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value {
return self.ptrConst(builder).global.toLlvm(builder);
pub fn setSubprogram(self: Index, subprogram: Metadata, builder: *Builder) void {
self.ptrConst(builder).global.setDebugMetadata(subprogram, builder);
}
};
 
@@ -4098,6 +4011,143 @@ pub const Function = struct {
va_arg,
xor,
zext,
 
pub fn toBinaryOpcode(self: Tag) BinaryOpcode {
return switch (self) {
.add,
.@"add nsw",
.@"add nuw",
.@"add nuw nsw",
.fadd,
.@"fadd fast",
=> .add,
.sub,
.@"sub nsw",
.@"sub nuw",
.@"sub nuw nsw",
.fsub,
.@"fsub fast",
=> .sub,
.sdiv,
.@"sdiv exact",
.fdiv,
.@"fdiv fast",
=> .sdiv,
.fmul,
.@"fmul fast",
.mul,
.@"mul nsw",
.@"mul nuw",
.@"mul nuw nsw",
=> .mul,
.srem,
.frem,
.@"frem fast",
=> .srem,
.udiv,
.@"udiv exact",
=> .udiv,
.shl,
.@"shl nsw",
.@"shl nuw",
.@"shl nuw nsw",
=> .shl,
.lshr,
.@"lshr exact",
=> .lshr,
.ashr,
.@"ashr exact",
=> .ashr,
.@"and" => .@"and",
.@"or" => .@"or",
.xor => .xor,
.urem => .urem,
else => unreachable,
};
}
 
pub fn toCastOpcode(self: Tag) CastOpcode {
return switch (self) {
.trunc => .trunc,
.zext => .zext,
.sext => .sext,
.fptoui => .fptoui,
.fptosi => .fptosi,
.uitofp => .uitofp,
.sitofp => .sitofp,
.fptrunc => .fptrunc,
.fpext => .fpext,
.ptrtoint => .ptrtoint,
.inttoptr => .inttoptr,
.bitcast => .bitcast,
.addrspacecast => .addrspacecast,
else => unreachable,
};
}
 
pub fn toCmpPredicate(self: Tag) CmpPredicate {
return switch (self) {
.@"fcmp false",
.@"fcmp fast false",
=> .fcmp_false,
.@"fcmp oeq",
.@"fcmp fast oeq",
=> .fcmp_oeq,
.@"fcmp oge",
.@"fcmp fast oge",
=> .fcmp_oge,
.@"fcmp ogt",
.@"fcmp fast ogt",
=> .fcmp_ogt,
.@"fcmp ole",
.@"fcmp fast ole",
=> .fcmp_ole,
.@"fcmp olt",
.@"fcmp fast olt",
=> .fcmp_olt,
.@"fcmp one",
.@"fcmp fast one",
=> .fcmp_one,
.@"fcmp ord",
.@"fcmp fast ord",
=> .fcmp_ord,
.@"fcmp true",
.@"fcmp fast true",
=> .fcmp_true,
.@"fcmp ueq",
.@"fcmp fast ueq",
=> .fcmp_ueq,
.@"fcmp uge",
.@"fcmp fast uge",
=> .fcmp_uge,
.@"fcmp ugt",
.@"fcmp fast ugt",
=> .fcmp_ugt,
.@"fcmp ule",
.@"fcmp fast ule",
=> .fcmp_ule,
.@"fcmp ult",
.@"fcmp fast ult",
=> .fcmp_ult,
.@"fcmp une",
.@"fcmp fast une",
=> .fcmp_une,
.@"fcmp uno",
.@"fcmp fast uno",
=> .fcmp_uno,
.@"icmp eq" => .icmp_eq,
.@"icmp ne" => .icmp_ne,
.@"icmp sge" => .icmp_sge,
.@"icmp sgt" => .icmp_sgt,
.@"icmp sle" => .icmp_sle,
.@"icmp slt" => .icmp_slt,
.@"icmp uge" => .icmp_uge,
.@"icmp ugt" => .icmp_ugt,
.@"icmp ule" => .icmp_ule,
.@"icmp ult" => .icmp_ult,
else => unreachable,
};
}
};
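
toBinaryOpcode collapses every flag-carrying variant (nsw, nuw, exact, fast) onto one wire opcode, and the float variants fold onto the corresponding integer codes with the operand type telling them apart; that fits an encoding where the flags live in a separate field of the record rather than in the opcode itself. A small standalone sketch of the collapsing pattern, with invented enums:

const std = @import("std");

// Invented subset for illustration; the real mapping is Tag.toBinaryOpcode above.
const Tag = enum { add, @"add nsw", @"add nuw", fadd, sub, @"sub nuw" };
const WireOp = enum(u4) { add = 0, sub = 1 };

fn toWireOp(tag: Tag) WireOp {
    return switch (tag) {
        .add, .@"add nsw", .@"add nuw", .fadd => .add,
        .sub, .@"sub nuw" => .sub,
    };
}

test "flag and float variants collapse onto one wire opcode" {
    try std.testing.expectEqual(WireOp.add, toWireOp(.@"add nsw"));
    try std.testing.expectEqual(WireOp.add, toWireOp(.fadd));
    try std.testing.expectEqual(WireOp.sub, toWireOp(.@"sub nuw"));
}
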
 
pub const Index = enum(u32) {
@@ -4108,6 +4158,10 @@ pub const Function = struct {
return function.names[@intFromEnum(self)];
}
 
pub fn valueIndex(self: Instruction.Index, function: *const Function) u32 {
return function.value_indices[@intFromEnum(self)];
}
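
value_indices is new per-instruction bookkeeping: each instruction records how many values precede it. The guess here is that this feeds relative value numbering when the function body is serialized, where an operand is written as a distance back from the current value rather than as an absolute id. Sketch under that assumption:

const std = @import("std");

// Assumption: operands are encoded as "how many values back", so keeping the
// running value index per instruction makes the operand a single subtraction.
fn relativeOperand(current_value_index: u32, operand_value_index: u32) u32 {
    return current_value_index - operand_value_index;
}

test "an operand defined two values earlier encodes as 2" {
    try std.testing.expectEqual(@as(u32, 2), relativeOperand(7, 5));
}
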
 
pub fn toValue(self: Instruction.Index) Value {
return @enumFromInt(@intFromEnum(self));
}
@@ -4136,6 +4190,7 @@ pub const Function = struct {
.@"store atomic",
.@"switch",
.@"unreachable",
.block,
=> false,
.call,
.@"call fast",
@@ -4240,7 +4295,7 @@ pub const Function = struct {
=> wip.builder.structTypeAssumeCapacity(.normal, &.{
wip.extraData(CmpXchg, instruction.data).cmp.typeOfWip(wip),
.i1,
}) catch unreachable,
}),
.extractelement => wip.extraData(ExtractElement, instruction.data)
.val.typeOfWip(wip).childType(wip.builder),
.extractvalue => {
@@ -4427,7 +4482,7 @@ pub const Function = struct {
function.extraData(CmpXchg, instruction.data)
.cmp.typeOf(function_index, builder),
.i1,
}) catch unreachable,
}),
.extractelement => function.extraData(ExtractElement, instruction.data)
.val.typeOf(function_index, builder).childType(builder),
.extractvalue => {
@@ -4557,20 +4612,6 @@ pub const Function = struct {
) std.fmt.Formatter(format) {
return .{ .data = .{ .instruction = self, .function = function, .builder = builder } };
}
 
fn toLlvm(self: Instruction.Index, wip: *const WipFunction) *llvm.Value {
assert(wip.builder.useLibLlvm());
const llvm_value = wip.llvm.instructions.items[@intFromEnum(self)];
const global = wip.builder.llvm.replacements.get(llvm_value) orelse return llvm_value;
return global.toLlvm(wip.builder);
}
 
fn llvmName(self: Instruction.Index, wip: *const WipFunction) [:0]const u8 {
return if (wip.builder.strip)
""
else
wip.names.items[@intFromEnum(self)].slice(wip.builder).?;
}
};
 
pub const ExtraIndex = u32;
@@ -4664,43 +4705,22 @@ pub const Function = struct {
val: Value,
 
pub const Operation = enum(u5) {
xchg,
add,
sub,
@"and",
nand,
@"or",
xor,
max,
min,
umax,
umin,
fadd,
fsub,
fmax,
fmin,
xchg = 0,
add = 1,
sub = 2,
@"and" = 3,
nand = 4,
@"or" = 5,
xor = 6,
max = 7,
min = 8,
umax = 9,
umin = 10,
fadd = 11,
fsub = 12,
fmax = 13,
fmin = 14,
none = std.math.maxInt(u5),
 
fn toLlvm(self: Operation) llvm.AtomicRMWBinOp {
return switch (self) {
.xchg => .Xchg,
.add => .Add,
.sub => .Sub,
.@"and" => .And,
.nand => .Nand,
.@"or" => .Or,
.xor => .Xor,
.max => .Max,
.min => .Min,
.umax => .UMax,
.umin => .UMin,
.fadd => .FAdd,
.fsub => .FSub,
.fmax => .FMax,
.fmin => .FMin,
.none => unreachable,
};
}
};
};
 
@@ -4764,7 +4784,9 @@ pub const Function = struct {
 
pub fn deinit(self: *Function, gpa: Allocator) void {
gpa.free(self.extra);
if (self.metadata) |metadata| gpa.free(metadata[0..self.instructions.len]);
gpa.free(self.debug_values);
self.debug_locations.deinit(gpa);
gpa.free(self.value_indices[0..self.instructions.len]);
gpa.free(self.names[0..self.instructions.len]);
self.instructions.deinit(gpa);
gpa.free(self.blocks);
@@ -4822,7 +4844,7 @@ pub const Function = struct {
Instruction.Alloca.Info,
Instruction.Call.Info,
=> @bitCast(value),
else => @compileError("bad field type: " ++ @typeName(field.type)),
else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
};
return .{
.data = result,
@@ -4838,16 +4860,14 @@ pub const Function = struct {
pub const WipFunction = struct {
builder: *Builder,
function: Function.Index,
llvm: if (build_options.have_llvm) struct {
builder: *llvm.Builder,
blocks: std.ArrayListUnmanaged(*llvm.BasicBlock),
instructions: std.ArrayListUnmanaged(*llvm.Value),
} else void,
last_debug_location: Metadata,
current_debug_location: Metadata,
cursor: Cursor,
blocks: std.ArrayListUnmanaged(Block),
instructions: std.MultiArrayList(Instruction),
names: std.ArrayListUnmanaged(String),
metadata: std.ArrayListUnmanaged(Metadata),
debug_locations: std.AutoArrayHashMapUnmanaged(Instruction.Index, Metadata),
debug_values: std.AutoArrayHashMapUnmanaged(Instruction.Index, void),
extra: std.ArrayListUnmanaged(u32),
 
pub const Cursor = struct { block: Block.Index, instruction: u32 = 0 };
@@ -4873,35 +4893,23 @@ pub const WipFunction = struct {
pub fn toInst(self: Index, function: *const Function) Instruction.Index {
return function.blocks[@intFromEnum(self)].instruction;
}
 
pub fn toLlvm(self: Index, wip: *const WipFunction) *llvm.BasicBlock {
assert(wip.builder.useLibLlvm());
return wip.llvm.blocks.items[@intFromEnum(self)];
}
};
};
 
pub const Instruction = Function.Instruction;
 
pub fn init(builder: *Builder, function: Function.Index) Allocator.Error!WipFunction {
if (builder.useLibLlvm()) {
const llvm_function = function.toLlvm(builder);
while (llvm_function.getFirstBasicBlock()) |bb| bb.deleteBasicBlock();
}
 
var self = WipFunction{
var self: WipFunction = .{
.builder = builder,
.function = function,
.llvm = if (builder.useLibLlvm()) .{
.builder = builder.llvm.context.createBuilder(),
.blocks = .{},
.instructions = .{},
} else undefined,
.last_debug_location = .none,
.current_debug_location = .none,
.cursor = undefined,
.blocks = .{},
.instructions = .{},
.names = .{},
.metadata = .{},
.debug_locations = .{},
.debug_values = .{},
.extra = .{},
};
errdefer self.deinit();
@@ -4909,15 +4917,14 @@ pub const WipFunction = struct {
const params_len = function.typeOf(self.builder).functionParameters(self.builder).len;
try self.ensureUnusedExtraCapacity(params_len, NoExtra, 0);
try self.instructions.ensureUnusedCapacity(self.builder.gpa, params_len);
if (!self.builder.strip) try self.names.ensureUnusedCapacity(self.builder.gpa, params_len);
if (self.builder.useLibLlvm())
try self.llvm.instructions.ensureUnusedCapacity(self.builder.gpa, params_len);
if (!self.builder.strip) {
try self.names.ensureUnusedCapacity(self.builder.gpa, params_len);
}
for (0..params_len) |param_index| {
self.instructions.appendAssumeCapacity(.{ .tag = .arg, .data = @intCast(param_index) });
if (!self.builder.strip) self.names.appendAssumeCapacity(.empty); // TODO: param names
if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
function.toLlvm(self.builder).getParam(@intCast(param_index)),
);
if (!self.builder.strip) {
self.names.appendAssumeCapacity(.empty); // TODO: param names
}
}
 
return self;
@@ -4934,7 +4941,6 @@ pub const WipFunction = struct {
 
pub fn block(self: *WipFunction, incoming: u32, name: []const u8) Allocator.Error!Block.Index {
try self.blocks.ensureUnusedCapacity(self.builder.gpa, 1);
if (self.builder.useLibLlvm()) try self.llvm.blocks.ensureUnusedCapacity(self.builder.gpa, 1);
 
const index: Block.Index = @enumFromInt(self.blocks.items.len);
const final_name = if (self.builder.strip) .empty else try self.builder.string(name);
@@ -4943,41 +4949,24 @@ pub const WipFunction = struct {
.incoming = incoming,
.instructions = .{},
});
if (self.builder.useLibLlvm()) self.llvm.blocks.appendAssumeCapacity(
self.builder.llvm.context.appendBasicBlock(
self.function.toLlvm(self.builder),
final_name.slice(self.builder).?,
),
);
return index;
}
 
pub fn ret(self: *WipFunction, val: Value) Allocator.Error!Instruction.Index {
assert(val.typeOfWip(self) == self.function.typeOf(self.builder).functionReturn(self.builder));
try self.ensureUnusedExtraCapacity(1, NoExtra, 0);
const instruction = try self.addInst(null, .{ .tag = .ret, .data = @intFromEnum(val) });
if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
self.llvm.builder.buildRet(val.toLlvm(self)),
);
return instruction;
return try self.addInst(null, .{ .tag = .ret, .data = @intFromEnum(val) });
}
 
pub fn retVoid(self: *WipFunction) Allocator.Error!Instruction.Index {
try self.ensureUnusedExtraCapacity(1, NoExtra, 0);
const instruction = try self.addInst(null, .{ .tag = .@"ret void", .data = undefined });
if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
self.llvm.builder.buildRetVoid(),
);
return instruction;
return try self.addInst(null, .{ .tag = .@"ret void", .data = undefined });
}
 
pub fn br(self: *WipFunction, dest: Block.Index) Allocator.Error!Instruction.Index {
try self.ensureUnusedExtraCapacity(1, NoExtra, 0);
const instruction = try self.addInst(null, .{ .tag = .br, .data = @intFromEnum(dest) });
dest.ptr(self).branches += 1;
if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
self.llvm.builder.buildBr(dest.toLlvm(self)),
);
return instruction;
}
 
@@ -4999,9 +4988,6 @@ pub const WipFunction = struct {
});
then.ptr(self).branches += 1;
@"else".ptr(self).branches += 1;
if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
self.llvm.builder.buildCondBr(cond.toLlvm(self), then.toLlvm(self), @"else".toLlvm(self)),
);
return instruction;
}
 
@@ -5022,8 +5008,6 @@ pub const WipFunction = struct {
extra.trail.nextMut(extra.data.cases_len, Block.Index, wip)[self.index] = dest;
self.index += 1;
dest.ptr(wip).branches += 1;
if (wip.builder.useLibLlvm())
self.instruction.toLlvm(wip).addCase(val.toLlvm(wip.builder), dest.toLlvm(wip));
}
 
pub fn finish(self: WipSwitch, wip: *WipFunction) void {
@@ -5050,18 +5034,12 @@ pub const WipFunction = struct {
});
_ = self.extra.addManyAsSliceAssumeCapacity(cases_len * 2);
default.ptr(self).branches += 1;
if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
self.llvm.builder.buildSwitch(val.toLlvm(self), default.toLlvm(self), @intCast(cases_len)),
);
return .{ .index = 0, .instruction = instruction };
}
 
pub fn @"unreachable"(self: *WipFunction) Allocator.Error!Instruction.Index {
try self.ensureUnusedExtraCapacity(1, NoExtra, 0);
const instruction = try self.addInst(null, .{ .tag = .@"unreachable", .data = undefined });
if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
self.llvm.builder.buildUnreachable(),
);
return instruction;
}
 
@@ -5079,17 +5057,6 @@ pub const WipFunction = struct {
}
try self.ensureUnusedExtraCapacity(1, NoExtra, 0);
const instruction = try self.addInst(name, .{ .tag = tag, .data = @intFromEnum(val) });
if (self.builder.useLibLlvm()) {
switch (tag) {
.fneg => self.llvm.builder.setFastMath(false),
.@"fneg fast" => self.llvm.builder.setFastMath(true),
else => unreachable,
}
self.llvm.instructions.appendAssumeCapacity(switch (tag) {
.fneg, .@"fneg fast" => &llvm.Builder.buildFNeg,
else => unreachable,
}(self.llvm.builder, val.toLlvm(self), instruction.llvmName(self)));
}
return instruction.toValue();
}
 
@@ -5157,56 +5124,6 @@ pub const WipFunction = struct {
.tag = tag,
.data = self.addExtraAssumeCapacity(Instruction.Binary{ .lhs = lhs, .rhs = rhs }),
});
if (self.builder.useLibLlvm()) {
switch (tag) {
.fadd,
.fdiv,
.fmul,
.frem,
.fsub,
=> self.llvm.builder.setFastMath(false),
.@"fadd fast",
.@"fdiv fast",
.@"fmul fast",
.@"frem fast",
.@"fsub fast",
=> self.llvm.builder.setFastMath(true),
else => {},
}
self.llvm.instructions.appendAssumeCapacity(switch (tag) {
.add => &llvm.Builder.buildAdd,
.@"add nsw" => &llvm.Builder.buildNSWAdd,
.@"add nuw" => &llvm.Builder.buildNUWAdd,
.@"and" => &llvm.Builder.buildAnd,
.ashr => &llvm.Builder.buildAShr,
.@"ashr exact" => &llvm.Builder.buildAShrExact,
.fadd, .@"fadd fast" => &llvm.Builder.buildFAdd,
.fdiv, .@"fdiv fast" => &llvm.Builder.buildFDiv,
.fmul, .@"fmul fast" => &llvm.Builder.buildFMul,
.frem, .@"frem fast" => &llvm.Builder.buildFRem,
.fsub, .@"fsub fast" => &llvm.Builder.buildFSub,
.lshr => &llvm.Builder.buildLShr,
.@"lshr exact" => &llvm.Builder.buildLShrExact,
.mul => &llvm.Builder.buildMul,
.@"mul nsw" => &llvm.Builder.buildNSWMul,
.@"mul nuw" => &llvm.Builder.buildNUWMul,
.@"or" => &llvm.Builder.buildOr,
.sdiv => &llvm.Builder.buildSDiv,
.@"sdiv exact" => &llvm.Builder.buildExactSDiv,
.shl => &llvm.Builder.buildShl,
.@"shl nsw" => &llvm.Builder.buildNSWShl,
.@"shl nuw" => &llvm.Builder.buildNUWShl,
.srem => &llvm.Builder.buildSRem,
.sub => &llvm.Builder.buildSub,
.@"sub nsw" => &llvm.Builder.buildNSWSub,
.@"sub nuw" => &llvm.Builder.buildNUWSub,
.udiv => &llvm.Builder.buildUDiv,
.@"udiv exact" => &llvm.Builder.buildExactUDiv,
.urem => &llvm.Builder.buildURem,
.xor => &llvm.Builder.buildXor,
else => unreachable,
}(self.llvm.builder, lhs.toLlvm(self), rhs.toLlvm(self), instruction.llvmName(self)));
}
return instruction.toValue();
}
 
@@ -5226,13 +5143,6 @@ pub const WipFunction = struct {
.index = index,
}),
});
if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
self.llvm.builder.buildExtractElement(
val.toLlvm(self),
index.toLlvm(self),
instruction.llvmName(self),
),
);
return instruction.toValue();
}
 
@@ -5254,14 +5164,6 @@ pub const WipFunction = struct {
.index = index,
}),
});
if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
self.llvm.builder.buildInsertElement(
val.toLlvm(self),
elem.toLlvm(self),
index.toLlvm(self),
instruction.llvmName(self),
),
);
return instruction.toValue();
}
 
@@ -5284,14 +5186,6 @@ pub const WipFunction = struct {
.mask = mask,
}),
});
if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
self.llvm.builder.buildShuffleVector(
lhs.toLlvm(self),
rhs.toLlvm(self),
mask.toLlvm(self),
instruction.llvmName(self),
),
);
return instruction.toValue();
}
 
@@ -5303,10 +5197,9 @@ pub const WipFunction = struct {
) Allocator.Error!Value {
const scalar_ty = try ty.changeLength(1, self.builder);
const mask_ty = try ty.changeScalar(.i32, self.builder);
const zero = try self.builder.intConst(.i32, 0);
const poison = try self.builder.poisonValue(scalar_ty);
const mask = try self.builder.splatValue(mask_ty, zero);
const scalar = try self.insertElement(poison, elem, zero.toValue(), name);
const mask = try self.builder.splatValue(mask_ty, .@"0");
const scalar = try self.insertElement(poison, elem, .@"0", name);
return self.shuffleVector(scalar, poison, mask, name);
}
 
@@ -5327,13 +5220,6 @@ pub const WipFunction = struct {
}),
});
self.extra.appendSliceAssumeCapacity(indices);
if (self.builder.useLibLlvm()) {
const llvm_name = instruction.llvmName(self);
var cur = val.toLlvm(self);
for (indices) |index|
cur = self.llvm.builder.buildExtractValue(cur, @intCast(index), llvm_name);
self.llvm.instructions.appendAssumeCapacity(cur);
}
return instruction.toValue();
}
 
@@ -5356,35 +5242,6 @@ pub const WipFunction = struct {
}),
});
self.extra.appendSliceAssumeCapacity(indices);
if (self.builder.useLibLlvm()) {
const ExpectedContents = [expected_gep_indices_len]*llvm.Value;
var stack align(@alignOf(ExpectedContents)) =
std.heap.stackFallback(@sizeOf(ExpectedContents), self.builder.gpa);
const allocator = stack.get();
 
const llvm_name = instruction.llvmName(self);
const llvm_vals = try allocator.alloc(*llvm.Value, indices.len);
defer allocator.free(llvm_vals);
llvm_vals[0] = val.toLlvm(self);
for (llvm_vals[1..], llvm_vals[0 .. llvm_vals.len - 1], indices[0 .. indices.len - 1]) |
*cur_val,
prev_val,
index,
| cur_val.* = self.llvm.builder.buildExtractValue(prev_val, @intCast(index), llvm_name);
 
var depth: usize = llvm_vals.len;
var cur = elem.toLlvm(self);
while (depth > 0) {
depth -= 1;
cur = self.llvm.builder.buildInsertValue(
llvm_vals[depth],
cur,
@intCast(indices[depth]),
llvm_name,
);
}
self.llvm.instructions.appendAssumeCapacity(cur);
}
return instruction.toValue();
}
 
@@ -5420,19 +5277,13 @@ pub const WipFunction = struct {
},
.data = self.addExtraAssumeCapacity(Instruction.Alloca{
.type = ty,
.len = len,
.len = switch (len) {
.none => .@"1",
else => len,
},
.info = .{ .alignment = alignment, .addr_space = addr_space },
}),
});
if (self.builder.useLibLlvm()) {
const llvm_instruction = self.llvm.builder.buildAllocaInAddressSpace(
ty.toLlvm(self.builder),
@intFromEnum(addr_space),
instruction.llvmName(self),
);
if (alignment.toByteUnits()) |bytes| llvm_instruction.setAlignment(@intCast(bytes));
self.llvm.instructions.appendAssumeCapacity(llvm_instruction);
}
return instruction.toValue();
}
 
@@ -5478,17 +5329,6 @@ pub const WipFunction = struct {
.ptr = ptr,
}),
});
if (self.builder.useLibLlvm()) {
const llvm_instruction = self.llvm.builder.buildLoad(
ty.toLlvm(self.builder),
ptr.toLlvm(self),
instruction.llvmName(self),
);
if (access_kind == .@"volatile") llvm_instruction.setVolatile(.True);
if (ordering != .none) llvm_instruction.setOrdering(ordering.toLlvm());
if (alignment.toByteUnits()) |bytes| llvm_instruction.setAlignment(@intCast(bytes));
self.llvm.instructions.appendAssumeCapacity(llvm_instruction);
}
return instruction.toValue();
}
 
@@ -5532,13 +5372,6 @@ pub const WipFunction = struct {
.ptr = ptr,
}),
});
if (self.builder.useLibLlvm()) {
const llvm_instruction = self.llvm.builder.buildStore(val.toLlvm(self), ptr.toLlvm(self));
if (access_kind == .@"volatile") llvm_instruction.setVolatile(.True);
if (ordering != .none) llvm_instruction.setOrdering(ordering.toLlvm());
if (alignment.toByteUnits()) |bytes| llvm_instruction.setAlignment(@intCast(bytes));
self.llvm.instructions.appendAssumeCapacity(llvm_instruction);
}
return instruction;
}
 
@@ -5556,13 +5389,6 @@ pub const WipFunction = struct {
.success_ordering = ordering,
}),
});
if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
self.llvm.builder.buildFence(
ordering.toLlvm(),
llvm.Bool.fromBool(sync_scope == .singlethread),
"",
),
);
return instruction;
}
 
@@ -5605,25 +5431,6 @@ pub const WipFunction = struct {
.new = new,
}),
});
if (self.builder.useLibLlvm()) {
const llvm_instruction = self.llvm.builder.buildAtomicCmpXchg(
ptr.toLlvm(self),
cmp.toLlvm(self),
new.toLlvm(self),
success_ordering.toLlvm(),
failure_ordering.toLlvm(),
llvm.Bool.fromBool(sync_scope == .singlethread),
);
if (kind == .weak) llvm_instruction.setWeak(.True);
if (access_kind == .@"volatile") llvm_instruction.setVolatile(.True);
if (alignment.toByteUnits()) |bytes| llvm_instruction.setAlignment(@intCast(bytes));
const llvm_name = instruction.llvmName(self);
if (llvm_name.len > 0) llvm_instruction.setValueName(
llvm_name.ptr,
@intCast(llvm_name.len),
);
self.llvm.instructions.appendAssumeCapacity(llvm_instruction);
}
return instruction.toValue();
}
 
@@ -5656,23 +5463,6 @@ pub const WipFunction = struct {
.val = val,
}),
});
if (self.builder.useLibLlvm()) {
const llvm_instruction = self.llvm.builder.buildAtomicRmw(
operation.toLlvm(),
ptr.toLlvm(self),
val.toLlvm(self),
ordering.toLlvm(),
llvm.Bool.fromBool(sync_scope == .singlethread),
);
if (access_kind == .@"volatile") llvm_instruction.setVolatile(.True);
if (alignment.toByteUnits()) |bytes| llvm_instruction.setAlignment(@intCast(bytes));
const llvm_name = instruction.llvmName(self);
if (llvm_name.len > 0) llvm_instruction.setValueName(
llvm_name.ptr,
@intCast(llvm_name.len),
);
self.llvm.instructions.appendAssumeCapacity(llvm_instruction);
}
return instruction.toValue();
}
 
@@ -5732,28 +5522,6 @@ pub const WipFunction = struct {
}),
});
self.extra.appendSliceAssumeCapacity(@ptrCast(indices));
if (self.builder.useLibLlvm()) {
const ExpectedContents = [expected_gep_indices_len]*llvm.Value;
var stack align(@alignOf(ExpectedContents)) =
std.heap.stackFallback(@sizeOf(ExpectedContents), self.builder.gpa);
const allocator = stack.get();
 
const llvm_indices = try allocator.alloc(*llvm.Value, indices.len);
defer allocator.free(llvm_indices);
for (llvm_indices, indices) |*llvm_index, index| llvm_index.* = index.toLlvm(self);
 
self.llvm.instructions.appendAssumeCapacity(switch (kind) {
.normal => &llvm.Builder.buildGEP,
.inbounds => &llvm.Builder.buildInBoundsGEP,
}(
self.llvm.builder,
ty.toLlvm(self.builder),
base.toLlvm(self),
llvm_indices.ptr,
@intCast(llvm_indices.len),
instruction.llvmName(self),
));
}
return instruction.toValue();
}
 
@@ -5765,9 +5533,7 @@ pub const WipFunction = struct {
name: []const u8,
) Allocator.Error!Value {
assert(ty.isStruct(self.builder));
return self.gep(.inbounds, ty, base, &.{
try self.builder.intValue(.i32, 0), try self.builder.intValue(.i32, index),
}, name);
return self.gep(.inbounds, ty, base, &.{ .@"0", try self.builder.intValue(.i32, index) }, name);
}
 
pub fn conv(
@@ -5815,22 +5581,6 @@ pub const WipFunction = struct {
.type = ty,
}),
});
if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(switch (tag) {
.addrspacecast => &llvm.Builder.buildAddrSpaceCast,
.bitcast => &llvm.Builder.buildBitCast,
.fpext => &llvm.Builder.buildFPExt,
.fptosi => &llvm.Builder.buildFPToSI,
.fptoui => &llvm.Builder.buildFPToUI,
.fptrunc => &llvm.Builder.buildFPTrunc,
.inttoptr => &llvm.Builder.buildIntToPtr,
.ptrtoint => &llvm.Builder.buildPtrToInt,
.sext => &llvm.Builder.buildSExt,
.sitofp => &llvm.Builder.buildSIToFP,
.trunc => &llvm.Builder.buildTrunc,
.uitofp => &llvm.Builder.buildUIToFP,
.zext => &llvm.Builder.buildZExt,
else => unreachable,
}(self.llvm.builder, val.toLlvm(self), ty.toLlvm(self.builder), instruction.llvmName(self)));
return instruction.toValue();
}
 
@@ -5843,7 +5593,7 @@ pub const WipFunction = struct {
) Allocator.Error!Value {
return self.cmpTag(switch (cond) {
inline else => |tag| @field(Instruction.Tag, "icmp " ++ @tagName(tag)),
}, @intFromEnum(cond), lhs, rhs, name);
}, lhs, rhs, name);
}
 
pub fn fcmp(
@@ -5861,7 +5611,7 @@ pub const WipFunction = struct {
.fast => "fast ",
} ++ @tagName(cond_tag)),
},
}, @intFromEnum(cond), lhs, rhs, name);
}, lhs, rhs, name);
}
 
pub const WipPhi = struct {
@@ -5877,7 +5627,7 @@ pub const WipFunction = struct {
vals: []const Value,
blocks: []const Block.Index,
wip: *WipFunction,
) (if (build_options.have_llvm) Allocator.Error else error{})!void {
) void {
const incoming_len = self.block.ptrConst(wip).incoming;
assert(vals.len == incoming_len and blocks.len == incoming_len);
const instruction = wip.instructions.get(@intFromEnum(self.instruction));
@@ -5885,26 +5635,6 @@ pub const WipFunction = struct {
for (vals) |val| assert(val.typeOfWip(wip) == extra.data.type);
@memcpy(extra.trail.nextMut(incoming_len, Value, wip), vals);
@memcpy(extra.trail.nextMut(incoming_len, Block.Index, wip), blocks);
if (wip.builder.useLibLlvm()) {
const ExpectedContents = extern struct {
values: [expected_incoming_len]*llvm.Value,
blocks: [expected_incoming_len]*llvm.BasicBlock,
};
var stack align(@alignOf(ExpectedContents)) =
std.heap.stackFallback(@sizeOf(ExpectedContents), wip.builder.gpa);
const allocator = stack.get();
 
const llvm_vals = try allocator.alloc(*llvm.Value, incoming_len);
defer allocator.free(llvm_vals);
const llvm_blocks = try allocator.alloc(*llvm.BasicBlock, incoming_len);
defer allocator.free(llvm_blocks);
 
for (llvm_vals, vals) |*llvm_val, incoming_val| llvm_val.* = incoming_val.toLlvm(wip);
for (llvm_blocks, blocks) |*llvm_block, incoming_block|
llvm_block.* = incoming_block.toLlvm(wip);
self.instruction.toLlvm(wip)
.addIncoming(llvm_vals.ptr, llvm_blocks.ptr, @intCast(incoming_len));
}
}
};
 
@@ -5970,53 +5700,6 @@ pub const WipFunction = struct {
}),
});
self.extra.appendSliceAssumeCapacity(@ptrCast(args));
if (self.builder.useLibLlvm()) {
const ExpectedContents = [expected_args_len]*llvm.Value;
var stack align(@alignOf(ExpectedContents)) =
std.heap.stackFallback(@sizeOf(ExpectedContents), self.builder.gpa);
const allocator = stack.get();
 
const llvm_args = try allocator.alloc(*llvm.Value, args.len);
defer allocator.free(llvm_args);
for (llvm_args, args) |*llvm_arg, arg_val| llvm_arg.* = arg_val.toLlvm(self);
 
switch (kind) {
.normal,
.musttail,
.notail,
.tail,
=> self.llvm.builder.setFastMath(false),
.fast,
.musttail_fast,
.notail_fast,
.tail_fast,
=> self.llvm.builder.setFastMath(true),
}
const llvm_instruction = self.llvm.builder.buildCall(
ty.toLlvm(self.builder),
callee.toLlvm(self),
llvm_args.ptr,
@intCast(llvm_args.len),
switch (ret_ty) {
.void => "",
else => instruction.llvmName(self),
},
);
llvm_instruction.setInstructionCallConv(call_conv.toLlvm());
llvm_instruction.setTailCallKind(switch (kind) {
.normal, .fast => .None,
.musttail, .musttail_fast => .MustTail,
.notail, .notail_fast => .NoTail,
.tail, .tail_fast => .Tail,
});
for (0.., function_attributes.slice(self.builder)) |index, attributes| {
for (attributes.slice(self.builder)) |attribute| llvm_instruction.addCallSiteAttribute(
@as(llvm.AttributeIndex, @intCast(index)) -% 1,
attribute.toLlvm(self.builder),
);
}
self.llvm.instructions.appendAssumeCapacity(llvm_instruction);
}
return instruction.toValue();
}
 
@@ -6117,16 +5800,25 @@ pub const WipFunction = struct {
.type = ty,
}),
});
if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
self.llvm.builder.buildVAArg(
list.toLlvm(self),
ty.toLlvm(self.builder),
instruction.llvmName(self),
),
);
return instruction.toValue();
}
 
pub fn debugValue(self: *WipFunction, value: Value) Allocator.Error!Metadata {
if (self.builder.strip) return .none;
return switch (value.unwrap()) {
.instruction => |instr_index| blk: {
const gop = try self.debug_values.getOrPut(self.builder.gpa, instr_index);
 
const metadata: Metadata = @enumFromInt(Metadata.first_local_metadata + gop.index);
if (!gop.found_existing) gop.key_ptr.* = instr_index;
 
break :blk metadata;
},
.constant => |constant| try self.builder.debugConstant(constant),
.metadata => |metadata| metadata,
};
}
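
debugValue interns any instruction that debug info needs to refer to: the first time an instruction is seen it gets the next local-metadata slot, and repeats return the same slot because the array hash map's insertion index is stable. A standalone sketch of that interning pattern, where the base value stands in for whatever Metadata.first_local_metadata reserves:

const std = @import("std");

// Sketch only: slot = base + insertion index, so repeated queries agree.
fn localMetadataSlot(map: *std.AutoArrayHashMap(u32, void), base: u32, inst: u32) !u32 {
    const gop = try map.getOrPut(inst);
    return base + @as(u32, @intCast(gop.index));
}

test "repeated lookups return the same slot" {
    var map = std.AutoArrayHashMap(u32, void).init(std.testing.allocator);
    defer map.deinit();
    try std.testing.expectEqual(@as(u32, 100), try localMetadataSlot(&map, 100, 7));
    try std.testing.expectEqual(@as(u32, 101), try localMetadataSlot(&map, 100, 9));
    try std.testing.expectEqual(@as(u32, 100), try localMetadataSlot(&map, 100, 7));
}
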
 
pub fn finish(self: *WipFunction) Allocator.Error!void {
const gpa = self.builder.gpa;
const function = self.function.ptr(self.builder);
@@ -6146,6 +5838,7 @@ pub const WipFunction = struct {
@intFromEnum(instruction)
].toValue(),
.constant => |constant| constant.toValue(),
.metadata => |metadata| metadata.toValue(),
};
}
} = .{ .items = try gpa.alloc(Instruction.Index, self.instructions.len) };
@@ -6154,9 +5847,15 @@ pub const WipFunction = struct {
const names = try gpa.alloc(String, final_instructions_len);
errdefer gpa.free(names);
 
const metadata =
if (self.builder.strip) null else try gpa.alloc(Metadata, final_instructions_len);
errdefer if (metadata) |new_metadata| gpa.free(new_metadata);
const value_indices = try gpa.alloc(u32, final_instructions_len);
errdefer gpa.free(value_indices);
 
var debug_locations: std.AutoHashMapUnmanaged(Instruction.Index, Metadata) = .{};
errdefer debug_locations.deinit(gpa);
try debug_locations.ensureUnusedCapacity(gpa, @intCast(self.debug_locations.count()));
 
const debug_values = try gpa.alloc(Instruction.Index, self.debug_values.count());
errdefer gpa.free(debug_values);
 
var wip_extra: struct {
index: Instruction.ExtraIndex = 0,
@@ -6179,7 +5878,7 @@ pub const WipFunction = struct {
Instruction.Alloca.Info,
Instruction.Call.Info,
=> @bitCast(value),
else => @compileError("bad field type: " ++ @typeName(field.type)),
else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
};
wip_extra.index += 1;
}
@@ -6210,8 +5909,10 @@ pub const WipFunction = struct {
gpa.free(function.blocks);
function.blocks = &.{};
gpa.free(function.names[0..function.instructions.len]);
if (function.metadata) |old_metadata| gpa.free(old_metadata[0..function.instructions.len]);
function.metadata = null;
function.debug_locations.deinit(gpa);
function.debug_locations = .{};
gpa.free(function.debug_values);
function.debug_values = &.{};
gpa.free(function.extra);
function.extra = &.{};
 
@@ -6238,33 +5939,76 @@ pub const WipFunction = struct {
 
var wip_name: struct {
next_name: String = @enumFromInt(0),
next_unique_name: std.AutoHashMap(String, String),
builder: *Builder,
 
fn map(wip_name: *@This(), old_name: String) String {
if (old_name != .empty) return old_name;
fn map(wip_name: *@This(), name: String, sep: []const u8) Allocator.Error!String {
switch (name) {
.none => return .none,
.empty => {
assert(wip_name.next_name != .none);
defer wip_name.next_name = @enumFromInt(@intFromEnum(wip_name.next_name) + 1);
return wip_name.next_name;
},
_ => {
assert(!name.isAnon());
const gop = try wip_name.next_unique_name.getOrPut(name);
if (!gop.found_existing) {
gop.value_ptr.* = @enumFromInt(0);
return name;
}
 
const new_name = wip_name.next_name;
wip_name.next_name = @enumFromInt(@intFromEnum(new_name) + 1);
return new_name;
while (true) {
gop.value_ptr.* = @enumFromInt(@intFromEnum(gop.value_ptr.*) + 1);
const unique_name = try wip_name.builder.fmt("{r}{s}{r}", .{
name.fmt(wip_name.builder),
sep,
gop.value_ptr.fmt(wip_name.builder),
});
const unique_gop = try wip_name.next_unique_name.getOrPut(unique_name);
if (!unique_gop.found_existing) {
unique_gop.value_ptr.* = @enumFromInt(0);
return unique_name;
}
}
},
}
}
} = .{};
} = .{
.next_unique_name = std.AutoHashMap(String, String).init(gpa),
.builder = self.builder,
};
defer wip_name.next_unique_name.deinit();
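
The old wip_name only handed out the next anonymous number; the new version also de-duplicates explicit names by appending a separator and a counter, retrying until the suffixed name is itself unused. The details differ (the real code interns through Builder.fmt and keeps the counter in the map), but the shape of the algorithm is roughly this standalone sketch:

const std = @import("std");

// Sketch only: the first use of a name keeps it, later uses get "<name><sep><n>"
// with n bumped until the candidate is itself unused. An arena keeps the
// sketch's memory handling out of the way.
fn uniquify(
    arena: std.mem.Allocator,
    seen: *std.StringHashMap(void),
    name: []const u8,
    sep: []const u8,
) ![]const u8 {
    if (!seen.contains(name)) {
        try seen.put(name, {});
        return name;
    }
    var n: u32 = 1;
    while (true) : (n += 1) {
        const candidate = try std.fmt.allocPrint(arena, "{s}{s}{d}", .{ name, sep, n });
        if (!seen.contains(candidate)) {
            try seen.put(candidate, {});
            return candidate;
        }
    }
}

test "colliding names get numeric suffixes" {
    var arena_state = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena_state.deinit();
    const arena = arena_state.allocator();
    var seen = std.StringHashMap(void).init(arena);
    try std.testing.expectEqualStrings("x", try uniquify(arena, &seen, "x", "."));
    try std.testing.expectEqualStrings("x.1", try uniquify(arena, &seen, "x", "."));
    try std.testing.expectEqualStrings("x.2", try uniquify(arena, &seen, "x", "."));
}
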
 
var value_index: u32 = 0;
for (0..params_len) |param_index| {
const old_argument_index: Instruction.Index = @enumFromInt(param_index);
const new_argument_index: Instruction.Index = @enumFromInt(function.instructions.len);
const argument = self.instructions.get(@intFromEnum(old_argument_index));
assert(argument.tag == .arg);
assert(argument.data == param_index);
value_indices[function.instructions.len] = value_index;
value_index += 1;
function.instructions.appendAssumeCapacity(argument);
names[@intFromEnum(new_argument_index)] = wip_name.map(
names[@intFromEnum(new_argument_index)] = try wip_name.map(
if (self.builder.strip) .empty else self.names.items[@intFromEnum(old_argument_index)],
".",
);
if (self.debug_locations.get(old_argument_index)) |location| {
debug_locations.putAssumeCapacity(new_argument_index, location);
}
if (self.debug_values.getIndex(old_argument_index)) |index| {
debug_values[index] = new_argument_index;
}
}
for (self.blocks.items) |current_block| {
const new_block_index: Instruction.Index = @enumFromInt(function.instructions.len);
value_indices[function.instructions.len] = value_index;
function.instructions.appendAssumeCapacity(.{
.tag = .block,
.data = current_block.incoming,
});
names[@intFromEnum(new_block_index)] = wip_name.map(current_block.name);
names[@intFromEnum(new_block_index)] = try wip_name.map(current_block.name, "");
for (current_block.instructions.items) |old_instruction_index| {
const new_instruction_index: Instruction.Index =
@enumFromInt(function.instructions.len);
@@ -6565,10 +6309,21 @@ pub const WipFunction = struct {
},
}
function.instructions.appendAssumeCapacity(instruction);
names[@intFromEnum(new_instruction_index)] = wip_name.map(if (self.builder.strip)
names[@intFromEnum(new_instruction_index)] = try wip_name.map(if (self.builder.strip)
if (old_instruction_index.hasResultWip(self)) .empty else .none
else
self.names.items[@intFromEnum(old_instruction_index)]);
self.names.items[@intFromEnum(old_instruction_index)], ".");
 
if (self.debug_locations.get(old_instruction_index)) |location| {
debug_locations.putAssumeCapacity(new_instruction_index, location);
}
 
if (self.debug_values.getIndex(old_instruction_index)) |index| {
debug_values[index] = new_instruction_index;
}
 
value_indices[@intFromEnum(new_instruction_index)] = value_index;
if (old_instruction_index.hasResultWip(self)) value_index += 1;
}
}
 
@@ -6576,28 +6331,25 @@ pub const WipFunction = struct {
function.extra = wip_extra.finish();
function.blocks = blocks;
function.names = names.ptr;
function.metadata = if (metadata) |new_metadata| new_metadata.ptr else null;
function.value_indices = value_indices.ptr;
function.debug_locations = debug_locations;
function.debug_values = debug_values;
}
 
pub fn deinit(self: *WipFunction) void {
self.extra.deinit(self.builder.gpa);
self.metadata.deinit(self.builder.gpa);
self.debug_values.deinit(self.builder.gpa);
self.debug_locations.deinit(self.builder.gpa);
self.names.deinit(self.builder.gpa);
self.instructions.deinit(self.builder.gpa);
for (self.blocks.items) |*b| b.instructions.deinit(self.builder.gpa);
self.blocks.deinit(self.builder.gpa);
if (self.builder.useLibLlvm()) {
self.llvm.instructions.deinit(self.builder.gpa);
self.llvm.blocks.deinit(self.builder.gpa);
self.llvm.builder.dispose();
}
self.* = undefined;
}
 
fn cmpTag(
self: *WipFunction,
tag: Instruction.Tag,
cond: u32,
lhs: Value,
rhs: Value,
name: []const u8,
@@ -6657,113 +6409,6 @@ pub const WipFunction = struct {
.rhs = rhs,
}),
});
if (self.builder.useLibLlvm()) {
switch (tag) {
.@"fcmp false",
.@"fcmp oeq",
.@"fcmp oge",
.@"fcmp ogt",
.@"fcmp ole",
.@"fcmp olt",
.@"fcmp one",
.@"fcmp ord",
.@"fcmp true",
.@"fcmp ueq",
.@"fcmp uge",
.@"fcmp ugt",
.@"fcmp ule",
.@"fcmp ult",
.@"fcmp une",
.@"fcmp uno",
=> self.llvm.builder.setFastMath(false),
.@"fcmp fast false",
.@"fcmp fast oeq",
.@"fcmp fast oge",
.@"fcmp fast ogt",
.@"fcmp fast ole",
.@"fcmp fast olt",
.@"fcmp fast one",
.@"fcmp fast ord",
.@"fcmp fast true",
.@"fcmp fast ueq",
.@"fcmp fast uge",
.@"fcmp fast ugt",
.@"fcmp fast ule",
.@"fcmp fast ult",
.@"fcmp fast une",
.@"fcmp fast uno",
=> self.llvm.builder.setFastMath(true),
.@"icmp eq",
.@"icmp ne",
.@"icmp sge",
.@"icmp sgt",
.@"icmp sle",
.@"icmp slt",
.@"icmp uge",
.@"icmp ugt",
.@"icmp ule",
.@"icmp ult",
=> {},
else => unreachable,
}
self.llvm.instructions.appendAssumeCapacity(switch (tag) {
.@"fcmp false",
.@"fcmp fast false",
.@"fcmp fast oeq",
.@"fcmp fast oge",
.@"fcmp fast ogt",
.@"fcmp fast ole",
.@"fcmp fast olt",
.@"fcmp fast one",
.@"fcmp fast ord",
.@"fcmp fast true",
.@"fcmp fast ueq",
.@"fcmp fast uge",
.@"fcmp fast ugt",
.@"fcmp fast ule",
.@"fcmp fast ult",
.@"fcmp fast une",
.@"fcmp fast uno",
.@"fcmp oeq",
.@"fcmp oge",
.@"fcmp ogt",
.@"fcmp ole",
.@"fcmp olt",
.@"fcmp one",
.@"fcmp ord",
.@"fcmp true",
.@"fcmp ueq",
.@"fcmp uge",
.@"fcmp ugt",
.@"fcmp ule",
.@"fcmp ult",
.@"fcmp une",
.@"fcmp uno",
=> self.llvm.builder.buildFCmp(
@enumFromInt(cond),
lhs.toLlvm(self),
rhs.toLlvm(self),
instruction.llvmName(self),
),
.@"icmp eq",
.@"icmp ne",
.@"icmp sge",
.@"icmp sgt",
.@"icmp sle",
.@"icmp slt",
.@"icmp uge",
.@"icmp ugt",
.@"icmp ule",
.@"icmp ult",
=> self.llvm.builder.buildICmp(
@enumFromInt(cond),
lhs.toLlvm(self),
rhs.toLlvm(self),
instruction.llvmName(self),
),
else => unreachable,
});
}
return instruction.toValue();
}
 
@@ -6785,16 +6430,6 @@ pub const WipFunction = struct {
.data = self.addExtraAssumeCapacity(Instruction.Phi{ .type = ty }),
});
_ = self.extra.addManyAsSliceAssumeCapacity(incoming * 2);
if (self.builder.useLibLlvm()) {
switch (tag) {
.phi => self.llvm.builder.setFastMath(false),
.@"phi fast" => self.llvm.builder.setFastMath(true),
else => unreachable,
}
self.llvm.instructions.appendAssumeCapacity(
self.llvm.builder.buildPhi(ty.toLlvm(self.builder), instruction.llvmName(self)),
);
}
return .{ .block = self.cursor.block, .instruction = instruction };
}
 
@@ -6822,19 +6457,6 @@ pub const WipFunction = struct {
.rhs = rhs,
}),
});
if (self.builder.useLibLlvm()) {
switch (tag) {
.select => self.llvm.builder.setFastMath(false),
.@"select fast" => self.llvm.builder.setFastMath(true),
else => unreachable,
}
self.llvm.instructions.appendAssumeCapacity(self.llvm.builder.buildSelect(
cond.toLlvm(self),
lhs.toLlvm(self),
rhs.toLlvm(self),
instruction.llvmName(self),
));
}
return instruction.toValue();
}
 
@@ -6857,28 +6479,27 @@ pub const WipFunction = struct {
) Allocator.Error!Instruction.Index {
const block_instructions = &self.cursor.block.ptr(self).instructions;
try self.instructions.ensureUnusedCapacity(self.builder.gpa, 1);
if (!self.builder.strip) try self.names.ensureUnusedCapacity(self.builder.gpa, 1);
if (!self.builder.strip) {
try self.names.ensureUnusedCapacity(self.builder.gpa, 1);
try self.debug_locations.ensureUnusedCapacity(self.builder.gpa, 1);
}
try block_instructions.ensureUnusedCapacity(self.builder.gpa, 1);
if (self.builder.useLibLlvm())
try self.llvm.instructions.ensureUnusedCapacity(self.builder.gpa, 1);
const final_name = if (name) |n|
if (self.builder.strip) .empty else try self.builder.string(n)
else
.none;
 
if (self.builder.useLibLlvm()) self.llvm.builder.positionBuilder(
self.cursor.block.toLlvm(self),
for (block_instructions.items[self.cursor.instruction..]) |instruction_index| {
const llvm_instruction =
self.llvm.instructions.items[@intFromEnum(instruction_index)];
// TODO: remove when constant propagation is implemented
if (!llvm_instruction.isConstant().toBool()) break llvm_instruction;
} else null,
);
 
const index: Instruction.Index = @enumFromInt(self.instructions.len);
self.instructions.appendAssumeCapacity(instruction);
if (!self.builder.strip) self.names.appendAssumeCapacity(final_name);
if (!self.builder.strip) {
self.names.appendAssumeCapacity(final_name);
if (block_instructions.items.len == 0 or
self.current_debug_location != self.last_debug_location)
{
self.debug_locations.putAssumeCapacity(index, self.current_debug_location);
self.last_debug_location = self.current_debug_location;
}
}
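// Debug locations are recorded sparsely: only for the first instruction of a
// block or when the location differs from the previously recorded one.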
block_instructions.insertAssumeCapacity(self.cursor.instruction, index);
self.cursor.instruction += 1;
return index;
@@ -6901,7 +6522,7 @@ pub const WipFunction = struct {
Instruction.Alloca.Info,
Instruction.Call.Info,
=> @bitCast(value),
else => @compileError("bad field type: " ++ @typeName(field.type)),
else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
});
}
return result;
@@ -6949,7 +6570,7 @@ pub const WipFunction = struct {
Instruction.Alloca.Info,
Instruction.Call.Info,
=> @bitCast(value),
else => @compileError("bad field type: " ++ @typeName(field.type)),
else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
};
return .{
.data = result,
@@ -6977,24 +6598,6 @@ pub const FloatCondition = enum(u4) {
ult = 12,
ule = 13,
une = 14,
 
fn toLlvm(self: FloatCondition) llvm.RealPredicate {
return switch (self) {
.oeq => .OEQ,
.ogt => .OGT,
.oge => .OGE,
.olt => .OLT,
.ole => .OLE,
.one => .ONE,
.ord => .ORD,
.uno => .UNO,
.ueq => .UEQ,
.ugt => .UGT,
.uge => .UGE,
.ult => .ULT,
.une => .UNE,
};
}
};
 
pub const IntegerCondition = enum(u6) {
@@ -7008,20 +6611,6 @@ pub const IntegerCondition = enum(u6) {
sge = 39,
slt = 40,
sle = 41,
 
fn toLlvm(self: IntegerCondition) llvm.IntPredicate {
return switch (self) {
.eq => .EQ,
.ne => .NE,
.ugt => .UGT,
.uge => .UGE,
.ult => .ULT,
.sgt => .SGT,
.sge => .SGE,
.slt => .SLT,
.sle => .SLE,
};
}
};
 
pub const MemoryAccessKind = enum(u1) {
@@ -7058,10 +6647,10 @@ pub const AtomicOrdering = enum(u3) {
none = 0,
unordered = 1,
monotonic = 2,
acquire = 4,
release = 5,
acq_rel = 6,
seq_cst = 7,
acquire = 3,
release = 4,
acq_rel = 5,
seq_cst = 6,
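// These values now match LLVM bitcode's atomic ordering encoding
// (NotAtomic = 0 ... SequentiallyConsistent = 6), presumably so they can be
// written into the bitcode stream without a translation step.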
 
pub fn format(
self: AtomicOrdering,
@@ -7071,18 +6660,6 @@ pub const AtomicOrdering = enum(u3) {
) @TypeOf(writer).Error!void {
if (self != .none) try writer.print("{s}{s}", .{ prefix, @tagName(self) });
}
 
fn toLlvm(self: AtomicOrdering) llvm.AtomicOrdering {
return switch (self) {
.none => .NotAtomic,
.unordered => .Unordered,
.monotonic => .Monotonic,
.acquire => .Acquire,
.release => .Release,
.acq_rel => .AcquireRelease,
.seq_cst => .SequentiallyConsistent,
};
}
};
 
const MemoryAccessInfo = packed struct(u32) {
@@ -7095,7 +6672,8 @@ const MemoryAccessInfo = packed struct(u32) {
_: u13 = undefined,
};
 
pub const FastMath = packed struct(u32) {
pub const FastMath = packed struct(u8) {
unsafe_algebra: bool = false, // Legacy
nnan: bool = false,
ninf: bool = false,
nsz: bool = false,
@@ -7130,11 +6708,13 @@ pub const FastMathKind = enum {
pub const Constant = enum(u32) {
false,
true,
@"0",
@"1",
none,
no_init = 1 << 31,
no_init = (1 << 30) - 1,
_,
 
const first_global: Constant = @enumFromInt(1 << 30);
const first_global: Constant = @enumFromInt(1 << 29);
 
pub const Tag = enum(u7) {
positive_integer,
@@ -7152,7 +6732,6 @@ pub const Constant = enum(u32) {
packed_structure,
array,
string,
string_null,
vector,
splat,
zeroinitializer,
@@ -7212,6 +6791,49 @@ pub const Constant = enum(u32) {
@"asm sideeffect inteldialect unwind",
@"asm alignstack inteldialect unwind",
@"asm sideeffect alignstack inteldialect unwind",
 
pub fn toBinaryOpcode(self: Tag) BinaryOpcode {
return switch (self) {
.add,
.@"add nsw",
.@"add nuw",
=> .add,
.sub,
.@"sub nsw",
.@"sub nuw",
=> .sub,
.mul,
.@"mul nsw",
.@"mul nuw",
=> .mul,
.shl => .shl,
.lshr => .lshr,
.ashr => .ashr,
.@"and" => .@"and",
.@"or" => .@"or",
.xor => .xor,
else => unreachable,
};
}
 
pub fn toCastOpcode(self: Tag) CastOpcode {
return switch (self) {
.trunc => .trunc,
.zext => .zext,
.sext => .sext,
.fptoui => .fptoui,
.fptosi => .fptosi,
.uitofp => .uitofp,
.sitofp => .sitofp,
.fptrunc => .fptrunc,
.fpext => .fpext,
.ptrtoint => .ptrtoint,
.inttoptr => .inttoptr,
.bitcast => .bitcast,
.addrspacecast => .addrspacecast,
else => unreachable,
};
}
};
 
pub const Item = struct {
@@ -7364,11 +6986,8 @@ pub const Constant = enum(u32) {
.vector,
=> builder.constantExtraData(Aggregate, item.data).type,
.splat => builder.constantExtraData(Splat, item.data).type,
.string,
.string_null,
=> builder.arrayTypeAssumeCapacity(
@as(String, @enumFromInt(item.data)).slice(builder).?.len +
@intFromBool(item.tag == .string_null),
.string => builder.arrayTypeAssumeCapacity(
@as(String, @enumFromInt(item.data)).slice(builder).?.len,
.i8,
),
.blockaddress => builder.ptrTypeAssumeCapacity(
@@ -7574,7 +7193,7 @@ pub const Constant = enum(u32) {
@ptrCast(data.builder.constant_limbs.items[item.data..][0..Integer.limbs]);
const limbs = data.builder.constant_limbs
.items[item.data + Integer.limbs ..][0..extra.limbs_len];
const bigint = std.math.big.int.Const{
const bigint: std.math.big.int.Const = .{
.limbs = limbs,
.positive = tag == .positive_integer,
};
@@ -7616,17 +7235,31 @@ pub const Constant = enum(u32) {
};
}
};
const Mantissa64 = std.meta.FieldType(Float.Repr(f64), .mantissa);
const Exponent32 = std.meta.FieldType(Float.Repr(f32), .exponent);
const Exponent64 = std.meta.FieldType(Float.Repr(f64), .exponent);
 
const repr: Float.Repr(f32) = @bitCast(item.data);
const denormal_shift = switch (repr.exponent) {
std.math.minInt(Exponent32) => @as(
std.math.Log2Int(Mantissa64),
@clz(repr.mantissa),
) + 1,
else => 0,
};
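// A subnormal f32 has an all-zero exponent field; denormal_shift renormalizes
// its mantissa and the exponent prong below compensates by the same amount, so
// the widened f64 hex constant printed here denotes exactly the same value
// (illustrative: the smallest positive subnormal f32, bit pattern 0x00000001,
// prints as 0x36A0000000000000).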
try writer.print("0x{X:0>16}", .{@as(u64, @bitCast(Float.Repr(f64){
.mantissa = std.math.shl(
std.meta.FieldType(Float.Repr(f64), .mantissa),
Mantissa64,
repr.mantissa,
std.math.floatMantissaBits(f64) - std.math.floatMantissaBits(f32),
std.math.floatMantissaBits(f64) - std.math.floatMantissaBits(f32) +
denormal_shift,
),
.exponent = switch (repr.exponent) {
std.math.minInt(Exponent32) => std.math.minInt(Exponent64),
std.math.minInt(Exponent32) => if (repr.mantissa > 0)
@as(Exponent64, std.math.floatExponentMin(f32) +
std.math.floatExponentMax(f64)) - denormal_shift
else
std.math.minInt(Exponent64),
else => @as(Exponent64, repr.exponent) +
(std.math.floatExponentMax(f64) - std.math.floatExponentMax(f32)),
std.math.maxInt(Exponent32) => std.math.maxInt(Exponent64),
@@ -7703,13 +7336,9 @@ pub const Constant = enum(u32) {
}
try writer.writeByte('>');
},
inline .string,
.string_null,
=> |tag| try writer.print("c{\"" ++ switch (tag) {
.string => "",
.string_null => "@",
else => unreachable,
} ++ "}", .{@as(String, @enumFromInt(item.data)).fmt(data.builder)}),
.string => try writer.print("c{\"}", .{
@as(String, @enumFromInt(item.data)).fmt(data.builder),
}),
.blockaddress => |tag| {
const extra = data.builder.constantExtraData(BlockAddress, item.data);
const function = extra.function.ptrConst(data.builder);
@@ -7859,40 +7488,37 @@ pub const Constant = enum(u32) {
pub fn fmt(self: Constant, builder: *Builder) std.fmt.Formatter(format) {
return .{ .data = .{ .constant = self, .builder = builder } };
}
 
pub fn toLlvm(self: Constant, builder: *const Builder) *llvm.Value {
assert(builder.useLibLlvm());
const llvm_value = switch (self.unwrap()) {
.constant => |constant| builder.llvm.constants.items[constant],
.global => |global| return global.toLlvm(builder),
};
const global = builder.llvm.replacements.get(llvm_value) orelse return llvm_value;
return global.toLlvm(builder);
}
};
 
pub const Value = enum(u32) {
none = std.math.maxInt(u31),
false = first_constant + @intFromEnum(Constant.false),
true = first_constant + @intFromEnum(Constant.true),
@"0" = first_constant + @intFromEnum(Constant.@"0"),
@"1" = first_constant + @intFromEnum(Constant.@"1"),
_,
 
const first_constant = 1 << 31;
const first_constant = 1 << 30;
const first_metadata = 1 << 31;
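// Value indices are partitioned: below 1 << 30 are instructions, the range
// [1 << 30, 1 << 31) holds constants, and 1 << 31 and above refer to metadata.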
 
pub fn unwrap(self: Value) union(enum) {
instruction: Function.Instruction.Index,
constant: Constant,
metadata: Metadata,
} {
return if (@intFromEnum(self) < first_constant)
.{ .instruction = @enumFromInt(@intFromEnum(self)) }
else if (@intFromEnum(self) < first_metadata)
.{ .constant = @enumFromInt(@intFromEnum(self) - first_constant) }
else
.{ .constant = @enumFromInt(@intFromEnum(self) - first_constant) };
.{ .metadata = @enumFromInt(@intFromEnum(self) - first_metadata) };
}
 
pub fn typeOfWip(self: Value, wip: *const WipFunction) Type {
return switch (self.unwrap()) {
.instruction => |instruction| instruction.typeOfWip(wip),
.constant => |constant| constant.typeOf(wip.builder),
.metadata => .metadata,
};
}
 
@@ -7900,12 +7526,13 @@ pub const Value = enum(u32) {
return switch (self.unwrap()) {
.instruction => |instruction| instruction.typeOf(function, builder),
.constant => |constant| constant.typeOf(builder),
.metadata => .metadata,
};
}
 
pub fn toConst(self: Value) ?Constant {
return switch (self.unwrap()) {
.instruction => null,
.instruction, .metadata => null,
.constant => |constant| constant,
};
}
@@ -7931,34 +7558,714 @@ pub const Value = enum(u32) {
.constant = constant,
.builder = data.builder,
}, fmt_str, fmt_opts, writer),
.metadata => unreachable,
}
}
pub fn fmt(self: Value, function: Function.Index, builder: *Builder) std.fmt.Formatter(format) {
return .{ .data = .{ .value = self, .function = function, .builder = builder } };
}
};
 
pub fn toLlvm(self: Value, wip: *const WipFunction) *llvm.Value {
return switch (self.unwrap()) {
.instruction => |instruction| instruction.toLlvm(wip),
.constant => |constant| constant.toLlvm(wip.builder),
};
pub const MetadataString = enum(u32) {
none = 0,
_,
 
pub fn slice(self: MetadataString, builder: *const Builder) []const u8 {
const index = @intFromEnum(self);
const start = builder.metadata_string_indices.items[index];
const end = builder.metadata_string_indices.items[index + 1];
return builder.metadata_string_bytes.items[start..end];
}
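// metadata_string_indices is seeded with a leading 0 (see init), so string i
// occupies bytes indices[i]..indices[i + 1]; index 0 is the empty string,
// which doubles as `.none`.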
 
const Adapter = struct {
builder: *const Builder,
pub fn hash(_: Adapter, key: []const u8) u32 {
return @truncate(std.hash.Wyhash.hash(0, key));
}
pub fn eql(ctx: Adapter, lhs_key: []const u8, _: void, rhs_index: usize) bool {
const rhs_metadata_string: MetadataString = @enumFromInt(rhs_index);
return std.mem.eql(u8, lhs_key, rhs_metadata_string.slice(ctx.builder));
}
};
 
const FormatData = struct {
metadata_string: MetadataString,
builder: *const Builder,
};
fn format(
data: FormatData,
comptime _: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
try printEscapedString(data.metadata_string.slice(data.builder), .always_quote, writer);
}
fn fmt(self: MetadataString, builder: *const Builder) std.fmt.Formatter(format) {
return .{ .data = .{ .metadata_string = self, .builder = builder } };
}
};
 
pub const Metadata = enum(u32) { _ };
pub const Metadata = enum(u32) {
none = 0,
_,
 
pub const InitError = error{
InvalidLlvmTriple,
} || Allocator.Error;
const first_forward_reference = 1 << 29;
const first_local_metadata = 1 << 30;
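// Metadata indices are partitioned: values below 1 << 29 index metadata_items
// directly, [1 << 29, 1 << 30) are forward references resolved through
// metadata_forward_references (see unwrap below), and 1 << 30 and above refer
// to function-local metadata, i.e. entries in a function's debug_values.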
 
pub fn init(options: Options) InitError!Builder {
pub const Tag = enum(u6) {
none,
file,
compile_unit,
@"compile_unit optimized",
subprogram,
@"subprogram local",
@"subprogram definition",
@"subprogram local definition",
@"subprogram optimized",
@"subprogram optimized local",
@"subprogram optimized definition",
@"subprogram optimized local definition",
lexical_block,
location,
basic_bool_type,
basic_unsigned_type,
basic_signed_type,
basic_float_type,
composite_struct_type,
composite_union_type,
composite_enumeration_type,
composite_array_type,
composite_vector_type,
derived_pointer_type,
derived_member_type,
subroutine_type,
enumerator_unsigned,
enumerator_signed_positive,
enumerator_signed_negative,
subrange,
tuple,
module_flag,
expression,
local_var,
parameter,
global_var,
@"global_var local",
global_var_expression,
constant,
 
pub fn isInline(tag: Tag) bool {
return switch (tag) {
.none,
.expression,
.constant,
=> true,
.file,
.compile_unit,
.@"compile_unit optimized",
.subprogram,
.@"subprogram local",
.@"subprogram definition",
.@"subprogram local definition",
.@"subprogram optimized",
.@"subprogram optimized local",
.@"subprogram optimized definition",
.@"subprogram optimized local definition",
.lexical_block,
.location,
.basic_bool_type,
.basic_unsigned_type,
.basic_signed_type,
.basic_float_type,
.composite_struct_type,
.composite_union_type,
.composite_enumeration_type,
.composite_array_type,
.composite_vector_type,
.derived_pointer_type,
.derived_member_type,
.subroutine_type,
.enumerator_unsigned,
.enumerator_signed_positive,
.enumerator_signed_negative,
.subrange,
.tuple,
.module_flag,
.local_var,
.parameter,
.global_var,
.@"global_var local",
.global_var_expression,
=> false,
};
}
};
 
pub fn isInline(self: Metadata, builder: *const Builder) bool {
return builder.metadata_items.items(.tag)[@intFromEnum(self)].isInline();
}
 
pub fn unwrap(self: Metadata, builder: *const Builder) Metadata {
var metadata = self;
while (@intFromEnum(metadata) >= Metadata.first_forward_reference and
@intFromEnum(metadata) < Metadata.first_local_metadata)
{
const index = @intFromEnum(metadata) - Metadata.first_forward_reference;
metadata = builder.metadata_forward_references.items[index];
assert(metadata != .none);
}
return metadata;
}
 
pub const Item = struct {
tag: Tag,
data: ExtraIndex,
 
const ExtraIndex = u32;
};
 
pub const DIFlags = packed struct(u32) {
Visibility: enum(u2) { Zero, Private, Protected, Public } = .Zero,
FwdDecl: bool = false,
AppleBlock: bool = false,
ReservedBit4: u1 = 0,
Virtual: bool = false,
Artificial: bool = false,
Explicit: bool = false,
Prototyped: bool = false,
ObjcClassComplete: bool = false,
ObjectPointer: bool = false,
Vector: bool = false,
StaticMember: bool = false,
LValueReference: bool = false,
RValueReference: bool = false,
ExportSymbols: bool = false,
Inheritance: enum(u2) {
Zero,
SingleInheritance,
MultipleInheritance,
VirtualInheritance,
} = .Zero,
IntroducedVirtual: bool = false,
BitField: bool = false,
NoReturn: bool = false,
ReservedBit21: u1 = 0,
TypePassbyValue: bool = false,
TypePassbyReference: bool = false,
EnumClass: bool = false,
Thunk: bool = false,
NonTrivial: bool = false,
BigEndian: bool = false,
LittleEndian: bool = false,
AllCallsDescribed: bool = false,
Unused: u2 = 0,
 
pub fn format(
self: DIFlags,
comptime _: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
var need_pipe = false;
inline for (@typeInfo(DIFlags).Struct.fields) |field| {
switch (@typeInfo(field.type)) {
.Bool => if (@field(self, field.name)) {
if (need_pipe) try writer.writeAll(" | ") else need_pipe = true;
try writer.print("DIFlag{s}", .{field.name});
},
.Enum => if (@field(self, field.name) != .Zero) {
if (need_pipe) try writer.writeAll(" | ") else need_pipe = true;
try writer.print("DIFlag{s}", .{@tagName(@field(self, field.name))});
},
.Int => assert(@field(self, field.name) == 0),
else => @compileError("bad field type: " ++ field.name ++ ": " ++
@typeName(field.type)),
}
}
if (!need_pipe) try writer.writeByte('0');
}
};
 
pub const File = struct {
filename: MetadataString,
directory: MetadataString,
};
 
pub const CompileUnit = struct {
pub const Options = struct {
optimized: bool,
};
 
file: Metadata,
producer: MetadataString,
enums: Metadata,
globals: Metadata,
};
 
pub const Subprogram = struct {
pub const Options = struct {
di_flags: DIFlags,
sp_flags: DISPFlags,
};
 
pub const DISPFlags = packed struct(u32) {
Virtuality: enum(u2) { Zero, Virtual, PureVirtual } = .Zero,
LocalToUnit: bool = false,
Definition: bool = false,
Optimized: bool = false,
Pure: bool = false,
Elemental: bool = false,
Recursive: bool = false,
MainSubprogram: bool = false,
Deleted: bool = false,
ReservedBit10: u1 = 0,
ObjCDirect: bool = false,
Unused: u20 = 0,
 
pub fn format(
self: DISPFlags,
comptime _: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
var need_pipe = false;
inline for (@typeInfo(DISPFlags).Struct.fields) |field| {
switch (@typeInfo(field.type)) {
.Bool => if (@field(self, field.name)) {
if (need_pipe) try writer.writeAll(" | ") else need_pipe = true;
try writer.print("DISPFlag{s}", .{field.name});
},
.Enum => if (@field(self, field.name) != .Zero) {
if (need_pipe) try writer.writeAll(" | ") else need_pipe = true;
try writer.print("DISPFlag{s}", .{@tagName(@field(self, field.name))});
},
.Int => assert(@field(self, field.name) == 0),
else => @compileError("bad field type: " ++ field.name ++ ": " ++
@typeName(field.type)),
}
}
if (!need_pipe) try writer.writeByte('0');
}
};
 
file: Metadata,
name: MetadataString,
linkage_name: MetadataString,
line: u32,
scope_line: u32,
ty: Metadata,
di_flags: DIFlags,
compile_unit: Metadata,
};
 
pub const LexicalBlock = struct {
scope: Metadata,
file: Metadata,
line: u32,
column: u32,
};
 
pub const Location = struct {
line: u32,
column: u32,
scope: Metadata,
inlined_at: Metadata,
};
 
pub const BasicType = struct {
name: MetadataString,
size_in_bits_lo: u32,
size_in_bits_hi: u32,
 
pub fn bitSize(self: BasicType) u64 {
return @as(u64, self.size_in_bits_hi) << 32 | self.size_in_bits_lo;
}
};
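// 64-bit sizes, alignments, and offsets are split into lo/hi u32 halves in
// these metadata structs because extra data is stored as 32-bit words; the
// bitSize/bitAlign/bitOffset helpers reassemble them.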
 
pub const CompositeType = struct {
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
underlying_type: Metadata,
size_in_bits_lo: u32,
size_in_bits_hi: u32,
align_in_bits_lo: u32,
align_in_bits_hi: u32,
fields_tuple: Metadata,
 
pub fn bitSize(self: CompositeType) u64 {
return @as(u64, self.size_in_bits_hi) << 32 | self.size_in_bits_lo;
}
pub fn bitAlign(self: CompositeType) u64 {
return @as(u64, self.align_in_bits_hi) << 32 | self.align_in_bits_lo;
}
};
 
pub const DerivedType = struct {
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
underlying_type: Metadata,
size_in_bits_lo: u32,
size_in_bits_hi: u32,
align_in_bits_lo: u32,
align_in_bits_hi: u32,
offset_in_bits_lo: u32,
offset_in_bits_hi: u32,
 
pub fn bitSize(self: DerivedType) u64 {
return @as(u64, self.size_in_bits_hi) << 32 | self.size_in_bits_lo;
}
pub fn bitAlign(self: DerivedType) u64 {
return @as(u64, self.align_in_bits_hi) << 32 | self.align_in_bits_lo;
}
pub fn bitOffset(self: DerivedType) u64 {
return @as(u64, self.offset_in_bits_hi) << 32 | self.offset_in_bits_lo;
}
};
 
pub const SubroutineType = struct {
types_tuple: Metadata,
};
 
pub const Enumerator = struct {
name: MetadataString,
bit_width: u32,
limbs_index: u32,
limbs_len: u32,
};
 
pub const Subrange = struct {
lower_bound: Metadata,
count: Metadata,
};
 
pub const Expression = struct {
elements_len: u32,
 
// elements: [elements_len]u32
};
 
pub const Tuple = struct {
elements_len: u32,
 
// elements: [elements_len]Metadata
};
 
pub const ModuleFlag = struct {
behavior: Metadata,
name: MetadataString,
constant: Metadata,
};
 
pub const LocalVar = struct {
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
ty: Metadata,
};
 
pub const Parameter = struct {
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
ty: Metadata,
arg_no: u32,
};
 
pub const GlobalVar = struct {
pub const Options = struct {
local: bool,
};
 
name: MetadataString,
linkage_name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
ty: Metadata,
variable: Variable.Index,
};
 
pub const GlobalVarExpression = struct {
variable: Metadata,
expression: Metadata,
};
 
pub fn toValue(self: Metadata) Value {
return @enumFromInt(Value.first_metadata + @intFromEnum(self));
}
 
const Formatter = struct {
builder: *Builder,
need_comma: bool,
map: std.AutoArrayHashMapUnmanaged(Metadata, void) = .{},
 
const FormatData = struct {
formatter: *Formatter,
prefix: []const u8 = "",
node: Node,
 
const Node = union(enum) {
none,
@"inline": Metadata,
index: u32,
 
local_value: ValueData,
local_metadata: ValueData,
local_inline: Metadata,
local_index: u32,
 
string: MetadataString,
bool: bool,
u32: u32,
u64: u64,
di_flags: DIFlags,
sp_flags: Subprogram.DISPFlags,
raw: []const u8,
 
const ValueData = struct {
value: Value,
function: Function.Index,
};
};
};
fn format(
data: FormatData,
comptime fmt_str: []const u8,
fmt_opts: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
if (data.node == .none) return;
 
const is_specialized = fmt_str.len > 0 and fmt_str[0] == 'S';
const recurse_fmt_str = if (is_specialized) fmt_str[1..] else fmt_str;
 
if (data.formatter.need_comma) try writer.writeAll(", ");
defer data.formatter.need_comma = true;
try writer.writeAll(data.prefix);
 
const builder = data.formatter.builder;
switch (data.node) {
.none => unreachable,
.@"inline" => |node| {
const needed_comma = data.formatter.need_comma;
defer data.formatter.need_comma = needed_comma;
data.formatter.need_comma = false;
 
const item = builder.metadata_items.get(@intFromEnum(node));
switch (item.tag) {
.expression => {
var extra = builder.metadataExtraDataTrail(Expression, item.data);
const elements = extra.trail.next(extra.data.elements_len, u32, builder);
try writer.writeAll("!DIExpression(");
for (elements) |element| try format(.{
.formatter = data.formatter,
.node = .{ .u64 = element },
}, "%", fmt_opts, writer);
try writer.writeByte(')');
},
.constant => try Constant.format(.{
.constant = @enumFromInt(item.data),
.builder = builder,
}, recurse_fmt_str, fmt_opts, writer),
else => unreachable,
}
},
.index => |node| try writer.print("!{d}", .{node}),
inline .local_value, .local_metadata => |node, tag| try Value.format(.{
.value = node.value,
.function = node.function,
.builder = builder,
}, switch (tag) {
.local_value => recurse_fmt_str,
.local_metadata => "%",
else => unreachable,
}, fmt_opts, writer),
inline .local_inline, .local_index => |node, tag| {
if (comptime std.mem.eql(u8, recurse_fmt_str, "%"))
try writer.print("{%} ", .{Type.metadata.fmt(builder)});
try format(.{
.formatter = data.formatter,
.node = @unionInit(FormatData.Node, @tagName(tag)["local_".len..], node),
}, "%", fmt_opts, writer);
},
.string => |node| try writer.print((if (is_specialized) "" else "!") ++ "{}", .{
node.fmt(builder),
}),
inline .bool,
.u32,
.u64,
.di_flags,
.sp_flags,
=> |node| try writer.print("{}", .{node}),
.raw => |node| try writer.writeAll(node),
}
}
inline fn fmt(formatter: *Formatter, prefix: []const u8, node: anytype) switch (@TypeOf(node)) {
Metadata => Allocator.Error,
else => error{},
}!std.fmt.Formatter(format) {
const Node = @TypeOf(node);
const MaybeNode = switch (@typeInfo(Node)) {
.Optional => Node,
.Null => ?noreturn,
else => ?Node,
};
const Some = @typeInfo(MaybeNode).Optional.child;
return .{ .data = .{
.formatter = formatter,
.prefix = prefix,
.node = if (@as(MaybeNode, node)) |some| switch (@typeInfo(Some)) {
.Enum => |enum_info| switch (Some) {
Metadata => switch (some) {
.none => .none,
else => try formatter.refUnwrapped(some.unwrap(formatter.builder)),
},
MetadataString => .{ .string = some },
else => if (enum_info.is_exhaustive)
.{ .raw = @tagName(some) }
else
@compileError("unknown type to format: " ++ @typeName(Node)),
},
.EnumLiteral => .{ .raw = @tagName(some) },
.Bool => .{ .bool = some },
.Struct => switch (Some) {
DIFlags => .{ .di_flags = some },
Subprogram.DISPFlags => .{ .sp_flags = some },
else => @compileError("unknown type to format: " ++ @typeName(Node)),
},
.Int, .ComptimeInt => .{ .u64 = some },
.Pointer => .{ .raw = some },
else => @compileError("unknown type to format: " ++ @typeName(Node)),
} else switch (@typeInfo(Node)) {
.Optional, .Null => .none,
else => unreachable,
},
} };
}
inline fn fmtLocal(
formatter: *Formatter,
prefix: []const u8,
value: Value,
function: Function.Index,
) Allocator.Error!std.fmt.Formatter(format) {
return .{ .data = .{
.formatter = formatter,
.prefix = prefix,
.node = switch (value.unwrap()) {
.instruction, .constant => .{ .local_value = .{
.value = value,
.function = function,
} },
.metadata => |metadata| if (value == .none) .none else node: {
const unwrapped = metadata.unwrap(formatter.builder);
break :node if (@intFromEnum(unwrapped) >= first_local_metadata)
.{ .local_metadata = .{
.value = function.ptrConst(formatter.builder).debug_values[
@intFromEnum(unwrapped) - first_local_metadata
].toValue(),
.function = function,
} }
else switch (try formatter.refUnwrapped(unwrapped)) {
.@"inline" => |node| .{ .local_inline = node },
.index => |node| .{ .local_index = node },
else => unreachable,
};
},
},
} };
}
fn refUnwrapped(formatter: *Formatter, node: Metadata) Allocator.Error!FormatData.Node {
assert(node != .none);
assert(@intFromEnum(node) < first_forward_reference);
const builder = formatter.builder;
const unwrapped_metadata = node.unwrap(builder);
const tag = formatter.builder.metadata_items.items(.tag)[@intFromEnum(unwrapped_metadata)];
switch (tag) {
.none => unreachable,
.expression, .constant => return .{ .@"inline" = unwrapped_metadata },
else => {
assert(!tag.isInline());
const gop = try formatter.map.getOrPutValue(builder.gpa, unwrapped_metadata, {});
return .{ .index = @intCast(gop.index) };
},
}
}
 
inline fn specialized(
formatter: *Formatter,
distinct: enum { @"!", @"distinct !" },
node: enum {
DIFile,
DICompileUnit,
DISubprogram,
DILexicalBlock,
DILocation,
DIBasicType,
DICompositeType,
DIDerivedType,
DISubroutineType,
DIEnumerator,
DISubrange,
DILocalVariable,
DIGlobalVariable,
DIGlobalVariableExpression,
},
nodes: anytype,
writer: anytype,
) !void {
comptime var fmt_str: []const u8 = "";
const names = comptime std.meta.fieldNames(@TypeOf(nodes));
comptime var fields: [2 + names.len]std.builtin.Type.StructField = undefined;
inline for (fields[0..2], .{ "distinct", "node" }) |*field, name| {
fmt_str = fmt_str ++ "{[" ++ name ++ "]s}";
field.* = .{
.name = name,
.type = []const u8,
.default_value = null,
.is_comptime = false,
.alignment = 0,
};
}
fmt_str = fmt_str ++ "(";
inline for (fields[2..], names) |*field, name| {
fmt_str = fmt_str ++ "{[" ++ name ++ "]S}";
field.* = .{
.name = name,
.type = std.fmt.Formatter(format),
.default_value = null,
.is_comptime = false,
.alignment = 0,
};
}
fmt_str = fmt_str ++ ")\n";
 
var fmt_args: @Type(.{ .Struct = .{
.layout = .Auto,
.fields = &fields,
.decls = &.{},
.is_tuple = false,
} }) = undefined;
fmt_args.distinct = @tagName(distinct);
fmt_args.node = @tagName(node);
inline for (names) |name| @field(fmt_args, name) = try formatter.fmt(
name ++ ": ",
@field(nodes, name),
);
try writer.print(fmt_str, fmt_args);
}
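// `specialized` assembles a comptime format string plus an anonymous struct of
// field formatters, so each call prints one `!DI...(field: value, ...)` node
// on its own line while reusing the Formatter's comma and reference tracking.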
};
};
 
pub fn init(options: Options) Allocator.Error!Builder {
var self = Builder{
.gpa = options.allocator,
.use_lib_llvm = options.use_lib_llvm,
.strip = options.strip,
 
.llvm = undefined,
 
.source_filename = .none,
.data_layout = .none,
.target_triple = .none,
@@ -7980,6 +8287,8 @@ pub fn init(options: Options) InitError!Builder {
.attributes_indices = .{},
.attributes_extra = .{},
 
.function_attributes_set = .{},
 
.globals = .{},
.next_unnamed_global = @enumFromInt(0),
.next_replaced_global = .none,
@@ -7992,19 +8301,16 @@ pub fn init(options: Options) InitError!Builder {
.constant_items = .{},
.constant_extra = .{},
.constant_limbs = .{},
};
if (self.useLibLlvm()) self.llvm = .{
.context = llvm.Context.create(),
.module = null,
.target = null,
.di_builder = null,
.di_compile_unit = null,
.attribute_kind_ids = null,
.attributes = .{},
.types = .{},
.globals = .{},
.constants = .{},
.replacements = .{},
 
.metadata_map = .{},
.metadata_items = .{},
.metadata_extra = .{},
.metadata_limbs = .{},
.metadata_forward_references = .{},
.metadata_named = .{},
.metadata_string_map = .{},
.metadata_string_indices = .{},
.metadata_string_bytes = .{},
};
errdefer self.deinit();
 
@@ -8012,51 +8318,20 @@ pub fn init(options: Options) InitError!Builder {
assert(try self.string("") == .empty);
 
if (options.name.len > 0) self.source_filename = try self.string(options.name);
if (self.useLibLlvm()) {
initializeLLVMTarget(options.target.cpu.arch);
self.llvm.module = llvm.Module.createWithName(
(self.source_filename.slice(&self) orelse ""),
self.llvm.context,
);
}
 
if (options.triple.len > 0) {
self.target_triple = try self.string(options.triple);
 
if (self.useLibLlvm()) {
var error_message: [*:0]const u8 = undefined;
var target: *llvm.Target = undefined;
if (llvm.Target.getFromTriple(
self.target_triple.slice(&self).?,
&target,
&error_message,
).toBool()) {
defer llvm.disposeMessage(error_message);
 
log.err("LLVM failed to parse '{s}': {s}", .{
self.target_triple.slice(&self).?,
error_message,
});
return InitError.InvalidLlvmTriple;
}
self.llvm.target = target;
self.llvm.module.?.setTarget(self.target_triple.slice(&self).?);
}
}
 
{
const static_len = @typeInfo(Type).Enum.fields.len - 1;
try self.type_map.ensureTotalCapacity(self.gpa, static_len);
try self.type_items.ensureTotalCapacity(self.gpa, static_len);
if (self.useLibLlvm()) try self.llvm.types.ensureTotalCapacity(self.gpa, static_len);
inline for (@typeInfo(Type.Simple).Enum.fields) |simple_field| {
const result = self.getOrPutTypeNoExtraAssumeCapacity(
.{ .tag = .simple, .data = simple_field.value },
);
assert(result.new and result.type == @field(Type, simple_field.name));
if (self.useLibLlvm()) self.llvm.types.appendAssumeCapacity(
@field(llvm.Context, simple_field.name ++ "Type")(self.llvm.context),
);
}
inline for (.{ 1, 8, 16, 29, 32, 64, 80, 128 }) |bits|
assert(self.intTypeAssumeCapacity(bits) ==
@@ -8069,10 +8344,6 @@ pub fn init(options: Options) InitError!Builder {
}
 
{
if (self.useLibLlvm()) {
self.llvm.attribute_kind_ids = try self.gpa.create([Attribute.Kind.len]c_uint);
@memset(self.llvm.attribute_kind_ids.?, 0);
}
try self.attributes_indices.append(self.gpa, 0);
assert(try self.attrs(&.{}) == .none);
assert(try self.fnAttrs(&.{}) == .none);
@@ -8080,26 +8351,18 @@ pub fn init(options: Options) InitError!Builder {
 
assert(try self.intConst(.i1, 0) == .false);
assert(try self.intConst(.i1, 1) == .true);
assert(try self.intConst(.i32, 0) == .@"0");
assert(try self.intConst(.i32, 1) == .@"1");
assert(try self.noneConst(.token) == .none);
if (!self.strip) assert(try self.debugNone() == .none);
 
try self.metadata_string_indices.append(self.gpa, 0);
assert(try self.metadataString("") == .none);
 
return self;
}
 
pub fn deinit(self: *Builder) void {
if (self.useLibLlvm()) {
var replacement_it = self.llvm.replacements.keyIterator();
while (replacement_it.next()) |replacement| replacement.*.deleteGlobalValue();
self.llvm.replacements.deinit(self.gpa);
self.llvm.constants.deinit(self.gpa);
self.llvm.globals.deinit(self.gpa);
self.llvm.types.deinit(self.gpa);
self.llvm.attributes.deinit(self.gpa);
if (self.llvm.attribute_kind_ids) |attribute_kind_ids| self.gpa.destroy(attribute_kind_ids);
if (self.llvm.di_builder) |di_builder| di_builder.dispose();
if (self.llvm.module) |module| module.dispose();
self.llvm.context.dispose();
}
 
self.module_asm.deinit(self.gpa);
 
self.string_map.deinit(self.gpa);
@@ -8117,6 +8380,8 @@ pub fn deinit(self: *Builder) void {
self.attributes_indices.deinit(self.gpa);
self.attributes_extra.deinit(self.gpa);
 
self.function_attributes_set.deinit(self.gpa);
 
self.globals.deinit(self.gpa);
self.next_unique_global_id.deinit(self.gpa);
self.aliases.deinit(self.gpa);
@@ -8129,201 +8394,20 @@ pub fn deinit(self: *Builder) void {
self.constant_extra.deinit(self.gpa);
self.constant_limbs.deinit(self.gpa);
 
self.metadata_map.deinit(self.gpa);
self.metadata_items.deinit(self.gpa);
self.metadata_extra.deinit(self.gpa);
self.metadata_limbs.deinit(self.gpa);
self.metadata_forward_references.deinit(self.gpa);
self.metadata_named.deinit(self.gpa);
 
self.metadata_string_map.deinit(self.gpa);
self.metadata_string_indices.deinit(self.gpa);
self.metadata_string_bytes.deinit(self.gpa);
 
self.* = undefined;
}
 
pub fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
switch (arch) {
.aarch64, .aarch64_be, .aarch64_32 => {
llvm.LLVMInitializeAArch64Target();
llvm.LLVMInitializeAArch64TargetInfo();
llvm.LLVMInitializeAArch64TargetMC();
llvm.LLVMInitializeAArch64AsmPrinter();
llvm.LLVMInitializeAArch64AsmParser();
},
.amdgcn => {
llvm.LLVMInitializeAMDGPUTarget();
llvm.LLVMInitializeAMDGPUTargetInfo();
llvm.LLVMInitializeAMDGPUTargetMC();
llvm.LLVMInitializeAMDGPUAsmPrinter();
llvm.LLVMInitializeAMDGPUAsmParser();
},
.thumb, .thumbeb, .arm, .armeb => {
llvm.LLVMInitializeARMTarget();
llvm.LLVMInitializeARMTargetInfo();
llvm.LLVMInitializeARMTargetMC();
llvm.LLVMInitializeARMAsmPrinter();
llvm.LLVMInitializeARMAsmParser();
},
.avr => {
llvm.LLVMInitializeAVRTarget();
llvm.LLVMInitializeAVRTargetInfo();
llvm.LLVMInitializeAVRTargetMC();
llvm.LLVMInitializeAVRAsmPrinter();
llvm.LLVMInitializeAVRAsmParser();
},
.bpfel, .bpfeb => {
llvm.LLVMInitializeBPFTarget();
llvm.LLVMInitializeBPFTargetInfo();
llvm.LLVMInitializeBPFTargetMC();
llvm.LLVMInitializeBPFAsmPrinter();
llvm.LLVMInitializeBPFAsmParser();
},
.hexagon => {
llvm.LLVMInitializeHexagonTarget();
llvm.LLVMInitializeHexagonTargetInfo();
llvm.LLVMInitializeHexagonTargetMC();
llvm.LLVMInitializeHexagonAsmPrinter();
llvm.LLVMInitializeHexagonAsmParser();
},
.lanai => {
llvm.LLVMInitializeLanaiTarget();
llvm.LLVMInitializeLanaiTargetInfo();
llvm.LLVMInitializeLanaiTargetMC();
llvm.LLVMInitializeLanaiAsmPrinter();
llvm.LLVMInitializeLanaiAsmParser();
},
.mips, .mipsel, .mips64, .mips64el => {
llvm.LLVMInitializeMipsTarget();
llvm.LLVMInitializeMipsTargetInfo();
llvm.LLVMInitializeMipsTargetMC();
llvm.LLVMInitializeMipsAsmPrinter();
llvm.LLVMInitializeMipsAsmParser();
},
.msp430 => {
llvm.LLVMInitializeMSP430Target();
llvm.LLVMInitializeMSP430TargetInfo();
llvm.LLVMInitializeMSP430TargetMC();
llvm.LLVMInitializeMSP430AsmPrinter();
llvm.LLVMInitializeMSP430AsmParser();
},
.nvptx, .nvptx64 => {
llvm.LLVMInitializeNVPTXTarget();
llvm.LLVMInitializeNVPTXTargetInfo();
llvm.LLVMInitializeNVPTXTargetMC();
llvm.LLVMInitializeNVPTXAsmPrinter();
// There is no LLVMInitializeNVPTXAsmParser function available.
},
.powerpc, .powerpcle, .powerpc64, .powerpc64le => {
llvm.LLVMInitializePowerPCTarget();
llvm.LLVMInitializePowerPCTargetInfo();
llvm.LLVMInitializePowerPCTargetMC();
llvm.LLVMInitializePowerPCAsmPrinter();
llvm.LLVMInitializePowerPCAsmParser();
},
.riscv32, .riscv64 => {
llvm.LLVMInitializeRISCVTarget();
llvm.LLVMInitializeRISCVTargetInfo();
llvm.LLVMInitializeRISCVTargetMC();
llvm.LLVMInitializeRISCVAsmPrinter();
llvm.LLVMInitializeRISCVAsmParser();
},
.sparc, .sparc64, .sparcel => {
llvm.LLVMInitializeSparcTarget();
llvm.LLVMInitializeSparcTargetInfo();
llvm.LLVMInitializeSparcTargetMC();
llvm.LLVMInitializeSparcAsmPrinter();
llvm.LLVMInitializeSparcAsmParser();
},
.s390x => {
llvm.LLVMInitializeSystemZTarget();
llvm.LLVMInitializeSystemZTargetInfo();
llvm.LLVMInitializeSystemZTargetMC();
llvm.LLVMInitializeSystemZAsmPrinter();
llvm.LLVMInitializeSystemZAsmParser();
},
.wasm32, .wasm64 => {
llvm.LLVMInitializeWebAssemblyTarget();
llvm.LLVMInitializeWebAssemblyTargetInfo();
llvm.LLVMInitializeWebAssemblyTargetMC();
llvm.LLVMInitializeWebAssemblyAsmPrinter();
llvm.LLVMInitializeWebAssemblyAsmParser();
},
.x86, .x86_64 => {
llvm.LLVMInitializeX86Target();
llvm.LLVMInitializeX86TargetInfo();
llvm.LLVMInitializeX86TargetMC();
llvm.LLVMInitializeX86AsmPrinter();
llvm.LLVMInitializeX86AsmParser();
},
.xtensa => {
if (build_options.llvm_has_xtensa) {
llvm.LLVMInitializeXtensaTarget();
llvm.LLVMInitializeXtensaTargetInfo();
llvm.LLVMInitializeXtensaTargetMC();
// There is no LLVMInitializeXtensaAsmPrinter function.
llvm.LLVMInitializeXtensaAsmParser();
}
},
.xcore => {
llvm.LLVMInitializeXCoreTarget();
llvm.LLVMInitializeXCoreTargetInfo();
llvm.LLVMInitializeXCoreTargetMC();
llvm.LLVMInitializeXCoreAsmPrinter();
// There is no LLVMInitializeXCoreAsmParser function.
},
.m68k => {
if (build_options.llvm_has_m68k) {
llvm.LLVMInitializeM68kTarget();
llvm.LLVMInitializeM68kTargetInfo();
llvm.LLVMInitializeM68kTargetMC();
llvm.LLVMInitializeM68kAsmPrinter();
llvm.LLVMInitializeM68kAsmParser();
}
},
.csky => {
if (build_options.llvm_has_csky) {
llvm.LLVMInitializeCSKYTarget();
llvm.LLVMInitializeCSKYTargetInfo();
llvm.LLVMInitializeCSKYTargetMC();
// There is no LLVMInitializeCSKYAsmPrinter function.
llvm.LLVMInitializeCSKYAsmParser();
}
},
.ve => {
llvm.LLVMInitializeVETarget();
llvm.LLVMInitializeVETargetInfo();
llvm.LLVMInitializeVETargetMC();
llvm.LLVMInitializeVEAsmPrinter();
llvm.LLVMInitializeVEAsmParser();
},
.arc => {
if (build_options.llvm_has_arc) {
llvm.LLVMInitializeARCTarget();
llvm.LLVMInitializeARCTargetInfo();
llvm.LLVMInitializeARCTargetMC();
llvm.LLVMInitializeARCAsmPrinter();
// There is no LLVMInitializeARCAsmParser function.
}
},
 
// LLVM backends that have no initialization functions.
.tce,
.tcele,
.r600,
.le32,
.le64,
.amdil,
.amdil64,
.hsail,
.hsail64,
.shave,
.spir,
.spir64,
.kalimba,
.renderscript32,
.renderscript64,
.dxil,
.loongarch32,
.loongarch64,
=> {},
 
.spu_2 => unreachable, // LLVM does not support this backend
.spirv32 => unreachable, // LLVM does not support this backend
.spirv64 => unreachable, // LLVM does not support this backend
}
}
 
pub fn setModuleAsm(self: *Builder) std.ArrayListUnmanaged(u8).Writer {
self.module_asm.clearRetainingCapacity();
return self.appendModuleAsm();
@@ -8336,24 +8420,25 @@ pub fn appendModuleAsm(self: *Builder) std.ArrayListUnmanaged(u8).Writer {
pub fn finishModuleAsm(self: *Builder) Allocator.Error!void {
if (self.module_asm.getLastOrNull()) |last| if (last != '\n')
try self.module_asm.append(self.gpa, '\n');
if (self.useLibLlvm())
self.llvm.module.?.setModuleInlineAsm(self.module_asm.items.ptr, self.module_asm.items.len);
}
 
pub fn string(self: *Builder, bytes: []const u8) Allocator.Error!String {
try self.string_bytes.ensureUnusedCapacity(self.gpa, bytes.len + 1);
try self.string_bytes.ensureUnusedCapacity(self.gpa, bytes.len);
try self.string_indices.ensureUnusedCapacity(self.gpa, 1);
try self.string_map.ensureUnusedCapacity(self.gpa, 1);
 
const gop = self.string_map.getOrPutAssumeCapacityAdapted(bytes, String.Adapter{ .builder = self });
if (!gop.found_existing) {
self.string_bytes.appendSliceAssumeCapacity(bytes);
self.string_bytes.appendAssumeCapacity(0);
self.string_indices.appendAssumeCapacity(@intCast(self.string_bytes.items.len));
}
return String.fromIndex(gop.index);
}
 
pub fn stringNull(self: *Builder, bytes: [:0]const u8) Allocator.Error!String {
return self.string(bytes[0 .. bytes.len + 1]);
}
 
pub fn stringIfExists(self: *const Builder, bytes: []const u8) ?String {
return String.fromIndex(
self.string_map.getIndexAdapted(bytes, String.Adapter{ .builder = self }) orelse return null,
@@ -8362,16 +8447,25 @@ pub fn stringIfExists(self: *const Builder, bytes: []const u8) ?String {
 
pub fn fmt(self: *Builder, comptime fmt_str: []const u8, fmt_args: anytype) Allocator.Error!String {
try self.string_map.ensureUnusedCapacity(self.gpa, 1);
try self.string_bytes.ensureUnusedCapacity(self.gpa, @intCast(std.fmt.count(fmt_str ++ .{0}, fmt_args)));
try self.string_bytes.ensureUnusedCapacity(self.gpa, @intCast(std.fmt.count(fmt_str, fmt_args)));
try self.string_indices.ensureUnusedCapacity(self.gpa, 1);
return self.fmtAssumeCapacity(fmt_str, fmt_args);
}
 
pub fn fmtAssumeCapacity(self: *Builder, comptime fmt_str: []const u8, fmt_args: anytype) String {
const start = self.string_bytes.items.len;
self.string_bytes.writer(self.gpa).print(fmt_str ++ .{0}, fmt_args) catch unreachable;
const bytes: []const u8 = self.string_bytes.items[start .. self.string_bytes.items.len - 1];
self.string_bytes.writer(undefined).print(fmt_str, fmt_args) catch unreachable;
return self.trailingStringAssumeCapacity();
}
 
pub fn trailingString(self: *Builder) Allocator.Error!String {
try self.string_indices.ensureUnusedCapacity(self.gpa, 1);
try self.string_map.ensureUnusedCapacity(self.gpa, 1);
return self.trailingStringAssumeCapacity();
}
 
pub fn trailingStringAssumeCapacity(self: *Builder) String {
const start = self.string_indices.getLast();
const bytes: []const u8 = self.string_bytes.items[start..];
const gop = self.string_map.getOrPutAssumeCapacityAdapted(bytes, String.Adapter{ .builder = self });
if (gop.found_existing) {
self.string_bytes.shrinkRetainingCapacity(start);
@@ -8435,7 +8529,7 @@ pub fn structType(
pub fn opaqueType(self: *Builder, name: String) Allocator.Error!Type {
try self.string_map.ensureUnusedCapacity(self.gpa, 1);
if (name.slice(self)) |id| {
const count: usize = comptime std.fmt.count("{d}" ++ .{0}, .{std.math.maxInt(u32)});
const count: usize = comptime std.fmt.count("{d}", .{std.math.maxInt(u32)});
try self.string_bytes.ensureUnusedCapacity(self.gpa, id.len + count);
}
try self.string_indices.ensureUnusedCapacity(self.gpa, 1);
@@ -8449,98 +8543,17 @@ pub fn namedTypeSetBody(
self: *Builder,
named_type: Type,
body_type: Type,
) (if (build_options.have_llvm) Allocator.Error else error{})!void {
) void {
const named_item = self.type_items.items[@intFromEnum(named_type)];
self.type_extra.items[named_item.data + std.meta.fieldIndex(Type.NamedStructure, "body").?] =
@intFromEnum(body_type);
if (self.useLibLlvm()) {
const body_item = self.type_items.items[@intFromEnum(body_type)];
var body_extra = self.typeExtraDataTrail(Type.Structure, body_item.data);
const body_fields = body_extra.trail.next(body_extra.data.fields_len, Type, self);
const llvm_fields = try self.gpa.alloc(*llvm.Type, body_fields.len);
defer self.gpa.free(llvm_fields);
for (llvm_fields, body_fields) |*llvm_field, body_field| llvm_field.* = body_field.toLlvm(self);
self.llvm.types.items[@intFromEnum(named_type)].structSetBody(
llvm_fields.ptr,
@intCast(llvm_fields.len),
switch (body_item.tag) {
.structure => .False,
.packed_structure => .True,
else => unreachable,
},
);
}
}
 
pub fn attr(self: *Builder, attribute: Attribute) Allocator.Error!Attribute.Index {
try self.attributes.ensureUnusedCapacity(self.gpa, 1);
if (self.useLibLlvm()) try self.llvm.attributes.ensureUnusedCapacity(self.gpa, 1);
 
const gop = self.attributes.getOrPutAssumeCapacity(attribute.toStorage());
if (!gop.found_existing) {
gop.value_ptr.* = {};
if (self.useLibLlvm()) self.llvm.attributes.appendAssumeCapacity(switch (attribute) {
else => llvm_attr: {
const llvm_kind_id = attribute.getKind().toLlvm(self);
if (llvm_kind_id.* == 0) {
const name = @tagName(attribute);
llvm_kind_id.* = llvm.getEnumAttributeKindForName(name.ptr, name.len);
assert(llvm_kind_id.* != 0);
}
break :llvm_attr switch (attribute) {
else => switch (attribute) {
inline else => |value| self.llvm.context.createEnumAttribute(
llvm_kind_id.*,
switch (@TypeOf(value)) {
void => 0,
u32 => value,
Attribute.FpClass,
Attribute.AllocKind,
Attribute.Memory,
=> @as(u32, @bitCast(value)),
Alignment => value.toByteUnits() orelse 0,
Attribute.AllocSize,
Attribute.VScaleRange,
=> @bitCast(value.toLlvm()),
Attribute.UwTable => @intFromEnum(value),
else => @compileError(
"bad payload type: " ++ @typeName(@TypeOf(value)),
),
},
),
.byval,
.byref,
.preallocated,
.inalloca,
.sret,
.elementtype,
.string,
.none,
=> unreachable,
},
.byval,
.byref,
.preallocated,
.inalloca,
.sret,
.elementtype,
=> |ty| self.llvm.context.createTypeAttribute(llvm_kind_id.*, ty.toLlvm(self)),
.string, .none => unreachable,
};
},
.string => |string_attr| llvm_attr: {
const kind = string_attr.kind.slice(self).?;
const value = string_attr.value.slice(self).?;
break :llvm_attr self.llvm.context.createStringAttribute(
kind.ptr,
@intCast(kind.len),
value.ptr,
@intCast(value.len),
);
},
.none => unreachable,
});
}
if (!gop.found_existing) gop.value_ptr.* = {};
return @enumFromInt(gop.index);
}
 
@@ -8557,12 +8570,16 @@ pub fn attrs(self: *Builder, attributes: []Attribute.Index) Allocator.Error!Attr
}
 
pub fn fnAttrs(self: *Builder, fn_attributes: []const Attributes) Allocator.Error!FunctionAttributes {
return @enumFromInt(try self.attrGeneric(@ptrCast(
try self.function_attributes_set.ensureUnusedCapacity(self.gpa, 1);
const function_attributes: FunctionAttributes = @enumFromInt(try self.attrGeneric(@ptrCast(
fn_attributes[0..if (std.mem.lastIndexOfNone(Attributes, fn_attributes, &.{.none})) |last|
last + 1
else
0],
)));
 
_ = self.function_attributes_set.getOrPutAssumeCapacity(function_attributes);
return function_attributes;
}
 
pub fn addGlobal(self: *Builder, name: String, global: Global) Allocator.Error!Global.Index {
@@ -8586,7 +8603,6 @@ pub fn addGlobalAssumeCapacity(self: *Builder, name: String, global: Global) Glo
global_gop.value_ptr.* = global;
const global_index: Global.Index = @enumFromInt(global_gop.index);
global_index.updateDsoLocal(self);
global_index.updateName(self);
return global_index;
}
 
@@ -8622,12 +8638,6 @@ pub fn addAliasAssumeCapacity(
addr_space: AddrSpace,
aliasee: Constant,
) Alias.Index {
if (self.useLibLlvm()) self.llvm.globals.appendAssumeCapacity(self.llvm.module.?.addAlias(
ty.toLlvm(self),
@intFromEnum(addr_space),
aliasee.toLlvm(self),
name.slice(self).?,
));
const alias_index: Alias.Index = @enumFromInt(self.aliases.items.len);
self.aliases.appendAssumeCapacity(.{ .global = self.addGlobalAssumeCapacity(name, .{
.addr_space = addr_space,
@@ -8656,13 +8666,6 @@ pub fn addVariableAssumeCapacity(
name: String,
addr_space: AddrSpace,
) Variable.Index {
if (self.useLibLlvm()) self.llvm.globals.appendAssumeCapacity(
self.llvm.module.?.addGlobalInAddressSpace(
ty.toLlvm(self),
name.slice(self).?,
@intFromEnum(addr_space),
),
);
const variable_index: Variable.Index = @enumFromInt(self.variables.items.len);
self.variables.appendAssumeCapacity(.{ .global = self.addGlobalAssumeCapacity(name, .{
.addr_space = addr_space,
@@ -8692,13 +8695,6 @@ pub fn addFunctionAssumeCapacity(
addr_space: AddrSpace,
) Function.Index {
assert(ty.isFunction(self));
if (self.useLibLlvm()) self.llvm.globals.appendAssumeCapacity(
self.llvm.module.?.addFunctionInAddressSpace(
name.slice(self).?,
ty.toLlvm(self),
@intFromEnum(addr_space),
),
);
const function_index: Function.Index = @enumFromInt(self.functions.items.len);
self.functions.appendAssumeCapacity(.{ .global = self.addGlobalAssumeCapacity(name, .{
.addr_space = addr_space,
@@ -8714,7 +8710,6 @@ pub fn getIntrinsic(
overload: []const Type,
) Allocator.Error!Function.Index {
const ExpectedContents = extern union {
name: [expected_intrinsic_name_len]u8,
attrs: extern struct {
params: [expected_args_len]Type,
fn_attrs: [FunctionAttributes.params_index + expected_args_len]Attributes,
@@ -8727,12 +8722,10 @@ pub fn getIntrinsic(
const allocator = stack.get();
 
const name = name: {
var buffer = std.ArrayList(u8).init(allocator);
defer buffer.deinit();
 
try buffer.writer().print("llvm.{s}", .{@tagName(id)});
for (overload) |ty| try buffer.writer().print(".{m}", .{ty.fmt(self)});
break :name try self.string(buffer.items);
const writer = self.string_bytes.writer(self.gpa);
try writer.print("llvm.{s}", .{@tagName(id)});
for (overload) |ty| try writer.print(".{m}", .{ty.fmt(self)});
break :name try self.trailingString();
};
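// The intrinsic name is formatted directly into string_bytes and interned via
// trailingString, avoiding the temporary ArrayList the removed code allocated.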
if (self.getGlobal(name)) |global| return global.ptrConst(self).kind.function;
 
@@ -8826,7 +8819,6 @@ pub fn bigIntConst(self: *Builder, ty: Type, value: std.math.big.int.Const) Allo
try self.constant_map.ensureUnusedCapacity(self.gpa, 1);
try self.constant_items.ensureUnusedCapacity(self.gpa, 1);
try self.constant_limbs.ensureUnusedCapacity(self.gpa, Constant.Integer.limbs + value.limbs.len);
if (self.useLibLlvm()) try self.llvm.constants.ensureUnusedCapacity(self.gpa, 1);
return self.bigIntConstAssumeCapacity(ty, value);
}
 
@@ -8977,16 +8969,6 @@ pub fn stringValue(self: *Builder, val: String) Allocator.Error!Value {
return (try self.stringConst(val)).toValue();
}
 
pub fn stringNullConst(self: *Builder, val: String) Allocator.Error!Constant {
try self.ensureUnusedTypeCapacity(1, Type.Array, 0);
try self.ensureUnusedConstantCapacity(1, NoExtra, 0);
return self.stringNullConstAssumeCapacity(val);
}
 
pub fn stringNullValue(self: *Builder, val: String) Allocator.Error!Value {
return (try self.stringNullConst(val)).toValue();
}
 
pub fn vectorConst(self: *Builder, ty: Type, vals: []const Constant) Allocator.Error!Constant {
try self.ensureUnusedConstantCapacity(1, Constant.Aggregate, vals.len);
return self.vectorConstAssumeCapacity(ty, vals);
@@ -9244,72 +9226,20 @@ pub fn asmValue(
return (try self.asmConst(ty, info, assembly, constraints)).toValue();
}
 
pub fn verify(self: *Builder) error{}!bool {
if (self.useLibLlvm()) {
var error_message: [*:0]const u8 = undefined;
// verifyModule always allocs the error_message even if there is no error
defer llvm.disposeMessage(error_message);
 
if (self.llvm.module.?.verify(.ReturnStatus, &error_message).toBool()) {
log.err("failed verification of LLVM module:\n{s}\n", .{error_message});
return false;
}
}
return true;
}
 
pub fn writeBitcodeToFile(self: *Builder, path: []const u8) Allocator.Error!bool {
const path_z = try self.gpa.dupeZ(u8, path);
defer self.gpa.free(path_z);
return self.writeBitcodeToFileZ(path_z);
}
 
pub fn writeBitcodeToFileZ(self: *Builder, path: [*:0]const u8) bool {
if (self.useLibLlvm()) {
const error_code = self.llvm.module.?.writeBitcodeToFile(path);
if (error_code != 0) {
log.err("failed dumping LLVM module to \"{s}\": {d}", .{ path, error_code });
return false;
}
} else {
log.err("writing bitcode without libllvm not implemented", .{});
return false;
}
return true;
}
 
pub fn dump(self: *Builder) void {
if (self.useLibLlvm())
self.llvm.module.?.dump()
else
self.print(std.io.getStdErr().writer()) catch {};
self.print(std.io.getStdErr().writer()) catch {};
}
 
pub fn printToFile(self: *Builder, path: []const u8) Allocator.Error!bool {
const path_z = try self.gpa.dupeZ(u8, path);
defer self.gpa.free(path_z);
return self.printToFileZ(path_z);
}
 
pub fn printToFileZ(self: *Builder, path: [*:0]const u8) bool {
if (self.useLibLlvm()) {
var error_message: [*:0]const u8 = undefined;
if (self.llvm.module.?.printModuleToFile(path, &error_message).toBool()) {
defer llvm.disposeMessage(error_message);
log.err("failed printing LLVM module to \"{s}\": {s}", .{ path, error_message });
return false;
}
} else {
var file = std.fs.cwd().createFileZ(path, .{}) catch |err| {
log.err("failed printing LLVM module to \"{s}\": {s}", .{ path, @errorName(err) });
return false;
};
defer file.close();
self.print(file.writer()) catch |err| {
log.err("failed printing LLVM module to \"{s}\": {s}", .{ path, @errorName(err) });
return false;
};
}
var file = std.fs.cwd().createFile(path, .{}) catch |err| {
log.err("failed printing LLVM module to \"{s}\": {s}", .{ path, @errorName(err) });
return false;
};
defer file.close();
self.print(file.writer()) catch |err| {
log.err("failed printing LLVM module to \"{s}\": {s}", .{ path, @errorName(err) });
return false;
};
return true;
}
 
@@ -9324,9 +9254,11 @@ pub fn printUnbuffered(
writer: anytype,
) (@TypeOf(writer).Error || Allocator.Error)!void {
var need_newline = false;
var metadata_formatter: Metadata.Formatter = .{ .builder = self, .need_comma = undefined };
defer metadata_formatter.map.deinit(self.gpa);
 
if (self.source_filename != .none or self.data_layout != .none or self.target_triple != .none) {
if (need_newline) try writer.writeByte('\n');
if (need_newline) try writer.writeByte('\n') else need_newline = true;
if (self.source_filename != .none) try writer.print(
\\; ModuleID = '{s}'
\\source_filename = {"}
@@ -9340,40 +9272,40 @@ pub fn printUnbuffered(
\\target triple = {"}
\\
, .{self.target_triple.fmt(self)});
need_newline = true;
}
 
if (self.module_asm.items.len > 0) {
if (need_newline) try writer.writeByte('\n');
if (need_newline) try writer.writeByte('\n') else need_newline = true;
var line_it = std.mem.tokenizeScalar(u8, self.module_asm.items, '\n');
while (line_it.next()) |line| {
try writer.writeAll("module asm ");
try printEscapedString(line, .always_quote, writer);
try writer.writeByte('\n');
}
need_newline = true;
}
 
if (self.types.count() > 0) {
if (need_newline) try writer.writeByte('\n');
if (need_newline) try writer.writeByte('\n') else need_newline = true;
for (self.types.keys(), self.types.values()) |id, ty| try writer.print(
\\%{} = type {}
\\
, .{ id.fmt(self), ty.fmt(self) });
need_newline = true;
}
 
if (self.variables.items.len > 0) {
if (need_newline) try writer.writeByte('\n');
if (need_newline) try writer.writeByte('\n') else need_newline = true;
for (self.variables.items) |variable| {
if (variable.global.getReplacement(self) != .none) continue;
const global = variable.global.ptrConst(self);
metadata_formatter.need_comma = true;
defer metadata_formatter.need_comma = undefined;
try writer.print(
\\{} ={}{}{}{}{ }{}{ }{} {s} {%}{ }{, }
\\{} ={}{}{}{}{ }{}{ }{} {s} {%}{ }{, }{}
\\
, .{
variable.global.fmt(self),
global.linkage,
Linkage.fmtOptional(if (global.linkage == .external and
variable.init != .no_init) null else global.linkage),
global.preemption,
global.visibility,
global.dll_storage_class,
@@ -9385,18 +9317,20 @@ pub fn printUnbuffered(
global.type.fmt(self),
variable.init.fmt(self),
variable.alignment,
try metadata_formatter.fmt("!dbg ", global.dbg),
});
}
need_newline = true;
}
 
if (self.aliases.items.len > 0) {
if (need_newline) try writer.writeByte('\n');
if (need_newline) try writer.writeByte('\n') else need_newline = true;
for (self.aliases.items) |alias| {
if (alias.global.getReplacement(self) != .none) continue;
const global = alias.global.ptrConst(self);
metadata_formatter.need_comma = true;
defer metadata_formatter.need_comma = undefined;
try writer.print(
\\{} ={}{}{}{}{ }{} alias {%}, {%}
\\{} ={}{}{}{}{ }{} alias {%}, {%}{}
\\
, .{
alias.global.fmt(self),
@@ -9408,9 +9342,9 @@ pub fn printUnbuffered(
global.unnamed_addr,
global.type.fmt(self),
alias.aliasee.fmt(self),
try metadata_formatter.fmt("!dbg ", global.dbg),
});
}
need_newline = true;
}
 
var attribute_groups: std.AutoArrayHashMapUnmanaged(Attributes, void) = .{};
@@ -9418,7 +9352,7 @@ pub fn printUnbuffered(
 
for (0.., self.functions.items) |function_i, function| {
if (function.global.getReplacement(self) != .none) continue;
if (need_newline) try writer.writeByte('\n');
if (need_newline) try writer.writeByte('\n') else need_newline = true;
const function_index: Function.Index = @enumFromInt(function_i);
const global = function.global.ptrConst(self);
const params_len = global.type.functionParameters(self).len;
@@ -9464,13 +9398,23 @@ pub fn printUnbuffered(
if (function_attributes != .none) try writer.print(" #{d}", .{
(try attribute_groups.getOrPutValue(self.gpa, function_attributes, {})).index,
});
try writer.print("{ }", .{function.alignment});
{
metadata_formatter.need_comma = false;
defer metadata_formatter.need_comma = undefined;
try writer.print("{ }{}", .{
function.alignment,
try metadata_formatter.fmt(" !dbg ", global.dbg),
});
}
if (function.instructions.len > 0) {
var block_incoming_len: u32 = undefined;
try writer.writeAll(" {\n");
var dbg: Metadata = .none;
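// Tracks the most recent debug location seen while walking the instructions; it
// is appended as `!dbg` metadata after each printed instruction and carries
// forward until a new location replaces it.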
for (params_len..function.instructions.len) |instruction_i| {
const instruction_index: Function.Instruction.Index = @enumFromInt(instruction_i);
const instruction = function.instructions.get(@intFromEnum(instruction_index));
if (function.debug_locations.get(instruction_index)) |debug_location|
dbg = debug_location;
switch (instruction.tag) {
.add,
.@"add nsw",
@@ -9555,7 +9499,7 @@ pub fn printUnbuffered(
.xor,
=> |tag| {
const extra = function.extraData(Function.Instruction.Binary, instruction.data);
try writer.print(" %{} = {s} {%}, {}\n", .{
try writer.print(" %{} = {s} {%}, {}", .{
instruction_index.name(&function).fmt(self),
@tagName(tag),
extra.lhs.fmt(function_index, self),
@@ -9577,7 +9521,7 @@ pub fn printUnbuffered(
.zext,
=> |tag| {
const extra = function.extraData(Function.Instruction.Cast, instruction.data);
try writer.print(" %{} = {s} {%} to {%}\n", .{
try writer.print(" %{} = {s} {%} to {%}", .{
instruction_index.name(&function).fmt(self),
@tagName(tag),
extra.val.fmt(function_index, self),
@@ -9588,11 +9532,14 @@ pub fn printUnbuffered(
.@"alloca inalloca",
=> |tag| {
const extra = function.extraData(Function.Instruction.Alloca, instruction.data);
try writer.print(" %{} = {s} {%}{,%}{, }{, }\n", .{
try writer.print(" %{} = {s} {%}{,%}{, }{, }", .{
instruction_index.name(&function).fmt(self),
@tagName(tag),
extra.type.fmt(self),
extra.len.fmt(function_index, self),
Value.fmt(switch (extra.len) {
.@"1" => .none,
else => extra.len,
}, function_index, self),
extra.info.alignment,
extra.info.addr_space,
});
@@ -9601,7 +9548,7 @@ pub fn printUnbuffered(
.atomicrmw => |tag| {
const extra =
function.extraData(Function.Instruction.AtomicRmw, instruction.data);
try writer.print(" %{} = {s}{ } {s} {%}, {%}{ }{ }{, }\n", .{
try writer.print(" %{} = {s}{ } {s} {%}, {%}{ }{ }{, }", .{
instruction_index.name(&function).fmt(self),
@tagName(tag),
extra.info.access_kind,
@@ -9619,16 +9566,17 @@ pub fn printUnbuffered(
if (@intFromEnum(instruction_index) > params_len)
try writer.writeByte('\n');
try writer.print("{}:\n", .{name.fmt(self)});
continue;
},
.br => |tag| {
const target: Function.Block.Index = @enumFromInt(instruction.data);
try writer.print(" {s} {%}\n", .{
try writer.print(" {s} {%}", .{
@tagName(tag), target.toInst(&function).fmt(function_index, self),
});
},
.br_cond => {
const extra = function.extraData(Function.Instruction.BrCond, instruction.data);
try writer.print(" br {%}, {%}, {%}\n", .{
try writer.print(" br {%}, {%}, {%}", .{
extra.cond.fmt(function_index, self),
extra.then.toInst(&function).fmt(function_index, self),
extra.@"else".toInst(&function).fmt(function_index, self),
@@ -9668,10 +9616,12 @@ pub fn printUnbuffered(
});
for (0.., args) |arg_index, arg| {
if (arg_index > 0) try writer.writeAll(", ");
try writer.print("{%}{} {}", .{
metadata_formatter.need_comma = false;
defer metadata_formatter.need_comma = undefined;
try writer.print("{%}{}{}", .{
arg.typeOf(function_index, self).fmt(self),
extra.data.attributes.param(arg_index, self).fmt(self),
arg.fmt(function_index, self),
try metadata_formatter.fmtLocal(" ", arg, function_index),
});
}
try writer.writeByte(')');
@@ -9683,14 +9633,13 @@ pub fn printUnbuffered(
{},
)).index,
});
try writer.writeByte('\n');
},
.cmpxchg,
.@"cmpxchg weak",
=> |tag| {
const extra =
function.extraData(Function.Instruction.CmpXchg, instruction.data);
try writer.print(" %{} = {s}{ } {%}, {%}, {%}{ }{ }{ }{, }\n", .{
try writer.print(" %{} = {s}{ } {%}, {%}, {%}{ }{ }{ }{, }", .{
instruction_index.name(&function).fmt(self),
@tagName(tag),
extra.info.access_kind,
@@ -9706,7 +9655,7 @@ pub fn printUnbuffered(
.extractelement => |tag| {
const extra =
function.extraData(Function.Instruction.ExtractElement, instruction.data);
try writer.print(" %{} = {s} {%}, {%}\n", .{
try writer.print(" %{} = {s} {%}, {%}", .{
instruction_index.name(&function).fmt(self),
@tagName(tag),
extra.val.fmt(function_index, self),
@@ -9725,7 +9674,6 @@ pub fn printUnbuffered(
extra.data.val.fmt(function_index, self),
});
for (indices) |index| try writer.print(", {d}", .{index});
try writer.writeByte('\n');
},
.fence => |tag| {
const info: MemoryAccessInfo = @bitCast(instruction.data);
@@ -9739,7 +9687,7 @@ pub fn printUnbuffered(
.@"fneg fast",
=> |tag| {
const val: Value = @enumFromInt(instruction.data);
try writer.print(" %{} = {s} {%}\n", .{
try writer.print(" %{} = {s} {%}", .{
instruction_index.name(&function).fmt(self),
@tagName(tag),
val.fmt(function_index, self),
@@ -9762,12 +9710,11 @@ pub fn printUnbuffered(
for (indices) |index| try writer.print(", {%}", .{
index.fmt(function_index, self),
});
try writer.writeByte('\n');
},
.insertelement => |tag| {
const extra =
function.extraData(Function.Instruction.InsertElement, instruction.data);
try writer.print(" %{} = {s} {%}, {%}, {%}\n", .{
try writer.print(" %{} = {s} {%}, {%}, {%}", .{
instruction_index.name(&function).fmt(self),
@tagName(tag),
extra.val.fmt(function_index, self),
@@ -9786,13 +9733,12 @@ pub fn printUnbuffered(
extra.data.elem.fmt(function_index, self),
});
for (indices) |index| try writer.print(", {d}", .{index});
try writer.writeByte('\n');
},
.load,
.@"load atomic",
=> |tag| {
const extra = function.extraData(Function.Instruction.Load, instruction.data);
try writer.print(" %{} = {s}{ } {%}, {%}{ }{ }{, }\n", .{
try writer.print(" %{} = {s}{ } {%}, {%}{ }{ }{, }", .{
instruction_index.name(&function).fmt(self),
@tagName(tag),
extra.info.access_kind,
@@ -9822,23 +9768,22 @@ pub fn printUnbuffered(
incoming_block.toInst(&function).fmt(function_index, self),
});
}
try writer.writeByte('\n');
},
.ret => |tag| {
const val: Value = @enumFromInt(instruction.data);
try writer.print(" {s} {%}\n", .{
try writer.print(" {s} {%}", .{
@tagName(tag),
val.fmt(function_index, self),
});
},
.@"ret void",
.@"unreachable",
=> |tag| try writer.print(" {s}\n", .{@tagName(tag)}),
=> |tag| try writer.print(" {s}", .{@tagName(tag)}),
.select,
.@"select fast",
=> |tag| {
const extra = function.extraData(Function.Instruction.Select, instruction.data);
try writer.print(" %{} = {s} {%}, {%}, {%}\n", .{
try writer.print(" %{} = {s} {%}, {%}, {%}", .{
instruction_index.name(&function).fmt(self),
@tagName(tag),
extra.cond.fmt(function_index, self),
@@ -9849,7 +9794,7 @@ pub fn printUnbuffered(
.shufflevector => |tag| {
const extra =
function.extraData(Function.Instruction.ShuffleVector, instruction.data);
try writer.print(" %{} = {s} {%}, {%}, {%}\n", .{
try writer.print(" %{} = {s} {%}, {%}, {%}", .{
instruction_index.name(&function).fmt(self),
@tagName(tag),
extra.lhs.fmt(function_index, self),
@@ -9861,7 +9806,7 @@ pub fn printUnbuffered(
.@"store atomic",
=> |tag| {
const extra = function.extraData(Function.Instruction.Store, instruction.data);
try writer.print(" {s}{ } {%}, {%}{ }{ }{, }\n", .{
try writer.print(" {s}{ } {%}, {%}{ }{ }{, }", .{
@tagName(tag),
extra.info.access_kind,
extra.val.fmt(function_index, self),
@@ -9889,11 +9834,11 @@ pub fn printUnbuffered(
case_block.toInst(&function).fmt(function_index, self),
},
);
try writer.writeAll(" ]\n");
try writer.writeAll(" ]");
},
.va_arg => |tag| {
const extra = function.extraData(Function.Instruction.VaArg, instruction.data);
try writer.print(" %{} = {s} {%}, {%}\n", .{
try writer.print(" %{} = {s} {%}, {%}", .{
instruction_index.name(&function).fmt(self),
@tagName(tag),
extra.list.fmt(function_index, self),
@@ -9901,11 +9846,13 @@ pub fn printUnbuffered(
});
},
}
metadata_formatter.need_comma = true;
defer metadata_formatter.need_comma = undefined;
try writer.print("{}\n", .{try metadata_formatter.fmt("!dbg ", dbg)});
}
try writer.writeByte('}');
}
try writer.writeByte('\n');
need_newline = true;
}
 
if (attribute_groups.count() > 0) {
@@ -9915,12 +9862,375 @@ pub fn printUnbuffered(
\\attributes #{d} = {{{#"} }}
\\
, .{ attribute_group_index, attribute_group.fmt(self) });
need_newline = true;
}
}
 
pub inline fn useLibLlvm(self: *const Builder) bool {
return build_options.have_llvm and self.use_lib_llvm;
if (self.metadata_named.count() > 0) {
if (need_newline) try writer.writeByte('\n') else need_newline = true;
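// Named metadata (e.g. `!llvm.dbg.cu`, `!llvm.module.flags`) prints as
// `!name = !{...}`; each operand goes through the metadata formatter so that
// any referenced nodes get numbered for the node list printed below.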
for (self.metadata_named.keys(), self.metadata_named.values()) |name, data| {
const elements: []const Metadata =
@ptrCast(self.metadata_extra.items[data.index..][0..data.len]);
try writer.writeByte('!');
try printEscapedString(name.slice(self), .quote_unless_valid_identifier, writer);
try writer.writeAll(" = !{");
metadata_formatter.need_comma = false;
defer metadata_formatter.need_comma = undefined;
for (elements) |element| try writer.print("{}", .{try metadata_formatter.fmt("", element)});
try writer.writeAll("}\n");
}
}
 
if (metadata_formatter.map.count() > 0) {
if (need_newline) try writer.writeByte('\n') else need_newline = true;
var metadata_index: usize = 0;
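// Formatting a node can append newly referenced nodes to `metadata_formatter.map`,
// so the count is re-read on every iteration instead of being captured once up front.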
while (metadata_index < metadata_formatter.map.count()) : (metadata_index += 1) {
@setEvalBranchQuota(10_000);
const metadata_item =
self.metadata_items.get(@intFromEnum(metadata_formatter.map.keys()[metadata_index]));
try writer.print("!{} = ", .{metadata_index});
metadata_formatter.need_comma = false;
defer metadata_formatter.need_comma = undefined;
switch (metadata_item.tag) {
.none, .expression, .constant => unreachable,
.file => {
const extra = self.metadataExtraData(Metadata.File, metadata_item.data);
try metadata_formatter.specialized(.@"!", .DIFile, .{
.filename = extra.filename,
.directory = extra.directory,
.checksumkind = null,
.checksum = null,
.source = null,
}, writer);
},
.compile_unit,
.@"compile_unit optimized",
=> |kind| {
const extra = self.metadataExtraData(Metadata.CompileUnit, metadata_item.data);
try metadata_formatter.specialized(.@"distinct !", .DICompileUnit, .{
.language = .DW_LANG_C99,
.file = extra.file,
.producer = extra.producer,
.isOptimized = switch (kind) {
.compile_unit => false,
.@"compile_unit optimized" => true,
else => unreachable,
},
.flags = null,
.runtimeVersion = 0,
.splitDebugFilename = null,
.emissionKind = .FullDebug,
.enums = extra.enums,
.retainedTypes = null,
.globals = extra.globals,
.imports = null,
.macros = null,
.dwoId = null,
.splitDebugInlining = false,
.debugInfoForProfiling = null,
.nameTableKind = null,
.rangesBaseAddress = null,
.sysroot = null,
.sdk = null,
}, writer);
},
.subprogram,
.@"subprogram local",
.@"subprogram definition",
.@"subprogram local definition",
.@"subprogram optimized",
.@"subprogram optimized local",
.@"subprogram optimized definition",
.@"subprogram optimized local definition",
=> |kind| {
const extra = self.metadataExtraData(Metadata.Subprogram, metadata_item.data);
try metadata_formatter.specialized(.@"distinct !", .DISubprogram, .{
.name = extra.name,
.linkageName = extra.linkage_name,
.scope = extra.file,
.file = extra.file,
.line = extra.line,
.type = extra.ty,
.scopeLine = extra.scope_line,
.containingType = null,
.virtualIndex = null,
.thisAdjustment = null,
.flags = extra.di_flags,
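// The subprogram tags are ordered so that the offset from `.subprogram` encodes
// the local (bit 0), definition (bit 1), and optimized (bit 2) variants; shifting
// that 3-bit value left by 2 appears to line it up with the corresponding
// DISPFlags bits (LocalToUnit, Definition, Optimized).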
.spFlags = @as(Metadata.Subprogram.DISPFlags, @bitCast(@as(u32, @as(u3, @intCast(
@intFromEnum(kind) - @intFromEnum(Metadata.Tag.subprogram),
))) << 2)),
.unit = extra.compile_unit,
.templateParams = null,
.declaration = null,
.retainedNodes = null,
.thrownTypes = null,
.annotations = null,
.targetFuncName = null,
}, writer);
},
.lexical_block => {
const extra = self.metadataExtraData(Metadata.LexicalBlock, metadata_item.data);
try metadata_formatter.specialized(.@"distinct !", .DILexicalBlock, .{
.scope = extra.scope,
.file = extra.file,
.line = extra.line,
.column = extra.column,
}, writer);
},
.location => {
const extra = self.metadataExtraData(Metadata.Location, metadata_item.data);
try metadata_formatter.specialized(.@"!", .DILocation, .{
.line = extra.line,
.column = extra.column,
.scope = extra.scope,
.inlinedAt = extra.inlined_at,
.isImplicitCode = false,
}, writer);
},
.basic_bool_type,
.basic_unsigned_type,
.basic_signed_type,
.basic_float_type,
=> |kind| {
const extra = self.metadataExtraData(Metadata.BasicType, metadata_item.data);
try metadata_formatter.specialized(.@"!", .DIBasicType, .{
.tag = null,
.name = switch (extra.name) {
.none => null,
else => extra.name,
},
.size = extra.bitSize(),
.@"align" = null,
.encoding = @as(enum {
DW_ATE_boolean,
DW_ATE_unsigned,
DW_ATE_signed,
DW_ATE_float,
}, switch (kind) {
.basic_bool_type => .DW_ATE_boolean,
.basic_unsigned_type => .DW_ATE_unsigned,
.basic_signed_type => .DW_ATE_signed,
.basic_float_type => .DW_ATE_float,
else => unreachable,
}),
.flags = null,
}, writer);
},
.composite_struct_type,
.composite_union_type,
.composite_enumeration_type,
.composite_array_type,
.composite_vector_type,
=> |kind| {
const extra = self.metadataExtraData(Metadata.CompositeType, metadata_item.data);
try metadata_formatter.specialized(.@"!", .DICompositeType, .{
.tag = @as(enum {
DW_TAG_structure_type,
DW_TAG_union_type,
DW_TAG_enumeration_type,
DW_TAG_array_type,
}, switch (kind) {
.composite_struct_type => .DW_TAG_structure_type,
.composite_union_type => .DW_TAG_union_type,
.composite_enumeration_type => .DW_TAG_enumeration_type,
.composite_array_type, .composite_vector_type => .DW_TAG_array_type,
else => unreachable,
}),
.name = switch (extra.name) {
.none => null,
else => extra.name,
},
.scope = extra.scope,
.file = null,
.line = null,
.baseType = extra.underlying_type,
.size = extra.bitSize(),
.@"align" = extra.bitAlign(),
.offset = null,
.flags = null,
.elements = extra.fields_tuple,
.runtimeLang = null,
.vtableHolder = null,
.templateParams = null,
.identifier = null,
.discriminator = null,
.dataLocation = null,
.associated = null,
.allocated = null,
.rank = null,
.annotations = null,
}, writer);
},
.derived_pointer_type,
.derived_member_type,
=> |kind| {
const extra = self.metadataExtraData(Metadata.DerivedType, metadata_item.data);
try metadata_formatter.specialized(.@"!", .DIDerivedType, .{
.tag = @as(enum {
DW_TAG_pointer_type,
DW_TAG_member,
}, switch (kind) {
.derived_pointer_type => .DW_TAG_pointer_type,
.derived_member_type => .DW_TAG_member,
else => unreachable,
}),
.name = switch (extra.name) {
.none => null,
else => extra.name,
},
.scope = extra.scope,
.file = null,
.line = null,
.baseType = extra.underlying_type,
.size = extra.bitSize(),
.@"align" = extra.bitAlign(),
.offset = switch (extra.bitOffset()) {
0 => null,
else => |bit_offset| bit_offset,
},
.flags = null,
.extraData = null,
.dwarfAddressSpace = null,
.annotations = null,
}, writer);
},
.subroutine_type => {
const extra = self.metadataExtraData(Metadata.SubroutineType, metadata_item.data);
try metadata_formatter.specialized(.@"!", .DISubroutineType, .{
.flags = null,
.cc = null,
.types = extra.types_tuple,
}, writer);
},
.enumerator_unsigned,
.enumerator_signed_positive,
.enumerator_signed_negative,
=> |kind| {
const extra = self.metadataExtraData(Metadata.Enumerator, metadata_item.data);
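// Stack-fallback allocation sized for the common case: room for the decimal
// digits of a value spanning up to 64 bytes of limbs, plus the limb scratch
// space the base-10 conversion needs; anything larger spills to gpa.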
 
const ExpectedContents = extern struct {
string: [(64 * 8 / std.math.log2(10)) + 2]u8,
limbs: [
std.math.big.int.calcToStringLimbsBufferLen(
64 / @sizeOf(std.math.big.Limb),
10,
)
]std.math.big.Limb,
};
var stack align(@alignOf(ExpectedContents)) =
std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
const allocator = stack.get();
 
const limbs = self.metadata_limbs.items[extra.limbs_index..][0..extra.limbs_len];
const bigint: std.math.big.int.Const = .{
.limbs = limbs,
.positive = switch (kind) {
.enumerator_unsigned,
.enumerator_signed_positive,
=> true,
.enumerator_signed_negative => false,
else => unreachable,
},
};
const str = try bigint.toStringAlloc(allocator, 10, undefined);
defer allocator.free(str);
 
try metadata_formatter.specialized(.@"!", .DIEnumerator, .{
.name = extra.name,
.value = str,
.isUnsigned = switch (kind) {
.enumerator_unsigned => true,
.enumerator_signed_positive, .enumerator_signed_negative => false,
else => unreachable,
},
}, writer);
},
.subrange => {
const extra = self.metadataExtraData(Metadata.Subrange, metadata_item.data);
try metadata_formatter.specialized(.@"!", .DISubrange, .{
.count = extra.count,
.lowerBound = extra.lower_bound,
.upperBound = null,
.stride = null,
}, writer);
},
.tuple => {
var extra = self.metadataExtraDataTrail(Metadata.Tuple, metadata_item.data);
const elements = extra.trail.next(extra.data.elements_len, Metadata, self);
try writer.writeAll("!{");
for (elements) |element| try writer.print("{[element]%}", .{
.element = try metadata_formatter.fmt("", element),
});
try writer.writeAll("}\n");
},
.module_flag => {
const extra = self.metadataExtraData(Metadata.ModuleFlag, metadata_item.data);
try writer.print("!{{{[behavior]%}{[name]%}{[constant]%}}}\n", .{
.behavior = try metadata_formatter.fmt("", extra.behavior),
.name = try metadata_formatter.fmt("", extra.name),
.constant = try metadata_formatter.fmt("", extra.constant),
});
},
.local_var => {
const extra = self.metadataExtraData(Metadata.LocalVar, metadata_item.data);
try metadata_formatter.specialized(.@"!", .DILocalVariable, .{
.name = extra.name,
.arg = null,
.scope = extra.scope,
.file = extra.file,
.line = extra.line,
.type = extra.ty,
.flags = null,
.@"align" = null,
.annotations = null,
}, writer);
},
.parameter => {
const extra = self.metadataExtraData(Metadata.Parameter, metadata_item.data);
try metadata_formatter.specialized(.@"!", .DILocalVariable, .{
.name = extra.name,
.arg = extra.arg_no,
.scope = extra.scope,
.file = extra.file,
.line = extra.line,
.type = extra.ty,
.flags = null,
.@"align" = null,
.annotations = null,
}, writer);
},
.global_var,
.@"global_var local",
=> |kind| {
const extra = self.metadataExtraData(Metadata.GlobalVar, metadata_item.data);
try metadata_formatter.specialized(.@"distinct !", .DIGlobalVariable, .{
.name = extra.name,
.linkageName = extra.linkage_name,
.scope = extra.scope,
.file = extra.file,
.line = extra.line,
.type = extra.ty,
.isLocal = switch (kind) {
.global_var => false,
.@"global_var local" => true,
else => unreachable,
},
.isDefinition = true,
.declaration = null,
.templateParams = null,
.@"align" = null,
.annotations = null,
}, writer);
},
.global_var_expression => {
const extra =
self.metadataExtraData(Metadata.GlobalVarExpression, metadata_item.data);
try metadata_formatter.specialized(.@"!", .DIGlobalVariableExpression, .{
.@"var" = extra.variable,
.expr = extra.expression,
}, writer);
},
}
}
}
}
 
const NoExtra = struct {};
@@ -9954,10 +10264,9 @@ fn printEscapedString(
}
 
fn ensureUnusedGlobalCapacity(self: *Builder, name: String) Allocator.Error!void {
if (self.useLibLlvm()) try self.llvm.globals.ensureUnusedCapacity(self.gpa, 1);
try self.string_map.ensureUnusedCapacity(self.gpa, 1);
if (name.slice(self)) |id| {
const count: usize = comptime std.fmt.count("{d}" ++ .{0}, .{std.math.maxInt(u32)});
const count: usize = comptime std.fmt.count("{d}", .{std.math.maxInt(u32)});
try self.string_bytes.ensureUnusedCapacity(self.gpa, id.len + count);
}
try self.string_indices.ensureUnusedCapacity(self.gpa, 1);
@@ -9970,7 +10279,7 @@ fn fnTypeAssumeCapacity(
ret: Type,
params: []const Type,
comptime kind: Type.Function.Kind,
) (if (build_options.have_llvm) Allocator.Error else error{})!Type {
) Type {
const tag: Type.Tag = switch (kind) {
.normal => .function,
.vararg => .vararg_function,
@@ -10007,20 +10316,6 @@ fn fnTypeAssumeCapacity(
}),
});
self.type_extra.appendSliceAssumeCapacity(@ptrCast(params));
if (self.useLibLlvm()) {
const llvm_params = try self.gpa.alloc(*llvm.Type, params.len);
defer self.gpa.free(llvm_params);
for (llvm_params, params) |*llvm_param, param| llvm_param.* = param.toLlvm(self);
self.llvm.types.appendAssumeCapacity(llvm.functionType(
ret.toLlvm(self),
llvm_params.ptr,
@intCast(llvm_params.len),
switch (kind) {
.normal => .False,
.vararg => .True,
},
));
}
}
return @enumFromInt(gop.index);
}
@@ -10028,8 +10323,6 @@ fn fnTypeAssumeCapacity(
fn intTypeAssumeCapacity(self: *Builder, bits: u24) Type {
assert(bits > 0);
const result = self.getOrPutTypeNoExtraAssumeCapacity(.{ .tag = .integer, .data = bits });
if (self.useLibLlvm() and result.new)
self.llvm.types.appendAssumeCapacity(self.llvm.context.intType(bits));
return result.type;
}
 
@@ -10037,8 +10330,6 @@ fn ptrTypeAssumeCapacity(self: *Builder, addr_space: AddrSpace) Type {
const result = self.getOrPutTypeNoExtraAssumeCapacity(
.{ .tag = .pointer, .data = @intFromEnum(addr_space) },
);
if (self.useLibLlvm() and result.new)
self.llvm.types.appendAssumeCapacity(self.llvm.context.pointerType(@intFromEnum(addr_space)));
return result.type;
}
 
@@ -10076,10 +10367,6 @@ fn vectorTypeAssumeCapacity(
.tag = tag,
.data = self.addTypeExtraAssumeCapacity(data),
});
if (self.useLibLlvm()) self.llvm.types.appendAssumeCapacity(switch (kind) {
.normal => &llvm.Type.vectorType,
.scalable => &llvm.Type.scalableVectorType,
}(child.toLlvm(self), @intCast(len)));
}
return @enumFromInt(gop.index);
}
@@ -10109,9 +10396,6 @@ fn arrayTypeAssumeCapacity(self: *Builder, len: u64, child: Type) Type {
.tag = .small_array,
.data = self.addTypeExtraAssumeCapacity(data),
});
if (self.useLibLlvm()) self.llvm.types.appendAssumeCapacity(
child.toLlvm(self).arrayType2(len),
);
}
return @enumFromInt(gop.index);
} else {
@@ -10142,9 +10426,6 @@ fn arrayTypeAssumeCapacity(self: *Builder, len: u64, child: Type) Type {
.tag = .array,
.data = self.addTypeExtraAssumeCapacity(data),
});
if (self.useLibLlvm()) self.llvm.types.appendAssumeCapacity(
child.toLlvm(self).arrayType2(len),
);
}
return @enumFromInt(gop.index);
}
@@ -10154,7 +10435,7 @@ fn structTypeAssumeCapacity(
self: *Builder,
comptime kind: Type.Structure.Kind,
fields: []const Type,
) (if (build_options.have_llvm) Allocator.Error else error{})!Type {
) Type {
const tag: Type.Tag = switch (kind) {
.normal => .structure,
.@"packed" => .packed_structure,
@@ -10186,25 +10467,6 @@ fn structTypeAssumeCapacity(
}),
});
self.type_extra.appendSliceAssumeCapacity(@ptrCast(fields));
if (self.useLibLlvm()) {
const ExpectedContents = [expected_fields_len]*llvm.Type;
var stack align(@alignOf(ExpectedContents)) =
std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
const allocator = stack.get();
 
const llvm_fields = try allocator.alloc(*llvm.Type, fields.len);
defer allocator.free(llvm_fields);
for (llvm_fields, fields) |*llvm_field, field| llvm_field.* = field.toLlvm(self);
 
self.llvm.types.appendAssumeCapacity(self.llvm.context.structType(
llvm_fields.ptr,
@intCast(llvm_fields.len),
switch (kind) {
.normal => .False,
.@"packed" => .True,
},
));
}
}
return @enumFromInt(gop.index);
}
@@ -10246,9 +10508,6 @@ fn opaqueTypeAssumeCapacity(self: *Builder, name: String) Type {
});
const result: Type = @enumFromInt(gop.index);
type_gop.value_ptr.* = result;
if (self.useLibLlvm()) self.llvm.types.appendAssumeCapacity(
self.llvm.context.structCreateNamed(id.slice(self) orelse ""),
);
return result;
}
 
@@ -10271,7 +10530,6 @@ fn ensureUnusedTypeCapacity(
self.gpa,
count * (@typeInfo(Extra).Struct.fields.len + trail_len),
);
if (self.useLibLlvm()) try self.llvm.types.ensureUnusedCapacity(self.gpa, count);
}
 
fn getOrPutTypeNoExtraAssumeCapacity(self: *Builder, item: Type.Item) struct { new: bool, type: Type } {
@@ -10305,7 +10563,7 @@ fn addTypeExtraAssumeCapacity(self: *Builder, extra: anytype) Type.Item.ExtraInd
self.type_extra.appendAssumeCapacity(switch (field.type) {
u32 => value,
String, Type => @intFromEnum(value),
else => @compileError("bad field type: " ++ @typeName(field.type)),
else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
});
}
return result;
@@ -10388,10 +10646,7 @@ fn bigIntConstAssumeCapacity(
assert(type_item.tag == .integer);
const bits = type_item.data;
 
const ExpectedContents = extern struct {
limbs: [64 / @sizeOf(std.math.big.Limb)]std.math.big.Limb,
llvm_limbs: if (build_options.have_llvm) [64 / @sizeOf(u64)]u64 else void,
};
const ExpectedContents = [64 / @sizeOf(std.math.big.Limb)]std.math.big.Limb;
var stack align(@alignOf(ExpectedContents)) =
std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
const allocator = stack.get();
@@ -10448,44 +10703,6 @@ fn bigIntConstAssumeCapacity(
@ptrCast(self.constant_limbs.addManyAsArrayAssumeCapacity(Constant.Integer.limbs));
extra.* = .{ .type = ty, .limbs_len = @intCast(canonical_value.limbs.len) };
self.constant_limbs.appendSliceAssumeCapacity(canonical_value.limbs);
if (self.useLibLlvm()) {
const llvm_type = ty.toLlvm(self);
if (canonical_value.to(c_longlong)) |small| {
self.llvm.constants.appendAssumeCapacity(llvm_type.constInt(@bitCast(small), .True));
} else |_| if (canonical_value.to(c_ulonglong)) |small| {
self.llvm.constants.appendAssumeCapacity(llvm_type.constInt(small, .False));
} else |_| {
const llvm_limbs = try allocator.alloc(u64, std.math.divCeil(
usize,
if (canonical_value.positive) canonical_value.bitCountAbs() else bits,
@bitSizeOf(u64),
) catch unreachable);
defer allocator.free(llvm_limbs);
var limb_index: usize = 0;
var borrow: std.math.big.Limb = 0;
for (llvm_limbs) |*result_limb| {
var llvm_limb: u64 = 0;
inline for (0..Constant.Integer.limbs) |shift| {
const limb = if (limb_index < canonical_value.limbs.len)
canonical_value.limbs[limb_index]
else
0;
limb_index += 1;
llvm_limb |= @as(u64, limb) << shift * @bitSizeOf(std.math.big.Limb);
}
if (!canonical_value.positive) {
const overflow = @subWithOverflow(borrow, llvm_limb);
llvm_limb = overflow[0];
borrow -%= overflow[1];
assert(borrow == 0 or borrow == std.math.maxInt(std.math.big.Limb));
}
result_limb.* = llvm_limb;
}
self.llvm.constants.appendAssumeCapacity(
llvm_type.constIntOfArbitraryPrecision(@intCast(llvm_limbs.len), llvm_limbs.ptr),
);
}
}
}
return @enumFromInt(gop.index);
}
@@ -10494,13 +10711,6 @@ fn halfConstAssumeCapacity(self: *Builder, val: f16) Constant {
const result = self.getOrPutConstantNoExtraAssumeCapacity(
.{ .tag = .half, .data = @as(u16, @bitCast(val)) },
);
if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity(
if (std.math.isSignalNan(val))
Type.i16.toLlvm(self).constInt(@as(u16, @bitCast(val)), .False)
.constBitCast(Type.half.toLlvm(self))
else
Type.half.toLlvm(self).constReal(val),
);
return result.constant;
}
 
@@ -10509,16 +10719,6 @@ fn bfloatConstAssumeCapacity(self: *Builder, val: f32) Constant {
const result = self.getOrPutConstantNoExtraAssumeCapacity(
.{ .tag = .bfloat, .data = @bitCast(val) },
);
if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity(
if (std.math.isSignalNan(val))
Type.i16.toLlvm(self).constInt(@as(u32, @bitCast(val)) >> 16, .False)
.constBitCast(Type.bfloat.toLlvm(self))
else
Type.bfloat.toLlvm(self).constReal(val),
);
 
if (self.useLibLlvm() and result.new)
self.llvm.constants.appendAssumeCapacity(Type.bfloat.toLlvm(self).constReal(val));
return result.constant;
}
 
@@ -10526,13 +10726,6 @@ fn floatConstAssumeCapacity(self: *Builder, val: f32) Constant {
const result = self.getOrPutConstantNoExtraAssumeCapacity(
.{ .tag = .float, .data = @bitCast(val) },
);
if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity(
if (std.math.isSignalNan(val))
Type.i32.toLlvm(self).constInt(@as(u32, @bitCast(val)), .False)
.constBitCast(Type.float.toLlvm(self))
else
Type.float.toLlvm(self).constReal(val),
);
return result.constant;
}
 
@@ -10563,13 +10756,6 @@ fn doubleConstAssumeCapacity(self: *Builder, val: f64) Constant {
.hi = @intCast(@as(u64, @bitCast(val)) >> 32),
}),
});
if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity(
if (std.math.isSignalNan(val))
Type.i64.toLlvm(self).constInt(@as(u64, @bitCast(val)), .False)
.constBitCast(Type.double.toLlvm(self))
else
Type.double.toLlvm(self).constReal(val),
);
}
return @enumFromInt(gop.index);
}
@@ -10604,17 +10790,6 @@ fn fp128ConstAssumeCapacity(self: *Builder, val: f128) Constant {
.hi_hi = @intCast(@as(u128, @bitCast(val)) >> 96),
}),
});
if (self.useLibLlvm()) {
const llvm_limbs = [_]u64{
@truncate(@as(u128, @bitCast(val))),
@intCast(@as(u128, @bitCast(val)) >> 64),
};
self.llvm.constants.appendAssumeCapacity(
Type.i128.toLlvm(self)
.constIntOfArbitraryPrecision(@intCast(llvm_limbs.len), &llvm_limbs)
.constBitCast(Type.fp128.toLlvm(self)),
);
}
}
return @enumFromInt(gop.index);
}
@@ -10648,17 +10823,6 @@ fn x86_fp80ConstAssumeCapacity(self: *Builder, val: f80) Constant {
.hi = @intCast(@as(u80, @bitCast(val)) >> 64),
}),
});
if (self.useLibLlvm()) {
const llvm_limbs = [_]u64{
@truncate(@as(u80, @bitCast(val))),
@intCast(@as(u80, @bitCast(val)) >> 64),
};
self.llvm.constants.appendAssumeCapacity(
Type.i80.toLlvm(self)
.constIntOfArbitraryPrecision(@intCast(llvm_limbs.len), &llvm_limbs)
.constBitCast(Type.x86_fp80.toLlvm(self)),
);
}
}
return @enumFromInt(gop.index);
}
@@ -10693,14 +10857,6 @@ fn ppc_fp128ConstAssumeCapacity(self: *Builder, val: [2]f64) Constant {
.hi_hi = @intCast(@as(u64, @bitCast(val[1])) >> 32),
}),
});
if (self.useLibLlvm()) {
const llvm_limbs: [2]u64 = @bitCast(val);
self.llvm.constants.appendAssumeCapacity(
Type.i128.toLlvm(self)
.constIntOfArbitraryPrecision(@intCast(llvm_limbs.len), &llvm_limbs)
.constBitCast(Type.ppc_fp128.toLlvm(self)),
);
}
}
return @enumFromInt(gop.index);
}
@@ -10710,8 +10866,6 @@ fn nullConstAssumeCapacity(self: *Builder, ty: Type) Constant {
const result = self.getOrPutConstantNoExtraAssumeCapacity(
.{ .tag = .null, .data = @intFromEnum(ty) },
);
if (self.useLibLlvm() and result.new)
self.llvm.constants.appendAssumeCapacity(ty.toLlvm(self).constNull());
return result.constant;
}
 
@@ -10720,16 +10874,10 @@ fn noneConstAssumeCapacity(self: *Builder, ty: Type) Constant {
const result = self.getOrPutConstantNoExtraAssumeCapacity(
.{ .tag = .none, .data = @intFromEnum(ty) },
);
if (self.useLibLlvm() and result.new)
self.llvm.constants.appendAssumeCapacity(ty.toLlvm(self).constNull());
return result.constant;
}
 
fn structConstAssumeCapacity(
self: *Builder,
ty: Type,
vals: []const Constant,
) (if (build_options.have_llvm) Allocator.Error else error{})!Constant {
fn structConstAssumeCapacity(self: *Builder, ty: Type, vals: []const Constant) Constant {
const type_item = self.type_items.items[@intFromEnum(ty)];
var extra = self.typeExtraDataTrail(Type.Structure, switch (type_item.tag) {
.structure, .packed_structure => type_item.data,
@@ -10756,28 +10904,10 @@ fn structConstAssumeCapacity(
else => unreachable,
};
const result = self.getOrPutConstantAggregateAssumeCapacity(tag, ty, vals);
if (self.useLibLlvm() and result.new) {
const ExpectedContents = [expected_fields_len]*llvm.Value;
var stack align(@alignOf(ExpectedContents)) =
std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
const allocator = stack.get();
 
const llvm_vals = try allocator.alloc(*llvm.Value, vals.len);
defer allocator.free(llvm_vals);
for (llvm_vals, vals) |*llvm_val, val| llvm_val.* = val.toLlvm(self);
 
self.llvm.constants.appendAssumeCapacity(
ty.toLlvm(self).constNamedStruct(llvm_vals.ptr, @intCast(llvm_vals.len)),
);
}
return result.constant;
}
 
fn arrayConstAssumeCapacity(
self: *Builder,
ty: Type,
vals: []const Constant,
) (if (build_options.have_llvm) Allocator.Error else error{})!Constant {
fn arrayConstAssumeCapacity(self: *Builder, ty: Type, vals: []const Constant) Constant {
const type_item = self.type_items.items[@intFromEnum(ty)];
const type_extra: struct { len: u64, child: Type } = switch (type_item.tag) {
inline .small_array, .array => |kind| extra: {
@@ -10798,20 +10928,6 @@ fn arrayConstAssumeCapacity(
} else return self.zeroInitConstAssumeCapacity(ty);
 
const result = self.getOrPutConstantAggregateAssumeCapacity(.array, ty, vals);
if (self.useLibLlvm() and result.new) {
const ExpectedContents = [expected_fields_len]*llvm.Value;
var stack align(@alignOf(ExpectedContents)) =
std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
const allocator = stack.get();
 
const llvm_vals = try allocator.alloc(*llvm.Value, vals.len);
defer allocator.free(llvm_vals);
for (llvm_vals, vals) |*llvm_val, val| llvm_val.* = val.toLlvm(self);
 
self.llvm.constants.appendAssumeCapacity(
type_extra.child.toLlvm(self).constArray2(llvm_vals.ptr, llvm_vals.len),
);
}
return result.constant;
}
 
@@ -10822,30 +10938,10 @@ fn stringConstAssumeCapacity(self: *Builder, val: String) Constant {
const result = self.getOrPutConstantNoExtraAssumeCapacity(
.{ .tag = .string, .data = @intFromEnum(val) },
);
if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity(
self.llvm.context.constString(slice.ptr, @intCast(slice.len), .True),
);
return result.constant;
}
 
fn stringNullConstAssumeCapacity(self: *Builder, val: String) Constant {
const slice = val.slice(self).?;
const ty = self.arrayTypeAssumeCapacity(slice.len + 1, .i8);
if (std.mem.allEqual(u8, slice, 0)) return self.zeroInitConstAssumeCapacity(ty);
const result = self.getOrPutConstantNoExtraAssumeCapacity(
.{ .tag = .string_null, .data = @intFromEnum(val) },
);
if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity(
self.llvm.context.constString(slice.ptr, @intCast(slice.len + 1), .True),
);
return result.constant;
}
 
fn vectorConstAssumeCapacity(
self: *Builder,
ty: Type,
vals: []const Constant,
) (if (build_options.have_llvm) Allocator.Error else error{})!Constant {
fn vectorConstAssumeCapacity(self: *Builder, ty: Type, vals: []const Constant) Constant {
assert(ty.isVector(self));
assert(ty.vectorLen(self) == vals.len);
for (vals) |val| assert(ty.childType(self) == val.typeOf(self));
@@ -10858,28 +10954,10 @@ fn vectorConstAssumeCapacity(
} else return self.zeroInitConstAssumeCapacity(ty);
 
const result = self.getOrPutConstantAggregateAssumeCapacity(.vector, ty, vals);
if (self.useLibLlvm() and result.new) {
const ExpectedContents = [expected_fields_len]*llvm.Value;
var stack align(@alignOf(ExpectedContents)) =
std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
const allocator = stack.get();
 
const llvm_vals = try allocator.alloc(*llvm.Value, vals.len);
defer allocator.free(llvm_vals);
for (llvm_vals, vals) |*llvm_val, val| llvm_val.* = val.toLlvm(self);
 
self.llvm.constants.appendAssumeCapacity(
llvm.constVector(llvm_vals.ptr, @intCast(llvm_vals.len)),
);
}
return result.constant;
}
 
fn splatConstAssumeCapacity(
self: *Builder,
ty: Type,
val: Constant,
) (if (build_options.have_llvm) Allocator.Error else error{})!Constant {
fn splatConstAssumeCapacity(self: *Builder, ty: Type, val: Constant) Constant {
assert(ty.scalarType(self) == val.typeOf(self));
 
if (!ty.isVector(self)) return val;
@@ -10909,20 +10987,6 @@ fn splatConstAssumeCapacity(
.tag = .splat,
.data = self.addConstantExtraAssumeCapacity(data),
});
if (self.useLibLlvm()) {
const ExpectedContents = [expected_fields_len]*llvm.Value;
var stack align(@alignOf(ExpectedContents)) =
std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
const allocator = stack.get();
 
const llvm_vals = try allocator.alloc(*llvm.Value, ty.vectorLen(self));
defer allocator.free(llvm_vals);
@memset(llvm_vals, val.toLlvm(self));
 
self.llvm.constants.appendAssumeCapacity(
llvm.constVector(llvm_vals.ptr, @intCast(llvm_vals.len)),
);
}
}
return @enumFromInt(gop.index);
}
@@ -10964,8 +11028,6 @@ fn zeroInitConstAssumeCapacity(self: *Builder, ty: Type) Constant {
const result = self.getOrPutConstantNoExtraAssumeCapacity(
.{ .tag = .zeroinitializer, .data = @intFromEnum(ty) },
);
if (self.useLibLlvm() and result.new)
self.llvm.constants.appendAssumeCapacity(ty.toLlvm(self).constNull());
return result.constant;
}
 
@@ -10981,8 +11043,6 @@ fn undefConstAssumeCapacity(self: *Builder, ty: Type) Constant {
const result = self.getOrPutConstantNoExtraAssumeCapacity(
.{ .tag = .undef, .data = @intFromEnum(ty) },
);
if (self.useLibLlvm() and result.new)
self.llvm.constants.appendAssumeCapacity(ty.toLlvm(self).getUndef());
return result.constant;
}
 
@@ -10998,8 +11058,6 @@ fn poisonConstAssumeCapacity(self: *Builder, ty: Type) Constant {
const result = self.getOrPutConstantNoExtraAssumeCapacity(
.{ .tag = .poison, .data = @intFromEnum(ty) },
);
if (self.useLibLlvm() and result.new)
self.llvm.constants.appendAssumeCapacity(ty.toLlvm(self).getPoison());
return result.constant;
}
 
@@ -11032,9 +11090,6 @@ fn blockAddrConstAssumeCapacity(
.tag = .blockaddress,
.data = self.addConstantExtraAssumeCapacity(data),
});
if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity(
function.toLlvm(self).blockAddress(block.toValue(self, function).toLlvm(self, function)),
);
}
return @enumFromInt(gop.index);
}
@@ -11043,7 +11098,6 @@ fn dsoLocalEquivalentConstAssumeCapacity(self: *Builder, function: Function.Inde
const result = self.getOrPutConstantNoExtraAssumeCapacity(
.{ .tag = .dso_local_equivalent, .data = @intFromEnum(function) },
);
if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity(undefined);
return result.constant;
}
 
@@ -11051,7 +11105,6 @@ fn noCfiConstAssumeCapacity(self: *Builder, function: Function.Index) Constant {
const result = self.getOrPutConstantNoExtraAssumeCapacity(
.{ .tag = .no_cfi, .data = @intFromEnum(function) },
);
if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity(undefined);
return result.constant;
}
 
@@ -11141,22 +11194,6 @@ fn castConstAssumeCapacity(self: *Builder, tag: Constant.Tag, val: Constant, ty:
.tag = tag,
.data = self.addConstantExtraAssumeCapacity(data.cast),
});
if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity(switch (tag) {
.trunc => &llvm.Value.constTrunc,
.zext => &llvm.Value.constZExt,
.sext => &llvm.Value.constSExt,
.fptrunc => &llvm.Value.constFPTrunc,
.fpext => &llvm.Value.constFPExt,
.fptoui => &llvm.Value.constFPToUI,
.fptosi => &llvm.Value.constFPToSI,
.uitofp => &llvm.Value.constUIToFP,
.sitofp => &llvm.Value.constSIToFP,
.ptrtoint => &llvm.Value.constPtrToInt,
.inttoptr => &llvm.Value.constIntToPtr,
.bitcast => &llvm.Value.constBitCast,
.addrspacecast => &llvm.Value.constAddrSpaceCast,
else => unreachable,
}(val.toLlvm(self), ty.toLlvm(self)));
}
return @enumFromInt(gop.index);
}
@@ -11168,7 +11205,7 @@ fn gepConstAssumeCapacity(
base: Constant,
inrange: ?u16,
indices: []const Constant,
) (if (build_options.have_llvm) Allocator.Error else error{})!Constant {
) Constant {
const tag: Constant.Tag = switch (kind) {
.normal => .getelementptr,
.inbounds => .@"getelementptr inbounds",
@@ -11249,21 +11286,6 @@ fn gepConstAssumeCapacity(
}),
});
self.constant_extra.appendSliceAssumeCapacity(@ptrCast(indices));
if (self.useLibLlvm()) {
const ExpectedContents = [expected_gep_indices_len]*llvm.Value;
var stack align(@alignOf(ExpectedContents)) =
std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
const allocator = stack.get();
 
const llvm_indices = try allocator.alloc(*llvm.Value, indices.len);
defer allocator.free(llvm_indices);
for (llvm_indices, indices) |*llvm_index, index| llvm_index.* = index.toLlvm(self);
 
self.llvm.constants.appendAssumeCapacity(switch (kind) {
.normal => &llvm.Type.constGEP,
.inbounds => &llvm.Type.constInBoundsGEP,
}(ty.toLlvm(self), base.toLlvm(self), llvm_indices.ptr, @intCast(llvm_indices.len)));
}
}
return @enumFromInt(gop.index);
}
@@ -11298,9 +11320,6 @@ fn icmpConstAssumeCapacity(
.tag = .icmp,
.data = self.addConstantExtraAssumeCapacity(data),
});
if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity(
llvm.constICmp(cond.toLlvm(), lhs.toLlvm(self), rhs.toLlvm(self)),
);
}
return @enumFromInt(gop.index);
}
@@ -11335,9 +11354,6 @@ fn fcmpConstAssumeCapacity(
.tag = .fcmp,
.data = self.addConstantExtraAssumeCapacity(data),
});
if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity(
llvm.constFCmp(cond.toLlvm(), lhs.toLlvm(self), rhs.toLlvm(self)),
);
}
return @enumFromInt(gop.index);
}
@@ -11371,9 +11387,6 @@ fn extractElementConstAssumeCapacity(
.tag = .extractelement,
.data = self.addConstantExtraAssumeCapacity(data),
});
if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity(
val.toLlvm(self).constExtractElement(index.toLlvm(self)),
);
}
return @enumFromInt(gop.index);
}
@@ -11408,9 +11421,6 @@ fn insertElementConstAssumeCapacity(
.tag = .insertelement,
.data = self.addConstantExtraAssumeCapacity(data),
});
if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity(
val.toLlvm(self).constInsertElement(elem.toLlvm(self), index.toLlvm(self)),
);
}
return @enumFromInt(gop.index);
}
@@ -11449,9 +11459,6 @@ fn shuffleVectorConstAssumeCapacity(
.tag = .shufflevector,
.data = self.addConstantExtraAssumeCapacity(data),
});
if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity(
lhs.toLlvm(self).constShuffleVector(rhs.toLlvm(self), mask.toLlvm(self)),
);
}
return @enumFromInt(gop.index);
}
@@ -11506,18 +11513,6 @@ fn binConstAssumeCapacity(
.tag = tag,
.data = self.addConstantExtraAssumeCapacity(data.extra),
});
if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity(switch (tag) {
.add => &llvm.Value.constAdd,
.sub => &llvm.Value.constSub,
.mul => &llvm.Value.constMul,
.shl => &llvm.Value.constShl,
.lshr => &llvm.Value.constLShr,
.ashr => &llvm.Value.constAShr,
.@"and" => &llvm.Value.constAnd,
.@"or" => &llvm.Value.constOr,
.xor => &llvm.Value.constXor,
else => unreachable,
}(lhs.toLlvm(self), rhs.toLlvm(self)));
}
return @enumFromInt(gop.index);
}
@@ -11560,21 +11555,6 @@ fn asmConstAssumeCapacity(
.tag = data.tag,
.data = self.addConstantExtraAssumeCapacity(data.extra),
});
if (self.useLibLlvm()) {
const assembly_slice = assembly.slice(self).?;
const constraints_slice = constraints.slice(self).?;
self.llvm.constants.appendAssumeCapacity(llvm.getInlineAsm(
ty.toLlvm(self),
assembly_slice.ptr,
assembly_slice.len,
constraints_slice.ptr,
constraints_slice.len,
llvm.Bool.fromBool(info.sideeffect),
llvm.Bool.fromBool(info.alignstack),
if (info.inteldialect) .Intel else .ATT,
llvm.Bool.fromBool(info.unwind),
));
}
}
return @enumFromInt(gop.index);
}
@@ -11591,7 +11571,6 @@ fn ensureUnusedConstantCapacity(
self.gpa,
count * (@typeInfo(Extra).Struct.fields.len + trail_len),
);
if (self.useLibLlvm()) try self.llvm.constants.ensureUnusedCapacity(self.gpa, count);
}
 
fn getOrPutConstantNoExtraAssumeCapacity(
@@ -11722,15 +11701,3260 @@ fn constantExtraData(self: *const Builder, comptime T: type, index: Constant.Ite
return self.constantExtraDataTrail(T, index).data;
}
 
const assert = std.debug.assert;
const build_options = @import("build_options");
const builtin = @import("builtin");
const llvm = if (build_options.have_llvm)
@import("bindings.zig")
else
@compileError("LLVM unavailable");
const log = std.log.scoped(.llvm);
const std = @import("std");
fn ensureUnusedMetadataCapacity(
self: *Builder,
count: usize,
comptime Extra: type,
trail_len: usize,
) Allocator.Error!void {
try self.metadata_map.ensureUnusedCapacity(self.gpa, count);
try self.metadata_items.ensureUnusedCapacity(self.gpa, count);
try self.metadata_extra.ensureUnusedCapacity(
self.gpa,
count * (@typeInfo(Extra).Struct.fields.len + trail_len),
);
}
 
fn addMetadataExtraAssumeCapacity(self: *Builder, extra: anytype) Metadata.Item.ExtraIndex {
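// Each field of `extra` is flattened into one u32 word (enums via @intFromEnum,
// DIFlags via @bitCast); metadataExtraDataTrail decodes the same layout.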
const result: Metadata.Item.ExtraIndex = @intCast(self.metadata_extra.items.len);
inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
const value = @field(extra, field.name);
self.metadata_extra.appendAssumeCapacity(switch (field.type) {
u32 => value,
MetadataString, Metadata, Variable.Index, Value => @intFromEnum(value),
Metadata.DIFlags => @bitCast(value),
else => @compileError("bad field type: " ++ @typeName(field.type)),
});
}
return result;
}
 
const MetadataExtraDataTrail = struct {
index: Metadata.Item.ExtraIndex,
 
fn nextMut(self: *MetadataExtraDataTrail, len: u32, comptime Item: type, builder: *Builder) []Item {
const items: []Item = @ptrCast(builder.metadata_extra.items[self.index..][0..len]);
self.index += @intCast(len);
return items;
}
 
fn next(
self: *MetadataExtraDataTrail,
len: u32,
comptime Item: type,
builder: *const Builder,
) []const Item {
const items: []const Item = @ptrCast(builder.metadata_extra.items[self.index..][0..len]);
self.index += @intCast(len);
return items;
}
};
 
fn metadataExtraDataTrail(
self: *const Builder,
comptime T: type,
index: Metadata.Item.ExtraIndex,
) struct { data: T, trail: MetadataExtraDataTrail } {
var result: T = undefined;
const fields = @typeInfo(T).Struct.fields;
inline for (fields, self.metadata_extra.items[index..][0..fields.len]) |field, value|
@field(result, field.name) = switch (field.type) {
u32 => value,
MetadataString, Metadata, Variable.Index, Value => @enumFromInt(value),
Metadata.DIFlags => @bitCast(value),
else => @compileError("bad field type: " ++ @typeName(field.type)),
};
return .{
.data = result,
.trail = .{ .index = index + @as(Metadata.Item.ExtraIndex, @intCast(fields.len)) },
};
}
 
fn metadataExtraData(self: *const Builder, comptime T: type, index: Metadata.Item.ExtraIndex) T {
return self.metadataExtraDataTrail(T, index).data;
}
 
pub fn metadataString(self: *Builder, bytes: []const u8) Allocator.Error!MetadataString {
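// Interns `bytes`: on a miss they are appended to metadata_string_bytes and the
// new end offset is pushed onto metadata_string_indices, so each index records
// where one interned string ends.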
try self.metadata_string_bytes.ensureUnusedCapacity(self.gpa, bytes.len);
try self.metadata_string_indices.ensureUnusedCapacity(self.gpa, 1);
try self.metadata_string_map.ensureUnusedCapacity(self.gpa, 1);
 
const gop = self.metadata_string_map.getOrPutAssumeCapacityAdapted(
bytes,
MetadataString.Adapter{ .builder = self },
);
if (!gop.found_existing) {
self.metadata_string_bytes.appendSliceAssumeCapacity(bytes);
self.metadata_string_indices.appendAssumeCapacity(@intCast(self.metadata_string_bytes.items.len));
}
return @enumFromInt(gop.index);
}
 
pub fn metadataStringFromString(self: *Builder, str: String) Allocator.Error!MetadataString {
if (str == .none or str == .empty) return MetadataString.none;
return try self.metadataString(str.slice(self).?);
}
 
pub fn metadataStringFmt(self: *Builder, comptime fmt_str: []const u8, fmt_args: anytype) Allocator.Error!MetadataString {
try self.metadata_string_map.ensureUnusedCapacity(self.gpa, 1);
try self.metadata_string_bytes.ensureUnusedCapacity(self.gpa, @intCast(std.fmt.count(fmt_str, fmt_args)));
try self.metadata_string_indices.ensureUnusedCapacity(self.gpa, 1);
return self.metadataStringFmtAssumeCapacity(fmt_str, fmt_args);
}
 
pub fn metadataStringFmtAssumeCapacity(self: *Builder, comptime fmt_str: []const u8, fmt_args: anytype) MetadataString {
self.metadata_string_bytes.writer(undefined).print(fmt_str, fmt_args) catch unreachable;
return self.trailingMetadataStringAssumeCapacity();
}
 
pub fn trailingMetadataString(self: *Builder) Allocator.Error!MetadataString {
try self.metadata_string_indices.ensureUnusedCapacity(self.gpa, 1);
try self.metadata_string_map.ensureUnusedCapacity(self.gpa, 1);
return self.trailingMetadataStringAssumeCapacity();
}
 
pub fn trailingMetadataStringAssumeCapacity(self: *Builder) MetadataString {
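// The candidate string is whatever was appended to metadata_string_bytes since
// the last recorded index; on a cache hit those trailing bytes are rolled back,
// otherwise a new end index is recorded for them.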
const start = self.metadata_string_indices.getLast();
const bytes: []const u8 = self.metadata_string_bytes.items[start..];
const gop = self.metadata_string_map.getOrPutAssumeCapacityAdapted(bytes, String.Adapter{ .builder = self });
if (gop.found_existing) {
self.metadata_string_bytes.shrinkRetainingCapacity(start);
} else {
self.metadata_string_indices.appendAssumeCapacity(@intCast(self.metadata_string_bytes.items.len));
}
return @enumFromInt(gop.index);
}
 
pub fn debugNamed(self: *Builder, name: MetadataString, operands: []const Metadata) Allocator.Error!void {
try self.metadata_extra.ensureUnusedCapacity(self.gpa, operands.len);
try self.metadata_named.ensureUnusedCapacity(self.gpa, 1);
self.debugNamedAssumeCapacity(name, operands);
}
 
fn debugNone(self: *Builder) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, NoExtra, 0);
return self.debugNoneAssumeCapacity();
}
 
pub fn debugFile(
self: *Builder,
filename: MetadataString,
directory: MetadataString,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.File, 0);
return self.debugFileAssumeCapacity(filename, directory);
}
 
pub fn debugCompileUnit(
self: *Builder,
file: Metadata,
producer: MetadataString,
enums: Metadata,
globals: Metadata,
options: Metadata.CompileUnit.Options,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.CompileUnit, 0);
return self.debugCompileUnitAssumeCapacity(file, producer, enums, globals, options);
}
 
pub fn debugSubprogram(
self: *Builder,
file: Metadata,
name: MetadataString,
linkage_name: MetadataString,
line: u32,
scope_line: u32,
ty: Metadata,
options: Metadata.Subprogram.Options,
compile_unit: Metadata,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.Subprogram, 0);
return self.debugSubprogramAssumeCapacity(
file,
name,
linkage_name,
line,
scope_line,
ty,
options,
compile_unit,
);
}
 
pub fn debugLexicalBlock(self: *Builder, scope: Metadata, file: Metadata, line: u32, column: u32) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.LexicalBlock, 0);
return self.debugLexicalBlockAssumeCapacity(scope, file, line, column);
}
 
pub fn debugLocation(self: *Builder, line: u32, column: u32, scope: Metadata, inlined_at: Metadata) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.Location, 0);
return self.debugLocationAssumeCapacity(line, column, scope, inlined_at);
}
 
pub fn debugBoolType(self: *Builder, name: MetadataString, size_in_bits: u64) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.BasicType, 0);
return self.debugBoolTypeAssumeCapacity(name, size_in_bits);
}
 
pub fn debugUnsignedType(self: *Builder, name: MetadataString, size_in_bits: u64) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.BasicType, 0);
return self.debugUnsignedTypeAssumeCapacity(name, size_in_bits);
}
 
pub fn debugSignedType(self: *Builder, name: MetadataString, size_in_bits: u64) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.BasicType, 0);
return self.debugSignedTypeAssumeCapacity(name, size_in_bits);
}
 
pub fn debugFloatType(self: *Builder, name: MetadataString, size_in_bits: u64) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.BasicType, 0);
return self.debugFloatTypeAssumeCapacity(name, size_in_bits);
}
 
pub fn debugForwardReference(self: *Builder) Allocator.Error!Metadata {
try self.metadata_forward_references.ensureUnusedCapacity(self.gpa, 1);
return self.debugForwardReferenceAssumeCapacity();
}
 
pub fn debugStructType(
self: *Builder,
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
underlying_type: Metadata,
size_in_bits: u64,
align_in_bits: u64,
fields_tuple: Metadata,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.CompositeType, 0);
return self.debugStructTypeAssumeCapacity(
name,
file,
scope,
line,
underlying_type,
size_in_bits,
align_in_bits,
fields_tuple,
);
}
 
pub fn debugUnionType(
self: *Builder,
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
underlying_type: Metadata,
size_in_bits: u64,
align_in_bits: u64,
fields_tuple: Metadata,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.CompositeType, 0);
return self.debugUnionTypeAssumeCapacity(
name,
file,
scope,
line,
underlying_type,
size_in_bits,
align_in_bits,
fields_tuple,
);
}
 
pub fn debugEnumerationType(
self: *Builder,
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
underlying_type: Metadata,
size_in_bits: u64,
align_in_bits: u64,
fields_tuple: Metadata,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.CompositeType, 0);
return self.debugEnumerationTypeAssumeCapacity(
name,
file,
scope,
line,
underlying_type,
size_in_bits,
align_in_bits,
fields_tuple,
);
}
 
pub fn debugArrayType(
self: *Builder,
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
underlying_type: Metadata,
size_in_bits: u64,
align_in_bits: u64,
fields_tuple: Metadata,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.CompositeType, 0);
return self.debugArrayTypeAssumeCapacity(
name,
file,
scope,
line,
underlying_type,
size_in_bits,
align_in_bits,
fields_tuple,
);
}
 
pub fn debugVectorType(
self: *Builder,
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
underlying_type: Metadata,
size_in_bits: u64,
align_in_bits: u64,
fields_tuple: Metadata,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.CompositeType, 0);
return self.debugVectorTypeAssumeCapacity(
name,
file,
scope,
line,
underlying_type,
size_in_bits,
align_in_bits,
fields_tuple,
);
}
 
pub fn debugPointerType(
self: *Builder,
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
underlying_type: Metadata,
size_in_bits: u64,
align_in_bits: u64,
offset_in_bits: u64,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.DerivedType, 0);
return self.debugPointerTypeAssumeCapacity(
name,
file,
scope,
line,
underlying_type,
size_in_bits,
align_in_bits,
offset_in_bits,
);
}
 
pub fn debugMemberType(
self: *Builder,
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
underlying_type: Metadata,
size_in_bits: u64,
align_in_bits: u64,
offset_in_bits: u64,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.DerivedType, 0);
return self.debugMemberTypeAssumeCapacity(
name,
file,
scope,
line,
underlying_type,
size_in_bits,
align_in_bits,
offset_in_bits,
);
}
 
pub fn debugSubroutineType(
self: *Builder,
types_tuple: Metadata,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.SubroutineType, 0);
return self.debugSubroutineTypeAssumeCapacity(types_tuple);
}
 
pub fn debugEnumerator(
self: *Builder,
name: MetadataString,
unsigned: bool,
bit_width: u32,
value: std.math.big.int.Const,
) Allocator.Error!Metadata {
assert(!(unsigned and !value.positive));
try self.ensureUnusedMetadataCapacity(1, Metadata.Enumerator, 0);
try self.metadata_limbs.ensureUnusedCapacity(self.gpa, value.limbs.len);
return self.debugEnumeratorAssumeCapacity(name, unsigned, bit_width, value);
}
 
pub fn debugSubrange(
self: *Builder,
lower_bound: Metadata,
count: Metadata,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.Subrange, 0);
return self.debugSubrangeAssumeCapacity(lower_bound, count);
}
 
pub fn debugExpression(
self: *Builder,
elements: []const u32,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.Expression, elements.len * @sizeOf(u32));
return self.debugExpressionAssumeCapacity(elements);
}
 
pub fn debugTuple(
self: *Builder,
elements: []const Metadata,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.Tuple, elements.len * @sizeOf(Metadata));
return self.debugTupleAssumeCapacity(elements);
}
 
pub fn debugModuleFlag(
self: *Builder,
behavior: Metadata,
name: MetadataString,
constant: Metadata,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.ModuleFlag, 0);
return self.debugModuleFlagAssumeCapacity(behavior, name, constant);
}
 
pub fn debugLocalVar(
self: *Builder,
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
ty: Metadata,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.LocalVar, 0);
return self.debugLocalVarAssumeCapacity(name, file, scope, line, ty);
}
 
pub fn debugParameter(
self: *Builder,
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
ty: Metadata,
arg_no: u32,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.Parameter, 0);
return self.debugParameterAssumeCapacity(name, file, scope, line, ty, arg_no);
}
 
pub fn debugGlobalVar(
self: *Builder,
name: MetadataString,
linkage_name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
ty: Metadata,
variable: Variable.Index,
options: Metadata.GlobalVar.Options,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.GlobalVar, 0);
return self.debugGlobalVarAssumeCapacity(
name,
linkage_name,
file,
scope,
line,
ty,
variable,
options,
);
}
 
pub fn debugGlobalVarExpression(
self: *Builder,
variable: Metadata,
expression: Metadata,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.GlobalVarExpression, 0);
return self.debugGlobalVarExpressionAssumeCapacity(variable, expression);
}
 
pub fn debugConstant(self: *Builder, value: Constant) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, NoExtra, 0);
return self.debugConstantAssumeCapacity(value);
}
 
pub fn debugForwardReferenceSetType(self: *Builder, fwd_ref: Metadata, ty: Metadata) void {
assert(
@intFromEnum(fwd_ref) >= Metadata.first_forward_reference and
@intFromEnum(fwd_ref) <= Metadata.first_local_metadata,
);
const index = @intFromEnum(fwd_ref) - Metadata.first_forward_reference;
self.metadata_forward_references.items[index] = ty;
}
 
fn metadataSimpleAssumeCapacity(self: *Builder, tag: Metadata.Tag, value: anytype) Metadata {
const Key = struct {
tag: Metadata.Tag,
value: @TypeOf(value),
};
const Adapter = struct {
builder: *const Builder,
pub fn hash(_: @This(), key: Key) u32 {
var hasher = std.hash.Wyhash.init(std.hash.uint32(@intFromEnum(key.tag)));
inline for (std.meta.fields(@TypeOf(value))) |field| {
hasher.update(std.mem.asBytes(&@field(key.value, field.name)));
}
return @truncate(hasher.final());
}
 
pub fn eql(ctx: @This(), lhs_key: Key, _: void, rhs_index: usize) bool {
if (lhs_key.tag != ctx.builder.metadata_items.items(.tag)[rhs_index]) return false;
const rhs_data = ctx.builder.metadata_items.items(.data)[rhs_index];
const rhs_extra = ctx.builder.metadataExtraData(@TypeOf(value), rhs_data);
return std.meta.eql(lhs_key.value, rhs_extra);
}
};
 
const gop = self.metadata_map.getOrPutAssumeCapacityAdapted(
Key{ .tag = tag, .value = value },
Adapter{ .builder = self },
);
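// A hit here means a structurally identical node (same tag and extra fields) was already
// interned, so its existing Metadata index is reused; this is the uniqued-metadata path.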
 
if (!gop.found_existing) {
gop.key_ptr.* = {};
gop.value_ptr.* = {};
self.metadata_items.appendAssumeCapacity(.{
.tag = tag,
.data = self.addMetadataExtraAssumeCapacity(value),
});
}
return @enumFromInt(gop.index);
}
 
fn metadataDistinctAssumeCapacity(self: *Builder, tag: Metadata.Tag, value: anytype) Metadata {
const Key = struct { tag: Metadata.Tag, index: Metadata };
const Adapter = struct {
pub fn hash(_: @This(), key: Key) u32 {
return @truncate(std.hash.Wyhash.hash(
std.hash.uint32(@intFromEnum(key.tag)),
std.mem.asBytes(&key.index),
));
}
 
pub fn eql(_: @This(), lhs_key: Key, _: void, rhs_index: usize) bool {
return @intFromEnum(lhs_key.index) == rhs_index;
}
};
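// The key's index is the next free map slot, so eql can never match an existing entry and
// every call appends a fresh node, matching the semantics of LLVM's distinct metadata.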
 
const gop = self.metadata_map.getOrPutAssumeCapacityAdapted(
Key{ .tag = tag, .index = @enumFromInt(self.metadata_map.count()) },
Adapter{},
);
 
if (!gop.found_existing) {
gop.key_ptr.* = {};
gop.value_ptr.* = {};
self.metadata_items.appendAssumeCapacity(.{
.tag = tag,
.data = self.addMetadataExtraAssumeCapacity(value),
});
}
return @enumFromInt(gop.index);
}
 
fn debugNamedAssumeCapacity(self: *Builder, name: MetadataString, operands: []const Metadata) void {
assert(!self.strip);
assert(name != .none);
const extra_index: u32 = @intCast(self.metadata_extra.items.len);
self.metadata_extra.appendSliceAssumeCapacity(@ptrCast(operands));
 
const gop = self.metadata_named.getOrPutAssumeCapacity(name);
gop.value_ptr.* = .{
.index = extra_index,
.len = @intCast(operands.len),
};
}
 
pub fn debugNoneAssumeCapacity(self: *Builder) Metadata {
assert(!self.strip);
return self.metadataSimpleAssumeCapacity(.none, .{});
}
 
fn debugFileAssumeCapacity(
self: *Builder,
filename: MetadataString,
directory: MetadataString,
) Metadata {
assert(!self.strip);
return self.metadataSimpleAssumeCapacity(.file, Metadata.File{
.filename = filename,
.directory = directory,
});
}
 
pub fn debugCompileUnitAssumeCapacity(
self: *Builder,
file: Metadata,
producer: MetadataString,
enums: Metadata,
globals: Metadata,
options: Metadata.CompileUnit.Options,
) Metadata {
assert(!self.strip);
return self.metadataDistinctAssumeCapacity(
if (options.optimized) .@"compile_unit optimized" else .compile_unit,
Metadata.CompileUnit{
.file = file,
.producer = producer,
.enums = enums,
.globals = globals,
},
);
}
 
fn debugSubprogramAssumeCapacity(
self: *Builder,
file: Metadata,
name: MetadataString,
linkage_name: MetadataString,
line: u32,
scope_line: u32,
ty: Metadata,
options: Metadata.Subprogram.Options,
compile_unit: Metadata,
) Metadata {
assert(!self.strip);
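// Three of the SPFlags bits (starting at bit 2: local, definition, optimized) are folded
// into the tag, selecting one of the eight subprogram tag variants; the bitcode writer
// unpacks them again when emitting the record.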
const tag: Metadata.Tag = @enumFromInt(@intFromEnum(Metadata.Tag.subprogram) +
@as(u3, @truncate(@as(u32, @bitCast(options.sp_flags)) >> 2)));
return self.metadataDistinctAssumeCapacity(tag, Metadata.Subprogram{
.file = file,
.name = name,
.linkage_name = linkage_name,
.line = line,
.scope_line = scope_line,
.ty = ty,
.di_flags = options.di_flags,
.compile_unit = compile_unit,
});
}
 
fn debugLexicalBlockAssumeCapacity(self: *Builder, scope: Metadata, file: Metadata, line: u32, column: u32) Metadata {
assert(!self.strip);
return self.metadataSimpleAssumeCapacity(.lexical_block, Metadata.LexicalBlock{
.scope = scope,
.file = file,
.line = line,
.column = column,
});
}
 
fn debugLocationAssumeCapacity(self: *Builder, line: u32, column: u32, scope: Metadata, inlined_at: Metadata) Metadata {
assert(!self.strip);
return self.metadataSimpleAssumeCapacity(.location, Metadata.Location{
.line = line,
.column = column,
.scope = scope,
.inlined_at = inlined_at,
});
}
 
fn debugBoolTypeAssumeCapacity(self: *Builder, name: MetadataString, size_in_bits: u64) Metadata {
assert(!self.strip);
return self.metadataSimpleAssumeCapacity(.basic_bool_type, Metadata.BasicType{
.name = name,
.size_in_bits_lo = @truncate(size_in_bits),
.size_in_bits_hi = @truncate(size_in_bits >> 32),
});
}
 
fn debugUnsignedTypeAssumeCapacity(self: *Builder, name: MetadataString, size_in_bits: u64) Metadata {
assert(!self.strip);
return self.metadataSimpleAssumeCapacity(.basic_unsigned_type, Metadata.BasicType{
.name = name,
.size_in_bits_lo = @truncate(size_in_bits),
.size_in_bits_hi = @truncate(size_in_bits >> 32),
});
}
 
fn debugSignedTypeAssumeCapacity(self: *Builder, name: MetadataString, size_in_bits: u64) Metadata {
assert(!self.strip);
return self.metadataSimpleAssumeCapacity(.basic_signed_type, Metadata.BasicType{
.name = name,
.size_in_bits_lo = @truncate(size_in_bits),
.size_in_bits_hi = @truncate(size_in_bits >> 32),
});
}
 
fn debugFloatTypeAssumeCapacity(self: *Builder, name: MetadataString, size_in_bits: u64) Metadata {
assert(!self.strip);
return self.metadataSimpleAssumeCapacity(.basic_float_type, Metadata.BasicType{
.name = name,
.size_in_bits_lo = @truncate(size_in_bits),
.size_in_bits_hi = @truncate(size_in_bits >> 32),
});
}
 
fn debugForwardReferenceAssumeCapacity(self: *Builder) Metadata {
assert(!self.strip);
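// Reserves a placeholder index in the forward-reference range; the node it stands for is
// supplied later through debugForwardReferenceSetType.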
const index = Metadata.first_forward_reference + self.metadata_forward_references.items.len;
self.metadata_forward_references.appendAssumeCapacity(.none);
return @enumFromInt(index);
}
 
fn debugStructTypeAssumeCapacity(
self: *Builder,
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
underlying_type: Metadata,
size_in_bits: u64,
align_in_bits: u64,
fields_tuple: Metadata,
) Metadata {
assert(!self.strip);
return self.debugCompositeTypeAssumeCapacity(
.composite_struct_type,
name,
file,
scope,
line,
underlying_type,
size_in_bits,
align_in_bits,
fields_tuple,
);
}
 
fn debugUnionTypeAssumeCapacity(
self: *Builder,
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
underlying_type: Metadata,
size_in_bits: u64,
align_in_bits: u64,
fields_tuple: Metadata,
) Metadata {
assert(!self.strip);
return self.debugCompositeTypeAssumeCapacity(
.composite_union_type,
name,
file,
scope,
line,
underlying_type,
size_in_bits,
align_in_bits,
fields_tuple,
);
}
 
fn debugEnumerationTypeAssumeCapacity(
self: *Builder,
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
underlying_type: Metadata,
size_in_bits: u64,
align_in_bits: u64,
fields_tuple: Metadata,
) Metadata {
assert(!self.strip);
return self.debugCompositeTypeAssumeCapacity(
.composite_enumeration_type,
name,
file,
scope,
line,
underlying_type,
size_in_bits,
align_in_bits,
fields_tuple,
);
}
 
fn debugArrayTypeAssumeCapacity(
self: *Builder,
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
underlying_type: Metadata,
size_in_bits: u64,
align_in_bits: u64,
fields_tuple: Metadata,
) Metadata {
assert(!self.strip);
return self.debugCompositeTypeAssumeCapacity(
.composite_array_type,
name,
file,
scope,
line,
underlying_type,
size_in_bits,
align_in_bits,
fields_tuple,
);
}
 
fn debugVectorTypeAssumeCapacity(
self: *Builder,
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
underlying_type: Metadata,
size_in_bits: u64,
align_in_bits: u64,
fields_tuple: Metadata,
) Metadata {
assert(!self.strip);
return self.debugCompositeTypeAssumeCapacity(
.composite_vector_type,
name,
file,
scope,
line,
underlying_type,
size_in_bits,
align_in_bits,
fields_tuple,
);
}
 
fn debugCompositeTypeAssumeCapacity(
self: *Builder,
tag: Metadata.Tag,
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
underlying_type: Metadata,
size_in_bits: u64,
align_in_bits: u64,
fields_tuple: Metadata,
) Metadata {
assert(!self.strip);
return self.metadataSimpleAssumeCapacity(tag, Metadata.CompositeType{
.name = name,
.file = file,
.scope = scope,
.line = line,
.underlying_type = underlying_type,
.size_in_bits_lo = @truncate(size_in_bits),
.size_in_bits_hi = @truncate(size_in_bits >> 32),
.align_in_bits_lo = @truncate(align_in_bits),
.align_in_bits_hi = @truncate(align_in_bits >> 32),
.fields_tuple = fields_tuple,
});
}
 
fn debugPointerTypeAssumeCapacity(
self: *Builder,
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
underlying_type: Metadata,
size_in_bits: u64,
align_in_bits: u64,
offset_in_bits: u64,
) Metadata {
assert(!self.strip);
return self.metadataSimpleAssumeCapacity(.derived_pointer_type, Metadata.DerivedType{
.name = name,
.file = file,
.scope = scope,
.line = line,
.underlying_type = underlying_type,
.size_in_bits_lo = @truncate(size_in_bits),
.size_in_bits_hi = @truncate(size_in_bits >> 32),
.align_in_bits_lo = @truncate(align_in_bits),
.align_in_bits_hi = @truncate(align_in_bits >> 32),
.offset_in_bits_lo = @truncate(offset_in_bits),
.offset_in_bits_hi = @truncate(offset_in_bits >> 32),
});
}
 
fn debugMemberTypeAssumeCapacity(
self: *Builder,
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
underlying_type: Metadata,
size_in_bits: u64,
align_in_bits: u64,
offset_in_bits: u64,
) Metadata {
assert(!self.strip);
return self.metadataSimpleAssumeCapacity(.derived_member_type, Metadata.DerivedType{
.name = name,
.file = file,
.scope = scope,
.line = line,
.underlying_type = underlying_type,
.size_in_bits_lo = @truncate(size_in_bits),
.size_in_bits_hi = @truncate(size_in_bits >> 32),
.align_in_bits_lo = @truncate(align_in_bits),
.align_in_bits_hi = @truncate(align_in_bits >> 32),
.offset_in_bits_lo = @truncate(offset_in_bits),
.offset_in_bits_hi = @truncate(offset_in_bits >> 32),
});
}
 
fn debugSubroutineTypeAssumeCapacity(
self: *Builder,
types_tuple: Metadata,
) Metadata {
assert(!self.strip);
return self.metadataSimpleAssumeCapacity(.subroutine_type, Metadata.SubroutineType{
.types_tuple = types_tuple,
});
}
 
fn debugEnumeratorAssumeCapacity(
self: *Builder,
name: MetadataString,
unsigned: bool,
bit_width: u32,
value: std.math.big.int.Const,
) Metadata {
assert(!self.strip);
const Key = struct {
tag: Metadata.Tag,
name: MetadataString,
bit_width: u32,
value: std.math.big.int.Const,
};
const Adapter = struct {
builder: *const Builder,
pub fn hash(_: @This(), key: Key) u32 {
var hasher = std.hash.Wyhash.init(std.hash.uint32(@intFromEnum(key.tag)));
hasher.update(std.mem.asBytes(&key.name));
hasher.update(std.mem.asBytes(&key.bit_width));
hasher.update(std.mem.sliceAsBytes(key.value.limbs));
return @truncate(hasher.final());
}
 
pub fn eql(ctx: @This(), lhs_key: Key, _: void, rhs_index: usize) bool {
if (lhs_key.tag != ctx.builder.metadata_items.items(.tag)[rhs_index]) return false;
const rhs_data = ctx.builder.metadata_items.items(.data)[rhs_index];
const rhs_extra = ctx.builder.metadataExtraData(Metadata.Enumerator, rhs_data);
const limbs = ctx.builder.metadata_limbs
.items[rhs_extra.limbs_index..][0..rhs_extra.limbs_len];
const rhs_value = std.math.big.int.Const{
.limbs = limbs,
.positive = lhs_key.value.positive,
};
return lhs_key.name == rhs_extra.name and
lhs_key.bit_width == rhs_extra.bit_width and
lhs_key.value.eql(rhs_value);
}
};
 
const tag: Metadata.Tag = if (unsigned)
.enumerator_unsigned
else if (value.positive)
.enumerator_signed_positive
else
.enumerator_signed_negative;
 
assert(!(tag == .enumerator_unsigned and !value.positive));
 
const gop = self.metadata_map.getOrPutAssumeCapacityAdapted(
Key{
.tag = tag,
.name = name,
.bit_width = bit_width,
.value = value,
},
Adapter{ .builder = self },
);
 
if (!gop.found_existing) {
gop.key_ptr.* = {};
gop.value_ptr.* = {};
self.metadata_items.appendAssumeCapacity(.{
.tag = tag,
.data = self.addMetadataExtraAssumeCapacity(Metadata.Enumerator{
.name = name,
.bit_width = bit_width,
.limbs_index = @intCast(self.metadata_limbs.items.len),
.limbs_len = @intCast(value.limbs.len),
}),
});
self.metadata_limbs.appendSliceAssumeCapacity(value.limbs);
}
return @enumFromInt(gop.index);
}
 
fn debugSubrangeAssumeCapacity(
self: *Builder,
lower_bound: Metadata,
count: Metadata,
) Metadata {
assert(!self.strip);
return self.metadataSimpleAssumeCapacity(.subrange, Metadata.Subrange{
.lower_bound = lower_bound,
.count = count,
});
}
 
fn debugExpressionAssumeCapacity(
self: *Builder,
elements: []const u32,
) Metadata {
assert(!self.strip);
const Key = struct {
elements: []const u32,
};
const Adapter = struct {
builder: *const Builder,
pub fn hash(_: @This(), key: Key) u32 {
var hasher = comptime std.hash.Wyhash.init(std.hash.uint32(@intFromEnum(Metadata.Tag.expression)));
hasher.update(std.mem.sliceAsBytes(key.elements));
return @truncate(hasher.final());
}
 
pub fn eql(ctx: @This(), lhs_key: Key, _: void, rhs_index: usize) bool {
if (Metadata.Tag.expression != ctx.builder.metadata_items.items(.tag)[rhs_index]) return false;
const rhs_data = ctx.builder.metadata_items.items(.data)[rhs_index];
var rhs_extra = ctx.builder.metadataExtraDataTrail(Metadata.Expression, rhs_data);
return std.mem.eql(
u32,
lhs_key.elements,
rhs_extra.trail.next(rhs_extra.data.elements_len, u32, ctx.builder),
);
}
};
 
const gop = self.metadata_map.getOrPutAssumeCapacityAdapted(
Key{ .elements = elements },
Adapter{ .builder = self },
);
 
if (!gop.found_existing) {
gop.key_ptr.* = {};
gop.value_ptr.* = {};
self.metadata_items.appendAssumeCapacity(.{
.tag = .expression,
.data = self.addMetadataExtraAssumeCapacity(Metadata.Expression{
.elements_len = @intCast(elements.len),
}),
});
self.metadata_extra.appendSliceAssumeCapacity(@ptrCast(elements));
}
return @enumFromInt(gop.index);
}
 
fn debugTupleAssumeCapacity(
self: *Builder,
elements: []const Metadata,
) Metadata {
assert(!self.strip);
const Key = struct {
elements: []const Metadata,
};
const Adapter = struct {
builder: *const Builder,
pub fn hash(_: @This(), key: Key) u32 {
var hasher = comptime std.hash.Wyhash.init(std.hash.uint32(@intFromEnum(Metadata.Tag.tuple)));
hasher.update(std.mem.sliceAsBytes(key.elements));
return @truncate(hasher.final());
}
 
pub fn eql(ctx: @This(), lhs_key: Key, _: void, rhs_index: usize) bool {
if (Metadata.Tag.tuple != ctx.builder.metadata_items.items(.tag)[rhs_index]) return false;
const rhs_data = ctx.builder.metadata_items.items(.data)[rhs_index];
var rhs_extra = ctx.builder.metadataExtraDataTrail(Metadata.Tuple, rhs_data);
return std.mem.eql(
Metadata,
lhs_key.elements,
rhs_extra.trail.next(rhs_extra.data.elements_len, Metadata, ctx.builder),
);
}
};
 
const gop = self.metadata_map.getOrPutAssumeCapacityAdapted(
Key{ .elements = elements },
Adapter{ .builder = self },
);
 
if (!gop.found_existing) {
gop.key_ptr.* = {};
gop.value_ptr.* = {};
self.metadata_items.appendAssumeCapacity(.{
.tag = .tuple,
.data = self.addMetadataExtraAssumeCapacity(Metadata.Tuple{
.elements_len = @intCast(elements.len),
}),
});
self.metadata_extra.appendSliceAssumeCapacity(@ptrCast(elements));
}
return @enumFromInt(gop.index);
}
 
fn debugModuleFlagAssumeCapacity(
self: *Builder,
behavior: Metadata,
name: MetadataString,
constant: Metadata,
) Metadata {
assert(!self.strip);
return self.metadataSimpleAssumeCapacity(.module_flag, Metadata.ModuleFlag{
.behavior = behavior,
.name = name,
.constant = constant,
});
}
 
fn debugLocalVarAssumeCapacity(
self: *Builder,
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
ty: Metadata,
) Metadata {
assert(!self.strip);
return self.metadataSimpleAssumeCapacity(.local_var, Metadata.LocalVar{
.name = name,
.file = file,
.scope = scope,
.line = line,
.ty = ty,
});
}
 
fn debugParameterAssumeCapacity(
self: *Builder,
name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
ty: Metadata,
arg_no: u32,
) Metadata {
assert(!self.strip);
return self.metadataSimpleAssumeCapacity(.parameter, Metadata.Parameter{
.name = name,
.file = file,
.scope = scope,
.line = line,
.ty = ty,
.arg_no = arg_no,
});
}
 
fn debugGlobalVarAssumeCapacity(
self: *Builder,
name: MetadataString,
linkage_name: MetadataString,
file: Metadata,
scope: Metadata,
line: u32,
ty: Metadata,
variable: Variable.Index,
options: Metadata.GlobalVar.Options,
) Metadata {
assert(!self.strip);
return self.metadataDistinctAssumeCapacity(
if (options.local) .@"global_var local" else .global_var,
Metadata.GlobalVar{
.name = name,
.linkage_name = linkage_name,
.file = file,
.scope = scope,
.line = line,
.ty = ty,
.variable = variable,
},
);
}
 
fn debugGlobalVarExpressionAssumeCapacity(
self: *Builder,
variable: Metadata,
expression: Metadata,
) Metadata {
assert(!self.strip);
return self.metadataSimpleAssumeCapacity(.global_var_expression, Metadata.GlobalVarExpression{
.variable = variable,
.expression = expression,
});
}
 
fn debugConstantAssumeCapacity(self: *Builder, constant: Constant) Metadata {
assert(!self.strip);
const Adapter = struct {
builder: *const Builder,
pub fn hash(_: @This(), key: Constant) u32 {
var hasher = comptime std.hash.Wyhash.init(std.hash.uint32(@intFromEnum(Metadata.Tag.constant)));
hasher.update(std.mem.asBytes(&key));
return @truncate(hasher.final());
}
 
pub fn eql(ctx: @This(), lhs_key: Constant, _: void, rhs_index: usize) bool {
if (Metadata.Tag.constant != ctx.builder.metadata_items.items(.tag)[rhs_index]) return false;
const rhs_data: Constant = @enumFromInt(ctx.builder.metadata_items.items(.data)[rhs_index]);
return rhs_data == lhs_key;
}
};
 
const gop = self.metadata_map.getOrPutAssumeCapacityAdapted(
constant,
Adapter{ .builder = self },
);
 
if (!gop.found_existing) {
gop.key_ptr.* = {};
gop.value_ptr.* = {};
self.metadata_items.appendAssumeCapacity(.{
.tag = .constant,
.data = @intFromEnum(constant),
});
}
return @enumFromInt(gop.index);
}
 
pub fn toBitcode(self: *Builder, allocator: Allocator) bitcode_writer.Error![]const u32 {
const BitcodeWriter = bitcode_writer.BitcodeWriter(&.{ Type, FunctionAttributes });
var bitcode = BitcodeWriter.init(allocator, .{
std.math.log2_int_ceil(usize, self.type_items.items.len),
std.math.log2_int_ceil(usize, 1 + self.function_attributes_set.count()),
});
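// The writer is parameterized over Type and FunctionAttributes and is handed the number of
// bits needed to index each (log2 of the respective entry counts).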
errdefer bitcode.deinit();
 
// Write the LLVM bitcode magic number
try bitcode.writeBits(ir.MAGIC, 32);
 
var record: std.ArrayListUnmanaged(u64) = .{};
defer record.deinit(self.gpa);
 
// IDENTIFICATION_BLOCK
{
const Identification = ir.Identification;
var identification_block = try bitcode.enterTopBlock(Identification);
 
const producer = try std.fmt.allocPrint(self.gpa, "zig {d}.{d}.{d}", .{
build_options.semver.major,
build_options.semver.minor,
build_options.semver.patch,
});
defer self.gpa.free(producer);
 
try identification_block.writeAbbrev(Identification.Version{ .string = producer });
try identification_block.writeAbbrev(Identification.Epoch{ .epoch = 0 });
 
try identification_block.end();
}
 
// MODULE_BLOCK
{
const Module = ir.Module;
var module_block = try bitcode.enterTopBlock(Module);
 
try module_block.writeAbbrev(Module.Version{});
 
if (self.target_triple.slice(self)) |triple| {
try module_block.writeAbbrev(Module.String{
.code = 2,
.string = triple,
});
}
 
if (self.data_layout.slice(self)) |data_layout| {
try module_block.writeAbbrev(Module.String{
.code = 3,
.string = data_layout,
});
}
 
if (self.source_filename.slice(self)) |source_filename| {
try module_block.writeAbbrev(Module.String{
.code = 16,
.string = source_filename,
});
}
 
if (self.module_asm.items.len != 0) {
try module_block.writeAbbrev(Module.String{
.code = 4,
.string = self.module_asm.items,
});
}
 
// TYPE_BLOCK
{
var type_block = try module_block.enterSubBlock(ir.Type);
 
try type_block.writeAbbrev(ir.Type.NumEntry{ .num = @intCast(self.type_items.items.len) });
 
for (self.type_items.items, 0..) |item, i| {
const ty: Type = @enumFromInt(i);
 
switch (item.tag) {
.simple => try type_block.writeAbbrev(ir.Type.Simple{ .code = @truncate(item.data) }),
.integer => try type_block.writeAbbrev(ir.Type.Integer{ .width = item.data }),
.structure,
.packed_structure,
=> |kind| {
const is_packed = switch (kind) {
.structure => false,
.packed_structure => true,
else => unreachable,
};
var extra = self.typeExtraDataTrail(Type.Structure, item.data);
try type_block.writeAbbrev(ir.Type.StructAnon{
.is_packed = is_packed,
.types = extra.trail.next(extra.data.fields_len, Type, self),
});
},
.named_structure => {
const extra = self.typeExtraData(Type.NamedStructure, item.data);
try type_block.writeAbbrev(ir.Type.StructName{
.string = extra.id.slice(self).?,
});
 
switch (extra.body) {
.none => try type_block.writeAbbrev(ir.Type.Opaque{}),
else => {
const real_struct = self.type_items.items[@intFromEnum(extra.body)];
const is_packed: bool = switch (real_struct.tag) {
.structure => false,
.packed_structure => true,
else => unreachable,
};
 
var real_extra = self.typeExtraDataTrail(Type.Structure, real_struct.data);
try type_block.writeAbbrev(ir.Type.StructNamed{
.is_packed = is_packed,
.types = real_extra.trail.next(real_extra.data.fields_len, Type, self),
});
},
}
},
.array,
.small_array,
=> try type_block.writeAbbrev(ir.Type.Array{
.len = ty.aggregateLen(self),
.child = ty.childType(self),
}),
.vector,
.scalable_vector,
=> try type_block.writeAbbrev(ir.Type.Vector{
.len = ty.aggregateLen(self),
.child = ty.childType(self),
}),
.pointer => try type_block.writeAbbrev(ir.Type.Pointer{
.addr_space = ty.pointerAddrSpace(self),
}),
.target => {
var extra = self.typeExtraDataTrail(Type.Target, item.data);
try type_block.writeAbbrev(ir.Type.StructName{
.string = extra.data.name.slice(self).?,
});
 
const types = extra.trail.next(extra.data.types_len, Type, self);
const ints = extra.trail.next(extra.data.ints_len, u32, self);
 
try type_block.writeAbbrev(ir.Type.Target{
.num_types = extra.data.types_len,
.types = types,
.ints = ints,
});
},
.function, .vararg_function => |kind| {
const is_vararg = switch (kind) {
.function => false,
.vararg_function => true,
else => unreachable,
};
var extra = self.typeExtraDataTrail(Type.Function, item.data);
try type_block.writeAbbrev(ir.Type.Function{
.is_vararg = is_vararg,
.return_type = extra.data.ret,
.param_types = extra.trail.next(extra.data.params_len, Type, self),
});
},
}
}
 
try type_block.end();
}
 
var attributes_set: std.AutoArrayHashMapUnmanaged(struct {
attributes: Attributes,
index: u32,
}, void) = .{};
defer attributes_set.deinit(self.gpa);
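// Each distinct (attribute set, slot index) pair becomes one attribute group; the PARAMATTR
// block below refers back to these groups by their index in this map.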
 
// PARAMATTR_GROUP_BLOCK
{
const ParamattrGroup = ir.ParamattrGroup;
 
var paramattr_group_block = try module_block.enterSubBlock(ParamattrGroup);
 
for (self.function_attributes_set.keys()) |func_attributes| {
for (func_attributes.slice(self), 0..) |attributes, i| {
const attributes_slice = attributes.slice(self);
if (attributes_slice.len == 0) continue;
 
const attr_gop = try attributes_set.getOrPut(self.gpa, .{
.attributes = attributes,
.index = @intCast(i),
});
 
if (attr_gop.found_existing) continue;
 
record.clearRetainingCapacity();
try record.ensureUnusedCapacity(self.gpa, 2);
 
record.appendAssumeCapacity(attr_gop.index);
record.appendAssumeCapacity(switch (i) {
0 => 0xffffffff,
else => i - 1,
});
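// A group record is [group id, slot, attributes...]: slot 0 of the builder's attribute list
// is written as 0xffffffff (LLVM's function-attribute index), later slots as their index
// minus one.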
 
for (attributes_slice) |attr_index| {
const kind = attr_index.getKind(self);
switch (attr_index.toAttribute(self)) {
.zeroext,
.signext,
.inreg,
.@"noalias",
.nocapture,
.nofree,
.nest,
.returned,
.nonnull,
.swiftself,
.swiftasync,
.swifterror,
.immarg,
.noundef,
.allocalign,
.allocptr,
.readnone,
.readonly,
.writeonly,
.alwaysinline,
.builtin,
.cold,
.convergent,
.disable_sanitizer_information,
.fn_ret_thunk_extern,
.hot,
.inlinehint,
.jumptable,
.minsize,
.naked,
.nobuiltin,
.nocallback,
.noduplicate,
.noimplicitfloat,
.@"noinline",
.nomerge,
.nonlazybind,
.noprofile,
.skipprofile,
.noredzone,
.noreturn,
.norecurse,
.willreturn,
.nosync,
.nounwind,
.nosanitize_bounds,
.nosanitize_coverage,
.null_pointer_is_valid,
.optforfuzzing,
.optnone,
.optsize,
.returns_twice,
.safestack,
.sanitize_address,
.sanitize_memory,
.sanitize_thread,
.sanitize_hwaddress,
.sanitize_memtag,
.speculative_load_hardening,
.speculatable,
.ssp,
.sspstrong,
.sspreq,
.strictfp,
.nocf_check,
.shadowcallstack,
.mustprogress,
.no_sanitize_address,
.no_sanitize_hwaddress,
.sanitize_address_dyninit,
=> {
try record.ensureUnusedCapacity(self.gpa, 2);
record.appendAssumeCapacity(0);
record.appendAssumeCapacity(@intFromEnum(kind));
},
.byval,
.byref,
.preallocated,
.inalloca,
.sret,
.elementtype,
=> |ty| {
try record.ensureUnusedCapacity(self.gpa, 3);
record.appendAssumeCapacity(6);
record.appendAssumeCapacity(@intFromEnum(kind));
record.appendAssumeCapacity(@intFromEnum(ty));
},
.@"align",
.alignstack,
=> |alignment| {
try record.ensureUnusedCapacity(self.gpa, 3);
record.appendAssumeCapacity(1);
record.appendAssumeCapacity(@intFromEnum(kind));
record.appendAssumeCapacity(alignment.toByteUnits() orelse 0);
},
.dereferenceable,
.dereferenceable_or_null,
=> |size| {
try record.ensureUnusedCapacity(self.gpa, 3);
record.appendAssumeCapacity(1);
record.appendAssumeCapacity(@intFromEnum(kind));
record.appendAssumeCapacity(size);
},
.nofpclass => |fpclass| {
try record.ensureUnusedCapacity(self.gpa, 3);
record.appendAssumeCapacity(1);
record.appendAssumeCapacity(@intFromEnum(kind));
record.appendAssumeCapacity(@as(u32, @bitCast(fpclass)));
},
.allockind => |allockind| {
try record.ensureUnusedCapacity(self.gpa, 3);
record.appendAssumeCapacity(1);
record.appendAssumeCapacity(@intFromEnum(kind));
record.appendAssumeCapacity(@as(u32, @bitCast(allockind)));
},
 
.allocsize => |allocsize| {
try record.ensureUnusedCapacity(self.gpa, 3);
record.appendAssumeCapacity(1);
record.appendAssumeCapacity(@intFromEnum(kind));
record.appendAssumeCapacity(@bitCast(allocsize.toLlvm()));
},
.memory => |memory| {
try record.ensureUnusedCapacity(self.gpa, 3);
record.appendAssumeCapacity(1);
record.appendAssumeCapacity(@intFromEnum(kind));
record.appendAssumeCapacity(@as(u32, @bitCast(memory)));
},
.uwtable => |uwtable| if (uwtable != .none) {
try record.ensureUnusedCapacity(self.gpa, 3);
record.appendAssumeCapacity(1);
record.appendAssumeCapacity(@intFromEnum(kind));
record.appendAssumeCapacity(@intFromEnum(uwtable));
},
.vscale_range => |vscale_range| {
try record.ensureUnusedCapacity(self.gpa, 3);
record.appendAssumeCapacity(1);
record.appendAssumeCapacity(@intFromEnum(kind));
record.appendAssumeCapacity(@bitCast(vscale_range.toLlvm()));
},
.string => |string_attr| {
const string_attr_kind_slice = string_attr.kind.slice(self).?;
const string_attr_value_slice = if (string_attr.value != .none)
string_attr.value.slice(self).?
else
null;
 
try record.ensureUnusedCapacity(
self.gpa,
2 + string_attr_kind_slice.len + if (string_attr_value_slice) |slice| slice.len + 1 else 0,
);
record.appendAssumeCapacity(if (string_attr.value == .none) 3 else 4);
for (string_attr.kind.slice(self).?) |c| {
record.appendAssumeCapacity(c);
}
record.appendAssumeCapacity(0);
if (string_attr_value_slice) |slice| {
for (slice) |c| {
record.appendAssumeCapacity(c);
}
record.appendAssumeCapacity(0);
}
},
.none => unreachable,
}
}
 
try paramattr_group_block.writeUnabbrev(3, record.items);
}
}
 
try paramattr_group_block.end();
}
 
// PARAMATTR_BLOCK
{
const Paramattr = ir.Paramattr;
var paramattr_block = try module_block.enterSubBlock(Paramattr);
 
for (self.function_attributes_set.keys()) |func_attributes| {
const func_attributes_slice = func_attributes.slice(self);
record.clearRetainingCapacity();
try record.ensureUnusedCapacity(self.gpa, func_attributes_slice.len);
for (func_attributes_slice, 0..) |attributes, i| {
const attributes_slice = attributes.slice(self);
if (attributes_slice.len == 0) continue;
 
const group_index = attributes_set.getIndex(.{
.attributes = attributes,
.index = @intCast(i),
}).?;
record.appendAssumeCapacity(@intCast(group_index));
}
 
try paramattr_block.writeAbbrev(Paramattr.Entry{ .group_indices = record.items });
}
 
try paramattr_block.end();
}
 
var globals: std.AutoArrayHashMapUnmanaged(Global.Index, void) = .{};
defer globals.deinit(self.gpa);
try globals.ensureUnusedCapacity(
self.gpa,
self.variables.items.len +
self.functions.items.len +
self.aliases.items.len,
);
 
for (self.variables.items) |variable| {
if (variable.global.getReplacement(self) != .none) continue;
 
globals.putAssumeCapacity(variable.global, {});
}
 
for (self.functions.items) |function| {
if (function.global.getReplacement(self) != .none) continue;
 
globals.putAssumeCapacity(function.global, {});
}
 
for (self.aliases.items) |alias| {
if (alias.global.getReplacement(self) != .none) continue;
 
globals.putAssumeCapacity(alias.global, {});
}
 
const ConstantAdapter = struct {
const ConstantAdapter = @This();
builder: *const Builder,
globals: *const std.AutoArrayHashMapUnmanaged(Global.Index, void),
 
pub fn get(adapter: @This(), param: anytype, comptime field_name: []const u8) @TypeOf(param) {
_ = field_name;
return switch (@TypeOf(param)) {
Constant => @enumFromInt(adapter.getConstantIndex(param)),
else => param,
};
}
 
pub fn getConstantIndex(adapter: ConstantAdapter, constant: Constant) u32 {
return switch (constant.unwrap()) {
.constant => |c| c + adapter.numGlobals(),
.global => |global| @intCast(adapter.globals.getIndex(global.unwrap(adapter.builder)).?),
};
}
 
pub fn numConstants(adapter: ConstantAdapter) u32 {
return @intCast(adapter.globals.count() + adapter.builder.constant_items.len);
}
 
pub fn numGlobals(adapter: ConstantAdapter) u32 {
return @intCast(adapter.globals.count());
}
};
 
const constant_adapter = ConstantAdapter{
.builder = self,
.globals = &globals,
};
 
// Globals
{
var section_map: std.AutoArrayHashMapUnmanaged(String, void) = .{};
defer section_map.deinit(self.gpa);
try section_map.ensureUnusedCapacity(self.gpa, globals.count());
 
for (self.variables.items) |variable| {
if (variable.global.getReplacement(self) != .none) continue;
 
const section = blk: {
if (variable.section == .none) break :blk 0;
const gop = section_map.getOrPutAssumeCapacity(variable.section);
if (!gop.found_existing) {
try module_block.writeAbbrev(Module.String{
.code = 5,
.string = variable.section.slice(self).?,
});
}
break :blk gop.index + 1;
};
 
const initid = if (variable.init == .no_init)
0
else
(constant_adapter.getConstantIndex(variable.init) + 1);
 
const strtab = variable.global.strtab(self);
 
const global = variable.global.ptrConst(self);
try module_block.writeAbbrev(Module.Variable{
.strtab_offset = strtab.offset,
.strtab_size = strtab.size,
.type_index = global.type,
.is_const = .{
.is_const = switch (variable.mutability) {
.global => false,
.constant => true,
},
.addr_space = global.addr_space,
},
.initid = initid,
.linkage = global.linkage,
.alignment = variable.alignment.toLlvm(),
.section = section,
.visibility = global.visibility,
.thread_local = variable.thread_local,
.unnamed_addr = global.unnamed_addr,
.externally_initialized = global.externally_initialized,
.dllstorageclass = global.dll_storage_class,
.preemption = global.preemption,
});
}
 
for (self.functions.items) |func| {
if (func.global.getReplacement(self) != .none) continue;
 
const section = blk: {
if (func.section == .none) break :blk 0;
const gop = section_map.getOrPutAssumeCapacity(func.section);
if (!gop.found_existing) {
try module_block.writeAbbrev(Module.String{
.code = 5,
.string = func.section.slice(self).?,
});
}
break :blk gop.index + 1;
};
 
const paramattr_index = if (self.function_attributes_set.getIndex(func.attributes)) |index|
index + 1
else
0;
 
const strtab = func.global.strtab(self);
 
const global = func.global.ptrConst(self);
try module_block.writeAbbrev(Module.Function{
.strtab_offset = strtab.offset,
.strtab_size = strtab.size,
.type_index = global.type,
.call_conv = func.call_conv,
.is_proto = func.instructions.len == 0,
.linkage = global.linkage,
.paramattr = paramattr_index,
.alignment = func.alignment.toLlvm(),
.section = section,
.visibility = global.visibility,
.unnamed_addr = global.unnamed_addr,
.dllstorageclass = global.dll_storage_class,
.preemption = global.preemption,
.addr_space = global.addr_space,
});
}
 
for (self.aliases.items) |alias| {
if (alias.global.getReplacement(self) != .none) continue;
 
const strtab = alias.global.strtab(self);
 
const global = alias.global.ptrConst(self);
try module_block.writeAbbrev(Module.Alias{
.strtab_offset = strtab.offset,
.strtab_size = strtab.size,
.type_index = global.type,
.addr_space = global.addr_space,
.aliasee = constant_adapter.getConstantIndex(alias.aliasee),
.linkage = global.linkage,
.visibility = global.visibility,
.thread_local = alias.thread_local,
.unnamed_addr = global.unnamed_addr,
.dllstorageclass = global.dll_storage_class,
.preemption = global.preemption,
});
}
}
 
// CONSTANTS_BLOCK
{
const Constants = ir.Constants;
var constants_block = try module_block.enterSubBlock(Constants);
 
var current_type: Type = .none;
const tags = self.constant_items.items(.tag);
const datas = self.constant_items.items(.data);
for (0..self.constant_items.len) |index| {
record.clearRetainingCapacity();
const constant: Constant = @enumFromInt(index);
const constant_type = constant.typeOf(self);
if (constant_type != current_type) {
try constants_block.writeAbbrev(Constants.SetType{ .type_id = constant_type });
current_type = constant_type;
}
const data = datas[index];
switch (tags[index]) {
.null,
.zeroinitializer,
.none,
=> try constants_block.writeAbbrev(Constants.Null{}),
.undef => try constants_block.writeAbbrev(Constants.Undef{}),
.poison => try constants_block.writeAbbrev(Constants.Poison{}),
.positive_integer,
.negative_integer,
=> |tag| {
const extra: *align(@alignOf(std.math.big.Limb)) Constant.Integer =
@ptrCast(self.constant_limbs.items[data..][0..Constant.Integer.limbs]);
const limbs = self.constant_limbs
.items[data + Constant.Integer.limbs ..][0..extra.limbs_len];
const bigint: std.math.big.int.Const = .{
.limbs = limbs,
.positive = tag == .positive_integer,
};
 
const bit_count = extra.type.scalarBits(self);
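// Small integers use the bitcode signed encoding: a non-negative value v is emitted as
// v << 1, a negative value as ((-v) << 1) | 1, so e.g. 5 becomes 10 and -5 becomes 11.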
if (bit_count <= 64) {
const val = bigint.to(i64) catch unreachable;
const emit_val = if (tag == .positive_integer)
@shlWithOverflow(val, 1)[0]
else
(@shlWithOverflow(@addWithOverflow(~val, 1)[0], 1)[0] | 1);
try constants_block.writeAbbrev(Constants.Integer{ .value = @bitCast(emit_val) });
} else {
const word_count = std.mem.alignForward(u24, bit_count, 64) / 64;
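// Wider integers are split into 64-bit words: the record's reserved storage is used as a
// byte buffer for the little-endian two's complement form, then each word is rewritten in
// place with the same sign encoding before being emitted as an unabbreviated record.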
try record.ensureUnusedCapacity(self.gpa, word_count);
const buffer: [*]u8 = @ptrCast(record.items.ptr);
bigint.writeTwosComplement(buffer[0..(word_count * 8)], .little);
 
const signed_buffer: [*]i64 = @ptrCast(record.items.ptr);
for (signed_buffer[0..word_count], 0..) |val, i| {
signed_buffer[i] = if (val >= 0)
@shlWithOverflow(val, 1)[0]
else
(@shlWithOverflow(@addWithOverflow(~val, 1)[0], 1)[0] | 1);
}
 
try constants_block.writeUnabbrev(5, record.items.ptr[0..word_count]);
}
},
.half,
.bfloat,
=> try constants_block.writeAbbrev(Constants.Half{ .value = @truncate(data) }),
.float => try constants_block.writeAbbrev(Constants.Float{ .value = data }),
.double => {
const extra = self.constantExtraData(Constant.Double, data);
try constants_block.writeAbbrev(Constants.Double{
.value = (@as(u64, extra.hi) << 32) | extra.lo,
});
},
.x86_fp80 => {
const extra = self.constantExtraData(Constant.Fp80, data);
try constants_block.writeAbbrev(Constants.Fp80{
.hi = @as(u64, extra.hi) << 48 | @as(u64, extra.lo_hi) << 16 |
extra.lo_lo >> 16,
.lo = @truncate(extra.lo_lo),
});
},
.fp128,
.ppc_fp128,
=> {
const extra = self.constantExtraData(Constant.Fp128, data);
try constants_block.writeAbbrev(Constants.Fp128{
.lo = @as(u64, extra.lo_hi) << 32 | @as(u64, extra.lo_lo),
.hi = @as(u64, extra.hi_hi) << 32 | @as(u64, extra.hi_lo),
});
},
.array,
.vector,
.structure,
.packed_structure,
=> {
var extra = self.constantExtraDataTrail(Constant.Aggregate, data);
const len: u32 = @intCast(extra.data.type.aggregateLen(self));
const values = extra.trail.next(len, Constant, self);
 
try constants_block.writeAbbrevAdapted(
Constants.Aggregate{ .values = values },
constant_adapter,
);
},
.splat => {
const ConstantsWriter = @TypeOf(constants_block);
const extra = self.constantExtraData(Constant.Splat, data);
const vector_len = extra.type.vectorLen(self);
const c = constant_adapter.getConstantIndex(extra.value);
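// There is no dedicated splat record here; the splat is lowered through the Aggregate
// abbreviation by repeating the splatted constant's index vector_len times.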
 
try bitcode.writeBits(
ConstantsWriter.abbrevId(Constants.Aggregate),
ConstantsWriter.abbrev_len,
);
try bitcode.writeVBR(vector_len, 6);
for (0..vector_len) |_| {
try bitcode.writeBits(c, Constants.Aggregate.ops[1].array_fixed);
}
},
.string => {
const str: String = @enumFromInt(data);
if (str == .none) {
try constants_block.writeAbbrev(Constants.Null{});
} else {
const slice = str.slice(self).?;
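// Strings ending in NUL drop the terminator and use the CString record; all others are
// emitted as a plain String record.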
if (slice.len > 0 and slice[slice.len - 1] == 0)
try constants_block.writeAbbrev(Constants.CString{ .string = slice[0 .. slice.len - 1] })
else
try constants_block.writeAbbrev(Constants.String{ .string = slice });
}
},
.bitcast,
.inttoptr,
.ptrtoint,
.fptosi,
.fptoui,
.sitofp,
.uitofp,
.addrspacecast,
.fptrunc,
.trunc,
.fpext,
.sext,
.zext,
=> |tag| {
const extra = self.constantExtraData(Constant.Cast, data);
try constants_block.writeAbbrevAdapted(Constants.Cast{
.type_index = extra.type,
.val = extra.val,
.opcode = tag.toCastOpcode(),
}, constant_adapter);
},
.add,
.@"add nsw",
.@"add nuw",
.sub,
.@"sub nsw",
.@"sub nuw",
.mul,
.@"mul nsw",
.@"mul nuw",
.shl,
.lshr,
.ashr,
.@"and",
.@"or",
.xor,
=> |tag| {
const extra = self.constantExtraData(Constant.Binary, data);
try constants_block.writeAbbrevAdapted(Constants.Binary{
.opcode = tag.toBinaryOpcode(),
.lhs = extra.lhs,
.rhs = extra.rhs,
}, constant_adapter);
},
.icmp,
.fcmp,
=> {
const extra = self.constantExtraData(Constant.Compare, data);
try constants_block.writeAbbrevAdapted(Constants.Cmp{
.ty = extra.lhs.typeOf(self),
.lhs = extra.lhs,
.rhs = extra.rhs,
.pred = extra.cond,
}, constant_adapter);
},
.extractelement => {
const extra = self.constantExtraData(Constant.ExtractElement, data);
try constants_block.writeAbbrevAdapted(Constants.ExtractElement{
.val_type = extra.val.typeOf(self),
.val = extra.val,
.index_type = extra.index.typeOf(self),
.index = extra.index,
}, constant_adapter);
},
.insertelement => {
const extra = self.constantExtraData(Constant.InsertElement, data);
try constants_block.writeAbbrevAdapted(Constants.InsertElement{
.val = extra.val,
.elem = extra.elem,
.index_type = extra.index.typeOf(self),
.index = extra.index,
}, constant_adapter);
},
.shufflevector => {
const extra = self.constantExtraData(Constant.ShuffleVector, data);
const ty = constant.typeOf(self);
const lhs_type = extra.lhs.typeOf(self);
// If the result type differs from the operand type, the shuffle widens or truncates the
// vector, so the variant carrying an explicit result type must be used.
if (ty == lhs_type) {
try constants_block.writeAbbrevAdapted(Constants.ShuffleVector{
.lhs = extra.lhs,
.rhs = extra.rhs,
.mask = extra.mask,
}, constant_adapter);
} else {
try constants_block.writeAbbrevAdapted(Constants.ShuffleVectorEx{
.ty = ty,
.lhs = extra.lhs,
.rhs = extra.rhs,
.mask = extra.mask,
}, constant_adapter);
}
},
.getelementptr,
.@"getelementptr inbounds",
=> |tag| {
var extra = self.constantExtraDataTrail(Constant.GetElementPtr, data);
const indices = extra.trail.next(extra.data.info.indices_len, Constant, self);
try record.ensureUnusedCapacity(self.gpa, 1 + 2 + 2 * indices.len);
 
record.appendAssumeCapacity(@intFromEnum(extra.data.type));
 
record.appendAssumeCapacity(@intFromEnum(extra.data.base.typeOf(self)));
record.appendAssumeCapacity(constant_adapter.getConstantIndex(extra.data.base));
 
for (indices) |i| {
record.appendAssumeCapacity(@intFromEnum(i.typeOf(self)));
record.appendAssumeCapacity(constant_adapter.getConstantIndex(i));
}
 
try constants_block.writeUnabbrev(switch (tag) {
.getelementptr => 12,
.@"getelementptr inbounds" => 20,
else => unreachable,
}, record.items);
},
.@"asm",
.@"asm sideeffect",
.@"asm alignstack",
.@"asm sideeffect alignstack",
.@"asm inteldialect",
.@"asm sideeffect inteldialect",
.@"asm alignstack inteldialect",
.@"asm sideeffect alignstack inteldialect",
.@"asm unwind",
.@"asm sideeffect unwind",
.@"asm alignstack unwind",
.@"asm sideeffect alignstack unwind",
.@"asm inteldialect unwind",
.@"asm sideeffect inteldialect unwind",
.@"asm alignstack inteldialect unwind",
.@"asm sideeffect alignstack inteldialect unwind",
=> |tag| {
const extra = self.constantExtraData(Constant.Assembly, data);
 
const assembly_slice = extra.assembly.slice(self).?;
const constraints_slice = extra.constraints.slice(self).?;
 
try record.ensureUnusedCapacity(self.gpa, 4 + assembly_slice.len + constraints_slice.len);
 
record.appendAssumeCapacity(@intFromEnum(extra.type));
record.appendAssumeCapacity(switch (tag) {
.@"asm" => 0,
.@"asm sideeffect" => 0b0001,
.@"asm sideeffect alignstack" => 0b0011,
.@"asm sideeffect inteldialect" => 0b0101,
.@"asm sideeffect alignstack inteldialect" => 0b0111,
.@"asm sideeffect unwind" => 0b1001,
.@"asm sideeffect alignstack unwind" => 0b1011,
.@"asm sideeffect inteldialect unwind" => 0b1101,
.@"asm sideeffect alignstack inteldialect unwind" => 0b1111,
.@"asm alignstack" => 0b0010,
.@"asm inteldialect" => 0b0100,
.@"asm alignstack inteldialect" => 0b0110,
.@"asm unwind" => 0b1000,
.@"asm alignstack unwind" => 0b1010,
.@"asm inteldialect unwind" => 0b1100,
.@"asm alignstack inteldialect unwind" => 0b1110,
else => unreachable,
});
 
record.appendAssumeCapacity(assembly_slice.len);
for (assembly_slice) |c| record.appendAssumeCapacity(c);
 
record.appendAssumeCapacity(constraints_slice.len);
for (constraints_slice) |c| record.appendAssumeCapacity(c);
 
try constants_block.writeUnabbrev(30, record.items);
},
.blockaddress => {
const extra = self.constantExtraData(Constant.BlockAddress, data);
try constants_block.writeAbbrev(Constants.BlockAddress{
.type_id = extra.function.typeOf(self),
.function = constant_adapter.getConstantIndex(extra.function.toConst(self)),
.block = @intFromEnum(extra.block),
});
},
.dso_local_equivalent,
.no_cfi,
=> |tag| {
const function: Function.Index = @enumFromInt(data);
try constants_block.writeAbbrev(Constants.DsoLocalEquivalentOrNoCfi{
.code = switch (tag) {
.dso_local_equivalent => 27,
.no_cfi => 29,
else => unreachable,
},
.type_id = function.typeOf(self),
.function = constant_adapter.getConstantIndex(function.toConst(self)),
});
},
}
}
 
try constants_block.end();
}
 
// METADATA_KIND_BLOCK
if (!self.strip) {
const MetadataKindBlock = ir.MetadataKindBlock;
var metadata_kind_block = try module_block.enterSubBlock(MetadataKindBlock);
 
inline for (@typeInfo(ir.MetadataKind).Enum.fields) |field| {
try metadata_kind_block.writeAbbrev(MetadataKindBlock.Kind{
.id = field.value,
.name = field.name,
});
}
 
try metadata_kind_block.end();
}
 
const MetadataAdapter = struct {
builder: *const Builder,
constant_adapter: ConstantAdapter,
 
pub fn init(
builder: *const Builder,
const_adapter: ConstantAdapter,
) @This() {
return .{
.builder = builder,
.constant_adapter = const_adapter,
};
}
 
pub fn get(adapter: @This(), value: anytype, comptime field_name: []const u8) @TypeOf(value) {
_ = field_name;
const Ty = @TypeOf(value);
return switch (Ty) {
Metadata => @enumFromInt(adapter.getMetadataIndex(value)),
MetadataString => @enumFromInt(adapter.getMetadataStringIndex(value)),
Constant => @enumFromInt(adapter.constant_adapter.getConstantIndex(value)),
else => value,
};
}
 
pub fn getMetadataIndex(adapter: @This(), metadata: Metadata) u32 {
if (metadata == .none) return 0;
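// Metadata strings occupy the low IDs in the block, so node IDs start after them; the - 1
// accounts for the builder's .none entry, which is never emitted.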
return @intCast(adapter.builder.metadata_string_map.count() +
@intFromEnum(metadata.unwrap(adapter.builder)) - 1);
}
 
pub fn getMetadataStringIndex(_: @This(), metadata_string: MetadataString) u32 {
return @intFromEnum(metadata_string);
}
};
 
const metadata_adapter = MetadataAdapter.init(self, constant_adapter);
 
// METADATA_BLOCK
if (!self.strip) {
const MetadataBlock = ir.MetadataBlock;
var metadata_block = try module_block.enterSubBlock(MetadataBlock);
 
const MetadataBlockWriter = @TypeOf(metadata_block);
 
// Emit all MetadataStrings
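// All strings are packed into a single METADATA_STRINGS record: the string count, the byte
// offset at which the character data starts, the overall blob size, then the VBR6-encoded
// lengths followed by the raw bytes, with each part padded to a 32-bit boundary.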
{
const strings_offset, const strings_size = blk: {
var strings_offset: u32 = 0;
var strings_size: u32 = 0;
for (1..self.metadata_string_map.count()) |metadata_string_index| {
const metadata_string: MetadataString = @enumFromInt(metadata_string_index);
const slice = metadata_string.slice(self);
strings_offset += bitcode.bitsVBR(@as(u32, @intCast(slice.len)), 6);
strings_size += @intCast(slice.len * 8);
}
break :blk .{
std.mem.alignForward(u32, strings_offset, 32) / 8,
std.mem.alignForward(u32, strings_size, 32) / 8,
};
};
 
try bitcode.writeBits(
comptime MetadataBlockWriter.abbrevId(MetadataBlock.Strings),
MetadataBlockWriter.abbrev_len,
);
 
try bitcode.writeVBR(@as(u32, @intCast(self.metadata_string_map.count() - 1)), 6);
try bitcode.writeVBR(strings_offset, 6);
 
try bitcode.writeVBR(strings_size + strings_offset, 6);
 
try bitcode.alignTo32();
 
for (1..self.metadata_string_map.count()) |metadata_string_index| {
const metadata_string: MetadataString = @enumFromInt(metadata_string_index);
const slice = metadata_string.slice(self);
try bitcode.writeVBR(@as(u32, @intCast(slice.len)), 6);
}
 
try bitcode.alignTo32();
 
for (1..self.metadata_string_map.count()) |metadata_string_index| {
const metadata_string: MetadataString = @enumFromInt(metadata_string_index);
const slice = metadata_string.slice(self);
for (slice) |c| {
try bitcode.writeBits(c, 8);
}
}
 
try bitcode.alignTo32();
}
 
for (
self.metadata_items.items(.tag)[1..],
self.metadata_items.items(.data)[1..],
) |tag, data| {
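// Entry 0 is the .none placeholder and is never written; the remaining nodes are emitted in
// creation order, so bitcode IDs follow the interning order used by the builder.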
switch (tag) {
.none => unreachable,
.file => {
const extra = self.metadataExtraData(Metadata.File, data);
 
try metadata_block.writeAbbrevAdapted(MetadataBlock.File{
.filename = extra.filename,
.directory = extra.directory,
}, metadata_adapter);
},
.compile_unit,
.@"compile_unit optimized",
=> |kind| {
const extra = self.metadataExtraData(Metadata.CompileUnit, data);
try metadata_block.writeAbbrevAdapted(MetadataBlock.CompileUnit{
.file = extra.file,
.producer = extra.producer,
.is_optimized = switch (kind) {
.compile_unit => false,
.@"compile_unit optimized" => true,
else => unreachable,
},
.enums = extra.enums,
.globals = extra.globals,
}, metadata_adapter);
},
.subprogram,
.@"subprogram local",
.@"subprogram definition",
.@"subprogram local definition",
.@"subprogram optimized",
.@"subprogram optimized local",
.@"subprogram optimized definition",
.@"subprogram optimized local definition",
=> |kind| {
const extra = self.metadataExtraData(Metadata.Subprogram, data);
 
try metadata_block.writeAbbrevAdapted(MetadataBlock.Subprogram{
.scope = extra.file,
.name = extra.name,
.linkage_name = extra.linkage_name,
.file = extra.file,
.line = extra.line,
.ty = extra.ty,
.scope_line = extra.scope_line,
.sp_flags = @bitCast(@as(u32, @as(u3, @intCast(
@intFromEnum(kind) - @intFromEnum(Metadata.Tag.subprogram),
))) << 2),
.flags = extra.di_flags,
.compile_unit = extra.compile_unit,
}, metadata_adapter);
},
.lexical_block => {
const extra = self.metadataExtraData(Metadata.LexicalBlock, data);
try metadata_block.writeAbbrevAdapted(MetadataBlock.LexicalBlock{
.scope = extra.scope,
.file = extra.file,
.line = extra.line,
.column = extra.column,
}, metadata_adapter);
},
.location => {
const extra = self.metadataExtraData(Metadata.Location, data);
assert(extra.scope != .none);
try metadata_block.writeAbbrev(MetadataBlock.Location{
.line = extra.line,
.column = extra.column,
.scope = metadata_adapter.getMetadataIndex(extra.scope) - 1,
.inlined_at = @enumFromInt(metadata_adapter.getMetadataIndex(extra.inlined_at)),
});
},
.basic_bool_type,
.basic_unsigned_type,
.basic_signed_type,
.basic_float_type,
=> |kind| {
const extra = self.metadataExtraData(Metadata.BasicType, data);
try metadata_block.writeAbbrevAdapted(MetadataBlock.BasicType{
.name = extra.name,
.size_in_bits = extra.bitSize(),
.encoding = switch (kind) {
.basic_bool_type => DW.ATE.boolean,
.basic_unsigned_type => DW.ATE.unsigned,
.basic_signed_type => DW.ATE.signed,
.basic_float_type => DW.ATE.float,
else => unreachable,
},
}, metadata_adapter);
},
.composite_struct_type,
.composite_union_type,
.composite_enumeration_type,
.composite_array_type,
.composite_vector_type,
=> |kind| {
const extra = self.metadataExtraData(Metadata.CompositeType, data);
 
try metadata_block.writeAbbrevAdapted(MetadataBlock.CompositeType{
.tag = switch (kind) {
.composite_struct_type => DW.TAG.structure_type,
.composite_union_type => DW.TAG.union_type,
.composite_enumeration_type => DW.TAG.enumeration_type,
.composite_array_type, .composite_vector_type => DW.TAG.array_type,
else => unreachable,
},
.name = extra.name,
.file = extra.file,
.line = extra.line,
.scope = extra.scope,
.underlying_type = extra.underlying_type,
.size_in_bits = extra.bitSize(),
.align_in_bits = extra.bitAlign(),
.flags = if (kind == .composite_vector_type) .{ .Vector = true } else .{},
.elements = extra.fields_tuple,
}, metadata_adapter);
},
.derived_pointer_type,
.derived_member_type,
=> |kind| {
const extra = self.metadataExtraData(Metadata.DerivedType, data);
try metadata_block.writeAbbrevAdapted(MetadataBlock.DerivedType{
.tag = switch (kind) {
.derived_pointer_type => DW.TAG.pointer_type,
.derived_member_type => DW.TAG.member,
else => unreachable,
},
.name = extra.name,
.file = extra.file,
.line = extra.line,
.scope = extra.scope,
.underlying_type = extra.underlying_type,
.size_in_bits = extra.bitSize(),
.align_in_bits = extra.bitAlign(),
.offset_in_bits = extra.bitOffset(),
}, metadata_adapter);
},
.subroutine_type => {
const extra = self.metadataExtraData(Metadata.SubroutineType, data);
 
try metadata_block.writeAbbrevAdapted(MetadataBlock.SubroutineType{
.types = extra.types_tuple,
}, metadata_adapter);
},
.enumerator_unsigned,
.enumerator_signed_positive,
.enumerator_signed_negative,
=> |kind| {
const positive = switch (kind) {
.enumerator_unsigned,
.enumerator_signed_positive,
=> true,
.enumerator_signed_negative => false,
else => unreachable,
};
 
const unsigned = switch (kind) {
.enumerator_unsigned => true,
.enumerator_signed_positive,
.enumerator_signed_negative,
=> false,
else => unreachable,
};
 
const extra = self.metadataExtraData(Metadata.Enumerator, data);
 
const limbs = self.metadata_limbs.items[extra.limbs_index..][0..extra.limbs_len];
 
const bigint: std.math.big.int.Const = .{
.limbs = limbs,
.positive = positive,
};
 
if (extra.bit_width <= 64) {
const val = bigint.to(i64) catch unreachable;
const emit_val = if (positive)
@shlWithOverflow(val, 1)[0]
else
(@shlWithOverflow(@addWithOverflow(~val, 1)[0], 1)[0] | 1);
try metadata_block.writeAbbrevAdapted(MetadataBlock.Enumerator{
.flags = .{
.unsigned = unsigned,
.bigint = false,
},
.bit_width = extra.bit_width,
.name = extra.name,
.value = @bitCast(emit_val),
}, metadata_adapter);
} else {
const word_count = std.mem.alignForward(u32, extra.bit_width, 64) / 64;
try record.ensureUnusedCapacity(self.gpa, 3 + word_count);
 
const flags: MetadataBlock.Enumerator.Flags = .{
.unsigned = unsigned,
.bigint = true,
};
 
const FlagsInt = @typeInfo(MetadataBlock.Enumerator.Flags).Struct.backing_integer.?;
 
const flags_int: FlagsInt = @bitCast(flags);
 
record.appendAssumeCapacity(@intCast(flags_int));
record.appendAssumeCapacity(@intCast(extra.bit_width));
record.appendAssumeCapacity(metadata_adapter.getMetadataStringIndex(extra.name));
 
// Write the limb words after the three operands appended above so they are not overwritten.
const buffer: [*]u8 = @ptrCast(record.items.ptr + 3);
bigint.writeTwosComplement(buffer[0..(word_count * 8)], .little);
 
const signed_buffer: [*]i64 = @ptrCast(record.items.ptr + 3);
for (signed_buffer[0..word_count], 0..) |val, i| {
signed_buffer[i] = if (val >= 0)
@shlWithOverflow(val, 1)[0]
else
(@shlWithOverflow(@addWithOverflow(~val, 1)[0], 1)[0] | 1);
}
 
try metadata_block.writeUnabbrev(
MetadataBlock.Enumerator.id,
record.items.ptr[0..(3 + word_count)],
);
}
},
.subrange => {
const extra = self.metadataExtraData(Metadata.Subrange, data);
 
try metadata_block.writeAbbrevAdapted(MetadataBlock.Subrange{
.count = extra.count,
.lower_bound = extra.lower_bound,
}, metadata_adapter);
},
.expression => {
var extra = self.metadataExtraDataTrail(Metadata.Expression, data);
 
const elements = extra.trail.next(extra.data.elements_len, u32, self);
 
try metadata_block.writeAbbrevAdapted(MetadataBlock.Expression{
.elements = elements,
}, metadata_adapter);
},
.tuple => {
var extra = self.metadataExtraDataTrail(Metadata.Tuple, data);
 
const elements = extra.trail.next(extra.data.elements_len, Metadata, self);
 
try metadata_block.writeAbbrevAdapted(MetadataBlock.Node{
.elements = elements,
}, metadata_adapter);
},
.module_flag => {
const extra = self.metadataExtraData(Metadata.ModuleFlag, data);
try metadata_block.writeAbbrev(MetadataBlock.Node{
.elements = &.{
@enumFromInt(metadata_adapter.getMetadataIndex(extra.behavior)),
@enumFromInt(metadata_adapter.getMetadataStringIndex(extra.name)),
@enumFromInt(metadata_adapter.getMetadataIndex(extra.constant)),
},
});
},
.local_var => {
const extra = self.metadataExtraData(Metadata.LocalVar, data);
try metadata_block.writeAbbrevAdapted(MetadataBlock.LocalVar{
.scope = extra.scope,
.name = extra.name,
.file = extra.file,
.line = extra.line,
.ty = extra.ty,
}, metadata_adapter);
},
.parameter => {
const extra = self.metadataExtraData(Metadata.Parameter, data);
try metadata_block.writeAbbrevAdapted(MetadataBlock.Parameter{
.scope = extra.scope,
.name = extra.name,
.file = extra.file,
.line = extra.line,
.ty = extra.ty,
.arg = extra.arg_no,
}, metadata_adapter);
},
.global_var,
.@"global_var local",
=> |kind| {
const extra = self.metadataExtraData(Metadata.GlobalVar, data);
try metadata_block.writeAbbrevAdapted(MetadataBlock.GlobalVar{
.scope = extra.scope,
.name = extra.name,
.linkage_name = extra.linkage_name,
.file = extra.file,
.line = extra.line,
.ty = extra.ty,
.local = kind == .@"global_var local",
}, metadata_adapter);
},
.global_var_expression => {
const extra = self.metadataExtraData(Metadata.GlobalVarExpression, data);
try metadata_block.writeAbbrevAdapted(MetadataBlock.GlobalVarExpression{
.variable = extra.variable,
.expression = extra.expression,
}, metadata_adapter);
},
.constant => {
const constant: Constant = @enumFromInt(data);
try metadata_block.writeAbbrevAdapted(MetadataBlock.Constant{
.ty = constant.typeOf(self),
.constant = constant,
}, metadata_adapter);
},
}
record.clearRetainingCapacity();
}
 
// Write named metadata
for (self.metadata_named.keys(), self.metadata_named.values()) |name, operands| {
const slice = name.slice(self);
try metadata_block.writeAbbrev(MetadataBlock.Name{
.name = slice,
});
 
const elements = self.metadata_extra.items[operands.index..][0..operands.len];
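// NAMED_NODE operands are zero based in the bitcode format, so the adapter's
// one-based indices (where zero would mean "no metadata") are shifted down in place.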
for (elements) |*e| {
e.* = metadata_adapter.getMetadataIndex(@enumFromInt(e.*)) - 1;
}
 
try metadata_block.writeAbbrev(MetadataBlock.NamedNode{
.elements = @ptrCast(elements),
});
}
 
// Write global attached metadata
{
for (globals.keys()) |global| {
const global_ptr = global.ptrConst(self);
if (global_ptr.dbg == .none) continue;
 
switch (global_ptr.kind) {
.function => |f| if (f.ptrConst(self).instructions.len != 0) continue,
else => {},
}
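// Defined functions are skipped: their !dbg attachment is emitted inside their
// own function block (see METADATA_ATTACHMENT_BLOCK below). Declarations and
// global variables are attached at module scope here.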
 
try metadata_block.writeAbbrev(MetadataBlock.GlobalDeclAttachment{
.value = @enumFromInt(constant_adapter.getConstantIndex(global.toConst())),
.kind = ir.MetadataKind.dbg,
.metadata = @enumFromInt(metadata_adapter.getMetadataIndex(global_ptr.dbg) - 1),
});
}
}
 
try metadata_block.end();
}
 
// FUNCTION_BLOCKS
{
const FunctionAdapter = struct {
constant_adapter: ConstantAdapter,
metadata_adapter: MetadataAdapter,
func: *const Function,
instruction_index: u32 = 0,
 
pub fn init(
const_adapter: ConstantAdapter,
meta_adapter: MetadataAdapter,
func: *const Function,
) @This() {
return .{
.constant_adapter = const_adapter,
.metadata_adapter = meta_adapter,
.func = func,
.instruction_index = 0,
};
}
 
pub fn get(adapter: @This(), value: anytype, comptime field_name: []const u8) @TypeOf(value) {
_ = field_name;
const Ty = @TypeOf(value);
return switch (Ty) {
Value => @enumFromInt(adapter.getOffsetValueIndex(value)),
Constant => @enumFromInt(adapter.getOffsetConstantIndex(value)),
FunctionAttributes => @enumFromInt(switch (value) {
.none => 0,
else => 1 + adapter.constant_adapter.builder.function_attributes_set.getIndex(value).?,
}),
else => value,
};
}
 
pub fn getValueIndex(adapter: @This(), value: Value) u32 {
return @intCast(switch (value.unwrap()) {
.instruction => |instruction| instruction.valueIndex(adapter.func) + adapter.firstInstr(),
.constant => |constant| adapter.constant_adapter.getConstantIndex(constant),
.metadata => |metadata| if (!adapter.metadata_adapter.builder.strip) blk: {
const real_metadata = metadata.unwrap(adapter.metadata_adapter.builder);
if (@intFromEnum(real_metadata) < Metadata.first_local_metadata)
break :blk adapter.metadata_adapter.getMetadataIndex(real_metadata) - 1;
 
return @intCast(@intFromEnum(metadata) -
Metadata.first_local_metadata +
adapter.metadata_adapter.builder.metadata_string_map.count() - 1 +
adapter.metadata_adapter.builder.metadata_map.count() - 1);
} else unreachable,
});
}
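// Operand references inside function blocks are relative: they are encoded as
// the distance back from the current instruction's value index. The signed
// variant below is used by records (such as phi) that may forward reference.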
 
pub fn getOffsetValueIndex(adapter: @This(), value: Value) u32 {
return @subWithOverflow(adapter.offset(), adapter.getValueIndex(value))[0];
}
 
pub fn getOffsetValueSignedIndex(adapter: @This(), value: Value) i32 {
const signed_offset: i32 = @intCast(adapter.offset());
const signed_value: i32 = @intCast(adapter.getValueIndex(value));
return signed_offset - signed_value;
}
 
pub fn getOffsetConstantIndex(adapter: @This(), constant: Constant) u32 {
return adapter.offset() - adapter.constant_adapter.getConstantIndex(constant);
}
 
pub fn offset(adapter: @This()) u32 {
return @as(
Function.Instruction.Index,
@enumFromInt(adapter.instruction_index),
).valueIndex(adapter.func) + adapter.firstInstr();
}
 
fn firstInstr(adapter: @This()) u32 {
return adapter.constant_adapter.numConstants();
}
 
pub fn next(adapter: *@This()) void {
adapter.instruction_index += 1;
}
};
 
for (self.functions.items, 0..) |func, func_index| {
const FunctionBlock = ir.FunctionBlock;
if (func.global.getReplacement(self) != .none) continue;
 
if (func.instructions.len == 0) continue;
 
var function_block = try module_block.enterSubBlock(FunctionBlock);
 
try function_block.writeAbbrev(FunctionBlock.DeclareBlocks{ .num_blocks = func.blocks.len });
 
var adapter = FunctionAdapter.init(constant_adapter, metadata_adapter, &func);
 
// Emit function level metadata block
if (!self.strip and func.debug_values.len != 0) {
const MetadataBlock = ir.FunctionMetadataBlock;
var metadata_block = try function_block.enterSubBlock(MetadataBlock);
 
for (func.debug_values) |value| {
try metadata_block.writeAbbrev(MetadataBlock.Value{
.ty = value.typeOf(@enumFromInt(func_index), self),
.value = @enumFromInt(adapter.getValueIndex(value.toValue())),
});
}
 
try metadata_block.end();
}
 
const tags = func.instructions.items(.tag);
const datas = func.instructions.items(.data);
 
var has_location = false;
 
var block_incoming_len: u32 = undefined;
for (0..func.instructions.len) |instr_index| {
const tag = tags[instr_index];
 
record.clearRetainingCapacity();
 
switch (tag) {
.block => block_incoming_len = datas[instr_index],
.arg => {},
.@"unreachable" => try function_block.writeAbbrev(FunctionBlock.Unreachable{}),
.call,
.@"musttail call",
.@"notail call",
.@"tail call",
=> |kind| {
var extra = func.extraDataTrail(Function.Instruction.Call, datas[instr_index]);
 
const call_conv = extra.data.info.call_conv;
const args = extra.trail.next(extra.data.args_len, Value, &func);
try function_block.writeAbbrevAdapted(FunctionBlock.Call{
.attributes = extra.data.attributes,
.call_type = switch (kind) {
.call => .{ .call_conv = call_conv },
.@"tail call" => .{ .tail = true, .call_conv = call_conv },
.@"musttail call" => .{ .must_tail = true, .call_conv = call_conv },
.@"notail call" => .{ .no_tail = true, .call_conv = call_conv },
else => unreachable,
},
.type_id = extra.data.ty,
.callee = extra.data.callee,
.args = args,
}, adapter);
},
.@"call fast",
.@"musttail call fast",
.@"notail call fast",
.@"tail call fast",
=> |kind| {
var extra = func.extraDataTrail(Function.Instruction.Call, datas[instr_index]);
 
const call_conv = extra.data.info.call_conv;
const args = extra.trail.next(extra.data.args_len, Value, &func);
try function_block.writeAbbrevAdapted(FunctionBlock.CallFast{
.attributes = extra.data.attributes,
.call_type = switch (kind) {
.call => .{ .call_conv = call_conv },
.@"tail call" => .{ .tail = true, .call_conv = call_conv },
.@"musttail call" => .{ .must_tail = true, .call_conv = call_conv },
.@"notail call" => .{ .no_tail = true, .call_conv = call_conv },
else => unreachable,
},
.fast_math = .{},
.type_id = extra.data.ty,
.callee = extra.data.callee,
.args = args,
}, adapter);
},
.add,
.@"add nsw",
.@"add nuw",
.@"add nuw nsw",
.@"and",
.fadd,
.fdiv,
.fmul,
.mul,
.@"mul nsw",
.@"mul nuw",
.@"mul nuw nsw",
.frem,
.fsub,
.sdiv,
.@"sdiv exact",
.sub,
.@"sub nsw",
.@"sub nuw",
.@"sub nuw nsw",
.udiv,
.@"udiv exact",
.xor,
.shl,
.@"shl nsw",
.@"shl nuw",
.@"shl nuw nsw",
.lshr,
.@"lshr exact",
.@"or",
.urem,
.srem,
.ashr,
.@"ashr exact",
=> |kind| {
const extra = func.extraData(Function.Instruction.Binary, datas[instr_index]);
try function_block.writeAbbrev(FunctionBlock.Binary{
.opcode = kind.toBinaryOpcode(),
.lhs = adapter.getOffsetValueIndex(extra.lhs),
.rhs = adapter.getOffsetValueIndex(extra.rhs),
});
},
.@"fadd fast",
.@"fdiv fast",
.@"fmul fast",
.@"frem fast",
.@"fsub fast",
=> |kind| {
const extra = func.extraData(Function.Instruction.Binary, datas[instr_index]);
try function_block.writeAbbrev(FunctionBlock.BinaryFast{
.opcode = kind.toBinaryOpcode(),
.lhs = adapter.getOffsetValueIndex(extra.lhs),
.rhs = adapter.getOffsetValueIndex(extra.rhs),
.fast_math = .{},
});
},
.alloca,
.@"alloca inalloca",
=> |kind| {
const extra = func.extraData(Function.Instruction.Alloca, datas[instr_index]);
const alignment = extra.info.alignment.toLlvm();
try function_block.writeAbbrev(FunctionBlock.Alloca{
.inst_type = extra.type,
.len_type = extra.len.typeOf(@enumFromInt(func_index), self),
.len_value = adapter.getValueIndex(extra.len),
.flags = .{
.align_lower = @truncate(alignment),
.inalloca = kind == .@"alloca inalloca",
.explicit_type = true,
.swift_error = false,
.align_upper = @truncate(alignment << 5),
},
});
},
.bitcast,
.inttoptr,
.ptrtoint,
.fptosi,
.fptoui,
.sitofp,
.uitofp,
.addrspacecast,
.fptrunc,
.trunc,
.fpext,
.sext,
.zext,
=> |kind| {
const extra = func.extraData(Function.Instruction.Cast, datas[instr_index]);
try function_block.writeAbbrev(FunctionBlock.Cast{
.val = adapter.getOffsetValueIndex(extra.val),
.type_index = extra.type,
.opcode = kind.toCastOpcode(),
});
},
.@"fcmp false",
.@"fcmp oeq",
.@"fcmp oge",
.@"fcmp ogt",
.@"fcmp ole",
.@"fcmp olt",
.@"fcmp one",
.@"fcmp ord",
.@"fcmp true",
.@"fcmp ueq",
.@"fcmp uge",
.@"fcmp ugt",
.@"fcmp ule",
.@"fcmp ult",
.@"fcmp une",
.@"fcmp uno",
.@"icmp eq",
.@"icmp ne",
.@"icmp sge",
.@"icmp sgt",
.@"icmp sle",
.@"icmp slt",
.@"icmp uge",
.@"icmp ugt",
.@"icmp ule",
.@"icmp ult",
=> |kind| {
const extra = func.extraData(Function.Instruction.Binary, datas[instr_index]);
try function_block.writeAbbrev(FunctionBlock.Cmp{
.lhs = adapter.getOffsetValueIndex(extra.lhs),
.rhs = adapter.getOffsetValueIndex(extra.rhs),
.pred = kind.toCmpPredicate(),
});
},
.@"fcmp fast false",
.@"fcmp fast oeq",
.@"fcmp fast oge",
.@"fcmp fast ogt",
.@"fcmp fast ole",
.@"fcmp fast olt",
.@"fcmp fast one",
.@"fcmp fast ord",
.@"fcmp fast true",
.@"fcmp fast ueq",
.@"fcmp fast uge",
.@"fcmp fast ugt",
.@"fcmp fast ule",
.@"fcmp fast ult",
.@"fcmp fast une",
.@"fcmp fast uno",
=> |kind| {
const extra = func.extraData(Function.Instruction.Binary, datas[instr_index]);
try function_block.writeAbbrev(FunctionBlock.CmpFast{
.lhs = adapter.getOffsetValueIndex(extra.lhs),
.rhs = adapter.getOffsetValueIndex(extra.rhs),
.pred = kind.toCmpPredicate(),
.fast_math = .{},
});
},
.fneg => try function_block.writeAbbrev(FunctionBlock.FNeg{
.val = adapter.getOffsetValueIndex(@enumFromInt(datas[instr_index])),
}),
.@"fneg fast" => try function_block.writeAbbrev(FunctionBlock.FNegFast{
.val = adapter.getOffsetValueIndex(@enumFromInt(datas[instr_index])),
.fast_math = .{},
}),
.extractvalue => {
var extra = func.extraDataTrail(Function.Instruction.ExtractValue, datas[instr_index]);
const indices = extra.trail.next(extra.data.indices_len, u32, &func);
try function_block.writeAbbrev(FunctionBlock.ExtractValue{
.val = adapter.getOffsetValueIndex(extra.data.val),
.indices = indices,
});
},
.insertvalue => {
var extra = func.extraDataTrail(Function.Instruction.InsertValue, datas[instr_index]);
const indices = extra.trail.next(extra.data.indices_len, u32, &func);
try function_block.writeAbbrev(FunctionBlock.InsertValue{
.val = adapter.getOffsetValueIndex(extra.data.val),
.elem = adapter.getOffsetValueIndex(extra.data.elem),
.indices = indices,
});
},
.extractelement => {
const extra = func.extraData(Function.Instruction.ExtractElement, datas[instr_index]);
try function_block.writeAbbrev(FunctionBlock.ExtractElement{
.val = adapter.getOffsetValueIndex(extra.val),
.index = adapter.getOffsetValueIndex(extra.index),
});
},
.insertelement => {
const extra = func.extraData(Function.Instruction.InsertElement, datas[instr_index]);
try function_block.writeAbbrev(FunctionBlock.InsertElement{
.val = adapter.getOffsetValueIndex(extra.val),
.elem = adapter.getOffsetValueIndex(extra.elem),
.index = adapter.getOffsetValueIndex(extra.index),
});
},
.select => {
const extra = func.extraData(Function.Instruction.Select, datas[instr_index]);
try function_block.writeAbbrev(FunctionBlock.Select{
.lhs = adapter.getOffsetValueIndex(extra.lhs),
.rhs = adapter.getOffsetValueIndex(extra.rhs),
.cond = adapter.getOffsetValueIndex(extra.cond),
});
},
.@"select fast" => {
const extra = func.extraData(Function.Instruction.Select, datas[instr_index]);
try function_block.writeAbbrev(FunctionBlock.SelectFast{
.lhs = adapter.getOffsetValueIndex(extra.lhs),
.rhs = adapter.getOffsetValueIndex(extra.rhs),
.cond = adapter.getOffsetValueIndex(extra.cond),
.fast_math = .{},
});
},
.shufflevector => {
const extra = func.extraData(Function.Instruction.ShuffleVector, datas[instr_index]);
try function_block.writeAbbrev(FunctionBlock.ShuffleVector{
.lhs = adapter.getOffsetValueIndex(extra.lhs),
.rhs = adapter.getOffsetValueIndex(extra.rhs),
.mask = adapter.getOffsetValueIndex(extra.mask),
});
},
.getelementptr,
.@"getelementptr inbounds",
=> |kind| {
var extra = func.extraDataTrail(Function.Instruction.GetElementPtr, datas[instr_index]);
const indices = extra.trail.next(extra.data.indices_len, Value, &func);
try function_block.writeAbbrevAdapted(
FunctionBlock.GetElementPtr{
.is_inbounds = kind == .@"getelementptr inbounds",
.type_index = extra.data.type,
.base = extra.data.base,
.indices = indices,
},
adapter,
);
},
.load => {
const extra = func.extraData(Function.Instruction.Load, datas[instr_index]);
try function_block.writeAbbrev(FunctionBlock.Load{
.ptr = adapter.getOffsetValueIndex(extra.ptr),
.ty = extra.type,
.alignment = extra.info.alignment.toLlvm(),
.is_volatile = extra.info.access_kind == .@"volatile",
});
},
.@"load atomic" => {
const extra = func.extraData(Function.Instruction.Load, datas[instr_index]);
try function_block.writeAbbrev(FunctionBlock.LoadAtomic{
.ptr = adapter.getOffsetValueIndex(extra.ptr),
.ty = extra.type,
.alignment = extra.info.alignment.toLlvm(),
.is_volatile = extra.info.access_kind == .@"volatile",
.success_ordering = extra.info.success_ordering,
.sync_scope = extra.info.sync_scope,
});
},
.store => {
const extra = func.extraData(Function.Instruction.Store, datas[instr_index]);
try function_block.writeAbbrev(FunctionBlock.Store{
.ptr = adapter.getOffsetValueIndex(extra.ptr),
.val = adapter.getOffsetValueIndex(extra.val),
.alignment = extra.info.alignment.toLlvm(),
.is_volatile = extra.info.access_kind == .@"volatile",
});
},
.@"store atomic" => {
const extra = func.extraData(Function.Instruction.Store, datas[instr_index]);
try function_block.writeAbbrev(FunctionBlock.StoreAtomic{
.ptr = adapter.getOffsetValueIndex(extra.ptr),
.val = adapter.getOffsetValueIndex(extra.val),
.alignment = extra.info.alignment.toLlvm(),
.is_volatile = extra.info.access_kind == .@"volatile",
.success_ordering = extra.info.success_ordering,
.sync_scope = extra.info.sync_scope,
});
},
.br => {
try function_block.writeAbbrev(FunctionBlock.BrUnconditional{
.block = datas[instr_index],
});
},
.br_cond => {
const extra = func.extraData(Function.Instruction.BrCond, datas[instr_index]);
try function_block.writeAbbrev(FunctionBlock.BrConditional{
.then_block = @intFromEnum(extra.then),
.else_block = @intFromEnum(extra.@"else"),
.condition = adapter.getOffsetValueIndex(extra.cond),
});
},
.@"switch" => {
var extra = func.extraDataTrail(Function.Instruction.Switch, datas[instr_index]);
 
try record.ensureUnusedCapacity(self.gpa, 3 + extra.data.cases_len * 2);
 
// Condition type
record.appendAssumeCapacity(@intFromEnum(extra.data.val.typeOf(@enumFromInt(func_index), self)));
 
// Condition value
record.appendAssumeCapacity(adapter.getOffsetValueIndex(extra.data.val));
 
// Default block
record.appendAssumeCapacity(@intFromEnum(extra.data.default));
 
const vals = extra.trail.next(extra.data.cases_len, Constant, &func);
const blocks = extra.trail.next(extra.data.cases_len, Function.Block.Index, &func);
for (vals, blocks) |val, block| {
record.appendAssumeCapacity(adapter.constant_adapter.getConstantIndex(val));
record.appendAssumeCapacity(@intFromEnum(block));
}
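// Record code 12 is FUNC_CODE_INST_SWITCH; the layout assembled above is
// [cond type, cond, default block, (case value, case block)...].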
 
try function_block.writeUnabbrev(12, record.items);
},
.va_arg => {
const extra = func.extraData(Function.Instruction.VaArg, datas[instr_index]);
try function_block.writeAbbrev(FunctionBlock.VaArg{
.list_type = extra.list.typeOf(@enumFromInt(func_index), self),
.list = adapter.getOffsetValueIndex(extra.list),
.type = extra.type,
});
},
.phi,
.@"phi fast",
=> |kind| {
var extra = func.extraDataTrail(Function.Instruction.Phi, datas[instr_index]);
const vals = extra.trail.next(block_incoming_len, Value, &func);
const blocks = extra.trail.next(block_incoming_len, Function.Block.Index, &func);
 
try record.ensureUnusedCapacity(
self.gpa,
1 + block_incoming_len * 2 + @intFromBool(kind == .@"phi fast"),
);
 
record.appendAssumeCapacity(@intFromEnum(extra.data.type));
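// Incoming value offsets use LLVM's sign-rotated encoding, where a negative
// offset n is emitted as (|n| << 1) | 1; forward-referenced incoming values
// are what produce the negative offsets.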
 
for (vals, blocks) |val, block| {
const offset_value = adapter.getOffsetValueSignedIndex(val);
const abs_value: u32 = @intCast(@abs(offset_value));
const signed_vbr = if (offset_value >= 0) abs_value << 1 else ((abs_value << 1) | 1);
record.appendAssumeCapacity(signed_vbr);
record.appendAssumeCapacity(@intFromEnum(block));
}
 
if (kind == .@"phi fast") record.appendAssumeCapacity(@as(u8, @bitCast(FastMath{})));
 
try function_block.writeUnabbrev(16, record.items);
},
.ret => try function_block.writeAbbrev(FunctionBlock.Ret{
.val = adapter.getOffsetValueIndex(@enumFromInt(datas[instr_index])),
}),
.@"ret void" => try function_block.writeAbbrev(FunctionBlock.RetVoid{}),
.atomicrmw => {
const extra = func.extraData(Function.Instruction.AtomicRmw, datas[instr_index]);
try function_block.writeAbbrev(FunctionBlock.AtomicRmw{
.ptr = adapter.getOffsetValueIndex(extra.ptr),
.val = adapter.getOffsetValueIndex(extra.val),
.operation = extra.info.atomic_rmw_operation,
.is_volatile = extra.info.access_kind == .@"volatile",
.success_ordering = extra.info.success_ordering,
.sync_scope = extra.info.sync_scope,
.alignment = extra.info.alignment.toLlvm(),
});
},
.cmpxchg,
.@"cmpxchg weak",
=> |kind| {
const extra = func.extraData(Function.Instruction.CmpXchg, datas[instr_index]);
 
try function_block.writeAbbrev(FunctionBlock.CmpXchg{
.ptr = adapter.getOffsetValueIndex(extra.ptr),
.cmp = adapter.getOffsetValueIndex(extra.cmp),
.new = adapter.getOffsetValueIndex(extra.new),
.is_volatile = extra.info.access_kind == .@"volatile",
.success_ordering = extra.info.success_ordering,
.sync_scope = extra.info.sync_scope,
.failure_ordering = extra.info.failure_ordering,
.is_weak = kind == .@"cmpxchg weak",
.alignment = extra.info.alignment.toLlvm(),
});
},
.fence => {
const info: MemoryAccessInfo = @bitCast(datas[instr_index]);
try function_block.writeAbbrev(FunctionBlock.Fence{
.ordering = info.success_ordering,
.sync_scope = info.sync_scope,
});
},
}
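// Debug locations: a changed location emits DEBUG_LOC, an instruction without
// an entry repeats the active location via DEBUG_LOC_AGAIN, and an explicit
// .none clears the tracked location so nothing is attached.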
 
if (!self.strip) {
if (func.debug_locations.get(@enumFromInt(instr_index))) |debug_location| {
if (debug_location != .none) {
const location = self.metadata_items.get(@intFromEnum(debug_location));
assert(location.tag == .location);
const extra = self.metadataExtraData(Metadata.Location, location.data);
try function_block.writeAbbrev(FunctionBlock.DebugLoc{
.line = extra.line,
.column = extra.column,
.scope = @enumFromInt(metadata_adapter.getMetadataIndex(extra.scope)),
.inlined_at = @enumFromInt(metadata_adapter.getMetadataIndex(extra.inlined_at)),
.is_implicit = false,
});
has_location = true;
} else {
has_location = false;
}
} else if (has_location) {
try function_block.writeAbbrev(FunctionBlock.DebugLocAgain{});
}
}
 
adapter.next();
}
 
// VALUE_SYMTAB
if (!self.strip) {
const ValueSymbolTable = ir.FunctionValueSymbolTable;
 
var value_symtab_block = try function_block.enterSubBlock(ValueSymbolTable);
 
for (func.blocks, 0..) |block, block_index| {
const name = block.instruction.name(&func);
 
if (name == .none or name == .empty) continue;
 
try value_symtab_block.writeAbbrev(ValueSymbolTable.BlockEntry{
.value_id = @intCast(block_index),
.string = name.slice(self).?,
});
}
 
// TODO: Emit non-block entries if the builder ever starts assigning names to values other than blocks
 
try value_symtab_block.end();
}
 
// METADATA_ATTACHMENT_BLOCK
if (!self.strip) blk: {
const dbg = func.global.ptrConst(self).dbg;
 
if (dbg == .none) break :blk;
 
const MetadataAttachmentBlock = ir.MetadataAttachmentBlock;
var metadata_attach_block = try function_block.enterSubBlock(MetadataAttachmentBlock);
 
try metadata_attach_block.writeAbbrev(MetadataAttachmentBlock.AttachmentSingle{
.kind = ir.MetadataKind.dbg,
.metadata = @enumFromInt(metadata_adapter.getMetadataIndex(dbg) - 1),
});
 
try metadata_attach_block.end();
}
 
try function_block.end();
}
}
 
try module_block.end();
}
 
// STRTAB_BLOCK
{
const Strtab = ir.Strtab;
var strtab_block = try bitcode.enterTopBlock(Strtab);
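// The string table is emitted as a single blob; module-level records refer to
// symbol names by offset and length into these bytes.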
 
try strtab_block.writeAbbrev(Strtab.Blob{ .blob = self.string_bytes.items });
 
try strtab_block.end();
}
 
return bitcode.toSlice();
}
 
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const bitcode_writer = @import("bitcode_writer.zig");
const build_options = @import("build_options");
const Builder = @This();
const builtin = @import("builtin");
const DW = std.dwarf;
const ir = @import("ir.zig");
const log = std.log.scoped(.llvm);
const std = @import("std");
 
src/codegen/llvm/bindings.zig added: 8538, removed: 5358, total 3180
@@ -15,7 +15,14 @@ pub const Bool = enum(c_int) {
return b != .False;
}
};
pub const AttributeIndex = c_uint;
 
pub const MemoryBuffer = opaque {
pub const createMemoryBufferWithMemoryRange = LLVMCreateMemoryBufferWithMemoryRange;
pub const dispose = LLVMDisposeMemoryBuffer;
 
extern fn LLVMCreateMemoryBufferWithMemoryRange(InputData: [*]const u8, InputDataLength: usize, BufferName: ?[*:0]const u8, RequiresNullTerminator: Bool) *MemoryBuffer;
extern fn LLVMDisposeMemoryBuffer(MemBuf: *MemoryBuffer) void;
};
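// MemoryBuffer above and Context.parseBitcodeInContext2 below let the compiler
// hand its self-emitted bitcode back to LLVM for optimization and machine code
// generation in LLVM-enabled builds.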
 
/// Make sure to use the *InContext functions instead of the global ones.
pub const Context = opaque {
@@ -25,382 +32,17 @@ pub const Context = opaque {
pub const dispose = LLVMContextDispose;
extern fn LLVMContextDispose(C: *Context) void;
 
pub const createEnumAttribute = LLVMCreateEnumAttribute;
extern fn LLVMCreateEnumAttribute(C: *Context, KindID: c_uint, Val: u64) *Attribute;
 
pub const createTypeAttribute = LLVMCreateTypeAttribute;
extern fn LLVMCreateTypeAttribute(C: *Context, KindID: c_uint, Type: *Type) *Attribute;
 
pub const createStringAttribute = LLVMCreateStringAttribute;
extern fn LLVMCreateStringAttribute(C: *Context, Key: [*]const u8, Key_Len: c_uint, Value: [*]const u8, Value_Len: c_uint) *Attribute;
 
pub const pointerType = LLVMPointerTypeInContext;
extern fn LLVMPointerTypeInContext(C: *Context, AddressSpace: c_uint) *Type;
 
pub const intType = LLVMIntTypeInContext;
extern fn LLVMIntTypeInContext(C: *Context, NumBits: c_uint) *Type;
 
pub const halfType = LLVMHalfTypeInContext;
extern fn LLVMHalfTypeInContext(C: *Context) *Type;
 
pub const bfloatType = LLVMBFloatTypeInContext;
extern fn LLVMBFloatTypeInContext(C: *Context) *Type;
 
pub const floatType = LLVMFloatTypeInContext;
extern fn LLVMFloatTypeInContext(C: *Context) *Type;
 
pub const doubleType = LLVMDoubleTypeInContext;
extern fn LLVMDoubleTypeInContext(C: *Context) *Type;
 
pub const fp128Type = LLVMFP128TypeInContext;
extern fn LLVMFP128TypeInContext(C: *Context) *Type;
 
pub const x86_fp80Type = LLVMX86FP80TypeInContext;
extern fn LLVMX86FP80TypeInContext(C: *Context) *Type;
 
pub const ppc_fp128Type = LLVMPPCFP128TypeInContext;
extern fn LLVMPPCFP128TypeInContext(C: *Context) *Type;
 
pub const x86_amxType = LLVMX86AMXTypeInContext;
extern fn LLVMX86AMXTypeInContext(C: *Context) *Type;
 
pub const x86_mmxType = LLVMX86MMXTypeInContext;
extern fn LLVMX86MMXTypeInContext(C: *Context) *Type;
 
pub const voidType = LLVMVoidTypeInContext;
extern fn LLVMVoidTypeInContext(C: *Context) *Type;
 
pub const labelType = LLVMLabelTypeInContext;
extern fn LLVMLabelTypeInContext(C: *Context) *Type;
 
pub const tokenType = LLVMTokenTypeInContext;
extern fn LLVMTokenTypeInContext(C: *Context) *Type;
 
pub const metadataType = LLVMMetadataTypeInContext;
extern fn LLVMMetadataTypeInContext(C: *Context) *Type;
 
pub const structType = LLVMStructTypeInContext;
extern fn LLVMStructTypeInContext(
C: *Context,
ElementTypes: [*]const *Type,
ElementCount: c_uint,
Packed: Bool,
) *Type;
 
pub const structCreateNamed = LLVMStructCreateNamed;
extern fn LLVMStructCreateNamed(C: *Context, Name: [*:0]const u8) *Type;
 
pub const constString = LLVMConstStringInContext;
extern fn LLVMConstStringInContext(C: *Context, Str: [*]const u8, Length: c_uint, DontNullTerminate: Bool) *Value;
 
pub const appendBasicBlock = LLVMAppendBasicBlockInContext;
extern fn LLVMAppendBasicBlockInContext(C: *Context, Fn: *Value, Name: [*:0]const u8) *BasicBlock;
 
pub const createBuilder = LLVMCreateBuilderInContext;
extern fn LLVMCreateBuilderInContext(C: *Context) *Builder;
pub const parseBitcodeInContext2 = LLVMParseBitcodeInContext2;
extern fn LLVMParseBitcodeInContext2(C: *Context, MemBuf: *MemoryBuffer, OutModule: **Module) Bool;
 
pub const setOptBisectLimit = ZigLLVMSetOptBisectLimit;
extern fn ZigLLVMSetOptBisectLimit(C: *Context, limit: c_int) void;
};
 
pub const Value = opaque {
pub const addAttributeAtIndex = LLVMAddAttributeAtIndex;
extern fn LLVMAddAttributeAtIndex(F: *Value, Idx: AttributeIndex, A: *Attribute) void;
 
pub const removeEnumAttributeAtIndex = LLVMRemoveEnumAttributeAtIndex;
extern fn LLVMRemoveEnumAttributeAtIndex(F: *Value, Idx: AttributeIndex, KindID: c_uint) void;
 
pub const removeStringAttributeAtIndex = LLVMRemoveStringAttributeAtIndex;
extern fn LLVMRemoveStringAttributeAtIndex(F: *Value, Idx: AttributeIndex, K: [*]const u8, KLen: c_uint) void;
 
pub const getFirstBasicBlock = LLVMGetFirstBasicBlock;
extern fn LLVMGetFirstBasicBlock(Fn: *Value) ?*BasicBlock;
 
pub const addIncoming = LLVMAddIncoming;
extern fn LLVMAddIncoming(
PhiNode: *Value,
IncomingValues: [*]const *Value,
IncomingBlocks: [*]const *BasicBlock,
Count: c_uint,
) void;
 
pub const setGlobalConstant = LLVMSetGlobalConstant;
extern fn LLVMSetGlobalConstant(GlobalVar: *Value, IsConstant: Bool) void;
 
pub const setLinkage = LLVMSetLinkage;
extern fn LLVMSetLinkage(Global: *Value, Linkage: Linkage) void;
 
pub const setVisibility = LLVMSetVisibility;
extern fn LLVMSetVisibility(Global: *Value, Linkage: Visibility) void;
 
pub const setUnnamedAddr = LLVMSetUnnamedAddr;
extern fn LLVMSetUnnamedAddr(Global: *Value, HasUnnamedAddr: Bool) void;
 
pub const setThreadLocalMode = LLVMSetThreadLocalMode;
extern fn LLVMSetThreadLocalMode(Global: *Value, Mode: ThreadLocalMode) void;
 
pub const setSection = LLVMSetSection;
extern fn LLVMSetSection(Global: *Value, Section: [*:0]const u8) void;
 
pub const removeGlobalValue = ZigLLVMRemoveGlobalValue;
extern fn ZigLLVMRemoveGlobalValue(GlobalVal: *Value) void;
 
pub const eraseGlobalValue = ZigLLVMEraseGlobalValue;
extern fn ZigLLVMEraseGlobalValue(GlobalVal: *Value) void;
 
pub const deleteGlobalValue = ZigLLVMDeleteGlobalValue;
extern fn ZigLLVMDeleteGlobalValue(GlobalVal: *Value) void;
 
pub const setAliasee = LLVMAliasSetAliasee;
extern fn LLVMAliasSetAliasee(Alias: *Value, Aliasee: *Value) void;
 
pub const constAdd = LLVMConstAdd;
extern fn LLVMConstAdd(LHSConstant: *Value, RHSConstant: *Value) *Value;
 
pub const constNSWAdd = LLVMConstNSWAdd;
extern fn LLVMConstNSWAdd(LHSConstant: *Value, RHSConstant: *Value) *Value;
 
pub const constNUWAdd = LLVMConstNUWAdd;
extern fn LLVMConstNUWAdd(LHSConstant: *Value, RHSConstant: *Value) *Value;
 
pub const constSub = LLVMConstSub;
extern fn LLVMConstSub(LHSConstant: *Value, RHSConstant: *Value) *Value;
 
pub const constNSWSub = LLVMConstNSWSub;
extern fn LLVMConstNSWSub(LHSConstant: *Value, RHSConstant: *Value) *Value;
 
pub const constNUWSub = LLVMConstNUWSub;
extern fn LLVMConstNUWSub(LHSConstant: *Value, RHSConstant: *Value) *Value;
 
pub const constMul = LLVMConstMul;
extern fn LLVMConstMul(LHSConstant: *Value, RHSConstant: *Value) *Value;
 
pub const constNSWMul = LLVMConstNSWMul;
extern fn LLVMConstNSWMul(LHSConstant: *Value, RHSConstant: *Value) *Value;
 
pub const constNUWMul = LLVMConstNUWMul;
extern fn LLVMConstNUWMul(LHSConstant: *Value, RHSConstant: *Value) *Value;
 
pub const constAnd = LLVMConstAnd;
extern fn LLVMConstAnd(LHSConstant: *Value, RHSConstant: *Value) *Value;
 
pub const constOr = LLVMConstOr;
extern fn LLVMConstOr(LHSConstant: *Value, RHSConstant: *Value) *Value;
 
pub const constXor = LLVMConstXor;
extern fn LLVMConstXor(LHSConstant: *Value, RHSConstant: *Value) *Value;
 
pub const constShl = LLVMConstShl;
extern fn LLVMConstShl(LHSConstant: *Value, RHSConstant: *Value) *Value;
 
pub const constLShr = LLVMConstLShr;
extern fn LLVMConstLShr(LHSConstant: *Value, RHSConstant: *Value) *Value;
 
pub const constAShr = LLVMConstAShr;
extern fn LLVMConstAShr(LHSConstant: *Value, RHSConstant: *Value) *Value;
 
pub const constTrunc = LLVMConstTrunc;
extern fn LLVMConstTrunc(ConstantVal: *Value, ToType: *Type) *Value;
 
pub const constSExt = LLVMConstSExt;
extern fn LLVMConstSExt(ConstantVal: *Value, ToType: *Type) *Value;
 
pub const constZExt = LLVMConstZExt;
extern fn LLVMConstZExt(ConstantVal: *Value, ToType: *Type) *Value;
 
pub const constFPTrunc = LLVMConstFPTrunc;
extern fn LLVMConstFPTrunc(ConstantVal: *Value, ToType: *Type) *Value;
 
pub const constFPExt = LLVMConstFPExt;
extern fn LLVMConstFPExt(ConstantVal: *Value, ToType: *Type) *Value;
 
pub const constUIToFP = LLVMConstUIToFP;
extern fn LLVMConstUIToFP(ConstantVal: *Value, ToType: *Type) *Value;
 
pub const constSIToFP = LLVMConstSIToFP;
extern fn LLVMConstSIToFP(ConstantVal: *Value, ToType: *Type) *Value;
 
pub const constFPToUI = LLVMConstFPToUI;
extern fn LLVMConstFPToUI(ConstantVal: *Value, ToType: *Type) *Value;
 
pub const constFPToSI = LLVMConstFPToSI;
extern fn LLVMConstFPToSI(ConstantVal: *Value, ToType: *Type) *Value;
 
pub const constPtrToInt = LLVMConstPtrToInt;
extern fn LLVMConstPtrToInt(ConstantVal: *Value, ToType: *Type) *Value;
 
pub const constIntToPtr = LLVMConstIntToPtr;
extern fn LLVMConstIntToPtr(ConstantVal: *Value, ToType: *Type) *Value;
 
pub const constBitCast = LLVMConstBitCast;
extern fn LLVMConstBitCast(ConstantVal: *Value, ToType: *Type) *Value;
 
pub const constAddrSpaceCast = LLVMConstAddrSpaceCast;
extern fn LLVMConstAddrSpaceCast(ConstantVal: *Value, ToType: *Type) *Value;
 
pub const constExtractElement = LLVMConstExtractElement;
extern fn LLVMConstExtractElement(VectorConstant: *Value, IndexConstant: *Value) *Value;
 
pub const constInsertElement = LLVMConstInsertElement;
extern fn LLVMConstInsertElement(
VectorConstant: *Value,
ElementValueConstant: *Value,
IndexConstant: *Value,
) *Value;
 
pub const constShuffleVector = LLVMConstShuffleVector;
extern fn LLVMConstShuffleVector(
VectorAConstant: *Value,
VectorBConstant: *Value,
MaskConstant: *Value,
) *Value;
 
pub const isConstant = LLVMIsConstant;
extern fn LLVMIsConstant(Val: *Value) Bool;
 
pub const blockAddress = LLVMBlockAddress;
extern fn LLVMBlockAddress(F: *Value, BB: *BasicBlock) *Value;
 
pub const setWeak = LLVMSetWeak;
extern fn LLVMSetWeak(CmpXchgInst: *Value, IsWeak: Bool) void;
 
pub const setOrdering = LLVMSetOrdering;
extern fn LLVMSetOrdering(MemoryAccessInst: *Value, Ordering: AtomicOrdering) void;
 
pub const setVolatile = LLVMSetVolatile;
extern fn LLVMSetVolatile(MemoryAccessInst: *Value, IsVolatile: Bool) void;
 
pub const setAlignment = LLVMSetAlignment;
extern fn LLVMSetAlignment(V: *Value, Bytes: c_uint) void;
 
pub const getAlignment = LLVMGetAlignment;
extern fn LLVMGetAlignment(V: *Value) c_uint;
 
pub const setFunctionCallConv = LLVMSetFunctionCallConv;
extern fn LLVMSetFunctionCallConv(Fn: *Value, CC: CallConv) void;
 
pub const setInstructionCallConv = LLVMSetInstructionCallConv;
extern fn LLVMSetInstructionCallConv(Instr: *Value, CC: CallConv) void;
 
pub const setTailCallKind = ZigLLVMSetTailCallKind;
extern fn ZigLLVMSetTailCallKind(CallInst: *Value, TailCallKind: TailCallKind) void;
 
pub const addCallSiteAttribute = LLVMAddCallSiteAttribute;
extern fn LLVMAddCallSiteAttribute(C: *Value, Idx: AttributeIndex, A: *Attribute) void;
 
pub const fnSetSubprogram = ZigLLVMFnSetSubprogram;
extern fn ZigLLVMFnSetSubprogram(f: *Value, subprogram: *DISubprogram) void;
 
pub const setValueName = LLVMSetValueName2;
extern fn LLVMSetValueName2(Val: *Value, Name: [*]const u8, NameLen: usize) void;
 
pub const takeName = ZigLLVMTakeName;
extern fn ZigLLVMTakeName(new_owner: *Value, victim: *Value) void;
 
pub const getParam = LLVMGetParam;
extern fn LLVMGetParam(Fn: *Value, Index: c_uint) *Value;
 
pub const setInitializer = ZigLLVMSetInitializer;
extern fn ZigLLVMSetInitializer(GlobalVar: *Value, ConstantVal: ?*Value) void;
 
pub const setDLLStorageClass = LLVMSetDLLStorageClass;
extern fn LLVMSetDLLStorageClass(Global: *Value, Class: DLLStorageClass) void;
 
pub const addCase = LLVMAddCase;
extern fn LLVMAddCase(Switch: *Value, OnVal: *Value, Dest: *BasicBlock) void;
 
pub const replaceAllUsesWith = LLVMReplaceAllUsesWith;
extern fn LLVMReplaceAllUsesWith(OldVal: *Value, NewVal: *Value) void;
 
pub const attachMetaData = ZigLLVMAttachMetaData;
extern fn ZigLLVMAttachMetaData(GlobalVar: *Value, DIG: *DIGlobalVariableExpression) void;
 
pub const dump = LLVMDumpValue;
extern fn LLVMDumpValue(Val: *Value) void;
};
 
pub const Type = opaque {
pub const constNull = LLVMConstNull;
extern fn LLVMConstNull(Ty: *Type) *Value;
 
pub const constInt = LLVMConstInt;
extern fn LLVMConstInt(IntTy: *Type, N: c_ulonglong, SignExtend: Bool) *Value;
 
pub const constIntOfArbitraryPrecision = LLVMConstIntOfArbitraryPrecision;
extern fn LLVMConstIntOfArbitraryPrecision(IntTy: *Type, NumWords: c_uint, Words: [*]const u64) *Value;
 
pub const constReal = LLVMConstReal;
extern fn LLVMConstReal(RealTy: *Type, N: f64) *Value;
 
pub const constArray2 = LLVMConstArray2;
extern fn LLVMConstArray2(ElementTy: *Type, ConstantVals: [*]const *Value, Length: u64) *Value;
 
pub const constNamedStruct = LLVMConstNamedStruct;
extern fn LLVMConstNamedStruct(
StructTy: *Type,
ConstantVals: [*]const *Value,
Count: c_uint,
) *Value;
 
pub const getUndef = LLVMGetUndef;
extern fn LLVMGetUndef(Ty: *Type) *Value;
 
pub const getPoison = LLVMGetPoison;
extern fn LLVMGetPoison(Ty: *Type) *Value;
 
pub const arrayType2 = LLVMArrayType2;
extern fn LLVMArrayType2(ElementType: *Type, ElementCount: u64) *Type;
 
pub const vectorType = LLVMVectorType;
extern fn LLVMVectorType(ElementType: *Type, ElementCount: c_uint) *Type;
 
pub const scalableVectorType = LLVMScalableVectorType;
extern fn LLVMScalableVectorType(ElementType: *Type, ElementCount: c_uint) *Type;
 
pub const structSetBody = LLVMStructSetBody;
extern fn LLVMStructSetBody(
StructTy: *Type,
ElementTypes: [*]*Type,
ElementCount: c_uint,
Packed: Bool,
) void;
 
pub const isSized = LLVMTypeIsSized;
extern fn LLVMTypeIsSized(Ty: *Type) Bool;
 
pub const constGEP = LLVMConstGEP2;
extern fn LLVMConstGEP2(
Ty: *Type,
ConstantVal: *Value,
ConstantIndices: [*]const *Value,
NumIndices: c_uint,
) *Value;
 
pub const constInBoundsGEP = LLVMConstInBoundsGEP2;
extern fn LLVMConstInBoundsGEP2(
Ty: *Type,
ConstantVal: *Value,
ConstantIndices: [*]const *Value,
NumIndices: c_uint,
) *Value;
 
pub const dump = LLVMDumpType;
extern fn LLVMDumpType(Ty: *Type) void;
};
 
pub const Module = opaque {
pub const createWithName = LLVMModuleCreateWithNameInContext;
extern fn LLVMModuleCreateWithNameInContext(ModuleID: [*:0]const u8, C: *Context) *Module;
 
pub const dispose = LLVMDisposeModule;
extern fn LLVMDisposeModule(*Module) void;
 
pub const verify = LLVMVerifyModule;
extern fn LLVMVerifyModule(*Module, Action: VerifierFailureAction, OutMessage: *[*:0]const u8) Bool;
 
pub const setModuleDataLayout = LLVMSetModuleDataLayout;
extern fn LLVMSetModuleDataLayout(*Module, *TargetData) void;
 
pub const setModulePICLevel = ZigLLVMSetModulePICLevel;
extern fn ZigLLVMSetModulePICLevel(module: *Module) void;
 
@@ -409,508 +51,11 @@ pub const Module = opaque {
 
pub const setModuleCodeModel = ZigLLVMSetModuleCodeModel;
extern fn ZigLLVMSetModuleCodeModel(module: *Module, code_model: CodeModel) void;
 
pub const addFunctionInAddressSpace = ZigLLVMAddFunctionInAddressSpace;
extern fn ZigLLVMAddFunctionInAddressSpace(*Module, Name: [*:0]const u8, FunctionTy: *Type, AddressSpace: c_uint) *Value;
 
pub const printToString = LLVMPrintModuleToString;
extern fn LLVMPrintModuleToString(*Module) [*:0]const u8;
 
pub const addGlobalInAddressSpace = LLVMAddGlobalInAddressSpace;
extern fn LLVMAddGlobalInAddressSpace(M: *Module, Ty: *Type, Name: [*:0]const u8, AddressSpace: c_uint) *Value;
 
pub const dump = LLVMDumpModule;
extern fn LLVMDumpModule(M: *Module) void;
 
pub const addAlias = LLVMAddAlias2;
extern fn LLVMAddAlias2(
M: *Module,
Ty: *Type,
AddrSpace: c_uint,
Aliasee: *Value,
Name: [*:0]const u8,
) *Value;
 
pub const setTarget = LLVMSetTarget;
extern fn LLVMSetTarget(M: *Module, Triple: [*:0]const u8) void;
 
pub const addModuleDebugInfoFlag = ZigLLVMAddModuleDebugInfoFlag;
extern fn ZigLLVMAddModuleDebugInfoFlag(module: *Module, dwarf64: bool) void;
 
pub const addModuleCodeViewFlag = ZigLLVMAddModuleCodeViewFlag;
extern fn ZigLLVMAddModuleCodeViewFlag(module: *Module) void;
 
pub const createDIBuilder = ZigLLVMCreateDIBuilder;
extern fn ZigLLVMCreateDIBuilder(module: *Module, allow_unresolved: bool) *DIBuilder;
 
pub const setModuleInlineAsm = LLVMSetModuleInlineAsm2;
extern fn LLVMSetModuleInlineAsm2(M: *Module, Asm: [*]const u8, Len: usize) void;
 
pub const printModuleToFile = LLVMPrintModuleToFile;
extern fn LLVMPrintModuleToFile(M: *Module, Filename: [*:0]const u8, ErrorMessage: *[*:0]const u8) Bool;
 
pub const writeBitcodeToFile = LLVMWriteBitcodeToFile;
extern fn LLVMWriteBitcodeToFile(M: *Module, Path: [*:0]const u8) c_int;
};
 
pub const disposeMessage = LLVMDisposeMessage;
extern fn LLVMDisposeMessage(Message: [*:0]const u8) void;
 
pub const VerifierFailureAction = enum(c_int) {
AbortProcess,
PrintMessage,
ReturnStatus,
};
 
pub const constVector = LLVMConstVector;
extern fn LLVMConstVector(
ScalarConstantVals: [*]*Value,
Size: c_uint,
) *Value;
 
pub const constICmp = LLVMConstICmp;
extern fn LLVMConstICmp(Predicate: IntPredicate, LHSConstant: *Value, RHSConstant: *Value) *Value;
 
pub const constFCmp = LLVMConstFCmp;
extern fn LLVMConstFCmp(Predicate: RealPredicate, LHSConstant: *Value, RHSConstant: *Value) *Value;
 
pub const getEnumAttributeKindForName = LLVMGetEnumAttributeKindForName;
extern fn LLVMGetEnumAttributeKindForName(Name: [*]const u8, SLen: usize) c_uint;
 
pub const getInlineAsm = LLVMGetInlineAsm;
extern fn LLVMGetInlineAsm(
Ty: *Type,
AsmString: [*]const u8,
AsmStringSize: usize,
Constraints: [*]const u8,
ConstraintsSize: usize,
HasSideEffects: Bool,
IsAlignStack: Bool,
Dialect: InlineAsmDialect,
CanThrow: Bool,
) *Value;
 
pub const functionType = LLVMFunctionType;
extern fn LLVMFunctionType(
ReturnType: *Type,
ParamTypes: [*]const *Type,
ParamCount: c_uint,
IsVarArg: Bool,
) *Type;
 
pub const InlineAsmDialect = enum(c_uint) { ATT, Intel };
 
pub const Attribute = opaque {};
 
pub const Builder = opaque {
pub const dispose = LLVMDisposeBuilder;
extern fn LLVMDisposeBuilder(Builder: *Builder) void;
 
pub const positionBuilder = LLVMPositionBuilder;
extern fn LLVMPositionBuilder(
Builder: *Builder,
Block: *BasicBlock,
Instr: ?*Value,
) void;
 
pub const buildZExt = LLVMBuildZExt;
extern fn LLVMBuildZExt(
*Builder,
Value: *Value,
DestTy: *Type,
Name: [*:0]const u8,
) *Value;
 
pub const buildSExt = LLVMBuildSExt;
extern fn LLVMBuildSExt(
*Builder,
Val: *Value,
DestTy: *Type,
Name: [*:0]const u8,
) *Value;
 
pub const buildCall = LLVMBuildCall2;
extern fn LLVMBuildCall2(
*Builder,
*Type,
Fn: *Value,
Args: [*]const *Value,
NumArgs: c_uint,
Name: [*:0]const u8,
) *Value;
 
pub const buildRetVoid = LLVMBuildRetVoid;
extern fn LLVMBuildRetVoid(*Builder) *Value;
 
pub const buildRet = LLVMBuildRet;
extern fn LLVMBuildRet(*Builder, V: *Value) *Value;
 
pub const buildUnreachable = LLVMBuildUnreachable;
extern fn LLVMBuildUnreachable(*Builder) *Value;
 
pub const buildAlloca = LLVMBuildAlloca;
extern fn LLVMBuildAlloca(*Builder, Ty: *Type, Name: [*:0]const u8) *Value;
 
pub const buildStore = LLVMBuildStore;
extern fn LLVMBuildStore(*Builder, Val: *Value, Ptr: *Value) *Value;
 
pub const buildLoad = LLVMBuildLoad2;
extern fn LLVMBuildLoad2(*Builder, Ty: *Type, PointerVal: *Value, Name: [*:0]const u8) *Value;
 
pub const buildFAdd = LLVMBuildFAdd;
extern fn LLVMBuildFAdd(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildAdd = LLVMBuildAdd;
extern fn LLVMBuildAdd(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildNSWAdd = LLVMBuildNSWAdd;
extern fn LLVMBuildNSWAdd(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildNUWAdd = LLVMBuildNUWAdd;
extern fn LLVMBuildNUWAdd(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildFSub = LLVMBuildFSub;
extern fn LLVMBuildFSub(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildFNeg = LLVMBuildFNeg;
extern fn LLVMBuildFNeg(*Builder, V: *Value, Name: [*:0]const u8) *Value;
 
pub const buildSub = LLVMBuildSub;
extern fn LLVMBuildSub(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildNSWSub = LLVMBuildNSWSub;
extern fn LLVMBuildNSWSub(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildNUWSub = LLVMBuildNUWSub;
extern fn LLVMBuildNUWSub(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildFMul = LLVMBuildFMul;
extern fn LLVMBuildFMul(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildMul = LLVMBuildMul;
extern fn LLVMBuildMul(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildNSWMul = LLVMBuildNSWMul;
extern fn LLVMBuildNSWMul(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildNUWMul = LLVMBuildNUWMul;
extern fn LLVMBuildNUWMul(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildUDiv = LLVMBuildUDiv;
extern fn LLVMBuildUDiv(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildSDiv = LLVMBuildSDiv;
extern fn LLVMBuildSDiv(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildFDiv = LLVMBuildFDiv;
extern fn LLVMBuildFDiv(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildURem = LLVMBuildURem;
extern fn LLVMBuildURem(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildSRem = LLVMBuildSRem;
extern fn LLVMBuildSRem(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildFRem = LLVMBuildFRem;
extern fn LLVMBuildFRem(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildAnd = LLVMBuildAnd;
extern fn LLVMBuildAnd(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildLShr = LLVMBuildLShr;
extern fn LLVMBuildLShr(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildAShr = LLVMBuildAShr;
extern fn LLVMBuildAShr(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildLShrExact = ZigLLVMBuildLShrExact;
extern fn ZigLLVMBuildLShrExact(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildAShrExact = ZigLLVMBuildAShrExact;
extern fn ZigLLVMBuildAShrExact(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildShl = LLVMBuildShl;
extern fn LLVMBuildShl(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildNUWShl = ZigLLVMBuildNUWShl;
extern fn ZigLLVMBuildNUWShl(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildNSWShl = ZigLLVMBuildNSWShl;
extern fn ZigLLVMBuildNSWShl(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildOr = LLVMBuildOr;
extern fn LLVMBuildOr(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildXor = LLVMBuildXor;
extern fn LLVMBuildXor(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildBitCast = LLVMBuildBitCast;
extern fn LLVMBuildBitCast(*Builder, Val: *Value, DestTy: *Type, Name: [*:0]const u8) *Value;
 
pub const buildGEP = LLVMBuildGEP2;
extern fn LLVMBuildGEP2(
B: *Builder,
Ty: *Type,
Pointer: *Value,
Indices: [*]const *Value,
NumIndices: c_uint,
Name: [*:0]const u8,
) *Value;
 
pub const buildInBoundsGEP = LLVMBuildInBoundsGEP2;
extern fn LLVMBuildInBoundsGEP2(
B: *Builder,
Ty: *Type,
Pointer: *Value,
Indices: [*]const *Value,
NumIndices: c_uint,
Name: [*:0]const u8,
) *Value;
 
pub const buildICmp = LLVMBuildICmp;
extern fn LLVMBuildICmp(*Builder, Op: IntPredicate, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildFCmp = LLVMBuildFCmp;
extern fn LLVMBuildFCmp(*Builder, Op: RealPredicate, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildBr = LLVMBuildBr;
extern fn LLVMBuildBr(*Builder, Dest: *BasicBlock) *Value;
 
pub const buildCondBr = LLVMBuildCondBr;
extern fn LLVMBuildCondBr(*Builder, If: *Value, Then: *BasicBlock, Else: *BasicBlock) *Value;
 
pub const buildSwitch = LLVMBuildSwitch;
extern fn LLVMBuildSwitch(*Builder, V: *Value, Else: *BasicBlock, NumCases: c_uint) *Value;
 
pub const buildPhi = LLVMBuildPhi;
extern fn LLVMBuildPhi(*Builder, Ty: *Type, Name: [*:0]const u8) *Value;
 
pub const buildExtractValue = LLVMBuildExtractValue;
extern fn LLVMBuildExtractValue(
*Builder,
AggVal: *Value,
Index: c_uint,
Name: [*:0]const u8,
) *Value;
 
pub const buildExtractElement = LLVMBuildExtractElement;
extern fn LLVMBuildExtractElement(
*Builder,
VecVal: *Value,
Index: *Value,
Name: [*:0]const u8,
) *Value;
 
pub const buildInsertElement = LLVMBuildInsertElement;
extern fn LLVMBuildInsertElement(
*Builder,
VecVal: *Value,
EltVal: *Value,
Index: *Value,
Name: [*:0]const u8,
) *Value;
 
pub const buildPtrToInt = LLVMBuildPtrToInt;
extern fn LLVMBuildPtrToInt(
*Builder,
Val: *Value,
DestTy: *Type,
Name: [*:0]const u8,
) *Value;
 
pub const buildIntToPtr = LLVMBuildIntToPtr;
extern fn LLVMBuildIntToPtr(
*Builder,
Val: *Value,
DestTy: *Type,
Name: [*:0]const u8,
) *Value;
 
pub const buildTrunc = LLVMBuildTrunc;
extern fn LLVMBuildTrunc(
*Builder,
Val: *Value,
DestTy: *Type,
Name: [*:0]const u8,
) *Value;
 
pub const buildInsertValue = LLVMBuildInsertValue;
extern fn LLVMBuildInsertValue(
*Builder,
AggVal: *Value,
EltVal: *Value,
Index: c_uint,
Name: [*:0]const u8,
) *Value;
 
pub const buildAtomicCmpXchg = LLVMBuildAtomicCmpXchg;
extern fn LLVMBuildAtomicCmpXchg(
builder: *Builder,
ptr: *Value,
cmp: *Value,
new_val: *Value,
success_ordering: AtomicOrdering,
failure_ordering: AtomicOrdering,
is_single_threaded: Bool,
) *Value;
 
pub const buildSelect = LLVMBuildSelect;
extern fn LLVMBuildSelect(
*Builder,
If: *Value,
Then: *Value,
Else: *Value,
Name: [*:0]const u8,
) *Value;
 
pub const buildFence = LLVMBuildFence;
extern fn LLVMBuildFence(
B: *Builder,
ordering: AtomicOrdering,
singleThread: Bool,
Name: [*:0]const u8,
) *Value;
 
pub const buildAtomicRmw = LLVMBuildAtomicRMW;
extern fn LLVMBuildAtomicRMW(
B: *Builder,
op: AtomicRMWBinOp,
PTR: *Value,
Val: *Value,
ordering: AtomicOrdering,
singleThread: Bool,
) *Value;
 
pub const buildFPToUI = LLVMBuildFPToUI;
extern fn LLVMBuildFPToUI(
*Builder,
Val: *Value,
DestTy: *Type,
Name: [*:0]const u8,
) *Value;
 
pub const buildFPToSI = LLVMBuildFPToSI;
extern fn LLVMBuildFPToSI(
*Builder,
Val: *Value,
DestTy: *Type,
Name: [*:0]const u8,
) *Value;
 
pub const buildUIToFP = LLVMBuildUIToFP;
extern fn LLVMBuildUIToFP(
*Builder,
Val: *Value,
DestTy: *Type,
Name: [*:0]const u8,
) *Value;
 
pub const buildSIToFP = LLVMBuildSIToFP;
extern fn LLVMBuildSIToFP(
*Builder,
Val: *Value,
DestTy: *Type,
Name: [*:0]const u8,
) *Value;
 
pub const buildFPTrunc = LLVMBuildFPTrunc;
extern fn LLVMBuildFPTrunc(
*Builder,
Val: *Value,
DestTy: *Type,
Name: [*:0]const u8,
) *Value;
 
pub const buildFPExt = LLVMBuildFPExt;
extern fn LLVMBuildFPExt(
*Builder,
Val: *Value,
DestTy: *Type,
Name: [*:0]const u8,
) *Value;
 
pub const buildExactUDiv = LLVMBuildExactUDiv;
extern fn LLVMBuildExactUDiv(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const buildExactSDiv = LLVMBuildExactSDiv;
extern fn LLVMBuildExactSDiv(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value;
 
pub const setCurrentDebugLocation = ZigLLVMSetCurrentDebugLocation2;
extern fn ZigLLVMSetCurrentDebugLocation2(builder: *Builder, line: c_uint, column: c_uint, scope: *DIScope, inlined_at: ?*DILocation) void;
 
pub const clearCurrentDebugLocation = ZigLLVMClearCurrentDebugLocation;
extern fn ZigLLVMClearCurrentDebugLocation(builder: *Builder) void;
 
pub const getCurrentDebugLocation2 = LLVMGetCurrentDebugLocation2;
extern fn LLVMGetCurrentDebugLocation2(Builder: *Builder) *Metadata;
 
pub const setCurrentDebugLocation2 = LLVMSetCurrentDebugLocation2;
extern fn LLVMSetCurrentDebugLocation2(Builder: *Builder, Loc: *Metadata) void;
 
pub const buildShuffleVector = LLVMBuildShuffleVector;
extern fn LLVMBuildShuffleVector(*Builder, V1: *Value, V2: *Value, Mask: *Value, Name: [*:0]const u8) *Value;
 
pub const setFastMath = ZigLLVMSetFastMath;
extern fn ZigLLVMSetFastMath(B: *Builder, on_state: bool) void;
 
pub const buildAddrSpaceCast = LLVMBuildAddrSpaceCast;
extern fn LLVMBuildAddrSpaceCast(B: *Builder, Val: *Value, DestTy: *Type, Name: [*:0]const u8) *Value;
 
pub const buildAllocaInAddressSpace = ZigLLVMBuildAllocaInAddressSpace;
extern fn ZigLLVMBuildAllocaInAddressSpace(B: *Builder, Ty: *Type, AddressSpace: c_uint, Name: [*:0]const u8) *Value;
 
pub const buildVAArg = LLVMBuildVAArg;
extern fn LLVMBuildVAArg(*Builder, List: *Value, Ty: *Type, Name: [*:0]const u8) *Value;
};
 
pub const MDString = opaque {
pub const get = LLVMMDStringInContext2;
extern fn LLVMMDStringInContext2(C: *Context, Str: [*]const u8, SLen: usize) *MDString;
};
 
pub const DIScope = opaque {
pub const toNode = ZigLLVMScopeToNode;
extern fn ZigLLVMScopeToNode(scope: *DIScope) *DINode;
};
 
pub const DINode = opaque {};
pub const Metadata = opaque {};
 
pub const IntPredicate = enum(c_uint) {
EQ = 32,
NE = 33,
UGT = 34,
UGE = 35,
ULT = 36,
ULE = 37,
SGT = 38,
SGE = 39,
SLT = 40,
SLE = 41,
};
 
pub const RealPredicate = enum(c_uint) {
OEQ = 1,
OGT = 2,
OGE = 3,
OLT = 4,
OLE = 5,
ONE = 6,
ORD = 7,
UNO = 8,
UEQ = 9,
UGT = 10,
UGE = 11,
ULT = 12,
ULE = 13,
UNE = 14,
};
 
pub const BasicBlock = opaque {
pub const deleteBasicBlock = LLVMDeleteBasicBlock;
extern fn LLVMDeleteBasicBlock(BB: *BasicBlock) void;
};
 
pub const TargetMachine = opaque {
pub const create = ZigLLVMCreateTargetMachine;
extern fn ZigLLVMCreateTargetMachine(
@@ -945,23 +90,11 @@ pub const TargetMachine = opaque {
llvm_ir_filename: ?[*:0]const u8,
bitcode_filename: ?[*:0]const u8,
) bool;
 
pub const createTargetDataLayout = LLVMCreateTargetDataLayout;
extern fn LLVMCreateTargetDataLayout(*TargetMachine) *TargetData;
};
 
pub const TargetData = opaque {
pub const dispose = LLVMDisposeTargetData;
extern fn LLVMDisposeTargetData(*TargetData) void;
 
pub const abiAlignmentOfType = LLVMABIAlignmentOfType;
extern fn LLVMABIAlignmentOfType(TD: *TargetData, Ty: *Type) c_uint;
 
pub const abiSizeOfType = LLVMABISizeOfType;
extern fn LLVMABISizeOfType(TD: *TargetData, Ty: *Type) c_ulonglong;
 
pub const stringRep = LLVMCopyStringRepOfTargetData;
extern fn LLVMCopyStringRepOfTargetData(TD: *TargetData) [*:0]const u8;
};
 
pub const CodeModel = enum(c_int) {
@@ -991,11 +124,6 @@ pub const RelocMode = enum(c_int) {
ROPI_RWPI,
};
 
pub const CodeGenFileType = enum(c_int) {
AssemblyFile,
ObjectFile,
};
 
pub const ABIType = enum(c_int) {
/// Target-specific (either soft or hard depending on triple, etc).
Default,
@@ -1266,576 +394,3 @@ extern fn ZigLLVMWriteImportLibrary(
output_lib_path: [*:0]const u8,
kill_at: bool,
) bool;
 
pub const Linkage = enum(c_uint) {
External,
AvailableExternally,
LinkOnceAny,
LinkOnceODR,
LinkOnceODRAutoHide,
WeakAny,
WeakODR,
Appending,
Internal,
Private,
DLLImport,
DLLExport,
ExternalWeak,
Ghost,
Common,
LinkerPrivate,
LinkerPrivateWeak,
};
 
pub const Visibility = enum(c_uint) {
Default,
Hidden,
Protected,
};
 
pub const ThreadLocalMode = enum(c_uint) {
NotThreadLocal,
GeneralDynamicTLSModel,
LocalDynamicTLSModel,
InitialExecTLSModel,
LocalExecTLSModel,
};
 
pub const AtomicOrdering = enum(c_uint) {
NotAtomic = 0,
Unordered = 1,
Monotonic = 2,
Acquire = 4,
Release = 5,
AcquireRelease = 6,
SequentiallyConsistent = 7,
};
 
pub const AtomicRMWBinOp = enum(c_int) {
Xchg,
Add,
Sub,
And,
Nand,
Or,
Xor,
Max,
Min,
UMax,
UMin,
FAdd,
FSub,
FMax,
FMin,
};
 
pub const CallConv = enum(c_uint) {
C = 0,
Fast = 8,
Cold = 9,
GHC = 10,
HiPE = 11,
WebKit_JS = 12,
AnyReg = 13,
PreserveMost = 14,
PreserveAll = 15,
Swift = 16,
CXX_FAST_TLS = 17,
 
X86_StdCall = 64,
X86_FastCall = 65,
ARM_APCS = 66,
ARM_AAPCS = 67,
ARM_AAPCS_VFP = 68,
MSP430_INTR = 69,
X86_ThisCall = 70,
PTX_Kernel = 71,
PTX_Device = 72,
SPIR_FUNC = 75,
SPIR_KERNEL = 76,
Intel_OCL_BI = 77,
X86_64_SysV = 78,
Win64 = 79,
X86_VectorCall = 80,
HHVM = 81,
HHVM_C = 82,
X86_INTR = 83,
AVR_INTR = 84,
AVR_SIGNAL = 85,
AVR_BUILTIN = 86,
AMDGPU_VS = 87,
AMDGPU_GS = 88,
AMDGPU_PS = 89,
AMDGPU_CS = 90,
AMDGPU_KERNEL = 91,
X86_RegCall = 92,
AMDGPU_HS = 93,
MSP430_BUILTIN = 94,
AMDGPU_LS = 95,
AMDGPU_ES = 96,
AArch64_VectorCall = 97,
};
 
pub const CallAttr = enum(c_int) {
Auto,
NeverTail,
NeverInline,
AlwaysTail,
AlwaysInline,
};
 
pub const TailCallKind = enum(c_uint) {
None,
Tail,
MustTail,
NoTail,
};
 
pub const DLLStorageClass = enum(c_uint) {
Default,
DLLImport,
DLLExport,
};
 
pub const address_space = struct {
pub const default: c_uint = 0;
 
// See llvm/lib/Target/X86/X86.h
pub const x86_64 = x86;
pub const x86 = struct {
pub const gs: c_uint = 256;
pub const fs: c_uint = 257;
pub const ss: c_uint = 258;
 
pub const ptr32_sptr: c_uint = 270;
pub const ptr32_uptr: c_uint = 271;
pub const ptr64: c_uint = 272;
};
 
// See llvm/lib/Target/AVR/AVR.h
pub const avr = struct {
pub const flash: c_uint = 1;
pub const flash1: c_uint = 2;
pub const flash2: c_uint = 3;
pub const flash3: c_uint = 4;
pub const flash4: c_uint = 5;
pub const flash5: c_uint = 6;
};
 
// See llvm/lib/Target/NVPTX/NVPTX.h
pub const nvptx = struct {
pub const generic: c_uint = 0;
pub const global: c_uint = 1;
pub const constant: c_uint = 2;
pub const shared: c_uint = 3;
pub const param: c_uint = 4;
pub const local: c_uint = 5;
};
 
// See llvm/lib/Target/AMDGPU/AMDGPU.h
pub const amdgpu = struct {
pub const flat: c_uint = 0;
pub const global: c_uint = 1;
pub const region: c_uint = 2;
pub const local: c_uint = 3;
pub const constant: c_uint = 4;
pub const private: c_uint = 5;
pub const constant_32bit: c_uint = 6;
pub const buffer_fat_pointer: c_uint = 7;
pub const param_d: c_uint = 6;
pub const param_i: c_uint = 7;
pub const constant_buffer_0: c_uint = 8;
pub const constant_buffer_1: c_uint = 9;
pub const constant_buffer_2: c_uint = 10;
pub const constant_buffer_3: c_uint = 11;
pub const constant_buffer_4: c_uint = 12;
pub const constant_buffer_5: c_uint = 13;
pub const constant_buffer_6: c_uint = 14;
pub const constant_buffer_7: c_uint = 15;
pub const constant_buffer_8: c_uint = 16;
pub const constant_buffer_9: c_uint = 17;
pub const constant_buffer_10: c_uint = 18;
pub const constant_buffer_11: c_uint = 19;
pub const constant_buffer_12: c_uint = 20;
pub const constant_buffer_13: c_uint = 21;
pub const constant_buffer_14: c_uint = 22;
pub const constant_buffer_15: c_uint = 23;
};
 
// See llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.h
pub const wasm = struct {
pub const variable: c_uint = 1;
pub const externref: c_uint = 10;
pub const funcref: c_uint = 20;
};
};
 
pub const DIEnumerator = opaque {};
pub const DILocalVariable = opaque {};
pub const DILocation = opaque {};
pub const DIGlobalExpression = opaque {};
 
pub const DIGlobalVariable = opaque {
pub const toNode = ZigLLVMGlobalVariableToNode;
extern fn ZigLLVMGlobalVariableToNode(global_variable: *DIGlobalVariable) *DINode;
 
pub const replaceLinkageName = ZigLLVMGlobalVariableReplaceLinkageName;
extern fn ZigLLVMGlobalVariableReplaceLinkageName(global_variable: *DIGlobalVariable, linkage_name: *MDString) void;
};
pub const DIGlobalVariableExpression = opaque {
pub const getVariable = ZigLLVMGlobalGetVariable;
extern fn ZigLLVMGlobalGetVariable(global_variable: *DIGlobalVariableExpression) *DIGlobalVariable;
};
pub const DIType = opaque {
pub const toScope = ZigLLVMTypeToScope;
extern fn ZigLLVMTypeToScope(ty: *DIType) *DIScope;
 
pub const toNode = ZigLLVMTypeToNode;
extern fn ZigLLVMTypeToNode(ty: *DIType) *DINode;
};
pub const DIFile = opaque {
pub const toScope = ZigLLVMFileToScope;
extern fn ZigLLVMFileToScope(difile: *DIFile) *DIScope;
 
pub const toNode = ZigLLVMFileToNode;
extern fn ZigLLVMFileToNode(difile: *DIFile) *DINode;
};
pub const DILexicalBlock = opaque {
pub const toScope = ZigLLVMLexicalBlockToScope;
extern fn ZigLLVMLexicalBlockToScope(lexical_block: *DILexicalBlock) *DIScope;
 
pub const toNode = ZigLLVMLexicalBlockToNode;
extern fn ZigLLVMLexicalBlockToNode(lexical_block: *DILexicalBlock) *DINode;
};
pub const DICompileUnit = opaque {
pub const toScope = ZigLLVMCompileUnitToScope;
extern fn ZigLLVMCompileUnitToScope(compile_unit: *DICompileUnit) *DIScope;
 
pub const toNode = ZigLLVMCompileUnitToNode;
extern fn ZigLLVMCompileUnitToNode(compile_unit: *DICompileUnit) *DINode;
};
pub const DISubprogram = opaque {
pub const toScope = ZigLLVMSubprogramToScope;
extern fn ZigLLVMSubprogramToScope(subprogram: *DISubprogram) *DIScope;
 
pub const toNode = ZigLLVMSubprogramToNode;
extern fn ZigLLVMSubprogramToNode(subprogram: *DISubprogram) *DINode;
 
pub const replaceLinkageName = ZigLLVMSubprogramReplaceLinkageName;
extern fn ZigLLVMSubprogramReplaceLinkageName(subprogram: *DISubprogram, linkage_name: *MDString) void;
};
 
pub const getDebugLoc = ZigLLVMGetDebugLoc2;
extern fn ZigLLVMGetDebugLoc2(line: c_uint, col: c_uint, scope: *DIScope, inlined_at: ?*DILocation) *DILocation;
 
pub const DIBuilder = opaque {
pub const dispose = ZigLLVMDisposeDIBuilder;
extern fn ZigLLVMDisposeDIBuilder(dib: *DIBuilder) void;
 
pub const finalize = ZigLLVMDIBuilderFinalize;
extern fn ZigLLVMDIBuilderFinalize(dib: *DIBuilder) void;
 
pub const createPointerType = ZigLLVMCreateDebugPointerType;
extern fn ZigLLVMCreateDebugPointerType(
dib: *DIBuilder,
pointee_type: *DIType,
size_in_bits: u64,
align_in_bits: u64,
name: [*:0]const u8,
) *DIType;
 
pub const createBasicType = ZigLLVMCreateDebugBasicType;
extern fn ZigLLVMCreateDebugBasicType(
dib: *DIBuilder,
name: [*:0]const u8,
size_in_bits: u64,
encoding: c_uint,
) *DIType;
 
pub const createArrayType = ZigLLVMCreateDebugArrayType;
extern fn ZigLLVMCreateDebugArrayType(
dib: *DIBuilder,
size_in_bits: u64,
align_in_bits: u64,
elem_type: *DIType,
elem_count: i64,
) *DIType;
 
pub const createEnumerator = ZigLLVMCreateDebugEnumerator;
extern fn ZigLLVMCreateDebugEnumerator(
dib: *DIBuilder,
name: [*:0]const u8,
val: u64,
is_unsigned: bool,
) *DIEnumerator;
 
pub const createEnumerator2 = ZigLLVMCreateDebugEnumeratorOfArbitraryPrecision;
extern fn ZigLLVMCreateDebugEnumeratorOfArbitraryPrecision(
dib: *DIBuilder,
name: [*:0]const u8,
num_words: c_uint,
words: [*]const u64,
bits: c_uint,
is_unsigned: bool,
) *DIEnumerator;
 
pub const createEnumerationType = ZigLLVMCreateDebugEnumerationType;
extern fn ZigLLVMCreateDebugEnumerationType(
dib: *DIBuilder,
scope: *DIScope,
name: [*:0]const u8,
file: *DIFile,
line_number: c_uint,
size_in_bits: u64,
align_in_bits: u64,
enumerator_array: [*]const *DIEnumerator,
enumerator_array_len: c_int,
underlying_type: *DIType,
unique_id: [*:0]const u8,
) *DIType;
 
pub const createStructType = ZigLLVMCreateDebugStructType;
extern fn ZigLLVMCreateDebugStructType(
dib: *DIBuilder,
scope: *DIScope,
name: [*:0]const u8,
file: ?*DIFile,
line_number: c_uint,
size_in_bits: u64,
align_in_bits: u64,
flags: c_uint,
derived_from: ?*DIType,
types_array: [*]const *DIType,
types_array_len: c_int,
run_time_lang: c_uint,
vtable_holder: ?*DIType,
unique_id: [*:0]const u8,
) *DIType;
 
pub const createUnionType = ZigLLVMCreateDebugUnionType;
extern fn ZigLLVMCreateDebugUnionType(
dib: *DIBuilder,
scope: *DIScope,
name: [*:0]const u8,
file: ?*DIFile,
line_number: c_uint,
size_in_bits: u64,
align_in_bits: u64,
flags: c_uint,
types_array: [*]const *DIType,
types_array_len: c_int,
run_time_lang: c_uint,
unique_id: [*:0]const u8,
) *DIType;
 
pub const createMemberType = ZigLLVMCreateDebugMemberType;
extern fn ZigLLVMCreateDebugMemberType(
dib: *DIBuilder,
scope: *DIScope,
name: [*:0]const u8,
file: ?*DIFile,
line: c_uint,
size_in_bits: u64,
align_in_bits: u64,
offset_in_bits: u64,
flags: c_uint,
ty: *DIType,
) *DIType;
 
pub const createReplaceableCompositeType = ZigLLVMCreateReplaceableCompositeType;
extern fn ZigLLVMCreateReplaceableCompositeType(
dib: *DIBuilder,
tag: c_uint,
name: [*:0]const u8,
scope: *DIScope,
file: ?*DIFile,
line: c_uint,
) *DIType;
 
pub const createForwardDeclType = ZigLLVMCreateDebugForwardDeclType;
extern fn ZigLLVMCreateDebugForwardDeclType(
dib: *DIBuilder,
tag: c_uint,
name: [*:0]const u8,
scope: ?*DIScope,
file: ?*DIFile,
line: c_uint,
) *DIType;
 
pub const replaceTemporary = ZigLLVMReplaceTemporary;
extern fn ZigLLVMReplaceTemporary(dib: *DIBuilder, ty: *DIType, replacement: *DIType) void;
 
pub const replaceDebugArrays = ZigLLVMReplaceDebugArrays;
extern fn ZigLLVMReplaceDebugArrays(
dib: *DIBuilder,
ty: *DIType,
types_array: [*]const *DIType,
types_array_len: c_int,
) void;
 
pub const createSubroutineType = ZigLLVMCreateSubroutineType;
extern fn ZigLLVMCreateSubroutineType(
dib: *DIBuilder,
types_array: [*]const *DIType,
types_array_len: c_int,
flags: c_uint,
) *DIType;
 
pub const createAutoVariable = ZigLLVMCreateAutoVariable;
extern fn ZigLLVMCreateAutoVariable(
dib: *DIBuilder,
scope: *DIScope,
name: [*:0]const u8,
file: *DIFile,
line_no: c_uint,
ty: *DIType,
always_preserve: bool,
flags: c_uint,
) *DILocalVariable;
 
pub const createGlobalVariableExpression = ZigLLVMCreateGlobalVariableExpression;
extern fn ZigLLVMCreateGlobalVariableExpression(
dib: *DIBuilder,
scope: *DIScope,
name: [*:0]const u8,
linkage_name: [*:0]const u8,
file: *DIFile,
line_no: c_uint,
di_type: *DIType,
is_local_to_unit: bool,
) *DIGlobalVariableExpression;
 
pub const createParameterVariable = ZigLLVMCreateParameterVariable;
extern fn ZigLLVMCreateParameterVariable(
dib: *DIBuilder,
scope: *DIScope,
name: [*:0]const u8,
file: *DIFile,
line_no: c_uint,
ty: *DIType,
always_preserve: bool,
flags: c_uint,
arg_no: c_uint,
) *DILocalVariable;
 
pub const createLexicalBlock = ZigLLVMCreateLexicalBlock;
extern fn ZigLLVMCreateLexicalBlock(
dib: *DIBuilder,
scope: *DIScope,
file: *DIFile,
line: c_uint,
col: c_uint,
) *DILexicalBlock;
 
pub const createCompileUnit = ZigLLVMCreateCompileUnit;
extern fn ZigLLVMCreateCompileUnit(
dib: *DIBuilder,
lang: c_uint,
difile: *DIFile,
producer: [*:0]const u8,
is_optimized: bool,
flags: [*:0]const u8,
runtime_version: c_uint,
split_name: [*:0]const u8,
dwo_id: u64,
emit_debug_info: bool,
) *DICompileUnit;
 
pub const createFile = ZigLLVMCreateFile;
extern fn ZigLLVMCreateFile(
dib: *DIBuilder,
filename: [*:0]const u8,
directory: [*:0]const u8,
) *DIFile;
 
pub const createFunction = ZigLLVMCreateFunction;
extern fn ZigLLVMCreateFunction(
dib: *DIBuilder,
scope: *DIScope,
name: [*:0]const u8,
linkage_name: [*:0]const u8,
file: *DIFile,
lineno: c_uint,
fn_di_type: *DIType,
is_local_to_unit: bool,
is_definition: bool,
scope_line: c_uint,
flags: c_uint,
is_optimized: bool,
decl_subprogram: ?*DISubprogram,
) *DISubprogram;
 
pub const createVectorType = ZigLLVMDIBuilderCreateVectorType;
extern fn ZigLLVMDIBuilderCreateVectorType(
dib: *DIBuilder,
SizeInBits: u64,
AlignInBits: u32,
Ty: *DIType,
elem_count: u32,
) *DIType;
 
pub const insertDeclareAtEnd = ZigLLVMInsertDeclareAtEnd;
extern fn ZigLLVMInsertDeclareAtEnd(
dib: *DIBuilder,
storage: *Value,
var_info: *DILocalVariable,
debug_loc: *DILocation,
basic_block_ref: *BasicBlock,
) *Value;
 
pub const insertDeclare = ZigLLVMInsertDeclare;
extern fn ZigLLVMInsertDeclare(
dib: *DIBuilder,
storage: *Value,
var_info: *DILocalVariable,
debug_loc: *DILocation,
insert_before_instr: *Value,
) *Value;
 
pub const insertDbgValueIntrinsicAtEnd = ZigLLVMInsertDbgValueIntrinsicAtEnd;
extern fn ZigLLVMInsertDbgValueIntrinsicAtEnd(
dib: *DIBuilder,
val: *Value,
var_info: *DILocalVariable,
debug_loc: *DILocation,
basic_block_ref: *BasicBlock,
) *Value;
};
 
pub const DIFlags = opaque {
pub const Zero = 0;
pub const Private = 1;
pub const Protected = 2;
pub const Public = 3;
 
pub const FwdDecl = 1 << 2;
pub const AppleBlock = 1 << 3;
pub const BlockByrefStruct = 1 << 4;
pub const Virtual = 1 << 5;
pub const Artificial = 1 << 6;
pub const Explicit = 1 << 7;
pub const Prototyped = 1 << 8;
pub const ObjcClassComplete = 1 << 9;
pub const ObjectPointer = 1 << 10;
pub const Vector = 1 << 11;
pub const StaticMember = 1 << 12;
pub const LValueReference = 1 << 13;
pub const RValueReference = 1 << 14;
pub const Reserved = 1 << 15;
 
pub const SingleInheritance = 1 << 16;
pub const MultipleInheritance = 2 << 16;
pub const VirtualInheritance = 3 << 16;
 
pub const IntroducedVirtual = 1 << 18;
pub const BitField = 1 << 19;
pub const NoReturn = 1 << 20;
pub const TypePassByValue = 1 << 22;
pub const TypePassByReference = 1 << 23;
pub const EnumClass = 1 << 24;
pub const Thunk = 1 << 25;
pub const NonTrivial = 1 << 26;
pub const BigEndian = 1 << 27;
pub const LittleEndian = 1 << 28;
pub const AllCallsDescribed = 1 << 29;
};
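
A hypothetical sketch of how the DIBuilder bindings above compose; the helper name, the std.dwarf.ATE.unsigned encoding, and the 64-bit pointer size/alignment are illustrative assumptions, and the declarations are assumed to live in the bindings.zig module that src/link.zig imports below as codegen/llvm/bindings.zig:

const std = @import("std");
const llvm = @import("bindings.zig"); // assumed: the module containing the declarations above

// Illustrative helper (not part of the PR): describe a *u32 value for debug info.
fn pointerToU32DebugType(dib: *llvm.DIBuilder) *llvm.DIType {
    const elem = dib.createBasicType("u32", 32, std.dwarf.ATE.unsigned);
    // 64-bit pointer size and alignment are chosen purely for illustration.
    return dib.createPointerType(elem, 64, 64, "*u32");
}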
 
src/codegen/llvm/bitcode_writer.zig added: 8538, removed: 5358, total 3180
@@ -0,0 +1,421 @@
const std = @import("std");
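
// The trailing numbers are the LLVM bitstream operand encodings that each tag
// lowers to in defineAbbrev below: Fixed = 1, VBR = 2, Array = 3, Char6 = 4,
// Blob = 5. Literal operands are encoded separately, and each array_* variant
// expands into an Array operand followed by its element encoding.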
 
pub const AbbrevOp = union(enum) {
literal: u32, // 0
fixed: u16, // 1
fixed_runtime: type, // 1
vbr: u16, // 2
char6: void, // 4
blob: void, // 5
array_fixed: u16, // 3, 1
array_fixed_runtime: type, // 3, 1
array_vbr: u16, // 3, 2
array_char6: void, // 3, 4
};
 
pub const Error = error{OutOfMemory};
 
pub fn BitcodeWriter(comptime types: []const type) type {
return struct {
const BcWriter = @This();
 
buffer: std.ArrayList(u32),
bit_buffer: u32 = 0,
bit_count: u5 = 0,
 
widths: [types.len]u16,
 
pub fn getTypeWidth(self: BcWriter, comptime Type: type) u16 {
return self.widths[comptime std.mem.indexOfScalar(type, types, Type).?];
}
 
pub fn init(allocator: std.mem.Allocator, widths: [types.len]u16) BcWriter {
return .{
.buffer = std.ArrayList(u32).init(allocator),
.widths = widths,
};
}
 
pub fn deinit(self: BcWriter) void {
self.buffer.deinit();
}
 
pub fn toSlice(self: BcWriter) []const u32 {
std.debug.assert(self.bit_count == 0);
return self.buffer.items;
}
 
pub fn length(self: BcWriter) usize {
std.debug.assert(self.bit_count == 0);
return self.buffer.items.len;
}
 
pub fn writeBits(self: *BcWriter, value: anytype, bits: u16) Error!void {
if (bits == 0) return;
 
var in_buffer = bufValue(value, 32);
var in_bits = bits;
 
// Store the input bits in the buffer if they fit; otherwise store as many as possible and flush.
// bit_count is updated with a wrapping add so that reaching 32 rolls over to 0, marking a full word.
if (self.bit_count > 0) {
const bits_remaining = 31 - self.bit_count + 1;
const n: u5 = @intCast(@min(bits_remaining, in_bits));
const v = @as(u32, @truncate(in_buffer)) << self.bit_count;
self.bit_buffer |= v;
in_buffer >>= n;
 
self.bit_count +%= n;
in_bits -= n;
 
if (self.bit_count != 0) return;
try self.buffer.append(self.bit_buffer);
self.bit_buffer = 0;
}
 
// Write 32-bit chunks of input bits
while (in_bits >= 32) {
try self.buffer.append(@truncate(in_buffer));
 
in_buffer >>= 31;
in_buffer >>= 1;
in_bits -= 32;
}
 
// Store remaining input bits in buffer
if (in_bits > 0) {
self.bit_count = @intCast(in_bits);
self.bit_buffer = @truncate(in_buffer);
}
}
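
// VBR encoding stores vbr_bits - 1 payload bits per chunk plus a high
// "continue" bit. For example, with vbr_bits = 6 the value 1000 is emitted as
// two 6-bit chunks: 40 (payload 8 | continue bit 32), then 31.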
 
pub fn writeVBR(self: *BcWriter, value: anytype, comptime vbr_bits: usize) Error!void {
comptime {
std.debug.assert(vbr_bits > 1);
if (@bitSizeOf(@TypeOf(value)) > 64) @compileError("Unsupported VBR block type: " ++ @typeName(@TypeOf(value)));
}
 
var in_buffer = bufValue(value, vbr_bits);
 
const continue_bit = @as(@TypeOf(in_buffer), 1) << @intCast(vbr_bits - 1);
const mask = continue_bit - 1;
 
// If input is larger than one VBR block can store
// then store vbr_bits - 1 bits and a continue bit
while (in_buffer > mask) {
try self.writeBits(in_buffer & mask | continue_bit, vbr_bits);
in_buffer >>= @intCast(vbr_bits - 1);
}
 
// Store remaining bits
try self.writeBits(in_buffer, vbr_bits);
}
 
pub fn bitsVBR(_: *const BcWriter, value: anytype, comptime vbr_bits: usize) u16 {
comptime {
std.debug.assert(vbr_bits > 1);
if (@bitSizeOf(@TypeOf(value)) > 64) @compileError("Unsupported VBR block type: " ++ @typeName(@TypeOf(value)));
}
 
var bits: u16 = 0;
 
var in_buffer = bufValue(value, vbr_bits);
 
const continue_bit = @as(@TypeOf(in_buffer), 1) << @intCast(vbr_bits - 1);
const mask = continue_bit - 1;
 
// If input is larger than one VBR block can store
// then store vbr_bits - 1 bits and a continue bit
while (in_buffer > mask) {
bits += @intCast(vbr_bits);
in_buffer >>= @intCast(vbr_bits - 1);
}
 
// Store remaining bits
bits += @intCast(vbr_bits);
return bits;
}
 
pub fn write6BitChar(self: *BcWriter, c: u8) Error!void {
try self.writeBits(charTo6Bit(c), 6);
}
 
pub fn alignTo32(self: *BcWriter) Error!void {
if (self.bit_count == 0) return;
 
try self.buffer.append(self.bit_buffer);
self.bit_buffer = 0;
self.bit_count = 0;
}
 
pub fn enterTopBlock(self: *BcWriter, comptime SubBlock: type) Error!BlockWriter(SubBlock) {
return BlockWriter(SubBlock).init(self, 2);
}
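
// Builtin abbreviation ids used by the bitstream format: 0 = END_BLOCK,
// 1 = ENTER_SUBBLOCK, 2 = DEFINE_ABBREV, 3 = UNABBREV_RECORD. Abbrevs defined
// by a block start at 4, which is why abbrevId() below adds 4 to the index.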
 
fn BlockWriter(comptime Block: type) type {
return struct {
const Self = @This();
 
// The minimum abbrev id length based on the number of abbrevs present in the block
pub const abbrev_len = std.math.log2_int_ceil(
u6,
4 + (if (@hasDecl(Block, "abbrevs")) Block.abbrevs.len else 0),
);
 
start: usize,
bitcode: *BcWriter,
 
pub fn init(bitcode: *BcWriter, comptime parent_abbrev_len: u6) Error!Self {
try bitcode.writeBits(1, parent_abbrev_len);
try bitcode.writeVBR(Block.id, 8);
try bitcode.writeVBR(abbrev_len, 4);
try bitcode.alignTo32();
 
// Remember the index of the block-length word and write a placeholder; end() patches in the real word count.
const start = bitcode.length();
try bitcode.writeBits(0, 32);
 
// Predefine all block abbrevs
inline for (Block.abbrevs) |Abbrev| {
try defineAbbrev(bitcode, &Abbrev.ops);
}
 
return .{
.start = start,
.bitcode = bitcode,
};
}
 
pub fn enterSubBlock(self: Self, comptime SubBlock: type) Error!BlockWriter(SubBlock) {
return BlockWriter(SubBlock).init(self.bitcode, abbrev_len);
}
 
pub fn end(self: *Self) Error!void {
try self.bitcode.writeBits(0, abbrev_len);
try self.bitcode.alignTo32();
 
// Set the number of words in the block at the start of the block
self.bitcode.buffer.items[self.start] = @truncate(self.bitcode.length() - self.start - 1);
}
 
pub fn writeUnabbrev(self: *Self, code: u32, values: []const u64) Error!void {
try self.bitcode.writeBits(3, abbrev_len);
try self.bitcode.writeVBR(code, 6);
try self.bitcode.writeVBR(values.len, 6);
for (values) |val| {
try self.bitcode.writeVBR(val, 6);
}
}
 
pub fn writeAbbrev(self: *Self, params: anytype) Error!void {
return self.writeAbbrevAdapted(params, struct {
pub fn get(_: @This(), param: anytype, comptime _: []const u8) @TypeOf(param) {
return param;
}
}{});
}
 
pub fn abbrevId(comptime Abbrev: type) u32 {
inline for (Block.abbrevs, 0..) |abbrev, i| {
if (Abbrev == abbrev) return i + 4;
}
 
@compileError("Unknown abbrev: " ++ @typeName(Abbrev));
}
 
pub fn writeAbbrevAdapted(
self: *Self,
params: anytype,
adapter: anytype,
) Error!void {
const Abbrev = @TypeOf(params);
 
try self.bitcode.writeBits(comptime abbrevId(Abbrev), abbrev_len);
 
const fields = std.meta.fields(Abbrev);
 
// This abbreviation might only contain literals
if (fields.len == 0) return;
 
comptime var field_index: usize = 0;
inline for (Abbrev.ops) |ty| {
const field_name = fields[field_index].name;
const param = @field(params, field_name);
 
switch (ty) {
.literal => continue,
.fixed => |len| try self.bitcode.writeBits(adapter.get(param, field_name), len),
.fixed_runtime => |width_ty| try self.bitcode.writeBits(
adapter.get(param, field_name),
self.bitcode.getTypeWidth(width_ty),
),
.vbr => |len| try self.bitcode.writeVBR(adapter.get(param, field_name), len),
.char6 => try self.bitcode.write6BitChar(adapter.get(param, field_name)),
.blob => {
try self.bitcode.writeVBR(param.len, 6);
try self.bitcode.alignTo32();
for (param) |x| {
try self.bitcode.writeBits(x, 8);
}
try self.bitcode.alignTo32();
},
.array_fixed => |len| {
try self.bitcode.writeVBR(param.len, 6);
for (param) |x| {
try self.bitcode.writeBits(adapter.get(x, field_name), len);
}
},
.array_fixed_runtime => |width_ty| {
try self.bitcode.writeVBR(param.len, 6);
for (param) |x| {
try self.bitcode.writeBits(
adapter.get(x, field_name),
self.bitcode.getTypeWidth(width_ty),
);
}
},
.array_vbr => |len| {
try self.bitcode.writeVBR(param.len, 6);
for (param) |x| {
try self.bitcode.writeVBR(adapter.get(x, field_name), len);
}
},
.array_char6 => {
try self.bitcode.writeVBR(param.len, 6);
for (param) |x| {
try self.bitcode.write6BitChar(adapter.get(x, field_name));
}
},
}
field_index += 1;
if (field_index == fields.len) break;
}
}
 
fn defineAbbrev(bitcode: *BcWriter, comptime ops: []const AbbrevOp) Error!void {
try bitcode.writeBits(2, abbrev_len);
 
// ops.len is not the encoded operand count: each array_* op expands to two bitstream operands (array + element encoding)
try bitcode.writeVBR(blk: {
var count: usize = 0;
inline for (ops) |op| {
count += switch (op) {
.literal, .fixed, .fixed_runtime, .vbr, .char6, .blob => 1,
.array_fixed, .array_fixed_runtime, .array_vbr, .array_char6 => 2,
};
}
break :blk count;
}, 5);
 
inline for (ops) |op| {
switch (op) {
.literal => |value| {
try bitcode.writeBits(1, 1);
try bitcode.writeVBR(value, 8);
},
.fixed => |width| {
try bitcode.writeBits(0, 1);
try bitcode.writeBits(1, 3);
try bitcode.writeVBR(width, 5);
},
.fixed_runtime => |width_ty| {
try bitcode.writeBits(0, 1);
try bitcode.writeBits(1, 3);
try bitcode.writeVBR(bitcode.getTypeWidth(width_ty), 5);
},
.vbr => |width| {
try bitcode.writeBits(0, 1);
try bitcode.writeBits(2, 3);
try bitcode.writeVBR(width, 5);
},
.char6 => {
try bitcode.writeBits(0, 1);
try bitcode.writeBits(4, 3);
},
.blob => {
try bitcode.writeBits(0, 1);
try bitcode.writeBits(5, 3);
},
.array_fixed => |width| {
// Array op
try bitcode.writeBits(0, 1);
try bitcode.writeBits(3, 3);
 
// Fixed or VBR op
try bitcode.writeBits(0, 1);
try bitcode.writeBits(1, 3);
try bitcode.writeVBR(width, 5);
},
.array_fixed_runtime => |width_ty| {
// Array op
try bitcode.writeBits(0, 1);
try bitcode.writeBits(3, 3);
 
// Fixed or VBR op
try bitcode.writeBits(0, 1);
try bitcode.writeBits(1, 3);
try bitcode.writeVBR(bitcode.getTypeWidth(width_ty), 5);
},
.array_vbr => |width| {
// Array op
try bitcode.writeBits(0, 1);
try bitcode.writeBits(3, 3);
 
// Fixed or VBR op
try bitcode.writeBits(0, 1);
try bitcode.writeBits(2, 3);
try bitcode.writeVBR(width, 5);
},
.array_char6 => {
// Array op
try bitcode.writeBits(0, 1);
try bitcode.writeBits(3, 3);
 
// Char6 op
try bitcode.writeBits(0, 1);
try bitcode.writeBits(4, 3);
},
}
}
}
};
}
};
}
 
fn charTo6Bit(c: u8) u8 {
return switch (c) {
'a'...'z' => c - 'a',
'A'...'Z' => c - 'A' + 26,
'0'...'9' => c - '0' + 52,
'.' => 62,
'_' => 63,
else => @panic("Failed to encode byte as 6-bit char"),
};
}
 
fn BufType(comptime T: type, comptime min_len: usize) type {
return std.meta.Int(.unsigned, @max(min_len, @bitSizeOf(switch (@typeInfo(T)) {
.ComptimeInt => u32,
.Int => |info| if (info.signedness == .unsigned)
T
else
@compileError("Unsupported type: " ++ @typeName(T)),
.Enum => |info| info.tag_type,
.Bool => u1,
.Struct => |info| switch (info.layout) {
.Auto, .Extern => @compileError("Unsupported type: " ++ @typeName(T)),
.Packed => std.meta.Int(.unsigned, @bitSizeOf(T)),
},
else => @compileError("Unsupported type: " ++ @typeName(T)),
})));
}
 
fn bufValue(value: anytype, comptime min_len: usize) BufType(@TypeOf(value), min_len) {
return switch (@typeInfo(@TypeOf(value))) {
.ComptimeInt, .Int => @intCast(value),
.Enum => @intFromEnum(value),
.Bool => @intFromBool(value),
.Struct => @intCast(@as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(value))), @bitCast(value))),
else => unreachable,
};
}
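
A minimal usage sketch of the writer above, exercising only the raw writeBits/writeVBR layer; it assumes this file is the bitcode_writer.zig module imported by the next file and is not part of the PR itself:

const std = @import("std");
const bitcode_writer = @import("bitcode_writer.zig"); // assumed filename, per the import in the next file

test "write a magic word and one VBR6 value" {
    // No fixed_runtime operands are used, so the type-width table is empty.
    var bc = bitcode_writer.BitcodeWriter(&[_]type{}).init(std.testing.allocator, .{});
    defer bc.deinit();

    try bc.writeBits(0xdec04342, 32); // LLVM bitcode magic: 'B', 'C', 0xC0, 0xDE
    try bc.writeVBR(@as(u32, 1000), 6); // two chunks: 40, then 31 (see writeVBR above)
    try bc.alignTo32();

    // One word for the magic, one flushed by alignTo32 for the 12 VBR bits.
    try std.testing.expectEqual(@as(usize, 2), bc.length());
}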
 
src/codegen/llvm/ir.zig added: 8538, removed: 5358, total 3180
@@ -0,0 +1,1636 @@
const std = @import("std");
const Builder = @import("Builder.zig");
const bitcode_writer = @import("bitcode_writer.zig");
 
const AbbrevOp = bitcode_writer.AbbrevOp;
 
pub const MAGIC: u32 = 0xdec04342;
 
const ValueAbbrev = AbbrevOp{ .vbr = 6 };
const ValueArrayAbbrev = AbbrevOp{ .array_vbr = 6 };
 
const ConstantAbbrev = AbbrevOp{ .vbr = 6 };
const ConstantArrayAbbrev = AbbrevOp{ .array_vbr = 6 };
 
const MetadataAbbrev = AbbrevOp{ .vbr = 16 };
const MetadataArrayAbbrev = AbbrevOp{ .array_vbr = 16 };
 
const LineAbbrev = AbbrevOp{ .vbr = 8 };
const ColumnAbbrev = AbbrevOp{ .vbr = 8 };
 
const BlockAbbrev = AbbrevOp{ .vbr = 6 };
 
pub const MetadataKind = enum(u1) {
dbg = 0,
};
 
pub const Identification = struct {
pub const id = 13;
 
pub const abbrevs = [_]type{
Version,
Epoch,
};
 
pub const Version = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 1 },
.{ .array_fixed = 8 },
};
string: []const u8,
};
 
pub const Epoch = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 2 },
.{ .vbr = 6 },
};
epoch: u32,
};
};
 
pub const Module = struct {
pub const id = 8;
 
pub const abbrevs = [_]type{
Version,
String,
Variable,
Function,
Alias,
};
 
pub const Version = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 1 },
.{ .literal = 2 },
};
};
 
pub const String = struct {
pub const ops = [_]AbbrevOp{
.{ .vbr = 4 },
.{ .array_fixed = 8 },
};
code: u16,
string: []const u8,
};
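
// Note: the global value records below do not carry their names inline;
// strtab_offset and strtab_size index into the single string-table blob that
// is emitted in the Strtab block (id 23) defined near the bottom of this file.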
 
pub const Variable = struct {
const AddrSpaceAndIsConst = packed struct {
is_const: bool,
one: u1 = 1,
addr_space: Builder.AddrSpace,
};
 
pub const ops = [_]AbbrevOp{
.{ .literal = 7 }, // Code
.{ .vbr = 16 }, // strtab_offset
.{ .vbr = 16 }, // strtab_size
.{ .fixed_runtime = Builder.Type },
.{ .fixed = @bitSizeOf(AddrSpaceAndIsConst) }, // isconst
ConstantAbbrev, // initid
.{ .fixed = @bitSizeOf(Builder.Linkage) },
.{ .fixed = @bitSizeOf(Builder.Alignment) },
.{ .vbr = 16 }, // section
.{ .fixed = @bitSizeOf(Builder.Visibility) },
.{ .fixed = @bitSizeOf(Builder.ThreadLocal) }, // threadlocal
.{ .fixed = @bitSizeOf(Builder.UnnamedAddr) },
.{ .fixed = @bitSizeOf(Builder.ExternallyInitialized) },
.{ .fixed = @bitSizeOf(Builder.DllStorageClass) },
.{ .literal = 0 }, // comdat
.{ .literal = 0 }, // attributes
.{ .fixed = @bitSizeOf(Builder.Preemption) },
};
strtab_offset: usize,
strtab_size: usize,
type_index: Builder.Type,
is_const: AddrSpaceAndIsConst,
initid: u32,
linkage: Builder.Linkage,
alignment: std.meta.Int(.unsigned, @bitSizeOf(Builder.Alignment)),
section: usize,
visibility: Builder.Visibility,
thread_local: Builder.ThreadLocal,
unnamed_addr: Builder.UnnamedAddr,
externally_initialized: Builder.ExternallyInitialized,
dllstorageclass: Builder.DllStorageClass,
preemption: Builder.Preemption,
};
 
pub const Function = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 8 }, // Code
.{ .vbr = 16 }, // strtab_offset
.{ .vbr = 16 }, // strtab_size
.{ .fixed_runtime = Builder.Type },
.{ .fixed = @bitSizeOf(Builder.CallConv) },
.{ .fixed = 1 }, // isproto
.{ .fixed = @bitSizeOf(Builder.Linkage) },
.{ .vbr = 16 }, // paramattr
.{ .fixed = @bitSizeOf(Builder.Alignment) },
.{ .vbr = 16 }, // section
.{ .fixed = @bitSizeOf(Builder.Visibility) },
.{ .literal = 0 }, // gc
.{ .fixed = @bitSizeOf(Builder.UnnamedAddr) },
.{ .literal = 0 }, // prologuedata
.{ .fixed = @bitSizeOf(Builder.DllStorageClass) },
.{ .literal = 0 }, // comdat
.{ .literal = 0 }, // prefixdata
.{ .literal = 0 }, // personalityfn
.{ .fixed = @bitSizeOf(Builder.Preemption) },
.{ .fixed = @bitSizeOf(Builder.AddrSpace) },
};
strtab_offset: usize,
strtab_size: usize,
type_index: Builder.Type,
call_conv: Builder.CallConv,
is_proto: bool,
linkage: Builder.Linkage,
paramattr: usize,
alignment: std.meta.Int(.unsigned, @bitSizeOf(Builder.Alignment)),
section: usize,
visibility: Builder.Visibility,
unnamed_addr: Builder.UnnamedAddr,
dllstorageclass: Builder.DllStorageClass,
preemption: Builder.Preemption,
addr_space: Builder.AddrSpace,
};
 
pub const Alias = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 14 }, // Code
.{ .vbr = 16 }, // strtab_offset
.{ .vbr = 16 }, // strtab_size
.{ .fixed_runtime = Builder.Type },
.{ .fixed = @bitSizeOf(Builder.AddrSpace) },
ConstantAbbrev, // aliasee val
.{ .fixed = @bitSizeOf(Builder.Linkage) },
.{ .fixed = @bitSizeOf(Builder.Visibility) },
.{ .fixed = @bitSizeOf(Builder.DllStorageClass) },
.{ .fixed = @bitSizeOf(Builder.ThreadLocal) },
.{ .fixed = @bitSizeOf(Builder.UnnamedAddr) },
.{ .fixed = @bitSizeOf(Builder.Preemption) },
};
strtab_offset: usize,
strtab_size: usize,
type_index: Builder.Type,
addr_space: Builder.AddrSpace,
aliasee: u32,
linkage: Builder.Linkage,
visibility: Builder.Visibility,
dllstorageclass: Builder.DllStorageClass,
thread_local: Builder.ThreadLocal,
unnamed_addr: Builder.UnnamedAddr,
preemption: Builder.Preemption,
};
};
 
pub const Type = struct {
pub const id = 17;
 
pub const abbrevs = [_]type{
NumEntry,
Simple,
Opaque,
Integer,
StructAnon,
StructNamed,
StructName,
Array,
Vector,
Pointer,
Target,
Function,
};
 
pub const NumEntry = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 1 },
.{ .fixed = 32 },
};
num: u32,
};
 
pub const Simple = struct {
pub const ops = [_]AbbrevOp{
.{ .vbr = 4 },
};
code: u5,
};
 
pub const Opaque = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 6 },
.{ .literal = 0 },
};
};
 
pub const Integer = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 7 },
.{ .fixed = 28 },
};
width: u28,
};
 
pub const StructAnon = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 18 },
.{ .fixed = 1 },
.{ .array_fixed_runtime = Builder.Type },
};
is_packed: bool,
types: []const Builder.Type,
};
 
pub const StructNamed = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 20 },
.{ .fixed = 1 },
.{ .array_fixed_runtime = Builder.Type },
};
is_packed: bool,
types: []const Builder.Type,
};
 
pub const StructName = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 19 },
.{ .array_fixed = 8 },
};
string: []const u8,
};
 
pub const Array = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 11 },
.{ .vbr = 16 },
.{ .fixed_runtime = Builder.Type },
};
len: u64,
child: Builder.Type,
};
 
pub const Vector = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 12 },
.{ .vbr = 16 },
.{ .fixed_runtime = Builder.Type },
};
len: u64,
child: Builder.Type,
};
 
pub const Pointer = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 25 },
.{ .vbr = 4 },
};
addr_space: Builder.AddrSpace,
};
 
pub const Target = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 26 },
.{ .vbr = 4 },
.{ .array_fixed_runtime = Builder.Type },
.{ .array_fixed = 32 },
};
num_types: u32,
types: []const Builder.Type,
ints: []const u32,
};
 
pub const Function = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 21 },
.{ .fixed = 1 },
.{ .fixed_runtime = Builder.Type },
.{ .array_fixed_runtime = Builder.Type },
};
is_vararg: bool,
return_type: Builder.Type,
param_types: []const Builder.Type,
};
};
 
pub const Paramattr = struct {
pub const id = 9;
 
pub const abbrevs = [_]type{
Entry,
};
 
pub const Entry = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 2 },
.{ .array_vbr = 8 },
};
group_indices: []const u64,
};
};
 
pub const ParamattrGroup = struct {
pub const id = 10;
 
pub const abbrevs = [_]type{};
};
 
pub const Constants = struct {
pub const id = 11;
 
pub const abbrevs = [_]type{
SetType,
Null,
Undef,
Poison,
Integer,
Half,
Float,
Double,
Fp80,
Fp128,
Aggregate,
String,
CString,
Cast,
Binary,
Cmp,
ExtractElement,
InsertElement,
ShuffleVector,
ShuffleVectorEx,
BlockAddress,
DsoLocalEquivalentOrNoCfi,
};
 
pub const SetType = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 1 },
.{ .fixed_runtime = Builder.Type },
};
type_id: Builder.Type,
};
 
pub const Null = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 2 },
};
};
 
pub const Undef = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 3 },
};
};
 
pub const Poison = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 26 },
};
};
 
pub const Integer = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 4 },
.{ .vbr = 16 },
};
value: u64,
};
 
pub const Half = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 6 },
.{ .fixed = 16 },
};
value: u16,
};
 
pub const Float = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 6 },
.{ .fixed = 32 },
};
value: u32,
};
 
pub const Double = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 6 },
.{ .vbr = 6 },
};
value: u64,
};
 
pub const Fp80 = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 6 },
.{ .vbr = 6 },
.{ .vbr = 6 },
};
hi: u64,
lo: u16,
};
 
pub const Fp128 = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 6 },
.{ .vbr = 6 },
.{ .vbr = 6 },
};
lo: u64,
hi: u64,
};
 
pub const Aggregate = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 7 },
.{ .array_fixed = 32 },
};
values: []const Builder.Constant,
};
 
pub const String = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 8 },
.{ .array_fixed = 8 },
};
string: []const u8,
};
 
pub const CString = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 9 },
.{ .array_fixed = 8 },
};
string: []const u8,
};
 
pub const Cast = struct {
const CastOpcode = Builder.CastOpcode;
pub const ops = [_]AbbrevOp{
.{ .literal = 11 },
.{ .fixed = @bitSizeOf(CastOpcode) },
.{ .fixed_runtime = Builder.Type },
ConstantAbbrev,
};
 
opcode: CastOpcode,
type_index: Builder.Type,
val: Builder.Constant,
};
 
pub const Binary = struct {
const BinaryOpcode = Builder.BinaryOpcode;
pub const ops = [_]AbbrevOp{
.{ .literal = 10 },
.{ .fixed = @bitSizeOf(BinaryOpcode) },
ConstantAbbrev,
ConstantAbbrev,
};
 
opcode: BinaryOpcode,
lhs: Builder.Constant,
rhs: Builder.Constant,
};
 
pub const Cmp = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 17 },
.{ .fixed_runtime = Builder.Type },
ConstantAbbrev,
ConstantAbbrev,
.{ .vbr = 6 },
};
 
ty: Builder.Type,
lhs: Builder.Constant,
rhs: Builder.Constant,
pred: u32,
};
 
pub const ExtractElement = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 14 },
.{ .fixed_runtime = Builder.Type },
ConstantAbbrev,
.{ .fixed_runtime = Builder.Type },
ConstantAbbrev,
};
 
val_type: Builder.Type,
val: Builder.Constant,
index_type: Builder.Type,
index: Builder.Constant,
};
 
pub const InsertElement = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 15 },
ConstantAbbrev,
ConstantAbbrev,
.{ .fixed_runtime = Builder.Type },
ConstantAbbrev,
};
 
val: Builder.Constant,
elem: Builder.Constant,
index_type: Builder.Type,
index: Builder.Constant,
};
 
pub const ShuffleVector = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 16 },
ValueAbbrev,
ValueAbbrev,
ValueAbbrev,
};
 
lhs: Builder.Constant,
rhs: Builder.Constant,
mask: Builder.Constant,
};
 
pub const ShuffleVectorEx = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 19 },
.{ .fixed_runtime = Builder.Type },
ValueAbbrev,
ValueAbbrev,
ValueAbbrev,
};
 
ty: Builder.Type,
lhs: Builder.Constant,
rhs: Builder.Constant,
mask: Builder.Constant,
};
 
pub const BlockAddress = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 21 },
.{ .fixed_runtime = Builder.Type },
ConstantAbbrev,
BlockAbbrev,
};
type_id: Builder.Type,
function: u32,
block: u32,
};
 
pub const DsoLocalEquivalentOrNoCfi = struct {
pub const ops = [_]AbbrevOp{
.{ .fixed = 5 },
.{ .fixed_runtime = Builder.Type },
ConstantAbbrev,
};
code: u5,
type_id: Builder.Type,
function: u32,
};
};
 
pub const MetadataKindBlock = struct {
pub const id = 22;
 
pub const abbrevs = [_]type{
Kind,
};
 
pub const Kind = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 6 },
.{ .vbr = 4 },
.{ .array_fixed = 8 },
};
id: u32,
name: []const u8,
};
};
 
pub const MetadataAttachmentBlock = struct {
pub const id = 16;
 
pub const abbrevs = [_]type{
AttachmentSingle,
};
 
pub const AttachmentSingle = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 11 },
.{ .fixed = 1 },
MetadataAbbrev,
};
kind: MetadataKind,
metadata: Builder.Metadata,
};
};
 
pub const MetadataBlock = struct {
pub const id = 15;
 
pub const abbrevs = [_]type{
Strings,
File,
CompileUnit,
Subprogram,
LexicalBlock,
Location,
BasicType,
CompositeType,
DerivedType,
SubroutineType,
Enumerator,
Subrange,
Expression,
Node,
LocalVar,
Parameter,
GlobalVar,
GlobalVarExpression,
Constant,
Name,
NamedNode,
GlobalDeclAttachment,
};
 
pub const Strings = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 35 },
.{ .vbr = 6 },
.{ .vbr = 6 },
.blob,
};
num_strings: u32,
strings_offset: u32,
blob: []const u8,
};
 
pub const File = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 16 },
.{ .literal = 0 }, // is distinct
MetadataAbbrev, // filename
MetadataAbbrev, // directory
.{ .literal = 0 }, // checksum
.{ .literal = 0 }, // checksum
};
 
filename: Builder.MetadataString,
directory: Builder.MetadataString,
};
 
pub const CompileUnit = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 20 },
.{ .literal = 1 }, // is distinct
.{ .literal = std.dwarf.LANG.C99 }, // source language
MetadataAbbrev, // file
MetadataAbbrev, // producer
.{ .fixed = 1 }, // isOptimized
.{ .literal = 0 }, // raw flags
.{ .literal = 0 }, // runtime version
.{ .literal = 0 }, // split debug file name
.{ .literal = 1 }, // emission kind
MetadataAbbrev, // enums
.{ .literal = 0 }, // retained types
.{ .literal = 0 }, // subprograms
MetadataAbbrev, // globals
.{ .literal = 0 }, // imported entities
.{ .literal = 0 }, // DWO ID
.{ .literal = 0 }, // macros
.{ .literal = 0 }, // split debug inlining
.{ .literal = 0 }, // debug info profiling
.{ .literal = 0 }, // name table kind
.{ .literal = 0 }, // ranges base address
.{ .literal = 0 }, // raw sysroot
.{ .literal = 0 }, // raw SDK
};
 
file: Builder.Metadata,
producer: Builder.MetadataString,
is_optimized: bool,
enums: Builder.Metadata,
globals: Builder.Metadata,
};
 
pub const Subprogram = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 21 },
.{ .literal = 0b111 }, // is distinct | has sp flags | has flags
MetadataAbbrev, // scope
MetadataAbbrev, // name
MetadataAbbrev, // linkage name
MetadataAbbrev, // file
LineAbbrev, // line
MetadataAbbrev, // type
LineAbbrev, // scope line
.{ .literal = 0 }, // containing type
.{ .fixed = 32 }, // sp flags
.{ .literal = 0 }, // virtual index
.{ .fixed = 32 }, // flags
MetadataAbbrev, // compile unit
.{ .literal = 0 }, // template params
.{ .literal = 0 }, // declaration
.{ .literal = 0 }, // retained nodes
.{ .literal = 0 }, // this adjustment
.{ .literal = 0 }, // thrown types
.{ .literal = 0 }, // annotations
.{ .literal = 0 }, // target function name
};
 
scope: Builder.Metadata,
name: Builder.MetadataString,
linkage_name: Builder.MetadataString,
file: Builder.Metadata,
line: u32,
ty: Builder.Metadata,
scope_line: u32,
sp_flags: Builder.Metadata.Subprogram.DISPFlags,
flags: Builder.Metadata.DIFlags,
compile_unit: Builder.Metadata,
};
 
pub const LexicalBlock = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 22 },
.{ .literal = 0 }, // is distinct
MetadataAbbrev, // scope
MetadataAbbrev, // file
LineAbbrev, // line
ColumnAbbrev, // column
};
 
scope: Builder.Metadata,
file: Builder.Metadata,
line: u32,
column: u32,
};
 
pub const Location = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 7 },
.{ .literal = 0 }, // is distinct
LineAbbrev, // line
ColumnAbbrev, // column
MetadataAbbrev, // scope
MetadataAbbrev, // inlined at
.{ .literal = 0 }, // is implicit code
};
 
line: u32,
column: u32,
scope: u32,
inlined_at: Builder.Metadata,
};
 
pub const BasicType = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 15 },
.{ .literal = 0 }, // is distinct
.{ .literal = std.dwarf.TAG.base_type }, // tag
MetadataAbbrev, // name
.{ .vbr = 6 }, // size in bits
.{ .literal = 0 }, // align in bits
.{ .vbr = 8 }, // encoding
.{ .literal = 0 }, // flags
};
 
name: Builder.MetadataString,
size_in_bits: u64,
encoding: u32,
};
 
pub const CompositeType = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 18 },
.{ .literal = 0 | 0x2 }, // is distinct | is not used in old type ref
.{ .fixed = 32 }, // tag
MetadataAbbrev, // name
MetadataAbbrev, // file
LineAbbrev, // line
MetadataAbbrev, // scope
MetadataAbbrev, // underlying type
.{ .vbr = 6 }, // size in bits
.{ .vbr = 6 }, // align in bits
.{ .literal = 0 }, // offset in bits
.{ .fixed = 32 }, // flags
MetadataAbbrev, // elements
.{ .literal = 0 }, // runtime lang
.{ .literal = 0 }, // vtable holder
.{ .literal = 0 }, // template params
.{ .literal = 0 }, // raw id
.{ .literal = 0 }, // discriminator
.{ .literal = 0 }, // data location
.{ .literal = 0 }, // associated
.{ .literal = 0 }, // allocated
.{ .literal = 0 }, // rank
.{ .literal = 0 }, // annotations
};
 
tag: u32,
name: Builder.MetadataString,
file: Builder.Metadata,
line: u32,
scope: Builder.Metadata,
underlying_type: Builder.Metadata,
size_in_bits: u64,
align_in_bits: u64,
flags: Builder.Metadata.DIFlags,
elements: Builder.Metadata,
};
 
pub const DerivedType = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 17 },
.{ .literal = 0 }, // is distinct
.{ .fixed = 32 }, // tag
MetadataAbbrev, // name
MetadataAbbrev, // file
LineAbbrev, // line
MetadataAbbrev, // scope
MetadataAbbrev, // underlying type
.{ .vbr = 6 }, // size in bits
.{ .vbr = 6 }, // align in bits
.{ .vbr = 6 }, // offset in bits
.{ .literal = 0 }, // flags
.{ .literal = 0 }, // extra data
};
 
tag: u32,
name: Builder.MetadataString,
file: Builder.Metadata,
line: u32,
scope: Builder.Metadata,
underlying_type: Builder.Metadata,
size_in_bits: u64,
align_in_bits: u64,
offset_in_bits: u64,
};
 
pub const SubroutineType = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 19 },
.{ .literal = 0 | 0x2 }, // is distinct | has no old type refs
.{ .literal = 0 }, // flags
MetadataAbbrev, // types
.{ .literal = 0 }, // cc
};
 
types: Builder.Metadata,
};
 
pub const Enumerator = struct {
pub const id = 14;
 
pub const Flags = packed struct(u3) {
distinct: bool = false,
unsigned: bool,
bigint: bool,
};
 
pub const ops = [_]AbbrevOp{
.{ .literal = Enumerator.id },
.{ .fixed = @bitSizeOf(Flags) }, // flags
.{ .vbr = 6 }, // bit width
MetadataAbbrev, // name
.{ .vbr = 16 }, // integer value
};
 
flags: Flags,
bit_width: u32,
name: Builder.MetadataString,
value: u64,
};
 
pub const Subrange = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 13 },
.{ .literal = 0b10 }, // is distinct | version
MetadataAbbrev, // count
MetadataAbbrev, // lower bound
.{ .literal = 0 }, // upper bound
.{ .literal = 0 }, // stride
};
 
count: Builder.Metadata,
lower_bound: Builder.Metadata,
};
 
pub const Expression = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 29 },
.{ .literal = 0 | (3 << 1) }, // is distinct | version
MetadataArrayAbbrev, // elements
};
 
elements: []const u32,
};
 
pub const Node = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 3 },
MetadataArrayAbbrev, // elements
};
 
elements: []const Builder.Metadata,
};
 
pub const LocalVar = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 28 },
.{ .literal = 0b10 }, // is distinct | has alignment
MetadataAbbrev, // scope
MetadataAbbrev, // name
MetadataAbbrev, // file
LineAbbrev, // line
MetadataAbbrev, // type
.{ .literal = 0 }, // arg
.{ .literal = 0 }, // flags
.{ .literal = 0 }, // align bits
.{ .literal = 0 }, // annotations
};
 
scope: Builder.Metadata,
name: Builder.MetadataString,
file: Builder.Metadata,
line: u32,
ty: Builder.Metadata,
};
 
pub const Parameter = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 28 },
.{ .literal = 0b10 }, // is distinct | has alignment
MetadataAbbrev, // scope
MetadataAbbrev, // name
MetadataAbbrev, // file
LineAbbrev, // line
MetadataAbbrev, // type
.{ .vbr = 4 }, // arg
.{ .literal = 0 }, // flags
.{ .literal = 0 }, // align bits
.{ .literal = 0 }, // annotations
};
 
scope: Builder.Metadata,
name: Builder.MetadataString,
file: Builder.Metadata,
line: u32,
ty: Builder.Metadata,
arg: u32,
};
 
pub const GlobalVar = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 27 },
.{ .literal = 0b101 }, // is distinct | version
MetadataAbbrev, // scope
MetadataAbbrev, // name
MetadataAbbrev, // linkage name
MetadataAbbrev, // file
LineAbbrev, // line
MetadataAbbrev, // type
.{ .fixed = 1 }, // local
.{ .literal = 1 }, // defined
.{ .literal = 0 }, // static data members declaration
.{ .literal = 0 }, // template params
.{ .literal = 0 }, // align in bits
.{ .literal = 0 }, // annotations
};
 
scope: Builder.Metadata,
name: Builder.MetadataString,
linkage_name: Builder.MetadataString,
file: Builder.Metadata,
line: u32,
ty: Builder.Metadata,
local: bool,
};
 
pub const GlobalVarExpression = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 37 },
.{ .literal = 0 }, // is distinct
MetadataAbbrev, // variable
MetadataAbbrev, // expression
};
 
variable: Builder.Metadata,
expression: Builder.Metadata,
};
 
pub const Constant = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 2 },
MetadataAbbrev, // type
MetadataAbbrev, // value
};
 
ty: Builder.Type,
constant: Builder.Constant,
};
 
pub const Name = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 4 },
.{ .array_fixed = 8 }, // name
};
 
name: []const u8,
};
 
pub const NamedNode = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 10 },
MetadataArrayAbbrev, // elements
};
 
elements: []const Builder.Metadata,
};
 
pub const GlobalDeclAttachment = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 36 },
ValueAbbrev, // value id
.{ .fixed = 1 }, // kind
MetadataAbbrev, // elements
};
 
value: Builder.Constant,
kind: MetadataKind,
metadata: Builder.Metadata,
};
};
 
pub const FunctionMetadataBlock = struct {
pub const id = 15;
 
pub const abbrevs = [_]type{
Value,
};
 
pub const Value = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 2 },
.{ .fixed = 32 }, // type
.{ .fixed = 32 }, // value
};
 
ty: Builder.Type,
value: Builder.Value,
};
};
 
pub const FunctionBlock = struct {
pub const id = 12;
 
pub const abbrevs = [_]type{
DeclareBlocks,
Call,
CallFast,
FNeg,
FNegFast,
Binary,
BinaryFast,
Cmp,
CmpFast,
Select,
SelectFast,
Cast,
Alloca,
GetElementPtr,
ExtractValue,
InsertValue,
ExtractElement,
InsertElement,
ShuffleVector,
RetVoid,
Ret,
Unreachable,
Load,
LoadAtomic,
Store,
StoreAtomic,
BrUnconditional,
BrConditional,
VaArg,
AtomicRmw,
CmpXchg,
Fence,
DebugLoc,
DebugLocAgain,
};
 
pub const DeclareBlocks = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 1 },
.{ .vbr = 8 },
};
num_blocks: usize,
};
 
pub const Call = struct {
pub const CallType = packed struct(u17) {
tail: bool = false,
call_conv: Builder.CallConv,
reserved: u3 = 0,
must_tail: bool = false,
// We always use the explicit type version as that is what LLVM does
explicit_type: bool = true,
no_tail: bool = false,
};
pub const ops = [_]AbbrevOp{
.{ .literal = 34 },
.{ .fixed_runtime = Builder.FunctionAttributes },
.{ .fixed = @bitSizeOf(CallType) },
.{ .fixed_runtime = Builder.Type },
ValueAbbrev, // Callee
ValueArrayAbbrev, // Args
};
 
attributes: Builder.FunctionAttributes,
call_type: CallType,
type_id: Builder.Type,
callee: Builder.Value,
args: []const Builder.Value,
};
 
pub const CallFast = struct {
const CallType = packed struct(u18) {
tail: bool = false,
call_conv: Builder.CallConv,
reserved: u3 = 0,
must_tail: bool = false,
// We always use the explicit type version as that is what LLVM does
explicit_type: bool = true,
no_tail: bool = false,
fast: bool = true,
};
 
pub const ops = [_]AbbrevOp{
.{ .literal = 34 },
.{ .fixed_runtime = Builder.FunctionAttributes },
.{ .fixed = @bitSizeOf(CallType) },
.{ .fixed = @bitSizeOf(Builder.FastMath) },
.{ .fixed_runtime = Builder.Type },
ValueAbbrev, // Callee
ValueArrayAbbrev, // Args
};
 
attributes: Builder.FunctionAttributes,
call_type: CallType,
fast_math: Builder.FastMath,
type_id: Builder.Type,
callee: Builder.Value,
args: []const Builder.Value,
};
 
pub const FNeg = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 56 },
ValueAbbrev,
.{ .literal = 0 },
};
 
val: u32,
};
 
pub const FNegFast = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 56 },
ValueAbbrev,
.{ .literal = 0 },
.{ .fixed = @bitSizeOf(Builder.FastMath) },
};
 
val: u32,
fast_math: Builder.FastMath,
};
 
pub const Binary = struct {
const BinaryOpcode = Builder.BinaryOpcode;
pub const ops = [_]AbbrevOp{
.{ .literal = 2 },
ValueAbbrev,
ValueAbbrev,
.{ .fixed = @bitSizeOf(BinaryOpcode) },
};
 
lhs: u32,
rhs: u32,
opcode: BinaryOpcode,
};
 
pub const BinaryFast = struct {
const BinaryOpcode = Builder.BinaryOpcode;
pub const ops = [_]AbbrevOp{
.{ .literal = 2 },
ValueAbbrev,
ValueAbbrev,
.{ .fixed = @bitSizeOf(BinaryOpcode) },
.{ .fixed = @bitSizeOf(Builder.FastMath) },
};
 
lhs: u32,
rhs: u32,
opcode: BinaryOpcode,
fast_math: Builder.FastMath,
};
 
pub const Cmp = struct {
const CmpPredicate = Builder.CmpPredicate;
pub const ops = [_]AbbrevOp{
.{ .literal = 28 },
ValueAbbrev,
ValueAbbrev,
.{ .fixed = @bitSizeOf(CmpPredicate) },
};
 
lhs: u32,
rhs: u32,
pred: CmpPredicate,
};
 
pub const CmpFast = struct {
const CmpPredicate = Builder.CmpPredicate;
pub const ops = [_]AbbrevOp{
.{ .literal = 28 },
ValueAbbrev,
ValueAbbrev,
.{ .fixed = @bitSizeOf(CmpPredicate) },
.{ .fixed = @bitSizeOf(Builder.FastMath) },
};
 
lhs: u32,
rhs: u32,
pred: CmpPredicate,
fast_math: Builder.FastMath,
};
 
pub const Select = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 29 },
ValueAbbrev,
ValueAbbrev,
ValueAbbrev,
};
 
lhs: u32,
rhs: u32,
cond: u32,
};
 
pub const SelectFast = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 29 },
ValueAbbrev,
ValueAbbrev,
ValueAbbrev,
.{ .fixed = @bitSizeOf(Builder.FastMath) },
};
 
lhs: u32,
rhs: u32,
cond: u32,
fast_math: Builder.FastMath,
};
 
pub const Cast = struct {
const CastOpcode = Builder.CastOpcode;
pub const ops = [_]AbbrevOp{
.{ .literal = 3 },
ValueAbbrev,
.{ .fixed_runtime = Builder.Type },
.{ .fixed = @bitSizeOf(CastOpcode) },
};
 
val: u32,
type_index: Builder.Type,
opcode: CastOpcode,
};
 
pub const Alloca = struct {
pub const Flags = packed struct(u11) {
align_lower: u5,
inalloca: bool,
explicit_type: bool,
swift_error: bool,
align_upper: u3,
};
pub const ops = [_]AbbrevOp{
.{ .literal = 19 },
.{ .fixed_runtime = Builder.Type },
.{ .fixed_runtime = Builder.Type },
ValueAbbrev,
.{ .fixed = @bitSizeOf(Flags) },
};
 
inst_type: Builder.Type,
len_type: Builder.Type,
len_value: u32,
flags: Flags,
};
 
pub const RetVoid = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 10 },
};
};
 
pub const Ret = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 10 },
ValueAbbrev,
};
val: u32,
};
 
pub const GetElementPtr = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 43 },
.{ .fixed = 1 },
.{ .fixed_runtime = Builder.Type },
ValueAbbrev,
ValueArrayAbbrev,
};
 
is_inbounds: bool,
type_index: Builder.Type,
base: Builder.Value,
indices: []const Builder.Value,
};
 
pub const ExtractValue = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 26 },
ValueAbbrev,
ValueArrayAbbrev,
};
 
val: u32,
indices: []const u32,
};
 
pub const InsertValue = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 27 },
ValueAbbrev,
ValueAbbrev,
ValueArrayAbbrev,
};
 
val: u32,
elem: u32,
indices: []const u32,
};
 
pub const ExtractElement = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 6 },
ValueAbbrev,
ValueAbbrev,
};
 
val: u32,
index: u32,
};
 
pub const InsertElement = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 7 },
ValueAbbrev,
ValueAbbrev,
ValueAbbrev,
};
 
val: u32,
elem: u32,
index: u32,
};
 
pub const ShuffleVector = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 8 },
ValueAbbrev,
ValueAbbrev,
ValueAbbrev,
};
 
lhs: u32,
rhs: u32,
mask: u32,
};
 
pub const Unreachable = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 15 },
};
};
 
pub const Load = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 20 },
ValueAbbrev,
.{ .fixed_runtime = Builder.Type },
.{ .fixed = @bitSizeOf(Builder.Alignment) },
.{ .fixed = 1 },
};
ptr: u32,
ty: Builder.Type,
alignment: std.meta.Int(.unsigned, @bitSizeOf(Builder.Alignment)),
is_volatile: bool,
};
 
pub const LoadAtomic = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 41 },
ValueAbbrev,
.{ .fixed_runtime = Builder.Type },
.{ .fixed = @bitSizeOf(Builder.Alignment) },
.{ .fixed = 1 },
.{ .fixed = @bitSizeOf(Builder.AtomicOrdering) },
.{ .fixed = @bitSizeOf(Builder.SyncScope) },
};
ptr: u32,
ty: Builder.Type,
alignment: std.meta.Int(.unsigned, @bitSizeOf(Builder.Alignment)),
is_volatile: bool,
success_ordering: Builder.AtomicOrdering,
sync_scope: Builder.SyncScope,
};
 
pub const Store = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 44 },
ValueAbbrev,
ValueAbbrev,
.{ .fixed = @bitSizeOf(Builder.Alignment) },
.{ .fixed = 1 },
};
ptr: u32,
val: u32,
alignment: std.meta.Int(.unsigned, @bitSizeOf(Builder.Alignment)),
is_volatile: bool,
};
 
pub const StoreAtomic = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 45 },
ValueAbbrev,
ValueAbbrev,
.{ .fixed = @bitSizeOf(Builder.Alignment) },
.{ .fixed = 1 },
.{ .fixed = @bitSizeOf(Builder.AtomicOrdering) },
.{ .fixed = @bitSizeOf(Builder.SyncScope) },
};
ptr: u32,
val: u32,
alignment: std.meta.Int(.unsigned, @bitSizeOf(Builder.Alignment)),
is_volatile: bool,
success_ordering: Builder.AtomicOrdering,
sync_scope: Builder.SyncScope,
};
 
pub const BrUnconditional = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 11 },
BlockAbbrev,
};
block: u32,
};
 
pub const BrConditional = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 11 },
BlockAbbrev,
BlockAbbrev,
BlockAbbrev,
};
then_block: u32,
else_block: u32,
condition: u32,
};
 
pub const VaArg = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 23 },
.{ .fixed_runtime = Builder.Type },
ValueAbbrev,
.{ .fixed_runtime = Builder.Type },
};
list_type: Builder.Type,
list: u32,
type: Builder.Type,
};
 
pub const AtomicRmw = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 59 },
ValueAbbrev,
ValueAbbrev,
.{ .fixed = @bitSizeOf(Builder.Function.Instruction.AtomicRmw.Operation) },
.{ .fixed = 1 },
.{ .fixed = @bitSizeOf(Builder.AtomicOrdering) },
.{ .fixed = @bitSizeOf(Builder.SyncScope) },
.{ .fixed = @bitSizeOf(Builder.Alignment) },
};
ptr: u32,
val: u32,
operation: Builder.Function.Instruction.AtomicRmw.Operation,
is_volatile: bool,
success_ordering: Builder.AtomicOrdering,
sync_scope: Builder.SyncScope,
alignment: std.meta.Int(.unsigned, @bitSizeOf(Builder.Alignment)),
};
 
pub const CmpXchg = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 46 },
ValueAbbrev,
ValueAbbrev,
ValueAbbrev,
.{ .fixed = 1 },
.{ .fixed = @bitSizeOf(Builder.AtomicOrdering) },
.{ .fixed = @bitSizeOf(Builder.SyncScope) },
.{ .fixed = @bitSizeOf(Builder.AtomicOrdering) },
.{ .fixed = 1 },
.{ .fixed = @bitSizeOf(Builder.Alignment) },
};
ptr: u32,
cmp: u32,
new: u32,
is_volatile: bool,
success_ordering: Builder.AtomicOrdering,
sync_scope: Builder.SyncScope,
failure_ordering: Builder.AtomicOrdering,
is_weak: bool,
alignment: std.meta.Int(.unsigned, @bitSizeOf(Builder.Alignment)),
};
 
pub const Fence = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 36 },
.{ .fixed = @bitSizeOf(Builder.AtomicOrdering) },
.{ .fixed = @bitSizeOf(Builder.SyncScope) },
};
ordering: Builder.AtomicOrdering,
sync_scope: Builder.SyncScope,
};
 
pub const DebugLoc = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 35 },
.{ .fixed = 32 },
.{ .fixed = 32 },
.{ .fixed = 32 },
.{ .fixed = 32 },
.{ .fixed = 1 },
};
line: u32,
column: u32,
scope: Builder.Metadata,
inlined_at: Builder.Metadata,
is_implicit: bool,
};
 
pub const DebugLocAgain = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 33 },
};
};
};
 
pub const FunctionValueSymbolTable = struct {
pub const id = 14;
 
pub const abbrevs = [_]type{
BlockEntry,
};
 
pub const BlockEntry = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 2 },
ValueAbbrev,
.{ .array_fixed = 8 },
};
value_id: u32,
string: []const u8,
};
};
 
pub const Strtab = struct {
pub const id = 23;
 
pub const abbrevs = [_]type{Blob};
 
pub const Blob = struct {
pub const ops = [_]AbbrevOp{
.{ .literal = 1 },
.blob,
};
blob: []const u8,
};
};
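
A hypothetical end-to-end sketch tying the record definitions above to the BitcodeWriter: it writes the stream magic and an identification block. The file names bitcode_writer.zig and ir.zig, the producer string, and the epoch value are assumptions for illustration, not part of the PR:

const std = @import("std");
const bitcode_writer = @import("bitcode_writer.zig"); // assumed filename
const ir = @import("ir.zig"); // assumed filename for the definitions above

test "emit an identification block" {
    // Identification records use no fixed_runtime operands, so no type widths are needed.
    var bc = bitcode_writer.BitcodeWriter(&[_]type{}).init(std.testing.allocator, .{});
    defer bc.deinit();

    try bc.writeBits(ir.MAGIC, 32);

    var block = try bc.enterTopBlock(ir.Identification);
    try block.writeAbbrev(ir.Identification.Version{ .string = "example producer" }); // placeholder string
    try block.writeAbbrev(ir.Identification.Epoch{ .epoch = 0 }); // epoch 0, assumed here
    try block.end();

    // end() patched the block's word count; the stream is now 32-bit aligned.
    try std.testing.expect(bc.length() > 0);
}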
 
src/link.zig added: 8538, removed: 5358, total 3180
@@ -839,10 +839,9 @@ pub const File = struct {
}
 
const llvm_bindings = @import("codegen/llvm/bindings.zig");
const Builder = @import("codegen/llvm/Builder.zig");
const llvm = @import("codegen/llvm.zig");
const target = comp.root_mod.resolved_target.result;
Builder.initializeLLVMTarget(target.cpu.arch);
llvm.initializeLLVMTarget(target.cpu.arch);
const os_tag = llvm.targetOs(target.os.tag);
const bad = llvm_bindings.WriteArchive(full_out_path_z, object_files.items.ptr, object_files.items.len, os_tag);
if (bad) return error.UnableToWriteArchive;
 
src/zig_llvm.cpp added: 8538, removed: 5358, total 3180
@@ -24,9 +24,7 @@
#include <llvm/Analysis/TargetLibraryInfo.h>
#include <llvm/Analysis/TargetTransformInfo.h>
#include <llvm/Bitcode/BitcodeWriter.h>
#include <llvm/IR/DIBuilder.h>
#include <llvm/IR/DiagnosticInfo.h>
#include <llvm/IR/IRBuilder.h>
#include <llvm/IR/InlineAsm.h>
#include <llvm/IR/Instructions.h>
#include <llvm/IR/LegacyPassManager.h>
@@ -382,566 +380,10 @@ void ZigLLVMSetOptBisectLimit(LLVMContextRef context_ref, int limit) {
unwrap(context_ref)->setOptPassGate(opt_bisect);
}
 
LLVMValueRef ZigLLVMAddFunctionInAddressSpace(LLVMModuleRef M, const char *Name, LLVMTypeRef FunctionTy, unsigned AddressSpace) {
Function* func = Function::Create(unwrap<FunctionType>(FunctionTy), GlobalValue::ExternalLinkage, AddressSpace, Name, unwrap(M));
return wrap(func);
}
 
void ZigLLVMSetTailCallKind(LLVMValueRef Call, enum ZigLLVMTailCallKind TailCallKind) {
CallInst::TailCallKind TCK;
switch (TailCallKind) {
case ZigLLVMTailCallKindNone:
TCK = CallInst::TCK_None;
break;
case ZigLLVMTailCallKindTail:
TCK = CallInst::TCK_Tail;
break;
case ZigLLVMTailCallKindMustTail:
TCK = CallInst::TCK_MustTail;
break;
case ZigLLVMTailCallKindNoTail:
TCK = CallInst::TCK_NoTail;
break;
}
unwrap<CallInst>(Call)->setTailCallKind(TCK);
}
 
void ZigLLVMFnSetSubprogram(LLVMValueRef fn, ZigLLVMDISubprogram *subprogram) {
assert( isa<Function>(unwrap(fn)) );
Function *unwrapped_function = reinterpret_cast<Function*>(unwrap(fn));
unwrapped_function->setSubprogram(reinterpret_cast<DISubprogram*>(subprogram));
}
 
 
ZigLLVMDIType *ZigLLVMCreateDebugPointerType(ZigLLVMDIBuilder *dibuilder, ZigLLVMDIType *pointee_type,
uint64_t size_in_bits, uint64_t align_in_bits, const char *name)
{
DIType *di_type = reinterpret_cast<DIBuilder*>(dibuilder)->createPointerType(
reinterpret_cast<DIType*>(pointee_type), size_in_bits, align_in_bits, std::optional<unsigned>(), name);
return reinterpret_cast<ZigLLVMDIType*>(di_type);
}
 
ZigLLVMDIType *ZigLLVMCreateDebugBasicType(ZigLLVMDIBuilder *dibuilder, const char *name,
uint64_t size_in_bits, unsigned encoding)
{
DIType *di_type = reinterpret_cast<DIBuilder*>(dibuilder)->createBasicType(
name, size_in_bits, encoding);
return reinterpret_cast<ZigLLVMDIType*>(di_type);
}
 
struct ZigLLVMDIType *ZigLLVMDIBuilderCreateVectorType(struct ZigLLVMDIBuilder *dibuilder,
uint64_t SizeInBits, uint32_t AlignInBits, struct ZigLLVMDIType *Ty, uint32_t elem_count)
{
SmallVector<Metadata *, 1> subrange;
subrange.push_back(reinterpret_cast<DIBuilder*>(dibuilder)->getOrCreateSubrange(0, elem_count));
DIType *di_type = reinterpret_cast<DIBuilder*>(dibuilder)->createVectorType(
SizeInBits,
AlignInBits,
reinterpret_cast<DIType*>(Ty),
reinterpret_cast<DIBuilder*>(dibuilder)->getOrCreateArray(subrange));
return reinterpret_cast<ZigLLVMDIType*>(di_type);
}
 
ZigLLVMDIType *ZigLLVMCreateDebugArrayType(ZigLLVMDIBuilder *dibuilder, uint64_t size_in_bits,
uint64_t align_in_bits, ZigLLVMDIType *elem_type, int64_t elem_count)
{
SmallVector<Metadata *, 1> subrange;
subrange.push_back(reinterpret_cast<DIBuilder*>(dibuilder)->getOrCreateSubrange(0, elem_count));
DIType *di_type = reinterpret_cast<DIBuilder*>(dibuilder)->createArrayType(
size_in_bits, align_in_bits,
reinterpret_cast<DIType*>(elem_type),
reinterpret_cast<DIBuilder*>(dibuilder)->getOrCreateArray(subrange));
return reinterpret_cast<ZigLLVMDIType*>(di_type);
}
 
ZigLLVMDIEnumerator *ZigLLVMCreateDebugEnumerator(ZigLLVMDIBuilder *dibuilder, const char *name, uint64_t val, bool isUnsigned) {
DIEnumerator *di_enumerator = reinterpret_cast<DIBuilder*>(dibuilder)->createEnumerator(name, val, isUnsigned);
return reinterpret_cast<ZigLLVMDIEnumerator*>(di_enumerator);
}
 
ZigLLVMDIEnumerator *ZigLLVMCreateDebugEnumeratorOfArbitraryPrecision(ZigLLVMDIBuilder *dibuilder,
const char *name, unsigned NumWords, const uint64_t Words[], unsigned int bits, bool isUnsigned)
{
DIEnumerator *di_enumerator = reinterpret_cast<DIBuilder*>(dibuilder)->createEnumerator(name,
APSInt(APInt(bits, ArrayRef(Words, NumWords)), isUnsigned));
return reinterpret_cast<ZigLLVMDIEnumerator*>(di_enumerator);
}
 
ZigLLVMDIType *ZigLLVMCreateDebugEnumerationType(ZigLLVMDIBuilder *dibuilder, ZigLLVMDIScope *scope,
const char *name, ZigLLVMDIFile *file, unsigned line_number, uint64_t size_in_bits,
uint64_t align_in_bits, ZigLLVMDIEnumerator **enumerator_array, int enumerator_array_len,
ZigLLVMDIType *underlying_type, const char *unique_id)
{
SmallVector<Metadata *, 8> fields;
for (int i = 0; i < enumerator_array_len; i += 1) {
DIEnumerator *dienumerator = reinterpret_cast<DIEnumerator*>(enumerator_array[i]);
fields.push_back(dienumerator);
}
DIType *di_type = reinterpret_cast<DIBuilder*>(dibuilder)->createEnumerationType(
reinterpret_cast<DIScope*>(scope),
name,
reinterpret_cast<DIFile*>(file),
line_number, size_in_bits, align_in_bits,
reinterpret_cast<DIBuilder*>(dibuilder)->getOrCreateArray(fields),
reinterpret_cast<DIType*>(underlying_type),
unique_id);
return reinterpret_cast<ZigLLVMDIType*>(di_type);
}
 
ZigLLVMDIType *ZigLLVMCreateDebugMemberType(ZigLLVMDIBuilder *dibuilder, ZigLLVMDIScope *scope,
const char *name, ZigLLVMDIFile *file, unsigned line, uint64_t size_in_bits,
uint64_t align_in_bits, uint64_t offset_in_bits, unsigned flags, ZigLLVMDIType *type)
{
DIType *di_type = reinterpret_cast<DIBuilder*>(dibuilder)->createMemberType(
reinterpret_cast<DIScope*>(scope),
name,
reinterpret_cast<DIFile*>(file),
line, size_in_bits, align_in_bits, offset_in_bits,
static_cast<DINode::DIFlags>(flags),
reinterpret_cast<DIType*>(type));
return reinterpret_cast<ZigLLVMDIType*>(di_type);
}
 
ZigLLVMDIType *ZigLLVMCreateDebugUnionType(ZigLLVMDIBuilder *dibuilder, ZigLLVMDIScope *scope,
const char *name, ZigLLVMDIFile *file, unsigned line_number, uint64_t size_in_bits,
uint64_t align_in_bits, unsigned flags, ZigLLVMDIType **types_array, int types_array_len,
unsigned run_time_lang, const char *unique_id)
{
SmallVector<Metadata *, 8> fields;
for (int i = 0; i < types_array_len; i += 1) {
DIType *ditype = reinterpret_cast<DIType*>(types_array[i]);
fields.push_back(ditype);
}
DIType *di_type = reinterpret_cast<DIBuilder*>(dibuilder)->createUnionType(
reinterpret_cast<DIScope*>(scope),
name,
reinterpret_cast<DIFile*>(file),
line_number, size_in_bits, align_in_bits,
static_cast<DINode::DIFlags>(flags),
reinterpret_cast<DIBuilder*>(dibuilder)->getOrCreateArray(fields),
run_time_lang, unique_id);
return reinterpret_cast<ZigLLVMDIType*>(di_type);
}
 
ZigLLVMDIType *ZigLLVMCreateDebugStructType(ZigLLVMDIBuilder *dibuilder, ZigLLVMDIScope *scope,
const char *name, ZigLLVMDIFile *file, unsigned line_number, uint64_t size_in_bits,
uint64_t align_in_bits, unsigned flags, ZigLLVMDIType *derived_from,
ZigLLVMDIType **types_array, int types_array_len, unsigned run_time_lang, ZigLLVMDIType *vtable_holder,
const char *unique_id)
{
SmallVector<Metadata *, 8> fields;
for (int i = 0; i < types_array_len; i += 1) {
DIType *ditype = reinterpret_cast<DIType*>(types_array[i]);
fields.push_back(ditype);
}
DIType *di_type = reinterpret_cast<DIBuilder*>(dibuilder)->createStructType(
reinterpret_cast<DIScope*>(scope),
name,
reinterpret_cast<DIFile*>(file),
line_number, size_in_bits, align_in_bits,
static_cast<DINode::DIFlags>(flags),
reinterpret_cast<DIType*>(derived_from),
reinterpret_cast<DIBuilder*>(dibuilder)->getOrCreateArray(fields),
run_time_lang,
reinterpret_cast<DIType*>(vtable_holder),
unique_id);
return reinterpret_cast<ZigLLVMDIType*>(di_type);
}
 
ZigLLVMDIType *ZigLLVMCreateReplaceableCompositeType(ZigLLVMDIBuilder *dibuilder, unsigned tag,
const char *name, ZigLLVMDIScope *scope, ZigLLVMDIFile *file, unsigned line)
{
DIType *di_type = reinterpret_cast<DIBuilder*>(dibuilder)->createReplaceableCompositeType(
tag, name,
reinterpret_cast<DIScope*>(scope),
reinterpret_cast<DIFile*>(file),
line);
return reinterpret_cast<ZigLLVMDIType*>(di_type);
}
 
ZigLLVMDIType *ZigLLVMCreateDebugForwardDeclType(ZigLLVMDIBuilder *dibuilder, unsigned tag,
const char *name, ZigLLVMDIScope *scope, ZigLLVMDIFile *file, unsigned line)
{
DIType *di_type = reinterpret_cast<DIBuilder*>(dibuilder)->createForwardDecl(
tag, name,
reinterpret_cast<DIScope*>(scope),
reinterpret_cast<DIFile*>(file),
line);
return reinterpret_cast<ZigLLVMDIType*>(di_type);
}
 
void ZigLLVMReplaceTemporary(ZigLLVMDIBuilder *dibuilder, ZigLLVMDIType *type,
ZigLLVMDIType *replacement)
{
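// TempDIType takes ownership of the temporary node; replaceTemporary RAUWs it with the real
// definition and then deletes the temporary.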
reinterpret_cast<DIBuilder*>(dibuilder)->replaceTemporary(
TempDIType(reinterpret_cast<DIType*>(type)),
reinterpret_cast<DIType*>(replacement));
}
 
void ZigLLVMReplaceDebugArrays(ZigLLVMDIBuilder *dibuilder, ZigLLVMDIType *type,
ZigLLVMDIType **types_array, int types_array_len)
{
SmallVector<Metadata *, 8> fields;
for (int i = 0; i < types_array_len; i += 1) {
DIType *ditype = reinterpret_cast<DIType*>(types_array[i]);
fields.push_back(ditype);
}
DICompositeType *composite_type = static_cast<DICompositeType*>(reinterpret_cast<DIType*>(type));
reinterpret_cast<DIBuilder*>(dibuilder)->replaceArrays(
composite_type,
reinterpret_cast<DIBuilder*>(dibuilder)->getOrCreateArray(fields));
}
 
ZigLLVMDIType *ZigLLVMCreateSubroutineType(ZigLLVMDIBuilder *dibuilder_wrapped,
ZigLLVMDIType **types_array, int types_array_len, unsigned flags)
{
SmallVector<Metadata *, 8> types;
for (int i = 0; i < types_array_len; i += 1) {
DIType *ditype = reinterpret_cast<DIType*>(types_array[i]);
types.push_back(ditype);
}
DIBuilder *dibuilder = reinterpret_cast<DIBuilder*>(dibuilder_wrapped);
DISubroutineType *subroutine_type = dibuilder->createSubroutineType(
dibuilder->getOrCreateTypeArray(types),
static_cast<DINode::DIFlags>(flags));
DIType *ditype = subroutine_type;
return reinterpret_cast<ZigLLVMDIType*>(ditype);
}
 
unsigned ZigLLVMEncoding_DW_ATE_unsigned(void) {
return dwarf::DW_ATE_unsigned;
}
 
unsigned ZigLLVMEncoding_DW_ATE_signed(void) {
return dwarf::DW_ATE_signed;
}
 
unsigned ZigLLVMEncoding_DW_ATE_float(void) {
return dwarf::DW_ATE_float;
}
 
unsigned ZigLLVMEncoding_DW_ATE_boolean(void) {
return dwarf::DW_ATE_boolean;
}
 
unsigned ZigLLVMEncoding_DW_ATE_unsigned_char(void) {
return dwarf::DW_ATE_unsigned_char;
}
 
unsigned ZigLLVMEncoding_DW_ATE_signed_char(void) {
return dwarf::DW_ATE_signed_char;
}
 
unsigned ZigLLVMLang_DW_LANG_C99(void) {
return dwarf::DW_LANG_C99;
}
 
unsigned ZigLLVMTag_DW_variable(void) {
return dwarf::DW_TAG_variable;
}
 
unsigned ZigLLVMTag_DW_structure_type(void) {
return dwarf::DW_TAG_structure_type;
}
 
unsigned ZigLLVMTag_DW_enumeration_type(void) {
return dwarf::DW_TAG_enumeration_type;
}
 
unsigned ZigLLVMTag_DW_union_type(void) {
return dwarf::DW_TAG_union_type;
}
 
ZigLLVMDIBuilder *ZigLLVMCreateDIBuilder(LLVMModuleRef module, bool allow_unresolved) {
DIBuilder *di_builder = new(std::nothrow) DIBuilder(*unwrap(module), allow_unresolved);
if (di_builder == nullptr)
return nullptr;
return reinterpret_cast<ZigLLVMDIBuilder *>(di_builder);
}
 
void ZigLLVMDisposeDIBuilder(ZigLLVMDIBuilder *dbuilder) {
DIBuilder *di_builder = reinterpret_cast<DIBuilder *>(dbuilder);
delete di_builder;
}
 
void ZigLLVMSetCurrentDebugLocation(LLVMBuilderRef builder,
unsigned int line, unsigned int column, ZigLLVMDIScope *scope)
{
DIScope* di_scope = reinterpret_cast<DIScope*>(scope);
DebugLoc debug_loc = DILocation::get(di_scope->getContext(), line, column, di_scope, nullptr, false);
unwrap(builder)->SetCurrentDebugLocation(debug_loc);
}
 
void ZigLLVMSetCurrentDebugLocation2(LLVMBuilderRef builder, unsigned int line,
unsigned int column, ZigLLVMDIScope *scope, ZigLLVMDILocation *inlined_at)
{
DIScope* di_scope = reinterpret_cast<DIScope*>(scope);
DebugLoc debug_loc = DILocation::get(di_scope->getContext(), line, column, di_scope,
reinterpret_cast<DILocation *>(inlined_at), false);
unwrap(builder)->SetCurrentDebugLocation(debug_loc);
}
 
void ZigLLVMClearCurrentDebugLocation(LLVMBuilderRef builder) {
unwrap(builder)->SetCurrentDebugLocation(DebugLoc());
}
 
 
ZigLLVMDILexicalBlock *ZigLLVMCreateLexicalBlock(ZigLLVMDIBuilder *dbuilder, ZigLLVMDIScope *scope,
ZigLLVMDIFile *file, unsigned line, unsigned col)
{
DILexicalBlock *result = reinterpret_cast<DIBuilder*>(dbuilder)->createLexicalBlock(
reinterpret_cast<DIScope*>(scope),
reinterpret_cast<DIFile*>(file),
line,
col);
return reinterpret_cast<ZigLLVMDILexicalBlock*>(result);
}
 
ZigLLVMDILocalVariable *ZigLLVMCreateAutoVariable(ZigLLVMDIBuilder *dbuilder,
ZigLLVMDIScope *scope, const char *name, ZigLLVMDIFile *file, unsigned line_no,
ZigLLVMDIType *type, bool always_preserve, unsigned flags)
{
DILocalVariable *result = reinterpret_cast<DIBuilder*>(dbuilder)->createAutoVariable(
reinterpret_cast<DIScope*>(scope),
name,
reinterpret_cast<DIFile*>(file),
line_no,
reinterpret_cast<DIType*>(type),
always_preserve,
static_cast<DINode::DIFlags>(flags));
return reinterpret_cast<ZigLLVMDILocalVariable*>(result);
}
 
ZigLLVMDIGlobalVariableExpression *ZigLLVMCreateGlobalVariableExpression(ZigLLVMDIBuilder *dbuilder,
ZigLLVMDIScope *scope, const char *name, const char *linkage_name, ZigLLVMDIFile *file,
unsigned line_no, ZigLLVMDIType *di_type, bool is_local_to_unit)
{
return reinterpret_cast<ZigLLVMDIGlobalVariableExpression*>(reinterpret_cast<DIBuilder*>(dbuilder)->createGlobalVariableExpression(
reinterpret_cast<DIScope*>(scope),
name,
linkage_name,
reinterpret_cast<DIFile*>(file),
line_no,
reinterpret_cast<DIType*>(di_type),
is_local_to_unit));
}
 
ZigLLVMDILocalVariable *ZigLLVMCreateParameterVariable(ZigLLVMDIBuilder *dbuilder,
ZigLLVMDIScope *scope, const char *name, ZigLLVMDIFile *file, unsigned line_no,
ZigLLVMDIType *type, bool always_preserve, unsigned flags, unsigned arg_no)
{
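// LLVM parameter numbering is 1-based; arg 0 denotes a non-parameter local, so reject it up front.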
assert(arg_no != 0);
DILocalVariable *result = reinterpret_cast<DIBuilder*>(dbuilder)->createParameterVariable(
reinterpret_cast<DIScope*>(scope),
name,
arg_no,
reinterpret_cast<DIFile*>(file),
line_no,
reinterpret_cast<DIType*>(type),
always_preserve,
static_cast<DINode::DIFlags>(flags));
return reinterpret_cast<ZigLLVMDILocalVariable*>(result);
}
 
ZigLLVMDIScope *ZigLLVMLexicalBlockToScope(ZigLLVMDILexicalBlock *lexical_block) {
DIScope *scope = reinterpret_cast<DILexicalBlock*>(lexical_block);
return reinterpret_cast<ZigLLVMDIScope*>(scope);
}
 
ZigLLVMDIScope *ZigLLVMCompileUnitToScope(ZigLLVMDICompileUnit *compile_unit) {
DIScope *scope = reinterpret_cast<DICompileUnit*>(compile_unit);
return reinterpret_cast<ZigLLVMDIScope*>(scope);
}
 
ZigLLVMDIScope *ZigLLVMFileToScope(ZigLLVMDIFile *difile) {
DIScope *scope = reinterpret_cast<DIFile*>(difile);
return reinterpret_cast<ZigLLVMDIScope*>(scope);
}
 
ZigLLVMDIScope *ZigLLVMSubprogramToScope(ZigLLVMDISubprogram *subprogram) {
DIScope *scope = reinterpret_cast<DISubprogram*>(subprogram);
return reinterpret_cast<ZigLLVMDIScope*>(scope);
}
 
ZigLLVMDIScope *ZigLLVMTypeToScope(ZigLLVMDIType *type) {
DIScope *scope = reinterpret_cast<DIType*>(type);
return reinterpret_cast<ZigLLVMDIScope*>(scope);
}
 
ZigLLVMDINode *ZigLLVMLexicalBlockToNode(ZigLLVMDILexicalBlock *lexical_block) {
DINode *node = reinterpret_cast<DILexicalBlock*>(lexical_block);
return reinterpret_cast<ZigLLVMDINode*>(node);
}
 
ZigLLVMDINode *ZigLLVMCompileUnitToNode(ZigLLVMDICompileUnit *compile_unit) {
DINode *node = reinterpret_cast<DICompileUnit*>(compile_unit);
return reinterpret_cast<ZigLLVMDINode*>(node);
}
 
ZigLLVMDINode *ZigLLVMFileToNode(ZigLLVMDIFile *difile) {
DINode *node = reinterpret_cast<DIFile*>(difile);
return reinterpret_cast<ZigLLVMDINode*>(node);
}
 
ZigLLVMDINode *ZigLLVMSubprogramToNode(ZigLLVMDISubprogram *subprogram) {
DINode *node = reinterpret_cast<DISubprogram*>(subprogram);
return reinterpret_cast<ZigLLVMDINode*>(node);
}
 
ZigLLVMDINode *ZigLLVMTypeToNode(ZigLLVMDIType *type) {
DINode *node = reinterpret_cast<DIType*>(type);
return reinterpret_cast<ZigLLVMDINode*>(node);
}
 
ZigLLVMDINode *ZigLLVMScopeToNode(ZigLLVMDIScope *scope) {
DINode *node = reinterpret_cast<DIScope*>(scope);
return reinterpret_cast<ZigLLVMDINode*>(node);
}
 
ZigLLVMDINode *ZigLLVMGlobalVariableToNode(ZigLLVMDIGlobalVariable *global_variable) {
DINode *node = reinterpret_cast<DIGlobalVariable*>(global_variable);
return reinterpret_cast<ZigLLVMDINode*>(node);
}
 
void ZigLLVMSubprogramReplaceLinkageName(ZigLLVMDISubprogram *subprogram,
ZigLLVMMDString *linkage_name)
{
MDString *linkage_name_md = reinterpret_cast<MDString*>(linkage_name);
reinterpret_cast<DISubprogram*>(subprogram)->replaceLinkageName(linkage_name_md);
}
 
void ZigLLVMGlobalVariableReplaceLinkageName(ZigLLVMDIGlobalVariable *global_variable,
ZigLLVMMDString *linkage_name)
{
Metadata *linkage_name_md = reinterpret_cast<MDString*>(linkage_name);
// NOTE: Operand index must match llvm::DIGlobalVariable
reinterpret_cast<DIGlobalVariable*>(global_variable)->replaceOperandWith(5, linkage_name_md);
}
 
ZigLLVMDICompileUnit *ZigLLVMCreateCompileUnit(ZigLLVMDIBuilder *dibuilder,
unsigned lang, ZigLLVMDIFile *difile, const char *producer,
bool is_optimized, const char *flags, unsigned runtime_version, const char *split_name,
uint64_t dwo_id, bool emit_debug_info)
{
DICompileUnit *result = reinterpret_cast<DIBuilder*>(dibuilder)->createCompileUnit(
lang,
reinterpret_cast<DIFile*>(difile),
producer, is_optimized, flags, runtime_version, split_name,
(emit_debug_info ? DICompileUnit::DebugEmissionKind::FullDebug : DICompileUnit::DebugEmissionKind::NoDebug),
dwo_id);
return reinterpret_cast<ZigLLVMDICompileUnit*>(result);
}
 
 
ZigLLVMDIFile *ZigLLVMCreateFile(ZigLLVMDIBuilder *dibuilder, const char *filename, const char *directory) {
DIFile *result = reinterpret_cast<DIBuilder*>(dibuilder)->createFile(filename, directory);
return reinterpret_cast<ZigLLVMDIFile*>(result);
}
 
ZigLLVMDISubprogram *ZigLLVMCreateFunction(ZigLLVMDIBuilder *dibuilder, ZigLLVMDIScope *scope,
const char *name, const char *linkage_name, ZigLLVMDIFile *file, unsigned lineno,
ZigLLVMDIType *fn_di_type, bool is_local_to_unit, bool is_definition, unsigned scope_line,
unsigned flags, bool is_optimized, ZigLLVMDISubprogram *decl_subprogram)
{
DISubroutineType *di_sub_type = static_cast<DISubroutineType*>(reinterpret_cast<DIType*>(fn_di_type));
DISubprogram *result = reinterpret_cast<DIBuilder*>(dibuilder)->createFunction(
reinterpret_cast<DIScope*>(scope),
name, linkage_name,
reinterpret_cast<DIFile*>(file),
lineno,
di_sub_type,
scope_line,
static_cast<DINode::DIFlags>(flags),
DISubprogram::toSPFlags(is_local_to_unit, is_definition, is_optimized),
nullptr,
reinterpret_cast<DISubprogram *>(decl_subprogram),
nullptr);
return reinterpret_cast<ZigLLVMDISubprogram*>(result);
}
 
void ZigLLVMDIBuilderFinalize(ZigLLVMDIBuilder *dibuilder) {
reinterpret_cast<DIBuilder*>(dibuilder)->finalize();
}
 
LLVMValueRef ZigLLVMInsertDeclareAtEnd(ZigLLVMDIBuilder *dibuilder, LLVMValueRef storage,
ZigLLVMDILocalVariable *var_info, ZigLLVMDILocation *debug_loc, LLVMBasicBlockRef basic_block_ref)
{
Instruction *result = reinterpret_cast<DIBuilder*>(dibuilder)->insertDeclare(
unwrap(storage),
reinterpret_cast<DILocalVariable *>(var_info),
reinterpret_cast<DIBuilder*>(dibuilder)->createExpression(),
reinterpret_cast<DILocation*>(debug_loc),
static_cast<BasicBlock*>(unwrap(basic_block_ref)));
return wrap(result);
}
 
LLVMValueRef ZigLLVMInsertDbgValueIntrinsicAtEnd(ZigLLVMDIBuilder *dib, LLVMValueRef val,
ZigLLVMDILocalVariable *var_info, ZigLLVMDILocation *debug_loc,
LLVMBasicBlockRef basic_block_ref)
{
Instruction *result = reinterpret_cast<DIBuilder*>(dib)->insertDbgValueIntrinsic(
unwrap(val),
reinterpret_cast<DILocalVariable *>(var_info),
reinterpret_cast<DIBuilder*>(dib)->createExpression(),
reinterpret_cast<DILocation*>(debug_loc),
static_cast<BasicBlock*>(unwrap(basic_block_ref)));
return wrap(result);
}
 
LLVMValueRef ZigLLVMInsertDeclare(ZigLLVMDIBuilder *dibuilder, LLVMValueRef storage,
ZigLLVMDILocalVariable *var_info, ZigLLVMDILocation *debug_loc, LLVMValueRef insert_before_instr)
{
Instruction *result = reinterpret_cast<DIBuilder*>(dibuilder)->insertDeclare(
unwrap(storage),
reinterpret_cast<DILocalVariable *>(var_info),
reinterpret_cast<DIBuilder*>(dibuilder)->createExpression(),
reinterpret_cast<DILocation*>(debug_loc),
static_cast<Instruction*>(unwrap(insert_before_instr)));
return wrap(result);
}
 
ZigLLVMDILocation *ZigLLVMGetDebugLoc(unsigned line, unsigned col, ZigLLVMDIScope *scope) {
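// DILocation::get returns a node uniqued in (and owned by) the LLVMContext, so the raw pointer
// stays valid after the local DebugLoc handle goes away.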
DIScope* di_scope = reinterpret_cast<DIScope*>(scope);
DebugLoc debug_loc = DILocation::get(di_scope->getContext(), line, col, di_scope, nullptr, false);
return reinterpret_cast<ZigLLVMDILocation*>(debug_loc.get());
}
 
ZigLLVMDILocation *ZigLLVMGetDebugLoc2(unsigned line, unsigned col, ZigLLVMDIScope *scope,
ZigLLVMDILocation *inlined_at) {
DIScope* di_scope = reinterpret_cast<DIScope*>(scope);
DebugLoc debug_loc = DILocation::get(di_scope->getContext(), line, col, di_scope,
reinterpret_cast<DILocation *>(inlined_at), false);
return reinterpret_cast<ZigLLVMDILocation*>(debug_loc.get());
}
 
void ZigLLVMSetFastMath(LLVMBuilderRef builder_wrapped, bool on_state) {
if (on_state) {
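// setFast() enables the full set of fast-math flags (reassoc, nnan, ninf, nsz, arcp, contract, afn).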
FastMathFlags fmf;
fmf.setFast();
unwrap(builder_wrapped)->setFastMathFlags(fmf);
} else {
unwrap(builder_wrapped)->clearFastMathFlags();
}
}
 
void ZigLLVMParseCommandLineOptions(size_t argc, const char *const *argv) {
cl::ParseCommandLineOptions(argc, argv);
}
 
void ZigLLVMAddModuleDebugInfoFlag(LLVMModuleRef module, bool produce_dwarf64) {
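// Module::Warning merge behavior only warns (rather than erroring) when a module linked in later
// carries a conflicting value for the same flag.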
unwrap(module)->addModuleFlag(Module::Warning, "Debug Info Version", DEBUG_METADATA_VERSION);
unwrap(module)->addModuleFlag(Module::Warning, "Dwarf Version", 4);
 
if (produce_dwarf64) {
unwrap(module)->addModuleFlag(Module::Warning, "DWARF64", 1);
}
}
 
void ZigLLVMAddModuleCodeViewFlag(LLVMModuleRef module) {
unwrap(module)->addModuleFlag(Module::Warning, "Debug Info Version", DEBUG_METADATA_VERSION);
unwrap(module)->addModuleFlag(Module::Warning, "CodeView", 1);
}
 
void ZigLLVMSetModulePICLevel(LLVMModuleRef module) {
unwrap(module)->setPICLevel(PICLevel::Level::BigPIC);
}
@@ -956,35 +398,6 @@ void ZigLLVMSetModuleCodeModel(LLVMModuleRef module, LLVMCodeModel code_model) {
assert(!JIT);
}
 
LLVMValueRef ZigLLVMBuildNSWShl(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS,
const char *name)
{
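// CreateShl's trailing bools are (HasNUW, HasNSW); only the no-signed-wrap flag is set here.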
return wrap(unwrap(builder)->CreateShl(unwrap(LHS), unwrap(RHS), name, false, true));
}
 
LLVMValueRef ZigLLVMBuildNUWShl(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS,
const char *name)
{
return wrap(unwrap(builder)->CreateShl(unwrap(LHS), unwrap(RHS), name, true, false));
}
 
LLVMValueRef ZigLLVMBuildLShrExact(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS,
const char *name)
{
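// The trailing bool marks the shift as exact, i.e. no non-zero bits are shifted out.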
return wrap(unwrap(builder)->CreateLShr(unwrap(LHS), unwrap(RHS), name, true));
}
 
LLVMValueRef ZigLLVMBuildAShrExact(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS,
const char *name)
{
return wrap(unwrap(builder)->CreateAShr(unwrap(LHS), unwrap(RHS), name, true));
}
 
LLVMValueRef ZigLLVMBuildAllocaInAddressSpace(LLVMBuilderRef builder, LLVMTypeRef Ty,
unsigned AddressSpace, const char *Name) {
return wrap(unwrap(builder)->CreateAlloca(unwrap(Ty), AddressSpace, nullptr, Name));
}
 
bool ZigLLVMWriteImportLibrary(const char *def_path, const ZigLLVM_ArchType arch,
const char *output_lib_path, bool kill_at)
{
@@ -1134,43 +547,6 @@ bool ZigLLDLinkWasm(int argc, const char **argv, bool can_exit_early, bool disab
return lld::wasm::link(args, llvm::outs(), llvm::errs(), can_exit_early, disable_output);
}
 
void ZigLLVMTakeName(LLVMValueRef new_owner, LLVMValueRef victim) {
unwrap(new_owner)->takeName(unwrap(victim));
}
 
void ZigLLVMRemoveGlobalValue(LLVMValueRef GlobalVal) {
unwrap<GlobalValue>(GlobalVal)->removeFromParent();
}
 
void ZigLLVMEraseGlobalValue(LLVMValueRef GlobalVal) {
unwrap<GlobalValue>(GlobalVal)->eraseFromParent();
}
 
void ZigLLVMDeleteGlobalValue(LLVMValueRef GlobalVal) {
auto *GV = unwrap<GlobalValue>(GlobalVal);
assert(GV->getParent() == nullptr);
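// Dispatch on the value ID so the concrete GlobalValue subclass destructor runs; the cases
// are generated from llvm/IR/Value.def.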
switch (GV->getValueID()) {
#define HANDLE_GLOBAL_VALUE(NAME) \
case Value::NAME##Val: \
delete static_cast<NAME *>(GV); \
break;
#include <llvm/IR/Value.def>
default: llvm_unreachable("Expected global value");
}
}
 
void ZigLLVMSetInitializer(LLVMValueRef GlobalVar, LLVMValueRef ConstantVal) {
unwrap<GlobalVariable>(GlobalVar)->setInitializer(ConstantVal ? unwrap<Constant>(ConstantVal) : nullptr);
}
 
ZigLLVMDIGlobalVariable* ZigLLVMGlobalGetVariable(ZigLLVMDIGlobalVariableExpression *global_variable_expression) {
return reinterpret_cast<ZigLLVMDIGlobalVariable*>(reinterpret_cast<DIGlobalVariableExpression*>(global_variable_expression)->getVariable());
}
 
void ZigLLVMAttachMetaData(LLVMValueRef Val, ZigLLVMDIGlobalVariableExpression *global_variable_expression) {
unwrap<GlobalVariable>(Val)->addDebugInfo(reinterpret_cast<DIGlobalVariableExpression*>(global_variable_expression));
}
 
static_assert((Triple::ArchType)ZigLLVM_UnknownArch == Triple::UnknownArch, "");
static_assert((Triple::ArchType)ZigLLVM_arm == Triple::arm, "");
static_assert((Triple::ArchType)ZigLLVM_armeb == Triple::armeb, "");
 
src/zig_llvm.h added: 8538, removed: 5358, total 3180
@@ -24,24 +24,6 @@
// ATTENTION: If you modify this file, be sure to update the corresponding
// extern function declarations in the self-hosted compiler.
 
struct ZigLLVMDIType;
struct ZigLLVMDIBuilder;
struct ZigLLVMDICompileUnit;
struct ZigLLVMDIScope;
struct ZigLLVMDIFile;
struct ZigLLVMDILexicalBlock;
struct ZigLLVMDISubprogram;
struct ZigLLVMDISubroutineType;
struct ZigLLVMDILocalVariable;
struct ZigLLVMDIGlobalVariableExpression;
struct ZigLLVMDIGlobalVariable;
struct ZigLLVMDIGlobalExpression;
struct ZigLLVMDILocation;
struct ZigLLVMDIEnumerator;
struct ZigLLVMInsertionPoint;
struct ZigLLVMDINode;
struct ZigLLVMMDString;
 
ZIG_EXTERN_C bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machine_ref, LLVMModuleRef module_ref,
char **error_message, bool is_debug,
bool is_small, bool time_report, bool tsan, bool lto,
@@ -62,9 +44,6 @@ ZIG_EXTERN_C LLVMTargetMachineRef ZigLLVMCreateTargetMachine(LLVMTargetRef T, co
 
ZIG_EXTERN_C void ZigLLVMSetOptBisectLimit(LLVMContextRef context_ref, int limit);
 
ZIG_EXTERN_C LLVMValueRef ZigLLVMAddFunctionInAddressSpace(LLVMModuleRef M, const char *Name,
LLVMTypeRef FunctionTy, unsigned AddressSpace);
 
enum ZigLLVMTailCallKind {
ZigLLVMTailCallKindNone,
ZigLLVMTailCallKindTail,
@@ -72,8 +51,6 @@ enum ZigLLVMTailCallKind {
ZigLLVMTailCallKindNoTail,
};
 
ZIG_EXTERN_C void ZigLLVMSetTailCallKind(LLVMValueRef Call, enum ZigLLVMTailCallKind TailCallKind);
 
enum ZigLLVM_CallingConv {
ZigLLVM_C = 0,
ZigLLVM_Fast = 8,
@@ -122,176 +99,12 @@ enum ZigLLVM_CallingConv {
ZigLLVM_MaxID = 1023,
};
 
ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildNSWShl(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS,
const char *name);
ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildNUWShl(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS,
const char *name);
ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildLShrExact(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS,
const char *name);
ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildAShrExact(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS,
const char *name);
 
ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildAllocaInAddressSpace(LLVMBuilderRef builder, LLVMTypeRef Ty, unsigned AddressSpace,
const char *Name);
 
ZIG_EXTERN_C struct ZigLLVMDIType *ZigLLVMCreateDebugPointerType(struct ZigLLVMDIBuilder *dibuilder,
struct ZigLLVMDIType *pointee_type, uint64_t size_in_bits, uint64_t align_in_bits, const char *name);
 
ZIG_EXTERN_C struct ZigLLVMDIType *ZigLLVMCreateDebugBasicType(struct ZigLLVMDIBuilder *dibuilder, const char *name,
uint64_t size_in_bits, unsigned encoding);
 
ZIG_EXTERN_C struct ZigLLVMDIType *ZigLLVMCreateDebugArrayType(struct ZigLLVMDIBuilder *dibuilder,
uint64_t size_in_bits, uint64_t align_in_bits, struct ZigLLVMDIType *elem_type,
int64_t elem_count);
 
ZIG_EXTERN_C struct ZigLLVMDIEnumerator *ZigLLVMCreateDebugEnumerator(struct ZigLLVMDIBuilder *dibuilder,
const char *name, uint64_t val, bool isUnsigned);
 
 
ZIG_EXTERN_C struct ZigLLVMDIEnumerator *ZigLLVMCreateDebugEnumeratorOfArbitraryPrecision(struct ZigLLVMDIBuilder *dibuilder,
const char *name, unsigned NumWords, const uint64_t Words[], unsigned int bits, bool isUnsigned);
 
ZIG_EXTERN_C struct ZigLLVMDIType *ZigLLVMCreateDebugEnumerationType(struct ZigLLVMDIBuilder *dibuilder,
struct ZigLLVMDIScope *scope, const char *name, struct ZigLLVMDIFile *file, unsigned line_number,
uint64_t size_in_bits, uint64_t align_in_bits, struct ZigLLVMDIEnumerator **enumerator_array,
int enumerator_array_len, struct ZigLLVMDIType *underlying_type, const char *unique_id);
 
ZIG_EXTERN_C struct ZigLLVMDIType *ZigLLVMCreateDebugStructType(struct ZigLLVMDIBuilder *dibuilder,
struct ZigLLVMDIScope *scope, const char *name, struct ZigLLVMDIFile *file, unsigned line_number,
uint64_t size_in_bits, uint64_t align_in_bits, unsigned flags, struct ZigLLVMDIType *derived_from,
struct ZigLLVMDIType **types_array, int types_array_len, unsigned run_time_lang,
struct ZigLLVMDIType *vtable_holder, const char *unique_id);
 
ZIG_EXTERN_C struct ZigLLVMDIType *ZigLLVMCreateDebugUnionType(struct ZigLLVMDIBuilder *dibuilder,
struct ZigLLVMDIScope *scope, const char *name, struct ZigLLVMDIFile *file, unsigned line_number,
uint64_t size_in_bits, uint64_t align_in_bits, unsigned flags, struct ZigLLVMDIType **types_array,
int types_array_len, unsigned run_time_lang, const char *unique_id);
 
ZIG_EXTERN_C struct ZigLLVMDIType *ZigLLVMCreateDebugMemberType(struct ZigLLVMDIBuilder *dibuilder,
struct ZigLLVMDIScope *scope, const char *name, struct ZigLLVMDIFile *file, unsigned line,
uint64_t size_in_bits, uint64_t align_in_bits, uint64_t offset_in_bits, unsigned flags,
struct ZigLLVMDIType *type);
 
ZIG_EXTERN_C struct ZigLLVMDIType *ZigLLVMCreateReplaceableCompositeType(struct ZigLLVMDIBuilder *dibuilder,
unsigned tag, const char *name, struct ZigLLVMDIScope *scope, struct ZigLLVMDIFile *file, unsigned line);
 
ZIG_EXTERN_C struct ZigLLVMDIType *ZigLLVMCreateDebugForwardDeclType(struct ZigLLVMDIBuilder *dibuilder, unsigned tag,
const char *name, struct ZigLLVMDIScope *scope, struct ZigLLVMDIFile *file, unsigned line);
 
ZIG_EXTERN_C void ZigLLVMReplaceTemporary(struct ZigLLVMDIBuilder *dibuilder, struct ZigLLVMDIType *type,
struct ZigLLVMDIType *replacement);
 
ZIG_EXTERN_C void ZigLLVMReplaceDebugArrays(struct ZigLLVMDIBuilder *dibuilder, struct ZigLLVMDIType *type,
struct ZigLLVMDIType **types_array, int types_array_len);
 
ZIG_EXTERN_C struct ZigLLVMDIType *ZigLLVMCreateSubroutineType(struct ZigLLVMDIBuilder *dibuilder_wrapped,
struct ZigLLVMDIType **types_array, int types_array_len, unsigned flags);
 
ZIG_EXTERN_C unsigned ZigLLVMEncoding_DW_ATE_unsigned(void);
ZIG_EXTERN_C unsigned ZigLLVMEncoding_DW_ATE_signed(void);
ZIG_EXTERN_C unsigned ZigLLVMEncoding_DW_ATE_float(void);
ZIG_EXTERN_C unsigned ZigLLVMEncoding_DW_ATE_boolean(void);
ZIG_EXTERN_C unsigned ZigLLVMEncoding_DW_ATE_unsigned_char(void);
ZIG_EXTERN_C unsigned ZigLLVMEncoding_DW_ATE_signed_char(void);
ZIG_EXTERN_C unsigned ZigLLVMLang_DW_LANG_C99(void);
ZIG_EXTERN_C unsigned ZigLLVMTag_DW_variable(void);
ZIG_EXTERN_C unsigned ZigLLVMTag_DW_structure_type(void);
ZIG_EXTERN_C unsigned ZigLLVMTag_DW_enumeration_type(void);
ZIG_EXTERN_C unsigned ZigLLVMTag_DW_union_type(void);
 
ZIG_EXTERN_C struct ZigLLVMDIBuilder *ZigLLVMCreateDIBuilder(LLVMModuleRef module, bool allow_unresolved);
ZIG_EXTERN_C void ZigLLVMDisposeDIBuilder(struct ZigLLVMDIBuilder *dbuilder);
ZIG_EXTERN_C void ZigLLVMAddModuleDebugInfoFlag(LLVMModuleRef module, bool produce_dwarf64);
ZIG_EXTERN_C void ZigLLVMAddModuleCodeViewFlag(LLVMModuleRef module);
ZIG_EXTERN_C void ZigLLVMSetModulePICLevel(LLVMModuleRef module);
ZIG_EXTERN_C void ZigLLVMSetModulePIELevel(LLVMModuleRef module);
ZIG_EXTERN_C void ZigLLVMSetModuleCodeModel(LLVMModuleRef module, LLVMCodeModel code_model);
 
ZIG_EXTERN_C void ZigLLVMSetCurrentDebugLocation(LLVMBuilderRef builder,
unsigned int line, unsigned int column, struct ZigLLVMDIScope *scope);
ZIG_EXTERN_C void ZigLLVMSetCurrentDebugLocation2(LLVMBuilderRef builder, unsigned int line,
unsigned int column, struct ZigLLVMDIScope *scope, struct ZigLLVMDILocation *inlined_at);
ZIG_EXTERN_C void ZigLLVMClearCurrentDebugLocation(LLVMBuilderRef builder);
 
ZIG_EXTERN_C struct ZigLLVMDIScope *ZigLLVMLexicalBlockToScope(struct ZigLLVMDILexicalBlock *lexical_block);
ZIG_EXTERN_C struct ZigLLVMDIScope *ZigLLVMCompileUnitToScope(struct ZigLLVMDICompileUnit *compile_unit);
ZIG_EXTERN_C struct ZigLLVMDIScope *ZigLLVMFileToScope(struct ZigLLVMDIFile *difile);
ZIG_EXTERN_C struct ZigLLVMDIScope *ZigLLVMSubprogramToScope(struct ZigLLVMDISubprogram *subprogram);
ZIG_EXTERN_C struct ZigLLVMDIScope *ZigLLVMTypeToScope(struct ZigLLVMDIType *type);
 
ZIG_EXTERN_C struct ZigLLVMDINode *ZigLLVMLexicalBlockToNode(struct ZigLLVMDILexicalBlock *lexical_block);
ZIG_EXTERN_C struct ZigLLVMDINode *ZigLLVMCompileUnitToNode(struct ZigLLVMDICompileUnit *compile_unit);
ZIG_EXTERN_C struct ZigLLVMDINode *ZigLLVMFileToNode(struct ZigLLVMDIFile *difile);
ZIG_EXTERN_C struct ZigLLVMDINode *ZigLLVMSubprogramToNode(struct ZigLLVMDISubprogram *subprogram);
ZIG_EXTERN_C struct ZigLLVMDINode *ZigLLVMTypeToNode(struct ZigLLVMDIType *type);
ZIG_EXTERN_C struct ZigLLVMDINode *ZigLLVMScopeToNode(struct ZigLLVMDIScope *scope);
ZIG_EXTERN_C struct ZigLLVMDINode *ZigLLVMGlobalVariableToNode(struct ZigLLVMDIGlobalVariable *global_variable);
 
ZIG_EXTERN_C void ZigLLVMSubprogramReplaceLinkageName(struct ZigLLVMDISubprogram *subprogram,
struct ZigLLVMMDString *linkage_name);
ZIG_EXTERN_C void ZigLLVMGlobalVariableReplaceLinkageName(struct ZigLLVMDIGlobalVariable *global_variable,
struct ZigLLVMMDString *linkage_name);
 
ZIG_EXTERN_C struct ZigLLVMDILocalVariable *ZigLLVMCreateAutoVariable(struct ZigLLVMDIBuilder *dbuilder,
struct ZigLLVMDIScope *scope, const char *name, struct ZigLLVMDIFile *file, unsigned line_no,
struct ZigLLVMDIType *type, bool always_preserve, unsigned flags);
 
ZIG_EXTERN_C struct ZigLLVMDIGlobalVariableExpression *ZigLLVMCreateGlobalVariableExpression(struct ZigLLVMDIBuilder *dbuilder,
struct ZigLLVMDIScope *scope, const char *name, const char *linkage_name, struct ZigLLVMDIFile *file,
unsigned line_no, struct ZigLLVMDIType *di_type, bool is_local_to_unit);
 
ZIG_EXTERN_C struct ZigLLVMDILocalVariable *ZigLLVMCreateParameterVariable(struct ZigLLVMDIBuilder *dbuilder,
struct ZigLLVMDIScope *scope, const char *name, struct ZigLLVMDIFile *file, unsigned line_no,
struct ZigLLVMDIType *type, bool always_preserve, unsigned flags, unsigned arg_no);
 
ZIG_EXTERN_C struct ZigLLVMDILexicalBlock *ZigLLVMCreateLexicalBlock(struct ZigLLVMDIBuilder *dbuilder,
struct ZigLLVMDIScope *scope, struct ZigLLVMDIFile *file, unsigned line, unsigned col);
 
ZIG_EXTERN_C struct ZigLLVMDICompileUnit *ZigLLVMCreateCompileUnit(struct ZigLLVMDIBuilder *dibuilder,
unsigned lang, struct ZigLLVMDIFile *difile, const char *producer,
bool is_optimized, const char *flags, unsigned runtime_version, const char *split_name,
uint64_t dwo_id, bool emit_debug_info);
 
ZIG_EXTERN_C struct ZigLLVMDIFile *ZigLLVMCreateFile(struct ZigLLVMDIBuilder *dibuilder, const char *filename,
const char *directory);
 
ZIG_EXTERN_C struct ZigLLVMDISubprogram *ZigLLVMCreateFunction(struct ZigLLVMDIBuilder *dibuilder,
struct ZigLLVMDIScope *scope, const char *name, const char *linkage_name, struct ZigLLVMDIFile *file,
unsigned lineno, struct ZigLLVMDIType *fn_di_type, bool is_local_to_unit, bool is_definition,
unsigned scope_line, unsigned flags, bool is_optimized, struct ZigLLVMDISubprogram *decl_subprogram);
 
ZIG_EXTERN_C struct ZigLLVMDIType *ZigLLVMDIBuilderCreateVectorType(struct ZigLLVMDIBuilder *dibuilder,
uint64_t SizeInBits, uint32_t AlignInBits, struct ZigLLVMDIType *Ty, uint32_t elem_count);
 
ZIG_EXTERN_C void ZigLLVMFnSetSubprogram(LLVMValueRef fn, struct ZigLLVMDISubprogram *subprogram);
 
ZIG_EXTERN_C void ZigLLVMDIBuilderFinalize(struct ZigLLVMDIBuilder *dibuilder);
 
ZIG_EXTERN_C struct ZigLLVMDILocation *ZigLLVMGetDebugLoc(unsigned line, unsigned col,
struct ZigLLVMDIScope *scope);
ZIG_EXTERN_C struct ZigLLVMDILocation *ZigLLVMGetDebugLoc2(unsigned line, unsigned col,
struct ZigLLVMDIScope *scope, struct ZigLLVMDILocation *inlined_at);
 
ZIG_EXTERN_C LLVMValueRef ZigLLVMInsertDeclareAtEnd(struct ZigLLVMDIBuilder *dib,
LLVMValueRef storage, struct ZigLLVMDILocalVariable *var_info,
struct ZigLLVMDILocation *debug_loc, LLVMBasicBlockRef basic_block_ref);
 
ZIG_EXTERN_C LLVMValueRef ZigLLVMInsertDeclare(struct ZigLLVMDIBuilder *dib,
LLVMValueRef storage, struct ZigLLVMDILocalVariable *var_info,
struct ZigLLVMDILocation *debug_loc, LLVMValueRef insert_before_instr);
 
ZIG_EXTERN_C LLVMValueRef ZigLLVMInsertDbgValueIntrinsicAtEnd(struct ZigLLVMDIBuilder *dib,
LLVMValueRef val, struct ZigLLVMDILocalVariable *var_info,
struct ZigLLVMDILocation *debug_loc, LLVMBasicBlockRef basic_block_ref);
 
ZIG_EXTERN_C void ZigLLVMSetFastMath(LLVMBuilderRef builder_wrapped, bool on_state);
 
ZIG_EXTERN_C void ZigLLVMParseCommandLineOptions(size_t argc, const char *const *argv);
 
ZIG_EXTERN_C ZigLLVMDIGlobalVariable* ZigLLVMGlobalGetVariable(ZigLLVMDIGlobalVariableExpression *global_variable_expression);
ZIG_EXTERN_C void ZigLLVMAttachMetaData(LLVMValueRef Val, ZigLLVMDIGlobalVariableExpression *global_variable_expression);
 
 
// synchronize with llvm/include/ADT/Triple.h::ArchType
// synchronize with std.Target.Cpu.Arch
// synchronize with codegen/llvm/bindings.zig::ArchType
@@ -494,12 +307,6 @@ enum ZigLLVM_ObjectFormatType {
ZigLLVM_XCOFF,
};
 
ZIG_EXTERN_C void ZigLLVMTakeName(LLVMValueRef new_owner, LLVMValueRef victim);
ZIG_EXTERN_C void ZigLLVMRemoveGlobalValue(LLVMValueRef GlobalVal);
ZIG_EXTERN_C void ZigLLVMEraseGlobalValue(LLVMValueRef GlobalVal);
ZIG_EXTERN_C void ZigLLVMDeleteGlobalValue(LLVMValueRef GlobalVal);
ZIG_EXTERN_C void ZigLLVMSetInitializer(LLVMValueRef GlobalVar, LLVMValueRef ConstantVal);
 
#define ZigLLVM_DIFlags_Zero 0U
#define ZigLLVM_DIFlags_Private 1U
#define ZigLLVM_DIFlags_Protected 2U