srctree

Jakub Konka parent 256c5934 f8989a9c 7230b68b
Merge pull request #19034 from ziglang/elf-riscv

elf: add basic aarch64 and riscv64 support

inlinesplit
ci/x86_64-linux-debug.sh added: 1425, removed: 683, total 742
@@ -12,7 +12,7 @@ CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.12.0-dev.203+d3bc1cfc4"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"
 
export PATH="$HOME/deps/wasmtime-v10.0.2-$ARCH-linux:$HOME/deps/qemu-linux-x86_64-6.1.0.1/bin:$PATH"
export PATH="$HOME/deps/wasmtime-v10.0.2-$ARCH-linux:$HOME/deps/qemu-linux-x86_64-8.2.1/bin:$PATH"
 
# Make the `zig version` number consistent.
# This will affect the cmake command below.
 
ci/x86_64-linux-release.sh added: 1425, removed: 683, total 742
@@ -12,7 +12,7 @@ CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.12.0-dev.203+d3bc1cfc4"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"
 
export PATH="$HOME/deps/wasmtime-v10.0.2-$ARCH-linux:$HOME/deps/qemu-linux-x86_64-6.1.0.1/bin:$PATH"
export PATH="$HOME/deps/wasmtime-v10.0.2-$ARCH-linux:$HOME/deps/qemu-linux-x86_64-8.2.1/bin:$PATH"
 
# Make the `zig version` number consistent.
# This will affect the cmake command below.
 
src/link/Elf.zig added: 1425, removed: 683, total 742
@@ -189,6 +189,7 @@ gnu_eh_frame_hdr_index: ?Symbol.Index = null,
dso_handle_index: ?Symbol.Index = null,
rela_iplt_start_index: ?Symbol.Index = null,
rela_iplt_end_index: ?Symbol.Index = null,
global_pointer_index: ?Symbol.Index = null,
start_stop_indexes: std.ArrayListUnmanaged(u32) = .{},
 
/// An array of symbols parsed across all input files.
@@ -1343,6 +1344,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
// Beyond this point, everything has been allocated a virtual address and we can resolve
// the relocations, and commit objects to file.
if (self.zigObjectPtr()) |zig_object| {
var has_reloc_errors = false;
for (zig_object.atoms.items) |atom_index| {
const atom_ptr = self.atom(atom_index) orelse continue;
if (!atom_ptr.flags.alive) continue;
@@ -1353,10 +1355,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
defer gpa.free(code);
const file_offset = shdr.sh_offset + atom_ptr.value;
atom_ptr.resolveRelocsAlloc(self, code) catch |err| switch (err) {
// TODO
error.RelaxFail, error.InvalidInstruction, error.CannotEncode => {
log.err("relaxing intructions failed; TODO this should be a fatal linker error", .{});
},
error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
@@ -1365,19 +1364,14 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
};
try self.base.file.?.pwriteAll(code, file_offset);
}
 
if (has_reloc_errors) return error.FlushFailure;
}
 
try self.writePhdrTable();
try self.writeShdrTable();
try self.writeAtoms();
 
self.writeSyntheticSections() catch |err| switch (err) {
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
},
else => |e| return e,
};
try self.writeSyntheticSections();
 
if (self.entry_index == null and self.base.isExe()) {
log.debug("flushing. no_entry_point_found = true", .{});
@@ -2048,18 +2042,23 @@ fn scanRelocs(self: *Elf) !void {
if (self.zigObjectPtr()) |zo| objects.appendAssumeCapacity(zo.index);
objects.appendSliceAssumeCapacity(self.objects.items);
 
var has_reloc_errors = false;
for (objects.items) |index| {
self.file(index).?.scanRelocs(self, &undefs) catch |err| switch (err) {
error.RelaxFailure => unreachable,
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
},
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
};
}
 
try self.reportUndefinedSymbols(&undefs);
 
if (has_reloc_errors) return error.FlushFailure;
 
for (self.symbols.items, 0..) |*sym, i| {
const index = @as(u32, @intCast(i));
if (!sym.isLocal(self) and !sym.flags.has_dynamic) {
@@ -3095,6 +3094,10 @@ fn addLinkerDefinedSymbols(self: *Elf) !void {
}
}
 
if (self.getTarget().cpu.arch == .riscv64 and self.base.isDynLib()) {
self.global_pointer_index = try linker_defined.addGlobal("__global_pointer$", self);
}
 
linker_defined.resolveSymbols(self);
}
 
@@ -3222,6 +3225,19 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
stop.output_section_index = shndx;
}
}
 
// __global_pointer$
if (self.global_pointer_index) |index| {
const sym = self.symbol(index);
if (self.sectionByName(".sdata")) |shndx| {
const shdr = self.shdrs.items[shndx];
sym.value = shdr.sh_addr + 0x800;
sym.output_section_index = shndx;
} else {
sym.value = 0;
sym.output_section_index = 0;
}
}
}
 
fn checkDuplicates(self: *Elf) !void {
@@ -4431,6 +4447,8 @@ fn writeAtoms(self: *Elf) !void {
undefs.deinit();
}
 
var has_reloc_errors = false;
 
// TODO iterate over `output_sections` directly
for (self.shdrs.items, 0..) |shdr, shndx| {
if (shdr.sh_type == elf.SHT_NULL) continue;
@@ -4493,14 +4511,11 @@ fn writeAtoms(self: *Elf) !void {
else
atom_ptr.resolveRelocsAlloc(self, out_code);
_ = res catch |err| switch (err) {
// TODO
error.RelaxFail, error.InvalidInstruction, error.CannotEncode => {
log.err("relaxing intructions failed; TODO this should be a fatal linker error", .{});
},
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
},
error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
else => |e| return e,
};
}
@@ -4509,6 +4524,8 @@ fn writeAtoms(self: *Elf) !void {
}
 
try self.reportUndefinedSymbols(&undefs);
 
if (has_reloc_errors) return error.FlushFailure;
}
 
pub fn updateSymtabSize(self: *Elf) !void {
@@ -4665,7 +4682,14 @@ fn writeSyntheticSections(self: *Elf) !void {
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
var buffer = try std.ArrayList(u8).initCapacity(gpa, sh_size);
defer buffer.deinit();
try eh_frame.writeEhFrame(self, buffer.writer());
eh_frame.writeEhFrame(self, buffer.writer()) catch |err| switch (err) {
error.RelocFailure => return error.FlushFailure,
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
},
else => |e| return e,
};
try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
}
 
@@ -5526,6 +5550,15 @@ pub fn comdatGroupOwner(self: *Elf, index: ComdatGroupOwner.Index) *ComdatGroupO
return &self.comdat_groups_owners.items[index];
}
 
pub fn gotAddress(self: *Elf) u64 {
const shndx = blk: {
if (self.getTarget().cpu.arch == .x86_64 and self.got_plt_section_index != null)
break :blk self.got_plt_section_index.?;
break :blk if (self.got_section_index) |shndx| shndx else null;
};
return if (shndx) |index| self.shdrs.items[index].sh_addr else 0;
}
 
pub fn tpAddress(self: *Elf) u64 {
const index = self.phdr_tls_index orelse return 0;
const phdr = self.phdrs.items[index];
 
src/link/Elf/Atom.zig added: 1425, removed: 683, total 742
@@ -300,7 +300,7 @@ pub fn free(self: *Atom, elf_file: *Elf) void {
self.* = .{};
}
 
pub fn relocs(self: Atom, elf_file: *Elf) []align(1) const elf.Elf64_Rela {
pub fn relocs(self: Atom, elf_file: *Elf) []const elf.Elf64_Rela {
const shndx = self.relocsShndx() orelse return &[0]elf.Elf64_Rela{};
return switch (self.file(elf_file).?) {
.zig_object => |x| x.relocs.items[shndx].items,
@@ -394,11 +394,62 @@ pub fn scanRelocsRequiresCode(self: Atom, elf_file: *Elf) bool {
return false;
}
 
pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype) !void {
switch (elf_file.getTarget().cpu.arch) {
.x86_64 => try x86_64.scanRelocs(self, elf_file, code, undefs),
else => return error.UnsupportedCpuArch,
pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype) RelocError!void {
const cpu_arch = elf_file.getTarget().cpu.arch;
const file_ptr = self.file(elf_file).?;
const rels = self.relocs(elf_file);
 
var has_reloc_errors = false;
var it = RelocsIterator{ .relocs = rels };
while (it.next()) |rel| {
const r_kind = relocation.decode(rel.r_type(), cpu_arch);
if (r_kind == .none) continue;
 
const symbol_index = switch (file_ptr) {
.zig_object => |x| x.symbol(rel.r_sym()),
.object => |x| x.symbols.items[rel.r_sym()],
else => unreachable,
};
const symbol = elf_file.symbol(symbol_index);
 
// Check for violation of One Definition Rule for COMDATs.
if (symbol.file(elf_file) == null) {
// TODO convert into an error
log.debug("{}: {s}: {s} refers to a discarded COMDAT section", .{
file_ptr.fmtPath(),
self.name(elf_file),
symbol.name(elf_file),
});
continue;
}
 
// Report an undefined symbol.
if (try self.reportUndefined(elf_file, symbol, symbol_index, rel, undefs)) continue;
 
if (symbol.isIFunc(elf_file)) {
symbol.flags.needs_got = true;
symbol.flags.needs_plt = true;
}
 
// While traversing relocations, mark symbols that require special handling such as
// pointer indirection via GOT, or a stub trampoline via PLT.
switch (cpu_arch) {
.x86_64 => x86_64.scanReloc(self, elf_file, rel, symbol, code, &it) catch |err| switch (err) {
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
},
.aarch64 => aarch64.scanReloc(self, elf_file, rel, symbol, code, &it) catch |err| switch (err) {
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
},
.riscv64 => riscv.scanReloc(self, elf_file, rel, symbol, code, &it) catch |err| switch (err) {
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
},
else => return error.UnsupportedCpuArch,
}
}
if (has_reloc_errors) return error.RelocFailure;
}
 
fn scanReloc(
@@ -407,7 +458,7 @@ fn scanReloc(
rel: elf.Elf64_Rela,
action: RelocAction,
elf_file: *Elf,
) error{OutOfMemory}!void {
) RelocError!void {
const is_writeable = self.inputShdr(elf_file).sh_flags & elf.SHF_WRITE != 0;
const num_dynrelocs = switch (self.file(elf_file).?) {
.linker_defined => unreachable,
@@ -554,7 +605,7 @@ fn dataType(symbol: *const Symbol, elf_file: *Elf) u2 {
return 3;
}
 
fn reportUnhandledRelocError(self: Atom, rel: elf.Elf64_Rela, elf_file: *Elf) error{OutOfMemory}!void {
fn reportUnhandledRelocError(self: Atom, rel: elf.Elf64_Rela, elf_file: *Elf) RelocError!void {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "fatal linker error: unhandled relocation type {} at offset 0x{x}", .{
relocation.fmtRelocType(rel.r_type(), elf_file.getTarget().cpu.arch),
@@ -564,6 +615,7 @@ fn reportUnhandledRelocError(self: Atom, rel: elf.Elf64_Rela, elf_file: *Elf) er
self.file(elf_file).?.fmtPath(),
self.name(elf_file),
});
return error.RelocFailure;
}
 
fn reportTextRelocError(
@@ -571,7 +623,7 @@ fn reportTextRelocError(
symbol: *const Symbol,
rel: elf.Elf64_Rela,
elf_file: *Elf,
) error{OutOfMemory}!void {
) RelocError!void {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "relocation at offset 0x{x} against symbol '{s}' cannot be used", .{
rel.r_offset,
@@ -581,6 +633,7 @@ fn reportTextRelocError(
self.file(elf_file).?.fmtPath(),
self.name(elf_file),
});
return error.RelocFailure;
}
 
fn reportPicError(
@@ -588,7 +641,7 @@ fn reportPicError(
symbol: *const Symbol,
rel: elf.Elf64_Rela,
elf_file: *Elf,
) error{OutOfMemory}!void {
) RelocError!void {
var err = try elf_file.addErrorWithNotes(2);
try err.addMsg(elf_file, "relocation at offset 0x{x} against symbol '{s}' cannot be used", .{
rel.r_offset,
@@ -599,6 +652,7 @@ fn reportPicError(
self.name(elf_file),
});
try err.addNote(elf_file, "recompile with -fPIC", .{});
return error.RelocFailure;
}
 
fn reportNoPicError(
@@ -606,7 +660,7 @@ fn reportNoPicError(
symbol: *const Symbol,
rel: elf.Elf64_Rela,
elf_file: *Elf,
) error{OutOfMemory}!void {
) RelocError!void {
var err = try elf_file.addErrorWithNotes(2);
try err.addMsg(elf_file, "relocation at offset 0x{x} against symbol '{s}' cannot be used", .{
rel.r_offset,
@@ -617,6 +671,7 @@ fn reportNoPicError(
self.name(elf_file),
});
try err.addNote(elf_file, "recompile with -fno-PIC", .{});
return error.RelocFailure;
}
 
// This function will report any undefined non-weak symbols that are not imports.
@@ -627,7 +682,7 @@ fn reportUndefined(
sym_index: Symbol.Index,
rel: elf.Elf64_Rela,
undefs: anytype,
) !void {
) !bool {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const rel_esym = switch (self.file(elf_file).?) {
@@ -647,16 +702,95 @@ fn reportUndefined(
gop.value_ptr.* = std.ArrayList(Atom.Index).init(gpa);
}
try gop.value_ptr.append(self.atom_index);
return true;
}
 
return false;
}
 
pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) !void {
pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) RelocError!void {
relocs_log.debug("0x{x}: {s}", .{ self.address(elf_file), self.name(elf_file) });
 
switch (elf_file.getTarget().cpu.arch) {
.x86_64 => try x86_64.resolveRelocsAlloc(self, elf_file, code),
else => return error.UnsupportedCpuArch,
const cpu_arch = elf_file.getTarget().cpu.arch;
const file_ptr = self.file(elf_file).?;
var stream = std.io.fixedBufferStream(code);
 
const rels = self.relocs(elf_file);
var it = RelocsIterator{ .relocs = rels };
var has_reloc_errors = false;
while (it.next()) |rel| {
const r_kind = relocation.decode(rel.r_type(), cpu_arch);
if (r_kind == .none) continue;
 
const target = switch (file_ptr) {
.zig_object => |x| elf_file.symbol(x.symbol(rel.r_sym())),
.object => |x| elf_file.symbol(x.symbols.items[rel.r_sym()]),
else => unreachable,
};
const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
 
// We will use equation format to resolve relocations:
// https://intezer.com/blog/malware-analysis/executable-and-linkable-format-101-part-3-relocations/
//
// Address of the source atom.
const P = @as(i64, @intCast(self.address(elf_file) + rel.r_offset));
// Addend from the relocation.
const A = rel.r_addend;
// Address of the target symbol - can be address of the symbol within an atom or address of PLT stub.
const S = @as(i64, @intCast(target.address(.{}, elf_file)));
// Address of the global offset table.
const GOT = @as(i64, @intCast(elf_file.gotAddress()));
// Address of the .zig.got table entry if any.
const ZIG_GOT = @as(i64, @intCast(target.zigGotAddress(elf_file)));
// Relative offset to the start of the global offset table.
const G = @as(i64, @intCast(target.gotAddress(elf_file))) - GOT;
// // Address of the thread pointer.
const TP = @as(i64, @intCast(elf_file.tpAddress()));
// Address of the dynamic thread pointer.
const DTP = @as(i64, @intCast(elf_file.dtpAddress()));
 
relocs_log.debug(" {s}: {x}: [{x} => {x}] G({x}) ZG({x}) ({s})", .{
relocation.fmtRelocType(rel.r_type(), cpu_arch),
r_offset,
P,
S + A,
G + GOT + A,
ZIG_GOT + A,
target.name(elf_file),
});
 
try stream.seekTo(r_offset);
 
const args = ResolveArgs{ P, A, S, GOT, G, TP, DTP, ZIG_GOT };
 
switch (cpu_arch) {
.x86_64 => x86_64.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
error.RelocFailure,
error.RelaxFailure,
error.InvalidInstruction,
error.CannotEncode,
=> has_reloc_errors = true,
else => |e| return e,
},
.aarch64 => aarch64.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
error.RelocFailure,
error.RelaxFailure,
error.UnexpectedRemainder,
error.DivisionByZero,
=> has_reloc_errors = true,
else => |e| return e,
},
.riscv64 => riscv.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
error.RelocFailure,
error.RelaxFailure,
=> has_reloc_errors = true,
else => |e| return e,
},
else => return error.UnsupportedCpuArch,
}
}
 
if (has_reloc_errors) return error.RelaxFailure;
}
 
fn resolveDynAbsReloc(
@@ -761,10 +895,83 @@ fn applyDynamicReloc(value: i64, elf_file: *Elf, writer: anytype) !void {
pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: anytype) !void {
relocs_log.debug("0x{x}: {s}", .{ self.address(elf_file), self.name(elf_file) });
 
switch (elf_file.getTarget().cpu.arch) {
.x86_64 => try x86_64.resolveRelocsNonAlloc(self, elf_file, code, undefs),
else => return error.UnsupportedCpuArch,
const cpu_arch = elf_file.getTarget().cpu.arch;
const file_ptr = self.file(elf_file).?;
var stream = std.io.fixedBufferStream(code);
 
const rels = self.relocs(elf_file);
var has_reloc_errors = false;
var it = RelocsIterator{ .relocs = rels };
while (it.next()) |rel| {
const r_kind = relocation.decode(rel.r_type(), cpu_arch);
if (r_kind == .none) continue;
 
const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
 
const target_index = switch (file_ptr) {
.zig_object => |x| x.symbol(rel.r_sym()),
.object => |x| x.symbols.items[rel.r_sym()],
else => unreachable,
};
const target = elf_file.symbol(target_index);
 
// Check for violation of One Definition Rule for COMDATs.
if (target.file(elf_file) == null) {
// TODO convert into an error
log.debug("{}: {s}: {s} refers to a discarded COMDAT section", .{
file_ptr.fmtPath(),
self.name(elf_file),
target.name(elf_file),
});
continue;
}
 
// Report an undefined symbol.
if (try self.reportUndefined(elf_file, target, target_index, rel, undefs)) continue;
 
// We will use equation format to resolve relocations:
// https://intezer.com/blog/malware-analysis/executable-and-linkable-format-101-part-3-relocations/
//
const P = @as(i64, @intCast(self.address(elf_file) + rel.r_offset));
// Addend from the relocation.
const A = rel.r_addend;
// Address of the target symbol - can be address of the symbol within an atom or address of PLT stub.
const S = @as(i64, @intCast(target.address(.{}, elf_file)));
// Address of the global offset table.
const GOT = @as(i64, @intCast(elf_file.gotAddress()));
// Address of the dynamic thread pointer.
const DTP = @as(i64, @intCast(elf_file.dtpAddress()));
 
const args = ResolveArgs{ P, A, S, GOT, 0, 0, DTP, 0 };
 
relocs_log.debug(" {}: {x}: [{x} => {x}] ({s})", .{
relocation.fmtRelocType(rel.r_type(), cpu_arch),
rel.r_offset,
P,
S + A,
target.name(elf_file),
});
 
try stream.seekTo(r_offset);
 
switch (cpu_arch) {
.x86_64 => x86_64.resolveRelocNonAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
},
.aarch64 => aarch64.resolveRelocNonAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
},
.riscv64 => riscv.resolveRelocNonAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
},
else => return error.UnsupportedCpuArch,
}
}
 
if (has_reloc_errors) return error.RelocFailure;
}
 
pub fn format(
@@ -831,429 +1038,313 @@ pub const Flags = packed struct {
};
 
const x86_64 = struct {
fn scanRelocs(atom: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype) !void {
fn scanReloc(
atom: Atom,
elf_file: *Elf,
rel: elf.Elf64_Rela,
symbol: *Symbol,
code: ?[]const u8,
it: *RelocsIterator,
) !void {
const is_static = elf_file.base.isStatic();
const is_dyn_lib = elf_file.base.isDynLib();
const file_ptr = atom.file(elf_file).?;
const rels = atom.relocs(elf_file);
var i: usize = 0;
while (i < rels.len) : (i += 1) {
const rel = rels[i];
const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type());
 
if (r_type == .NONE) continue;
const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type());
const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
 
const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
switch (r_type) {
.@"64" => {
try atom.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file);
},
 
const symbol_index = switch (file_ptr) {
.zig_object => |x| x.symbol(rel.r_sym()),
.object => |x| x.symbols.items[rel.r_sym()],
else => unreachable,
};
const symbol = elf_file.symbol(symbol_index);
.@"32",
.@"32S",
=> {
try atom.scanReloc(symbol, rel, absRelocAction(symbol, elf_file), elf_file);
},
 
// Check for violation of One Definition Rule for COMDATs.
if (symbol.file(elf_file) == null) {
// TODO convert into an error
log.debug("{}: {s}: {s} refers to a discarded COMDAT section", .{
file_ptr.fmtPath(),
atom.name(elf_file),
symbol.name(elf_file),
});
continue;
}
 
// Report an undefined symbol.
try atom.reportUndefined(elf_file, symbol, symbol_index, rel, undefs);
 
if (symbol.isIFunc(elf_file)) {
.GOT32,
.GOTPC32,
.GOTPC64,
.GOTPCREL,
.GOTPCREL64,
.GOTPCRELX,
.REX_GOTPCRELX,
=> {
symbol.flags.needs_got = true;
symbol.flags.needs_plt = true;
}
},
 
// While traversing relocations, mark symbols that require special handling such as
// pointer indirection via GOT, or a stub trampoline via PLT.
switch (r_type) {
.@"64" => {
try atom.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file);
},
.PLT32,
.PLTOFF64,
=> {
if (symbol.flags.import) {
symbol.flags.needs_plt = true;
}
},
 
.@"32",
.@"32S",
.PC32 => {
try atom.scanReloc(symbol, rel, pcRelocAction(symbol, elf_file), elf_file);
},
 
.TLSGD => {
// TODO verify followed by appropriate relocation such as PLT32 __tls_get_addr
 
if (is_static or (!symbol.flags.import and !is_dyn_lib)) {
// Relax if building with -static flag as __tls_get_addr() will not be present in libc.a
// We skip the next relocation.
it.skip(1);
} else if (!symbol.flags.import and is_dyn_lib) {
symbol.flags.needs_gottp = true;
it.skip(1);
} else {
symbol.flags.needs_tlsgd = true;
}
},
 
.TLSLD => {
// TODO verify followed by appropriate relocation such as PLT32 __tls_get_addr
 
if (is_static or !is_dyn_lib) {
// Relax if building with -static flag as __tls_get_addr() will not be present in libc.a
// We skip the next relocation.
it.skip(1);
} else {
elf_file.got.flags.needs_tlsld = true;
}
},
 
.GOTTPOFF => {
const should_relax = blk: {
if (is_dyn_lib or symbol.flags.import) break :blk false;
if (!x86_64.canRelaxGotTpOff(code.?[r_offset - 3 ..])) break :blk false;
break :blk true;
};
if (!should_relax) {
symbol.flags.needs_gottp = true;
}
},
 
.GOTPC32_TLSDESC => {
const should_relax = is_static or (!is_dyn_lib and !symbol.flags.import);
if (!should_relax) {
symbol.flags.needs_tlsdesc = true;
}
},
 
.TPOFF32,
.TPOFF64,
=> {
if (is_dyn_lib) try atom.reportPicError(symbol, rel, elf_file);
},
 
.GOTOFF64,
.DTPOFF32,
.DTPOFF64,
.SIZE32,
.SIZE64,
.TLSDESC_CALL,
=> {},
 
else => |x| switch (@intFromEnum(x)) {
// Zig custom relocations
Elf.R_ZIG_GOT32,
Elf.R_ZIG_GOTPCREL,
=> {
try atom.scanReloc(symbol, rel, absRelocAction(symbol, elf_file), elf_file);
assert(symbol.flags.has_zig_got);
},
 
.GOT32,
.GOTPC32,
.GOTPC64,
.GOTPCREL,
.GOTPCREL64,
.GOTPCRELX,
.REX_GOTPCRELX,
=> {
symbol.flags.needs_got = true;
},
 
.PLT32,
.PLTOFF64,
=> {
if (symbol.flags.import) {
symbol.flags.needs_plt = true;
}
},
 
.PC32 => {
try atom.scanReloc(symbol, rel, pcRelocAction(symbol, elf_file), elf_file);
},
 
.TLSGD => {
// TODO verify followed by appropriate relocation such as PLT32 __tls_get_addr
 
if (is_static or (!symbol.flags.import and !is_dyn_lib)) {
// Relax if building with -static flag as __tls_get_addr() will not be present in libc.a
// We skip the next relocation.
i += 1;
} else if (!symbol.flags.import and is_dyn_lib) {
symbol.flags.needs_gottp = true;
i += 1;
} else {
symbol.flags.needs_tlsgd = true;
}
},
 
.TLSLD => {
// TODO verify followed by appropriate relocation such as PLT32 __tls_get_addr
 
if (is_static or !is_dyn_lib) {
// Relax if building with -static flag as __tls_get_addr() will not be present in libc.a
// We skip the next relocation.
i += 1;
} else {
elf_file.got.flags.needs_tlsld = true;
}
},
 
.GOTTPOFF => {
const should_relax = blk: {
if (is_dyn_lib or symbol.flags.import) break :blk false;
if (!x86_64.canRelaxGotTpOff(code.?[r_offset - 3 ..])) break :blk false;
break :blk true;
};
if (!should_relax) {
symbol.flags.needs_gottp = true;
}
},
 
.GOTPC32_TLSDESC => {
const should_relax = is_static or (!is_dyn_lib and !symbol.flags.import);
if (!should_relax) {
symbol.flags.needs_tlsdesc = true;
}
},
 
.TPOFF32,
.TPOFF64,
=> {
if (is_dyn_lib) try atom.reportPicError(symbol, rel, elf_file);
},
 
.GOTOFF64,
.DTPOFF32,
.DTPOFF64,
.SIZE32,
.SIZE64,
.TLSDESC_CALL,
=> {},
 
else => |x| switch (@intFromEnum(x)) {
// Zig custom relocations
Elf.R_ZIG_GOT32,
Elf.R_ZIG_GOTPCREL,
=> {
assert(symbol.flags.has_zig_got);
},
 
else => try atom.reportUnhandledRelocError(rel, elf_file),
},
}
}
}
 
fn resolveRelocsAlloc(atom: Atom, elf_file: *Elf, code: []u8) !void {
const file_ptr = atom.file(elf_file).?;
var stream = std.io.fixedBufferStream(code);
const cwriter = stream.writer();
 
const rels = atom.relocs(elf_file);
var i: usize = 0;
while (i < rels.len) : (i += 1) {
const rel = rels[i];
const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type());
if (r_type == .NONE) continue;
 
const target = switch (file_ptr) {
.zig_object => |x| elf_file.symbol(x.symbol(rel.r_sym())),
.object => |x| elf_file.symbol(x.symbols.items[rel.r_sym()]),
else => unreachable,
};
const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
 
// We will use equation format to resolve relocations:
// https://intezer.com/blog/malware-analysis/executable-and-linkable-format-101-part-3-relocations/
//
// Address of the source atom.
const P = @as(i64, @intCast(atom.address(elf_file) + rel.r_offset));
// Addend from the relocation.
const A = rel.r_addend;
// Address of the target symbol - can be address of the symbol within an atom or address of PLT stub.
const S = @as(i64, @intCast(target.address(.{}, elf_file)));
// Address of the global offset table.
const GOT = blk: {
const shndx = if (elf_file.got_plt_section_index) |shndx|
shndx
else if (elf_file.got_section_index) |shndx|
shndx
else
null;
break :blk if (shndx) |index| @as(i64, @intCast(elf_file.shdrs.items[index].sh_addr)) else 0;
};
// Address of the .zig.got table entry if any.
const ZIG_GOT = @as(i64, @intCast(target.zigGotAddress(elf_file)));
// Relative offset to the start of the global offset table.
const G = @as(i64, @intCast(target.gotAddress(elf_file))) - GOT;
// // Address of the thread pointer.
const TP = @as(i64, @intCast(elf_file.tpAddress()));
// Address of the dynamic thread pointer.
const DTP = @as(i64, @intCast(elf_file.dtpAddress()));
 
relocs_log.debug(" {s}: {x}: [{x} => {x}] G({x}) ZG({x}) ({s})", .{
relocation.fmtRelocType(rel.r_type(), .x86_64),
r_offset,
P,
S + A,
G + GOT + A,
ZIG_GOT + A,
target.name(elf_file),
});
 
try stream.seekTo(r_offset);
 
switch (r_type) {
.NONE => unreachable,
 
.@"64" => {
try atom.resolveDynAbsReloc(
target,
rel,
dynAbsRelocAction(target, elf_file),
elf_file,
cwriter,
);
},
 
.PLT32,
.PC32,
=> try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little),
 
.GOTPCREL => try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little),
.GOTPC32 => try cwriter.writeInt(i32, @as(i32, @intCast(GOT + A - P)), .little),
.GOTPC64 => try cwriter.writeInt(i64, GOT + A - P, .little),
 
.GOTPCRELX => {
if (!target.flags.import and !target.isIFunc(elf_file) and !target.isAbs(elf_file)) blk: {
x86_64.relaxGotpcrelx(code[r_offset - 2 ..]) catch break :blk;
try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little);
continue;
}
try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little);
},
 
.REX_GOTPCRELX => {
if (!target.flags.import and !target.isIFunc(elf_file) and !target.isAbs(elf_file)) blk: {
x86_64.relaxRexGotpcrelx(code[r_offset - 3 ..]) catch break :blk;
try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little);
continue;
}
try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little);
},
 
.@"32" => try cwriter.writeInt(u32, @as(u32, @truncate(@as(u64, @intCast(S + A)))), .little),
.@"32S" => try cwriter.writeInt(i32, @as(i32, @truncate(S + A)), .little),
 
.TPOFF32 => try cwriter.writeInt(i32, @as(i32, @truncate(S + A - TP)), .little),
.TPOFF64 => try cwriter.writeInt(i64, S + A - TP, .little),
 
.DTPOFF32 => try cwriter.writeInt(i32, @as(i32, @truncate(S + A - DTP)), .little),
.DTPOFF64 => try cwriter.writeInt(i64, S + A - DTP, .little),
 
.TLSGD => {
if (target.flags.has_tlsgd) {
const S_ = @as(i64, @intCast(target.tlsGdAddress(elf_file)));
try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
} else if (target.flags.has_gottp) {
const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
try x86_64.relaxTlsGdToIe(atom, rels[i .. i + 2], @intCast(S_ - P), elf_file, &stream);
i += 1;
} else {
try x86_64.relaxTlsGdToLe(
atom,
rels[i .. i + 2],
@as(i32, @intCast(S - TP)),
elf_file,
&stream,
);
i += 1;
}
},
 
.TLSLD => {
if (elf_file.got.tlsld_index) |entry_index| {
const tlsld_entry = elf_file.got.entries.items[entry_index];
const S_ = @as(i64, @intCast(tlsld_entry.address(elf_file)));
try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
} else {
try x86_64.relaxTlsLdToLe(
atom,
rels[i .. i + 2],
@as(i32, @intCast(TP - @as(i64, @intCast(elf_file.tlsAddress())))),
elf_file,
&stream,
);
i += 1;
}
},
 
.GOTPC32_TLSDESC => {
if (target.flags.has_tlsdesc) {
const S_ = @as(i64, @intCast(target.tlsDescAddress(elf_file)));
try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
} else {
try x86_64.relaxGotPcTlsDesc(code[r_offset - 3 ..]);
try cwriter.writeInt(i32, @as(i32, @intCast(S - TP)), .little);
}
},
 
.TLSDESC_CALL => if (!target.flags.has_tlsdesc) {
// call -> nop
try cwriter.writeAll(&.{ 0x66, 0x90 });
},
 
.GOTTPOFF => {
if (target.flags.has_gottp) {
const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
} else {
x86_64.relaxGotTpOff(code[r_offset - 3 ..]) catch unreachable;
try cwriter.writeInt(i32, @as(i32, @intCast(S - TP)), .little);
}
},
 
.GOT32 => try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A)), .little),
 
else => |x| switch (@intFromEnum(x)) {
// Zig custom relocations
Elf.R_ZIG_GOT32 => try cwriter.writeInt(u32, @as(u32, @intCast(ZIG_GOT + A)), .little),
Elf.R_ZIG_GOTPCREL => try cwriter.writeInt(i32, @as(i32, @intCast(ZIG_GOT + A - P)), .little),
 
else => {},
},
}
}
}
 
/// Resolves all relocations of this atom in a non-allocated (non-SHF_ALLOC)
/// x86_64 section, writing the computed values directly into `code`.
/// References to discarded COMDAT sections are logged and skipped; undefined
/// symbols are collected into `undefs` for later reporting.
/// NOTE(review): the original closing braces were corrupted (a stray `},` and
/// one extra `}`); this version restores the correct `switch`/`while`/`fn`
/// closer sequence without changing any logic.
fn resolveRelocsNonAlloc(atom: Atom, elf_file: *Elf, code: []u8, undefs: anytype) !void {
    const file_ptr = atom.file(elf_file).?;
    var stream = std.io.fixedBufferStream(code);
    const cwriter = stream.writer();

    const rels = atom.relocs(elf_file);
    var i: usize = 0;
    while (i < rels.len) : (i += 1) {
        const rel = rels[i];
        const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type());
        if (r_type == .NONE) continue;

        const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;

        const target_index = switch (file_ptr) {
            .zig_object => |x| x.symbol(rel.r_sym()),
            .object => |x| x.symbols.items[rel.r_sym()],
            else => unreachable,
        };
        const target = elf_file.symbol(target_index);

        // Check for violation of One Definition Rule for COMDATs.
        if (target.file(elf_file) == null) {
            // TODO convert into an error
            log.debug("{}: {s}: {s} refers to a discarded COMDAT section", .{
                file_ptr.fmtPath(),
                atom.name(elf_file),
                target.name(elf_file),
            });
            continue;
        }

        // Report an undefined symbol.
        try atom.reportUndefined(elf_file, target, target_index, rel, undefs);

        // We will use equation format to resolve relocations:
        // https://intezer.com/blog/malware-analysis/executable-and-linkable-format-101-part-3-relocations/
        //
        // P: address of the place being relocated.
        const P = @as(i64, @intCast(atom.address(elf_file) + rel.r_offset));
        // A: addend from the relocation.
        const A = rel.r_addend;
        // S: address of the target symbol - can be address of the symbol within an atom or address of PLT stub.
        const S = @as(i64, @intCast(target.address(.{}, elf_file)));
        // GOT: address of the global offset table (prefer .got.plt when present, else .got, else 0).
        const GOT = blk: {
            const shndx = if (elf_file.got_plt_section_index) |shndx|
                shndx
            else if (elf_file.got_section_index) |shndx|
                shndx
            else
                null;
            break :blk if (shndx) |index| @as(i64, @intCast(elf_file.shdrs.items[index].sh_addr)) else 0;
        };
        // DTP: address of the dynamic thread pointer.
        const DTP = @as(i64, @intCast(elf_file.dtpAddress()));

        relocs_log.debug(" {}: {x}: [{x} => {x}] ({s})", .{
            relocation.fmtRelocType(rel.r_type(), .x86_64),
            rel.r_offset,
            P,
            S + A,
            target.name(elf_file),
        });

        try stream.seekTo(r_offset);

        switch (r_type) {
            .NONE => unreachable,
            .@"8" => try cwriter.writeInt(u8, @as(u8, @bitCast(@as(i8, @intCast(S + A)))), .little),
            .@"16" => try cwriter.writeInt(u16, @as(u16, @bitCast(@as(i16, @intCast(S + A)))), .little),
            .@"32" => try cwriter.writeInt(u32, @as(u32, @bitCast(@as(i32, @intCast(S + A)))), .little),
            .@"32S" => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
            .@"64" => try cwriter.writeInt(i64, S + A, .little),
            .DTPOFF32 => try cwriter.writeInt(i32, @as(i32, @intCast(S + A - DTP)), .little),
            .DTPOFF64 => try cwriter.writeInt(i64, S + A - DTP, .little),
            .GOTOFF64 => try cwriter.writeInt(i64, S + A - GOT, .little),
            .GOTPC64 => try cwriter.writeInt(i64, GOT + A, .little),
            .SIZE32 => {
                const size = @as(i64, @intCast(target.elfSym(elf_file).st_size));
                try cwriter.writeInt(u32, @as(u32, @bitCast(@as(i32, @intCast(size + A)))), .little);
            },
            .SIZE64 => {
                const size = @as(i64, @intCast(target.elfSym(elf_file).st_size));
                try cwriter.writeInt(i64, @as(i64, @intCast(size + A)), .little);
            },
            else => try atom.reportUnhandledRelocError(rel, elf_file),
        }
    }
}
 
/// Resolves a single x86_64 relocation in an allocated (SHF_ALLOC) section,
/// patching `code` through `stream` (positioned at the relocation offset by
/// the caller). Where possible, GOT/TLS sequences are relaxed in place to
/// cheaper forms; `it` supplies the following relocation for multi-reloc TLS
/// rewrites.
///
/// Equation variables (destructured from `args`): P = place being patched,
/// A = addend, S = symbol address, GOT = global offset table address,
/// G = symbol's GOT slot offset relative to GOT, TP/DTP = thread pointer /
/// dynamic thread pointer, ZIG_GOT = Zig-custom GOT address.
fn resolveRelocAlloc(
    atom: Atom,
    elf_file: *Elf,
    rel: elf.Elf64_Rela,
    target: *const Symbol,
    args: ResolveArgs,
    it: *RelocsIterator,
    code: []u8,
    stream: anytype,
) (error{ InvalidInstruction, CannotEncode } || RelocError)!void {
    const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type());
    const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;

    const cwriter = stream.writer();

    const P, const A, const S, const GOT, const G, const TP, const DTP, const ZIG_GOT = args;

    switch (r_type) {
        .NONE => unreachable,

        // Absolute 64-bit word: may be emitted as a dynamic relocation
        // depending on output kind (see dynAbsRelocAction).
        .@"64" => {
            try atom.resolveDynAbsReloc(
                target,
                rel,
                dynAbsRelocAction(target, elf_file),
                elf_file,
                cwriter,
            );
        },

        // PC-relative 32-bit (S here is already the PLT stub address when one
        // was allocated for the target).
        .PLT32,
        .PC32,
        => try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little),

        .GOTPCREL => try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little),
        .GOTPC32 => try cwriter.writeInt(i32, @as(i32, @intCast(GOT + A - P)), .little),
        .GOTPC64 => try cwriter.writeInt(i64, GOT + A - P, .little),

        // Relaxable GOT load: for a locally-defined, non-ifunc, non-absolute
        // target, try rewriting the instruction to skip the GOT indirection;
        // fall back to the GOT-relative value if relaxation fails.
        // The instruction opcode starts 2 bytes before the immediate.
        .GOTPCRELX => {
            if (!target.flags.import and !target.isIFunc(elf_file) and !target.isAbs(elf_file)) blk: {
                x86_64.relaxGotpcrelx(code[r_offset - 2 ..]) catch break :blk;
                try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little);
                return;
            }
            try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little);
        },

        // Same as GOTPCRELX but with a REX prefix: opcode starts 3 bytes
        // before the immediate.
        .REX_GOTPCRELX => {
            if (!target.flags.import and !target.isIFunc(elf_file) and !target.isAbs(elf_file)) blk: {
                x86_64.relaxRexGotpcrelx(code[r_offset - 3 ..]) catch break :blk;
                try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little);
                return;
            }
            try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little);
        },

        .@"32" => try cwriter.writeInt(u32, @as(u32, @truncate(@as(u64, @intCast(S + A)))), .little),
        .@"32S" => try cwriter.writeInt(i32, @as(i32, @truncate(S + A)), .little),

        // TLS offsets relative to the thread pointer (local-exec model).
        .TPOFF32 => try cwriter.writeInt(i32, @as(i32, @truncate(S + A - TP)), .little),
        .TPOFF64 => try cwriter.writeInt(i64, S + A - TP, .little),

        .DTPOFF32 => try cwriter.writeInt(i32, @as(i32, @truncate(S + A - DTP)), .little),
        .DTPOFF64 => try cwriter.writeInt(i64, S + A - DTP, .little),

        // General-dynamic TLS: keep as-is if a TLSGD GOT entry exists,
        // otherwise relax GD -> IE (via the GOTTP entry) or GD -> LE.
        // The relaxations consume the next relocation from `it`.
        .TLSGD => {
            if (target.flags.has_tlsgd) {
                const S_ = @as(i64, @intCast(target.tlsGdAddress(elf_file)));
                try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
            } else if (target.flags.has_gottp) {
                const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
                try x86_64.relaxTlsGdToIe(atom, &.{ rel, it.next().? }, @intCast(S_ - P), elf_file, stream);
            } else {
                try x86_64.relaxTlsGdToLe(
                    atom,
                    &.{ rel, it.next().? },
                    @as(i32, @intCast(S - TP)),
                    elf_file,
                    stream,
                );
            }
        },

        // Local-dynamic TLS: use the shared TLSLD module entry if one was
        // allocated, otherwise relax LD -> LE.
        .TLSLD => {
            if (elf_file.got.tlsld_index) |entry_index| {
                const tlsld_entry = elf_file.got.entries.items[entry_index];
                const S_ = @as(i64, @intCast(tlsld_entry.address(elf_file)));
                try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
            } else {
                try x86_64.relaxTlsLdToLe(
                    atom,
                    &.{ rel, it.next().? },
                    @as(i32, @intCast(TP - @as(i64, @intCast(elf_file.tlsAddress())))),
                    elf_file,
                    stream,
                );
            }
        },

        // TLS descriptor: point at the tlsdesc GOT entry, or relax the
        // lea (opcode 3 bytes before the immediate) to a direct TP offset.
        .GOTPC32_TLSDESC => {
            if (target.flags.has_tlsdesc) {
                const S_ = @as(i64, @intCast(target.tlsDescAddress(elf_file)));
                try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
            } else {
                x86_64.relaxGotPcTlsDesc(code[r_offset - 3 ..]) catch {
                    var err = try elf_file.addErrorWithNotes(1);
                    try err.addMsg(elf_file, "could not relax {s}", .{@tagName(r_type)});
                    try err.addNote(elf_file, "in {}:{s} at offset 0x{x}", .{
                        atom.file(elf_file).?.fmtPath(),
                        atom.name(elf_file),
                        rel.r_offset,
                    });
                    return error.RelaxFailure;
                };
                try cwriter.writeInt(i32, @as(i32, @intCast(S - TP)), .little);
            }
        },

        // When the descriptor was relaxed away, the indirect call becomes a
        // 2-byte nop (0x66 0x90).
        .TLSDESC_CALL => if (!target.flags.has_tlsdesc) {
            // call -> nop
            try cwriter.writeAll(&.{ 0x66, 0x90 });
        },

        // Initial-exec TLS: use the GOTTP entry, or relax IE -> LE.
        .GOTTPOFF => {
            if (target.flags.has_gottp) {
                const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
                try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
            } else {
                x86_64.relaxGotTpOff(code[r_offset - 3 ..]);
                try cwriter.writeInt(i32, @as(i32, @intCast(S - TP)), .little);
            }
        },

        .GOT32 => try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A)), .little),

        else => |x| switch (@intFromEnum(x)) {
            // Zig custom relocations
            Elf.R_ZIG_GOT32 => try cwriter.writeInt(u32, @as(u32, @intCast(ZIG_GOT + A)), .little),
            Elf.R_ZIG_GOTPCREL => try cwriter.writeInt(i32, @as(i32, @intCast(ZIG_GOT + A - P)), .little),

            else => try atom.reportUnhandledRelocError(rel, elf_file),
        },
    }
}
 
/// Resolves a single x86_64 relocation in a non-allocated section (e.g. debug
/// info): only simple absolute/offset forms are supported here, written via
/// `stream` which the caller has already positioned at the relocation offset.
fn resolveRelocNonAlloc(
    atom: Atom,
    elf_file: *Elf,
    rel: elf.Elf64_Rela,
    target: *const Symbol,
    args: ResolveArgs,
    it: *RelocsIterator,
    code: []u8,
    stream: anytype,
) !void {
    _ = code;
    _ = it;
    const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type());
    const cwriter = stream.writer();

    // Only the addend, symbol address, GOT address and DTP are relevant here.
    _, const A, const S, const GOT, _, _, const DTP, _ = args;

    switch (r_type) {
        .NONE => unreachable,
        .@"8" => {
            const value: i8 = @intCast(S + A);
            try cwriter.writeInt(u8, @bitCast(value), .little);
        },
        .@"16" => {
            const value: i16 = @intCast(S + A);
            try cwriter.writeInt(u16, @bitCast(value), .little);
        },
        .@"32" => {
            const value: i32 = @intCast(S + A);
            try cwriter.writeInt(u32, @bitCast(value), .little);
        },
        .@"32S" => try cwriter.writeInt(i32, @intCast(S + A), .little),
        .@"64" => try cwriter.writeInt(i64, S + A, .little),
        .DTPOFF32 => try cwriter.writeInt(i32, @intCast(S + A - DTP), .little),
        .DTPOFF64 => try cwriter.writeInt(i64, S + A - DTP, .little),
        .GOTOFF64 => try cwriter.writeInt(i64, S + A - GOT, .little),
        .GOTPC64 => try cwriter.writeInt(i64, GOT + A, .little),
        .SIZE32 => {
            // Size of the target symbol rather than its address.
            const size: i64 = @intCast(target.elfSym(elf_file).st_size);
            const value: i32 = @intCast(size + A);
            try cwriter.writeInt(u32, @bitCast(value), .little);
        },
        .SIZE64 => {
            const size: i64 = @intCast(target.elfSym(elf_file).st_size);
            try cwriter.writeInt(i64, size + A, .little);
        },
        else => try atom.reportUnhandledRelocError(rel, elf_file),
    }
}
 
fn relaxGotpcrelx(code: []u8) !void {
const old_inst = disassemble(code) orelse return error.RelaxFail;
const old_inst = disassemble(code) orelse return error.RelaxFailure;
const inst = switch (old_inst.encoding.mnemonic) {
.call => try Instruction.new(old_inst.prefix, .call, &.{
// TODO: hack to force imm32s in the assembler
@@ -1263,28 +1354,28 @@ const x86_64 = struct {
// TODO: hack to force imm32s in the assembler
.{ .imm = Immediate.s(-129) },
}),
else => return error.RelaxFail,
else => return error.RelaxFailure,
};
relocs_log.debug(" relaxing {} => {}", .{ old_inst.encoding, inst.encoding });
const nop = try Instruction.new(.none, .nop, &.{});
encode(&.{ nop, inst }, code) catch return error.RelaxFail;
try encode(&.{ nop, inst }, code);
}
 
fn relaxRexGotpcrelx(code: []u8) !void {
const old_inst = disassemble(code) orelse return error.RelaxFail;
const old_inst = disassemble(code) orelse return error.RelaxFailure;
switch (old_inst.encoding.mnemonic) {
.mov => {
const inst = try Instruction.new(old_inst.prefix, .lea, &old_inst.ops);
relocs_log.debug(" relaxing {} => {}", .{ old_inst.encoding, inst.encoding });
encode(&.{inst}, code) catch return error.RelaxFail;
try encode(&.{inst}, code);
},
else => return error.RelaxFail,
else => return error.RelaxFailure,
}
}
 
fn relaxTlsGdToIe(
self: Atom,
rels: []align(1) const elf.Elf64_Rela,
rels: []const elf.Elf64_Rela,
value: i32,
elf_file: *Elf,
stream: anytype,
@@ -1307,7 +1398,7 @@ const x86_64 = struct {
 
else => {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "fatal linker error: rewrite {} when followed by {}", .{
try err.addMsg(elf_file, "TODO: rewrite {} when followed by {}", .{
relocation.fmtRelocType(rels[0].r_type(), .x86_64),
relocation.fmtRelocType(rels[1].r_type(), .x86_64),
});
@@ -1316,13 +1407,14 @@ const x86_64 = struct {
self.name(elf_file),
rels[0].r_offset,
});
return error.RelaxFailure;
},
}
}
 
fn relaxTlsLdToLe(
self: Atom,
rels: []align(1) const elf.Elf64_Rela,
rels: []const elf.Elf64_Rela,
value: i32,
elf_file: *Elf,
stream: anytype,
@@ -1360,7 +1452,7 @@ const x86_64 = struct {
 
else => {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "fatal linker error: rewrite {} when followed by {}", .{
try err.addMsg(elf_file, "TODO: rewrite {} when followed by {}", .{
relocation.fmtRelocType(rels[0].r_type(), .x86_64),
relocation.fmtRelocType(rels[1].r_type(), .x86_64),
});
@@ -1369,6 +1461,7 @@ const x86_64 = struct {
self.name(elf_file),
rels[0].r_offset,
});
return error.RelaxFailure;
},
}
}
@@ -1388,24 +1481,24 @@ const x86_64 = struct {
}
}
 
fn relaxGotTpOff(code: []u8) !void {
const old_inst = disassemble(code) orelse return error.RelaxFail;
fn relaxGotTpOff(code: []u8) void {
const old_inst = disassemble(code) orelse unreachable;
switch (old_inst.encoding.mnemonic) {
.mov => {
const inst = try Instruction.new(old_inst.prefix, .mov, &.{
const inst = Instruction.new(old_inst.prefix, .mov, &.{
old_inst.ops[0],
// TODO: hack to force imm32s in the assembler
.{ .imm = Immediate.s(-129) },
});
}) catch unreachable;
relocs_log.debug(" relaxing {} => {}", .{ old_inst.encoding, inst.encoding });
encode(&.{inst}, code) catch return error.RelaxFail;
encode(&.{inst}, code) catch unreachable;
},
else => return error.RelaxFail,
else => unreachable,
}
}
 
fn relaxGotPcTlsDesc(code: []u8) !void {
const old_inst = disassemble(code) orelse return error.RelaxFail;
const old_inst = disassemble(code) orelse return error.RelaxFailure;
switch (old_inst.encoding.mnemonic) {
.lea => {
const inst = try Instruction.new(old_inst.prefix, .mov, &.{
@@ -1414,15 +1507,15 @@ const x86_64 = struct {
.{ .imm = Immediate.s(-129) },
});
relocs_log.debug(" relaxing {} => {}", .{ old_inst.encoding, inst.encoding });
encode(&.{inst}, code) catch return error.RelaxFail;
try encode(&.{inst}, code);
},
else => return error.RelaxFail,
else => return error.RelaxFailure,
}
}
 
fn relaxTlsGdToLe(
self: Atom,
rels: []align(1) const elf.Elf64_Rela,
rels: []const elf.Elf64_Rela,
value: i32,
elf_file: *Elf,
stream: anytype,
@@ -1460,6 +1553,7 @@ const x86_64 = struct {
self.name(elf_file),
rels[0].r_offset,
});
return error.RelaxFailure;
},
}
}
@@ -1485,15 +1579,446 @@ const x86_64 = struct {
const Instruction = encoder.Instruction;
};
 
/// aarch64 relocation handling: scanning (deciding which GOT/PLT/dynamic
/// entries a symbol needs) and resolving (patching instruction words).
const aarch64 = struct {
    /// First pass over a relocation: records what linker-synthesized entries
    /// (GOT slots, PLT stubs, dynamic relocations) the target symbol requires.
    fn scanReloc(
        atom: Atom,
        elf_file: *Elf,
        rel: elf.Elf64_Rela,
        symbol: *Symbol,
        code: ?[]const u8,
        it: *RelocsIterator,
    ) !void {
        _ = code;
        _ = it;

        const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());
        switch (r_type) {
            // Absolute 64-bit word: may need a dynamic reloc depending on
            // output kind.
            .ABS64 => {
                try atom.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file);
            },

            // ADRP page-relative address.
            .ADR_PREL_PG_HI21 => {
                try atom.scanReloc(symbol, rel, pcRelocAction(symbol, elf_file), elf_file);
            },

            .ADR_GOT_PAGE => {
                // TODO: relax if possible
                symbol.flags.needs_got = true;
            },

            .LD64_GOT_LO12_NC,
            .LD64_GOTPAGE_LO15,
            => {
                symbol.flags.needs_got = true;
            },

            // Branches to imported symbols go through a PLT stub.
            .CALL26,
            .JUMP26,
            => {
                if (symbol.flags.import) {
                    symbol.flags.needs_plt = true;
                }
            },

            // Page-offset (low 12 bits) and short PC-relative forms need no
            // linker-synthesized entries.
            .ADD_ABS_LO12_NC,
            .ADR_PREL_LO21,
            .LDST8_ABS_LO12_NC,
            .LDST16_ABS_LO12_NC,
            .LDST32_ABS_LO12_NC,
            .LDST64_ABS_LO12_NC,
            .LDST128_ABS_LO12_NC,
            => {},

            else => try atom.reportUnhandledRelocError(rel, elf_file),
        }
    }

    /// Resolves one aarch64 relocation in an allocated section by patching the
    /// 4-byte instruction word at `r_offset` in `code` (or writing data words
    /// through `stream` for ABS64). Equation variables in `args`:
    /// P = place, A = addend, S = symbol, GOT = GOT address, G = GOT slot
    /// offset relative to GOT; TP/DTP/ZIG_GOT are unused on this path.
    fn resolveRelocAlloc(
        atom: Atom,
        elf_file: *Elf,
        rel: elf.Elf64_Rela,
        target: *const Symbol,
        args: ResolveArgs,
        it: *RelocsIterator,
        code: []u8,
        stream: anytype,
    ) (error{ UnexpectedRemainder, DivisionByZero } || RelocError)!void {
        _ = it;

        const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());
        const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
        const cwriter = stream.writer();

        const P, const A, const S, const GOT, const G, const TP, const DTP, const ZIG_GOT = args;
        _ = TP;
        _ = DTP;
        _ = ZIG_GOT;

        switch (r_type) {
            .NONE => unreachable,
            .ABS64 => {
                try atom.resolveDynAbsReloc(
                    target,
                    rel,
                    dynAbsRelocAction(target, elf_file),
                    elf_file,
                    cwriter,
                );
            },

            // Direct branches: the signed 28-bit displacement is encoded into
            // the instruction's imm26 field by writeBranchImm. Out-of-range
            // targets are reported (range-extension thunks not yet supported).
            .CALL26,
            .JUMP26,
            => {
                // TODO: add thunk support
                const disp: i28 = math.cast(i28, S + A - P) orelse {
                    var err = try elf_file.addErrorWithNotes(1);
                    try err.addMsg(elf_file, "TODO: branch relocation target ({s}) exceeds max jump distance", .{
                        target.name(elf_file),
                    });
                    try err.addNote(elf_file, "in {}:{s} at offset 0x{x}", .{
                        atom.file(elf_file).?.fmtPath(),
                        atom.name(elf_file),
                        r_offset,
                    });
                    return;
                };
                try aarch64_util.writeBranchImm(disp, code[r_offset..][0..4]);
            },

            // ADRP: encode the page delta between the source and target
            // addresses into the instruction.
            .ADR_PREL_PG_HI21 => {
                // TODO: check for relaxation of ADRP+ADD
                const saddr = @as(u64, @intCast(P));
                const taddr = @as(u64, @intCast(S + A));
                const pages = @as(u21, @bitCast(try aarch64_util.calcNumberOfPages(saddr, taddr)));
                try aarch64_util.writePages(pages, code[r_offset..][0..4]);
            },

            // ADRP pointing at the symbol's GOT slot; without a GOT entry the
            // relaxation to a direct address is not implemented yet.
            .ADR_GOT_PAGE => if (target.flags.has_got) {
                const saddr = @as(u64, @intCast(P));
                const taddr = @as(u64, @intCast(G + GOT + A));
                const pages = @as(u21, @bitCast(try aarch64_util.calcNumberOfPages(saddr, taddr)));
                try aarch64_util.writePages(pages, code[r_offset..][0..4]);
            } else {
                // TODO: relax
                var err = try elf_file.addErrorWithNotes(1);
                try err.addMsg(elf_file, "TODO: relax ADR_GOT_PAGE", .{});
                try err.addNote(elf_file, "in {}:{s} at offset 0x{x}", .{
                    atom.file(elf_file).?.fmtPath(),
                    atom.name(elf_file),
                    r_offset,
                });
            },

            // Low 12 bits of the GOT slot address, for the ldr paired with
            // ADR_GOT_PAGE.
            .LD64_GOT_LO12_NC => {
                assert(target.flags.has_got);
                const taddr = @as(u64, @intCast(G + GOT + A));
                try aarch64_util.writePageOffset(.load_store_64, taddr, code[r_offset..][0..4]);
            },

            // Low 12 bits of the target address; the encoding (shift amount)
            // depends on the access width of the instruction being patched.
            .ADD_ABS_LO12_NC,
            .LDST8_ABS_LO12_NC,
            .LDST16_ABS_LO12_NC,
            .LDST32_ABS_LO12_NC,
            .LDST64_ABS_LO12_NC,
            .LDST128_ABS_LO12_NC,
            => {
                // TODO: NC means no overflow check
                const taddr = @as(u64, @intCast(S + A));
                const kind: aarch64_util.PageOffsetInstKind = switch (r_type) {
                    .ADD_ABS_LO12_NC => .arithmetic,
                    .LDST8_ABS_LO12_NC => .load_store_8,
                    .LDST16_ABS_LO12_NC => .load_store_16,
                    .LDST32_ABS_LO12_NC => .load_store_32,
                    .LDST64_ABS_LO12_NC => .load_store_64,
                    .LDST128_ABS_LO12_NC => .load_store_128,
                    else => unreachable,
                };
                try aarch64_util.writePageOffset(kind, taddr, code[r_offset..][0..4]);
            },

            else => try atom.reportUnhandledRelocError(rel, elf_file),
        }
    }

    /// Resolves one aarch64 relocation in a non-allocated section; only plain
    /// absolute data words are supported here.
    fn resolveRelocNonAlloc(
        atom: Atom,
        elf_file: *Elf,
        rel: elf.Elf64_Rela,
        target: *const Symbol,
        args: ResolveArgs,
        it: *RelocsIterator,
        code: []u8,
        stream: anytype,
    ) !void {
        _ = it;
        _ = code;
        _ = target;

        const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());
        const cwriter = stream.writer();

        // Only the addend and symbol address matter for these forms.
        _, const A, const S, _, _, _, _, _ = args;

        switch (r_type) {
            .NONE => unreachable,
            .ABS32 => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
            .ABS64 => try cwriter.writeInt(i64, S + A, .little),
            else => try atom.reportUnhandledRelocError(rel, elf_file),
        }
    }

    const aarch64_util = @import("../aarch64.zig");
};
 
/// riscv64 relocation handling: scanning (deciding which GOT/PLT entries a
/// symbol needs) and resolving (patching U/I/S-type instruction immediates
/// and data words).
const riscv = struct {
    /// First pass over a relocation: records what linker-synthesized entries
    /// (GOT slots, PLT stubs, dynamic relocations) the target symbol requires.
    fn scanReloc(
        atom: Atom,
        elf_file: *Elf,
        rel: elf.Elf64_Rela,
        symbol: *Symbol,
        code: ?[]const u8,
        it: *RelocsIterator,
    ) !void {
        _ = code;
        _ = it;

        const r_type: elf.R_RISCV = @enumFromInt(rel.r_type());

        switch (r_type) {
            // Absolute 64-bit word: may need a dynamic reloc depending on
            // output kind.
            .@"64" => {
                try atom.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file);
            },

            // Absolute hi20 (lui): absolute addressing action.
            .HI20 => {
                try atom.scanReloc(symbol, rel, absRelocAction(symbol, elf_file), elf_file);
            },

            // Calls to imported symbols go through a PLT stub.
            .CALL_PLT => if (symbol.flags.import) {
                symbol.flags.needs_plt = true;
            },

            .GOT_HI20 => {
                symbol.flags.needs_got = true;
            },

            // PC-relative pairs, low immediates and in-place add/sub need no
            // linker-synthesized entries.
            .PCREL_HI20,
            .PCREL_LO12_I,
            .PCREL_LO12_S,
            .LO12_I,
            .ADD32,
            .SUB32,
            => {},

            else => try atom.reportUnhandledRelocError(rel, elf_file),
        }
    }

    /// Resolves one riscv64 relocation in an allocated section. Most forms
    /// patch the 4-byte instruction at `r_offset` in `code`; `it` is used to
    /// locate the paired HI20 relocation for PCREL_LO12 forms. Equation
    /// variables in `args`: P = place, A = addend, S = symbol, GOT = GOT
    /// address, G = GOT slot offset relative to GOT; TP/DTP/ZIG_GOT unused.
    fn resolveRelocAlloc(
        atom: Atom,
        elf_file: *Elf,
        rel: elf.Elf64_Rela,
        target: *const Symbol,
        args: ResolveArgs,
        it: *RelocsIterator,
        code: []u8,
        stream: anytype,
    ) !void {
        const r_type: elf.R_RISCV = @enumFromInt(rel.r_type());
        const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
        const cwriter = stream.writer();

        const P, const A, const S, const GOT, const G, const TP, const DTP, const ZIG_GOT = args;
        _ = TP;
        _ = DTP;
        _ = ZIG_GOT;

        switch (r_type) {
            .NONE => unreachable,

            .@"64" => {
                try atom.resolveDynAbsReloc(
                    target,
                    rel,
                    dynAbsRelocAction(target, elf_file),
                    elf_file,
                    cwriter,
                );
            },

            // In-place arithmetic on the existing 32-bit word (used e.g. for
            // label differences).
            .ADD32 => riscv_util.writeAddend(i32, .add, code[r_offset..][0..4], S + A),
            .SUB32 => riscv_util.writeAddend(i32, .sub, code[r_offset..][0..4], S + A),

            // Absolute hi20/lo12 pair (lui + addi-style); the split of the
            // value into instruction fields is handled by riscv_util.
            .HI20 => {
                const value: u32 = @bitCast(math.cast(i32, S + A) orelse return error.Overflow);
                riscv_util.writeInstU(code[r_offset..][0..4], value);
            },

            .LO12_I => {
                const value: u32 = @bitCast(math.cast(i32, S + A) orelse return error.Overflow);
                riscv_util.writeInstI(code[r_offset..][0..4], value);
            },

            // auipc addressing the symbol's GOT slot.
            .GOT_HI20 => {
                assert(target.flags.has_got);
                const disp: u32 = @bitCast(math.cast(i32, G + GOT + A - P) orelse return error.Overflow);
                riscv_util.writeInstU(code[r_offset..][0..4], disp);
            },

            // auipc+jalr call sequence; the same displacement is handed to
            // both instructions (assumes writeInstU/writeInstI extract the
            // appropriate hi20/lo12 parts — TODO confirm in riscv.zig).
            .CALL_PLT => {
                // TODO: relax
                const disp: u32 = @bitCast(math.cast(i32, S + A - P) orelse return error.Overflow);
                riscv_util.writeInstU(code[r_offset..][0..4], disp); // auipc
                riscv_util.writeInstI(code[r_offset + 4 ..][0..4], disp); // jalr
            },

            .PCREL_HI20 => {
                const disp: u32 = @bitCast(math.cast(i32, S + A - P) orelse return error.Overflow);
                riscv_util.writeInstU(code[r_offset..][0..4], disp);
            },

            // The low-12 half of a PCREL pair: its symbol points at the
            // instruction carrying the matching PCREL_HI20/GOT_HI20, so the
            // displacement must be recomputed from that paired relocation.
            .PCREL_LO12_I,
            .PCREL_LO12_S,
            => {
                assert(A == 0); // according to the spec
                // We need to find the paired reloc for this relocation.
                // Scan backwards through the (offset-sorted) relocations,
                // restoring the iterator position afterwards.
                const file_ptr = atom.file(elf_file).?;
                const atom_addr = atom.address(elf_file);
                const pos = it.pos;
                const pair = while (it.prev()) |pair| {
                    if (S == atom_addr + pair.r_offset) break pair;
                } else {
                    // TODO: implement searching forward
                    var err = try elf_file.addErrorWithNotes(1);
                    try err.addMsg(elf_file, "TODO: find HI20 paired reloc scanning forward", .{});
                    try err.addNote(elf_file, "in {}:{s} at offset 0x{x}", .{
                        atom.file(elf_file).?.fmtPath(),
                        atom.name(elf_file),
                        rel.r_offset,
                    });
                    return error.RelocFailure;
                };
                it.pos = pos;
                // Recompute the paired relocation's equation inputs.
                const target_ = switch (file_ptr) {
                    .zig_object => |x| elf_file.symbol(x.symbol(pair.r_sym())),
                    .object => |x| elf_file.symbol(x.symbols.items[pair.r_sym()]),
                    else => unreachable,
                };
                const S_ = @as(i64, @intCast(target_.address(.{}, elf_file)));
                const A_ = pair.r_addend;
                const P_ = @as(i64, @intCast(atom_addr + pair.r_offset));
                const G_ = @as(i64, @intCast(target_.gotAddress(elf_file))) - GOT;
                const disp = switch (@as(elf.R_RISCV, @enumFromInt(pair.r_type()))) {
                    .PCREL_HI20 => math.cast(i32, S_ + A_ - P_) orelse return error.Overflow,
                    .GOT_HI20 => math.cast(i32, G_ + GOT + A_ - P_) orelse return error.Overflow,
                    else => unreachable,
                };
                relocs_log.debug(" [{x} => {x}]", .{ P_, disp + P_ });
                switch (r_type) {
                    .PCREL_LO12_I => riscv_util.writeInstI(code[r_offset..][0..4], @bitCast(disp)),
                    .PCREL_LO12_S => riscv_util.writeInstS(code[r_offset..][0..4], @bitCast(disp)),
                    else => unreachable,
                }
            },

            else => try atom.reportUnhandledRelocError(rel, elf_file),
        }
    }

    /// Resolves one riscv64 relocation in a non-allocated section (e.g.
    /// debug info): absolute words, in-place add/sub of various widths, and
    /// the SET forms used by debug-info label math.
    fn resolveRelocNonAlloc(
        atom: Atom,
        elf_file: *Elf,
        rel: elf.Elf64_Rela,
        target: *const Symbol,
        args: ResolveArgs,
        it: *RelocsIterator,
        code: []u8,
        stream: anytype,
    ) !void {
        _ = target;
        _ = it;

        const r_type: elf.R_RISCV = @enumFromInt(rel.r_type());
        const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
        const cwriter = stream.writer();

        // Only the addend and symbol address matter for these forms.
        _, const A, const S, const GOT, _, _, const DTP, _ = args;
        _ = GOT;
        _ = DTP;

        switch (r_type) {
            .NONE => unreachable,

            .@"32" => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
            .@"64" => try cwriter.writeInt(i64, S + A, .little),

            // Read-modify-write of the existing bytes (label differences).
            .ADD8 => riscv_util.writeAddend(i8, .add, code[r_offset..][0..1], S + A),
            .SUB8 => riscv_util.writeAddend(i8, .sub, code[r_offset..][0..1], S + A),
            .ADD16 => riscv_util.writeAddend(i16, .add, code[r_offset..][0..2], S + A),
            .SUB16 => riscv_util.writeAddend(i16, .sub, code[r_offset..][0..2], S + A),
            .ADD32 => riscv_util.writeAddend(i32, .add, code[r_offset..][0..4], S + A),
            .SUB32 => riscv_util.writeAddend(i32, .sub, code[r_offset..][0..4], S + A),
            .ADD64 => riscv_util.writeAddend(i64, .add, code[r_offset..][0..8], S + A),
            .SUB64 => riscv_util.writeAddend(i64, .sub, code[r_offset..][0..8], S + A),

            // Overwrite with the (truncated) value.
            .SET8 => mem.writeInt(i8, code[r_offset..][0..1], @as(i8, @truncate(S + A)), .little),
            .SET16 => mem.writeInt(i16, code[r_offset..][0..2], @as(i16, @truncate(S + A)), .little),
            .SET32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @truncate(S + A)), .little),

            // 6-bit sub-byte forms (DWARF CFA bit fields).
            .SET6 => riscv_util.writeSetSub6(.set, code[r_offset..][0..1], S + A),
            .SUB6 => riscv_util.writeSetSub6(.sub, code[r_offset..][0..1], S + A),

            else => try atom.reportUnhandledRelocError(rel, elf_file),
        }
    }

    const riscv_util = @import("../riscv.zig");
};
 
/// Relocation equation inputs, destructured by the resolvers in order:
/// P (place), A (addend), S (symbol), GOT, G (GOT slot offset relative to
/// GOT), TP, DTP, ZIG_GOT.
const ResolveArgs = struct { i64, i64, i64, i64, i64, i64, i64, i64 };
 
/// Error set shared by the per-architecture relocation scanners and resolvers;
/// RelocFailure/RelaxFailure indicate errors already reported via the linker's
/// error-note machinery.
const RelocError = error{
    Overflow,
    OutOfMemory,
    NoSpaceLeft,
    RelocFailure,
    RelaxFailure,
    UnsupportedCpuArch,
};
 
/// Bidirectional cursor over an atom's relocations. `pos` is the index of the
/// most recently yielded relocation, or -1 before the first `next()`.
/// Fix: `prev()` previously indexed `relocs[pos]` without an upper bound, so
/// calling it after the iterator was exhausted (`pos == relocs.len`) or after
/// an overshooting `skip()` read out of bounds; positions past the end are now
/// skipped safely.
const RelocsIterator = struct {
    relocs: []const elf.Elf64_Rela,
    pos: i64 = -1,

    /// Advances and returns the next relocation, or null when exhausted.
    fn next(it: *RelocsIterator) ?elf.Elf64_Rela {
        it.pos += 1;
        if (it.pos >= it.relocs.len) {
            // Clamp so repeated calls past the end cannot grow `pos` unboundedly.
            it.pos = @intCast(it.relocs.len);
            return null;
        }
        return it.relocs[@intCast(it.pos)];
    }

    /// Returns the current relocation and steps backwards, or null once the
    /// cursor has moved before the first element. Positions at or beyond the
    /// end of the slice are skipped rather than indexed.
    fn prev(it: *RelocsIterator) ?elf.Elf64_Rela {
        while (it.pos >= 0) {
            const idx: usize = @intCast(it.pos);
            it.pos -= 1;
            if (idx < it.relocs.len) return it.relocs[idx];
        }
        return null;
    }

    /// Advances the cursor by `num` positions without yielding them.
    fn skip(it: *RelocsIterator, num: usize) void {
        assert(num > 0);
        it.pos += @intCast(num);
    }
};
 
// File-scope imports. Fix: `Allocator` was declared twice (a diff-merge
// artifact: both `std.mem.Allocator` and `mem.Allocator` lines survived),
// which is a redeclaration error in Zig; the `mem.Allocator` form is kept.
const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;
const eh_frame = @import("eh_frame.zig");
const log = std.log.scoped(.link);
const math = std.math;
const mem = std.mem;
const relocs_log = std.log.scoped(.link_relocs);
const relocation = @import("relocation.zig");

const Allocator = mem.Allocator;
const Atom = @This();
const Elf = @import("../Elf.zig");
const Fde = eh_frame.Fde;
 
src/link/Elf/Object.zig added: 1425, removed: 683, total 742
@@ -245,6 +245,9 @@ fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file:
atom.rel_index = @intCast(self.relocs.items.len);
atom.rel_num = @intCast(relocs.len);
try self.relocs.appendUnalignedSlice(allocator, relocs);
if (elf_file.getTarget().cpu.arch == .riscv64) {
sortRelocs(self.relocs.items[atom.rel_index..][0..atom.rel_num]);
}
}
},
else => {},
@@ -333,6 +336,7 @@ fn skipShdr(self: *Object, index: u32, elf_file: *Elf) bool {
if (mem.startsWith(u8, name, ".note")) break :blk true;
if (mem.startsWith(u8, name, ".comment")) break :blk true;
if (mem.startsWith(u8, name, ".llvm_addrsig")) break :blk true;
if (mem.startsWith(u8, name, ".riscv.attributes")) break :blk true; // TODO: riscv attributes
if (comp.config.debug_format == .strip and shdr.sh_flags & elf.SHF_ALLOC == 0 and
mem.startsWith(u8, name, ".debug")) break :blk true;
break :blk false;
@@ -381,12 +385,15 @@ fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx:
defer allocator.free(relocs);
const rel_start = @as(u32, @intCast(self.relocs.items.len));
try self.relocs.appendUnalignedSlice(allocator, relocs);
if (elf_file.getTarget().cpu.arch == .riscv64) {
sortRelocs(self.relocs.items[rel_start..][0..relocs.len]);
}
const fdes_start = self.fdes.items.len;
const cies_start = self.cies.items.len;
 
var it = eh_frame.Iterator{ .data = raw };
while (try it.next()) |rec| {
const rel_range = filterRelocs(relocs, rec.offset, rec.size + 4);
const rel_range = filterRelocs(self.relocs.items[rel_start..][0..relocs.len], rec.offset, rec.size + 4);
switch (rec.tag) {
.cie => try self.cies.append(allocator, .{
.offset = data_start + rec.offset,
@@ -449,8 +456,18 @@ fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx:
}
}
 
fn sortRelocs(relocs: []elf.Elf64_Rela) void {
const sortFn = struct {
fn lessThan(c: void, lhs: elf.Elf64_Rela, rhs: elf.Elf64_Rela) bool {
_ = c;
return lhs.r_offset < rhs.r_offset;
}
}.lessThan;
mem.sort(elf.Elf64_Rela, relocs, {}, sortFn);
}
 
fn filterRelocs(
relocs: []align(1) const elf.Elf64_Rela,
relocs: []const elf.Elf64_Rela,
start: u64,
len: u64,
) struct { start: u64, len: u64 } {
@@ -832,7 +849,8 @@ pub fn updateSymtabSize(self: *Object, elf_file: *Elf) !void {
if (local.atom(elf_file)) |atom| if (!atom.flags.alive) continue;
const esym = local.elfSym(elf_file);
switch (esym.st_type()) {
elf.STT_SECTION, elf.STT_NOTYPE => continue,
elf.STT_SECTION => continue,
elf.STT_NOTYPE => if (esym.st_shndx == elf.SHN_UNDEF) continue,
else => {},
}
local.flags.output_symtab = true;
 
src/link/Elf/eh_frame.zig added: 1425, removed: 683, total 742
@@ -317,7 +317,9 @@ fn resolveReloc(rec: anytype, sym: *const Symbol, rel: elf.Elf64_Rela, elf_file:
});
 
switch (cpu_arch) {
.x86_64 => x86_64.resolveReloc(rel, P, S + A, contents[offset..]),
.x86_64 => try x86_64.resolveReloc(rec, elf_file, rel, P, S + A, contents[offset..]),
.aarch64 => try aarch64.resolveReloc(rec, elf_file, rel, P, S + A, contents[offset..]),
.riscv64 => try riscv.resolveReloc(rec, elf_file, rel, P, S + A, contents[offset..]),
else => return error.UnsupportedCpuArch,
}
}
@@ -325,6 +327,8 @@ fn resolveReloc(rec: anytype, sym: *const Symbol, rel: elf.Elf64_Rela, elf_file:
pub fn writeEhFrame(elf_file: *Elf, writer: anytype) !void {
relocs_log.debug("{x}: .eh_frame", .{elf_file.shdrs.items[elf_file.eh_frame_section_index.?].sh_addr});
 
var has_reloc_errors = false;
 
for (elf_file.objects.items) |index| {
const object = elf_file.file(index).?.object;
 
@@ -335,7 +339,10 @@ pub fn writeEhFrame(elf_file: *Elf, writer: anytype) !void {
 
for (cie.relocs(elf_file)) |rel| {
const sym = elf_file.symbol(object.symbols.items[rel.r_sym()]);
try resolveReloc(cie, sym, rel, elf_file, contents);
resolveReloc(cie, sym, rel, elf_file, contents) catch |err| switch (err) {
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
};
}
 
try writer.writeAll(contents);
@@ -359,7 +366,10 @@ pub fn writeEhFrame(elf_file: *Elf, writer: anytype) !void {
 
for (fde.relocs(elf_file)) |rel| {
const sym = elf_file.symbol(object.symbols.items[rel.r_sym()]);
try resolveReloc(fde, sym, rel, elf_file, contents);
resolveReloc(fde, sym, rel, elf_file, contents) catch |err| switch (err) {
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
};
}
 
try writer.writeAll(contents);
@@ -367,6 +377,8 @@ pub fn writeEhFrame(elf_file: *Elf, writer: anytype) !void {
}
 
try writer.writeInt(u32, 0, .little);
 
if (has_reloc_errors) return error.RelocFailure;
}
 
pub fn writeEhFrameObject(elf_file: *Elf, writer: anytype) !void {
@@ -540,18 +552,53 @@ const EH_PE = struct {
};
 
const x86_64 = struct {
fn resolveReloc(rel: elf.Elf64_Rela, source: i64, target: i64, data: []u8) void {
fn resolveReloc(rec: anytype, elf_file: *Elf, rel: elf.Elf64_Rela, source: i64, target: i64, data: []u8) !void {
const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type());
switch (r_type) {
.NONE => {},
.@"32" => std.mem.writeInt(i32, data[0..4], @as(i32, @truncate(target)), .little),
.@"64" => std.mem.writeInt(i64, data[0..8], target, .little),
.PC32 => std.mem.writeInt(i32, data[0..4], @as(i32, @intCast(target - source)), .little),
.PC64 => std.mem.writeInt(i64, data[0..8], target - source, .little),
else => unreachable,
else => try reportInvalidReloc(rec, elf_file, rel),
}
}
};
 
const aarch64 = struct {
fn resolveReloc(rec: anytype, elf_file: *Elf, rel: elf.Elf64_Rela, source: i64, target: i64, data: []u8) !void {
const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());
switch (r_type) {
.NONE => {},
.ABS64 => std.mem.writeInt(i64, data[0..8], target, .little),
.PREL32 => std.mem.writeInt(i32, data[0..4], @as(i32, @intCast(target - source)), .little),
.PREL64 => std.mem.writeInt(i64, data[0..8], target - source, .little),
else => try reportInvalidReloc(rec, elf_file, rel),
}
}
};
 
const riscv = struct {
fn resolveReloc(rec: anytype, elf_file: *Elf, rel: elf.Elf64_Rela, source: i64, target: i64, data: []u8) !void {
const r_type: elf.R_RISCV = @enumFromInt(rel.r_type());
switch (r_type) {
.NONE => {},
.@"32_PCREL" => std.mem.writeInt(i32, data[0..4], @as(i32, @intCast(target - source)), .little),
else => try reportInvalidReloc(rec, elf_file, rel),
}
}
};
 
fn reportInvalidReloc(rec: anytype, elf_file: *Elf, rel: elf.Elf64_Rela) !void {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "invalid relocation type {} at offset 0x{x}", .{
relocation.fmtRelocType(rel.r_type(), elf_file.getTarget().cpu.arch),
rel.r_offset,
});
try err.addNote(elf_file, "in {}:.eh_frame", .{elf_file.file(rec.file_index).?.fmtPath()});
return error.RelocFailure;
}
 
const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;
 
src/link/Elf/relocation.zig added: 1425, removed: 683, total 742
@@ -1,4 +1,6 @@
pub const Kind = enum {
none,
other,
abs,
copy,
rel,
@@ -13,23 +15,24 @@ pub const Kind = enum {
 
fn Table(comptime len: comptime_int, comptime RelType: type, comptime mapping: [len]struct { Kind, RelType }) type {
return struct {
fn decode(r_type: u32) ?Kind {
fn decode(r_type: u32) Kind {
inline for (mapping) |entry| {
if (@intFromEnum(entry[1]) == r_type) return entry[0];
}
return null;
return .other;
}
 
fn encode(comptime kind: Kind) u32 {
inline for (mapping) |entry| {
if (entry[0] == kind) return @intFromEnum(entry[1]);
}
unreachable;
@panic("encoding .other is ambiguous");
}
};
}
 
const x86_64_relocs = Table(10, elf.R_X86_64, .{
const x86_64_relocs = Table(11, elf.R_X86_64, .{
.{ .none, .NONE },
.{ .abs, .@"64" },
.{ .copy, .COPY },
.{ .rel, .RELATIVE },
@@ -42,7 +45,8 @@ const x86_64_relocs = Table(10, elf.R_X86_64, .{
.{ .tlsdesc, .TLSDESC },
});
 
const aarch64_relocs = Table(10, elf.R_AARCH64, .{
const aarch64_relocs = Table(11, elf.R_AARCH64, .{
.{ .none, .NONE },
.{ .abs, .ABS64 },
.{ .copy, .COPY },
.{ .rel, .RELATIVE },
@@ -55,7 +59,8 @@ const aarch64_relocs = Table(10, elf.R_AARCH64, .{
.{ .tlsdesc, .TLSDESC },
});
 
const riscv64_relocs = Table(10, elf.R_RISCV, .{
const riscv64_relocs = Table(11, elf.R_RISCV, .{
.{ .none, .NONE },
.{ .abs, .@"64" },
.{ .copy, .COPY },
.{ .rel, .RELATIVE },
 
src/link/MachO/Atom.zig added: 1425, removed: 683, total 742
@@ -699,14 +699,7 @@ fn resolveRelocInner(
const S_: i64 = @intCast(thunk.getTargetAddress(rel.target, macho_file));
break :blk math.cast(i28, S_ + A - P) orelse return error.Overflow;
};
var inst = aarch64.Instruction{
.unconditional_branch_immediate = mem.bytesToValue(std.meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.unconditional_branch_immediate,
), code[rel_offset..][0..4]),
};
inst.unconditional_branch_immediate.imm26 = @as(u26, @truncate(@as(u28, @bitCast(disp >> 2))));
try writer.writeInt(u32, inst.toU32(), .little);
try aarch64.writeBranchImm(disp, code[rel_offset..][0..4]);
},
else => unreachable,
}
@@ -776,16 +769,8 @@ fn resolveRelocInner(
};
break :target math.cast(u64, target) orelse return error.Overflow;
};
const pages = @as(u21, @bitCast(try Relocation.calcNumberOfPages(source, target)));
var inst = aarch64.Instruction{
.pc_relative_address = mem.bytesToValue(std.meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.pc_relative_address,
), code[rel_offset..][0..4]),
};
inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2));
inst.pc_relative_address.immlo = @as(u2, @truncate(pages));
try writer.writeInt(u32, inst.toU32(), .little);
const pages = @as(u21, @bitCast(try aarch64.calcNumberOfPages(source, target)));
try aarch64.writePages(pages, code[rel_offset..][0..4]);
},
 
.pageoff => {
@@ -794,35 +779,8 @@ fn resolveRelocInner(
assert(!rel.meta.pcrel);
const target = math.cast(u64, S + A) orelse return error.Overflow;
const inst_code = code[rel_offset..][0..4];
if (Relocation.isArithmeticOp(inst_code)) {
const off = try Relocation.calcPageOffset(target, .arithmetic);
var inst = aarch64.Instruction{
.add_subtract_immediate = mem.bytesToValue(std.meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.add_subtract_immediate,
), inst_code),
};
inst.add_subtract_immediate.imm12 = off;
try writer.writeInt(u32, inst.toU32(), .little);
} else {
var inst = aarch64.Instruction{
.load_store_register = mem.bytesToValue(std.meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.load_store_register,
), inst_code),
};
const off = try Relocation.calcPageOffset(target, switch (inst.load_store_register.size) {
0 => if (inst.load_store_register.v == 1)
Relocation.PageOffsetInstKind.load_store_128
else
Relocation.PageOffsetInstKind.load_store_8,
1 => .load_store_16,
2 => .load_store_32,
3 => .load_store_64,
});
inst.load_store_register.offset = off;
try writer.writeInt(u32, inst.toU32(), .little);
}
const kind = aarch64.classifyInst(inst_code);
try aarch64.writePageOffset(kind, target, inst_code);
},
 
.got_load_pageoff => {
@@ -830,15 +788,7 @@ fn resolveRelocInner(
assert(rel.meta.length == 2);
assert(!rel.meta.pcrel);
const target = math.cast(u64, G + A) orelse return error.Overflow;
const off = try Relocation.calcPageOffset(target, .load_store_64);
var inst: aarch64.Instruction = .{
.load_store_register = mem.bytesToValue(std.meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.load_store_register,
), code[rel_offset..][0..4]),
};
inst.load_store_register.offset = off;
try writer.writeInt(u32, inst.toU32(), .little);
try aarch64.writePageOffset(.load_store_64, target, code[rel_offset..][0..4]);
},
 
.tlvp_pageoff => {
@@ -863,7 +813,7 @@ fn resolveRelocInner(
 
const inst_code = code[rel_offset..][0..4];
const reg_info: RegInfo = blk: {
if (Relocation.isArithmeticOp(inst_code)) {
if (aarch64.isArithmeticOp(inst_code)) {
const inst = mem.bytesToValue(std.meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.add_subtract_immediate,
@@ -890,7 +840,7 @@ fn resolveRelocInner(
.load_store_register = .{
.rt = reg_info.rd,
.rn = reg_info.rn,
.offset = try Relocation.calcPageOffset(target, .load_store_64),
.offset = try aarch64.calcPageOffset(.load_store_64, target),
.opc = 0b01,
.op1 = 0b01,
.v = 0,
@@ -900,7 +850,7 @@ fn resolveRelocInner(
.add_subtract_immediate = .{
.rd = reg_info.rd,
.rn = reg_info.rn,
.imm12 = try Relocation.calcPageOffset(target, .arithmetic),
.imm12 = try aarch64.calcPageOffset(.arithmetic, target),
.sh = 0,
.s = 0,
.op = 0,
@@ -1183,7 +1133,7 @@ pub const Loc = struct {
 
pub const Alignment = @import("../../InternPool.zig").Alignment;
 
const aarch64 = @import("../../arch/aarch64/bits.zig");
const aarch64 = @import("../aarch64.zig");
const assert = std.debug.assert;
const bind = @import("dyld_info/bind.zig");
const macho = std.macho;
 
src/link/MachO/Relocation.zig added: 1425, removed: 683, total 742
@@ -60,38 +60,6 @@ pub fn lessThan(ctx: void, lhs: Relocation, rhs: Relocation) bool {
return lhs.offset < rhs.offset;
}
 
pub fn calcNumberOfPages(saddr: u64, taddr: u64) error{Overflow}!i21 {
const spage = math.cast(i32, saddr >> 12) orelse return error.Overflow;
const tpage = math.cast(i32, taddr >> 12) orelse return error.Overflow;
const pages = math.cast(i21, tpage - spage) orelse return error.Overflow;
return pages;
}
 
pub const PageOffsetInstKind = enum {
arithmetic,
load_store_8,
load_store_16,
load_store_32,
load_store_64,
load_store_128,
};
 
pub fn calcPageOffset(taddr: u64, kind: PageOffsetInstKind) !u12 {
const narrowed = @as(u12, @truncate(taddr));
return switch (kind) {
.arithmetic, .load_store_8 => narrowed,
.load_store_16 => try math.divExact(u12, narrowed, 2),
.load_store_32 => try math.divExact(u12, narrowed, 4),
.load_store_64 => try math.divExact(u12, narrowed, 8),
.load_store_128 => try math.divExact(u12, narrowed, 16),
};
}
 
pub inline fn isArithmeticOp(inst: *const [4]u8) bool {
const group_decode = @as(u5, @truncate(inst[3]));
return ((group_decode >> 2) == 4);
}
 
pub const Type = enum {
// x86_64
/// RIP-relative displacement (X86_64_RELOC_SIGNED)
 
src/link/MachO/synthetic.zig added: 1425, removed: 683, total 742
@@ -267,9 +267,9 @@ pub const StubsSection = struct {
},
.aarch64 => {
// TODO relax if possible
const pages = try Relocation.calcNumberOfPages(source, target);
const pages = try aarch64.calcNumberOfPages(source, target);
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off = try Relocation.calcPageOffset(target, .load_store_64);
const off = try aarch64.calcPageOffset(.load_store_64, target);
try writer.writeInt(
u32,
aarch64.Instruction.ldr(.x16, .x16, aarch64.Instruction.LoadStoreOffset.imm(off)).toU32(),
@@ -411,9 +411,9 @@ pub const StubsHelperSection = struct {
.aarch64 => {
{
// TODO relax if possible
const pages = try Relocation.calcNumberOfPages(sect.addr, dyld_private_addr);
const pages = try aarch64.calcNumberOfPages(sect.addr, dyld_private_addr);
try writer.writeInt(u32, aarch64.Instruction.adrp(.x17, pages).toU32(), .little);
const off = try Relocation.calcPageOffset(dyld_private_addr, .arithmetic);
const off = try aarch64.calcPageOffset(.arithmetic, dyld_private_addr);
try writer.writeInt(u32, aarch64.Instruction.add(.x17, .x17, off, false).toU32(), .little);
}
try writer.writeInt(u32, aarch64.Instruction.stp(
@@ -424,9 +424,9 @@ pub const StubsHelperSection = struct {
).toU32(), .little);
{
// TODO relax if possible
const pages = try Relocation.calcNumberOfPages(sect.addr + 12, dyld_stub_binder_addr);
const pages = try aarch64.calcNumberOfPages(sect.addr + 12, dyld_stub_binder_addr);
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off = try Relocation.calcPageOffset(dyld_stub_binder_addr, .load_store_64);
const off = try aarch64.calcPageOffset(.load_store_64, dyld_stub_binder_addr);
try writer.writeInt(u32, aarch64.Instruction.ldr(
.x16,
.x16,
@@ -679,9 +679,9 @@ pub const ObjcStubsSection = struct {
{
const target = sym.getObjcSelrefsAddress(macho_file);
const source = addr;
const pages = try Relocation.calcNumberOfPages(source, target);
const pages = try aarch64.calcNumberOfPages(source, target);
try writer.writeInt(u32, aarch64.Instruction.adrp(.x1, pages).toU32(), .little);
const off = try Relocation.calcPageOffset(target, .load_store_64);
const off = try aarch64.calcPageOffset(.load_store_64, target);
try writer.writeInt(
u32,
aarch64.Instruction.ldr(.x1, .x1, aarch64.Instruction.LoadStoreOffset.imm(off)).toU32(),
@@ -692,9 +692,9 @@ pub const ObjcStubsSection = struct {
const target_sym = macho_file.getSymbol(macho_file.objc_msg_send_index.?);
const target = target_sym.getGotAddress(macho_file);
const source = addr + 2 * @sizeOf(u32);
const pages = try Relocation.calcNumberOfPages(source, target);
const pages = try aarch64.calcNumberOfPages(source, target);
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off = try Relocation.calcPageOffset(target, .load_store_64);
const off = try aarch64.calcPageOffset(.load_store_64, target);
try writer.writeInt(
u32,
aarch64.Instruction.ldr(.x16, .x16, aarch64.Instruction.LoadStoreOffset.imm(off)).toU32(),
@@ -778,7 +778,7 @@ pub const WeakBindSection = bind.WeakBind;
pub const LazyBindSection = bind.LazyBind;
pub const ExportTrieSection = Trie;
 
const aarch64 = @import("../../arch/aarch64/bits.zig");
const aarch64 = @import("../aarch64.zig");
const assert = std.debug.assert;
const bind = @import("dyld_info/bind.zig");
const math = std.math;
@@ -788,6 +788,5 @@ const trace = @import("../../tracy.zig").trace;
const Allocator = std.mem.Allocator;
const MachO = @import("../MachO.zig");
const Rebase = @import("dyld_info/Rebase.zig");
const Relocation = @import("Relocation.zig");
const Symbol = @import("Symbol.zig");
const Trie = @import("dyld_info/Trie.zig");
 
src/link/MachO/thunks.zig added: 1425, removed: 683, total 742
@@ -99,9 +99,9 @@ pub const Thunk = struct {
const sym = macho_file.getSymbol(sym_index);
const saddr = thunk.getAddress(macho_file) + i * trampoline_size;
const taddr = sym.getAddress(.{}, macho_file);
const pages = try Relocation.calcNumberOfPages(saddr, taddr);
const pages = try aarch64.calcNumberOfPages(saddr, taddr);
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off = try Relocation.calcPageOffset(taddr, .arithmetic);
const off = try aarch64.calcPageOffset(.arithmetic, taddr);
try writer.writeInt(u32, aarch64.Instruction.add(.x16, .x16, off, false).toU32(), .little);
try writer.writeInt(u32, aarch64.Instruction.br(.x16).toU32(), .little);
}
@@ -164,7 +164,7 @@ const max_distance = (1 << (jump_bits - 1));
/// and assume margin to be 5MiB.
const max_allowed_distance = max_distance - 0x500_000;
 
const aarch64 = @import("../../arch/aarch64/bits.zig");
const aarch64 = @import("../aarch64.zig");
const assert = std.debug.assert;
const log = std.log.scoped(.link);
const macho = std.macho;
 
filename was Deleted added: 1425, removed: 683, total 742
@@ -0,0 +1,106 @@
pub inline fn isArithmeticOp(inst: *const [4]u8) bool {
const group_decode = @as(u5, @truncate(inst[3]));
return ((group_decode >> 2) == 4);
}
 
pub const PageOffsetInstKind = enum {
arithmetic,
load_store_8,
load_store_16,
load_store_32,
load_store_64,
load_store_128,
};
 
pub fn classifyInst(code: *const [4]u8) PageOffsetInstKind {
if (isArithmeticOp(code)) return .arithmetic;
const inst = Instruction{
.load_store_register = mem.bytesToValue(std.meta.TagPayload(
Instruction,
Instruction.load_store_register,
), code),
};
return switch (inst.load_store_register.size) {
0 => if (inst.load_store_register.v == 1) .load_store_128 else .load_store_8,
1 => .load_store_16,
2 => .load_store_32,
3 => .load_store_64,
};
}
 
pub fn calcPageOffset(kind: PageOffsetInstKind, taddr: u64) !u12 {
const narrowed = @as(u12, @truncate(taddr));
return switch (kind) {
.arithmetic, .load_store_8 => narrowed,
.load_store_16 => try math.divExact(u12, narrowed, 2),
.load_store_32 => try math.divExact(u12, narrowed, 4),
.load_store_64 => try math.divExact(u12, narrowed, 8),
.load_store_128 => try math.divExact(u12, narrowed, 16),
};
}
 
pub fn writePageOffset(kind: PageOffsetInstKind, taddr: u64, code: *[4]u8) !void {
const value = try calcPageOffset(kind, taddr);
switch (kind) {
.arithmetic => {
var inst = Instruction{
.add_subtract_immediate = mem.bytesToValue(std.meta.TagPayload(
Instruction,
Instruction.add_subtract_immediate,
), code),
};
inst.add_subtract_immediate.imm12 = value;
mem.writeInt(u32, code, inst.toU32(), .little);
},
else => {
var inst: Instruction = .{
.load_store_register = mem.bytesToValue(std.meta.TagPayload(
Instruction,
Instruction.load_store_register,
), code),
};
inst.load_store_register.offset = value;
mem.writeInt(u32, code, inst.toU32(), .little);
},
}
}
 
pub fn calcNumberOfPages(saddr: u64, taddr: u64) error{Overflow}!i21 {
const spage = math.cast(i32, saddr >> 12) orelse return error.Overflow;
const tpage = math.cast(i32, taddr >> 12) orelse return error.Overflow;
const pages = math.cast(i21, tpage - spage) orelse return error.Overflow;
return pages;
}
 
pub fn writePages(pages: u21, code: *[4]u8) !void {
var inst = Instruction{
.pc_relative_address = mem.bytesToValue(std.meta.TagPayload(
Instruction,
Instruction.pc_relative_address,
), code),
};
inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2));
inst.pc_relative_address.immlo = @as(u2, @truncate(pages));
mem.writeInt(u32, code, inst.toU32(), .little);
}
 
pub fn writeBranchImm(disp: i28, code: *[4]u8) !void {
var inst = Instruction{
.unconditional_branch_immediate = mem.bytesToValue(std.meta.TagPayload(
Instruction,
Instruction.unconditional_branch_immediate,
), code),
};
inst.unconditional_branch_immediate.imm26 = @as(u26, @truncate(@as(u28, @bitCast(disp >> 2))));
mem.writeInt(u32, code, inst.toU32(), .little);
}
 
const assert = std.debug.assert;
const bits = @import("../arch/aarch64/bits.zig");
const builtin = @import("builtin");
const math = std.math;
const mem = std.mem;
const std = @import("std");
 
pub const Instruction = bits.Instruction;
pub const Register = bits.Register;
 
filename was Deleted added: 1425, removed: 683, total 742
@@ -0,0 +1,74 @@
pub fn writeSetSub6(comptime op: enum { set, sub }, code: *[1]u8, addend: anytype) void {
const mask: u8 = 0b11_000000;
const actual: i8 = @truncate(addend);
var value: u8 = mem.readInt(u8, code, .little);
switch (op) {
.set => value = (value & mask) | @as(u8, @bitCast(actual & ~mask)),
.sub => value = (value & mask) | (@as(u8, @bitCast(@as(i8, @bitCast(value)) -| actual)) & ~mask),
}
mem.writeInt(u8, code, value, .little);
}
 
pub fn writeAddend(
comptime Int: type,
comptime op: enum { add, sub },
code: *[@typeInfo(Int).Int.bits / 8]u8,
value: anytype,
) void {
var V: Int = mem.readInt(Int, code, .little);
const addend: Int = @truncate(value);
switch (op) {
.add => V +|= addend, // TODO: I think saturating arithmetic is correct here
.sub => V -|= addend,
}
mem.writeInt(Int, code, V, .little);
}
 
pub fn writeInstU(code: *[4]u8, value: u32) void {
var inst = Instruction{
.U = mem.bytesToValue(std.meta.TagPayload(
Instruction,
Instruction.U,
), code),
};
const compensated: u32 = @bitCast(@as(i32, @bitCast(value)) + 0x800);
inst.U.imm12_31 = bitSlice(compensated, 31, 12);
mem.writeInt(u32, code, inst.toU32(), .little);
}
 
pub fn writeInstI(code: *[4]u8, value: u32) void {
var inst = Instruction{
.I = mem.bytesToValue(std.meta.TagPayload(
Instruction,
Instruction.I,
), code),
};
inst.I.imm0_11 = bitSlice(value, 11, 0);
mem.writeInt(u32, code, inst.toU32(), .little);
}
 
pub fn writeInstS(code: *[4]u8, value: u32) void {
var inst = Instruction{
.S = mem.bytesToValue(std.meta.TagPayload(
Instruction,
Instruction.S,
), code),
};
inst.S.imm0_4 = bitSlice(value, 4, 0);
inst.S.imm5_11 = bitSlice(value, 11, 5);
mem.writeInt(u32, code, inst.toU32(), .little);
}
 
fn bitSlice(
value: anytype,
comptime high: comptime_int,
comptime low: comptime_int,
) std.math.IntFittingRange(0, 1 << high - low) {
return @truncate((value >> low) & (1 << (high - low + 1)) - 1);
}
 
const bits = @import("../arch/riscv64/bits.zig");
const mem = std.mem;
const std = @import("std");
 
pub const Instruction = bits.Instruction;
 
test/link/elf.zig added: 1425, removed: 683, total 742
@@ -10,124 +10,141 @@ pub fn testAll(b: *Build, build_opts: BuildOptions) *Step {
.cpu_arch = .x86_64, // TODO relax this once ELF linker is able to handle other archs
.os_tag = .linux,
});
const musl_target = b.resolveTargetQuery(.{
const x86_64_musl = b.resolveTargetQuery(.{
.cpu_arch = .x86_64,
.os_tag = .linux,
.abi = .musl,
});
const glibc_target = b.resolveTargetQuery(.{
const x86_64_gnu = b.resolveTargetQuery(.{
.cpu_arch = .x86_64,
.os_tag = .linux,
.abi = .gnu,
});
const aarch64_musl = b.resolveTargetQuery(.{
.cpu_arch = .aarch64,
.os_tag = .linux,
.abi = .musl,
});
const riscv64_musl = b.resolveTargetQuery(.{
.cpu_arch = .riscv64,
.os_tag = .linux,
.abi = .musl,
});
 
// x86_64 tests
// Exercise linker in -r mode
elf_step.dependOn(testEmitRelocatable(b, .{ .use_llvm = false, .target = musl_target }));
elf_step.dependOn(testEmitRelocatable(b, .{ .target = musl_target }));
elf_step.dependOn(testRelocatableArchive(b, .{ .target = musl_target }));
elf_step.dependOn(testRelocatableEhFrame(b, .{ .target = musl_target }));
elf_step.dependOn(testRelocatableNoEhFrame(b, .{ .target = musl_target }));
elf_step.dependOn(testEmitRelocatable(b, .{ .use_llvm = false, .target = x86_64_musl }));
elf_step.dependOn(testEmitRelocatable(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testRelocatableArchive(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testRelocatableEhFrame(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testRelocatableNoEhFrame(b, .{ .target = x86_64_musl }));
 
// Exercise linker in ar mode
elf_step.dependOn(testEmitStaticLib(b, .{ .target = musl_target }));
elf_step.dependOn(testEmitStaticLibZig(b, .{ .use_llvm = false, .target = musl_target }));
elf_step.dependOn(testEmitStaticLib(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testEmitStaticLibZig(b, .{ .use_llvm = false, .target = x86_64_musl }));
 
// Exercise linker with self-hosted backend (no LLVM)
elf_step.dependOn(testGcSectionsZig(b, .{ .use_llvm = false, .target = default_target }));
elf_step.dependOn(testLinkingObj(b, .{ .use_llvm = false, .target = default_target }));
elf_step.dependOn(testLinkingStaticLib(b, .{ .use_llvm = false, .target = default_target }));
elf_step.dependOn(testLinkingZig(b, .{ .use_llvm = false, .target = default_target }));
elf_step.dependOn(testImportingDataDynamic(b, .{ .use_llvm = false, .target = glibc_target }));
elf_step.dependOn(testImportingDataStatic(b, .{ .use_llvm = false, .target = musl_target }));
elf_step.dependOn(testImportingDataDynamic(b, .{ .use_llvm = false, .target = x86_64_gnu }));
elf_step.dependOn(testImportingDataStatic(b, .{ .use_llvm = false, .target = x86_64_musl }));
 
// Exercise linker with LLVM backend
// musl tests
elf_step.dependOn(testAbsSymbols(b, .{ .target = musl_target }));
elf_step.dependOn(testCommonSymbols(b, .{ .target = musl_target }));
elf_step.dependOn(testCommonSymbolsInArchive(b, .{ .target = musl_target }));
elf_step.dependOn(testEmptyObject(b, .{ .target = musl_target }));
elf_step.dependOn(testEntryPoint(b, .{ .target = musl_target }));
elf_step.dependOn(testGcSections(b, .{ .target = musl_target }));
elf_step.dependOn(testImageBase(b, .{ .target = musl_target }));
elf_step.dependOn(testInitArrayOrder(b, .{ .target = musl_target }));
elf_step.dependOn(testLargeAlignmentExe(b, .{ .target = musl_target }));
elf_step.dependOn(testAbsSymbols(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testCommonSymbols(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testCommonSymbolsInArchive(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testEmptyObject(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testEntryPoint(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testGcSections(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testImageBase(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testInitArrayOrder(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testLargeAlignmentExe(b, .{ .target = x86_64_musl }));
// https://github.com/ziglang/zig/issues/17449
// elf_step.dependOn(testLargeBss(b, .{ .target = musl_target }));
elf_step.dependOn(testLinkingC(b, .{ .target = musl_target }));
elf_step.dependOn(testLinkingCpp(b, .{ .target = musl_target }));
elf_step.dependOn(testLinkingZig(b, .{ .target = musl_target }));
// elf_step.dependOn(testLargeBss(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testLinkingC(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testLinkingCpp(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testLinkingZig(b, .{ .target = x86_64_musl }));
// https://github.com/ziglang/zig/issues/17451
// elf_step.dependOn(testNoEhFrameHdr(b, .{ .target = musl_target }));
elf_step.dependOn(testTlsStatic(b, .{ .target = musl_target }));
elf_step.dependOn(testStrip(b, .{ .target = musl_target }));
// elf_step.dependOn(testNoEhFrameHdr(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testTlsStatic(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testStrip(b, .{ .target = x86_64_musl }));
 
// glibc tests
elf_step.dependOn(testAsNeeded(b, .{ .target = glibc_target }));
elf_step.dependOn(testAsNeeded(b, .{ .target = x86_64_gnu }));
// https://github.com/ziglang/zig/issues/17430
// elf_step.dependOn(testCanonicalPlt(b, .{ .target = glibc_target }));
elf_step.dependOn(testCopyrel(b, .{ .target = glibc_target }));
// elf_step.dependOn(testCanonicalPlt(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testCopyrel(b, .{ .target = x86_64_gnu }));
// https://github.com/ziglang/zig/issues/17430
// elf_step.dependOn(testCopyrelAlias(b, .{ .target = glibc_target }));
// elf_step.dependOn(testCopyrelAlias(b, .{ .target = x86_64_gnu }));
// https://github.com/ziglang/zig/issues/17430
// elf_step.dependOn(testCopyrelAlignment(b, .{ .target = glibc_target }));
elf_step.dependOn(testDsoPlt(b, .{ .target = glibc_target }));
elf_step.dependOn(testDsoUndef(b, .{ .target = glibc_target }));
elf_step.dependOn(testExportDynamic(b, .{ .target = glibc_target }));
elf_step.dependOn(testExportSymbolsFromExe(b, .{ .target = glibc_target }));
// elf_step.dependOn(testCopyrelAlignment(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testDsoPlt(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testDsoUndef(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testExportDynamic(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testExportSymbolsFromExe(b, .{ .target = x86_64_gnu }));
// https://github.com/ziglang/zig/issues/17430
// elf_step.dependOn(testFuncAddress(b, .{ .target = glibc_target }));
elf_step.dependOn(testHiddenWeakUndef(b, .{ .target = glibc_target }));
elf_step.dependOn(testIFuncAlias(b, .{ .target = glibc_target }));
// elf_step.dependOn(testFuncAddress(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testHiddenWeakUndef(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testIFuncAlias(b, .{ .target = x86_64_gnu }));
// https://github.com/ziglang/zig/issues/17430
// elf_step.dependOn(testIFuncDlopen(b, .{ .target = glibc_target }));
elf_step.dependOn(testIFuncDso(b, .{ .target = glibc_target }));
elf_step.dependOn(testIFuncDynamic(b, .{ .target = glibc_target }));
elf_step.dependOn(testIFuncExport(b, .{ .target = glibc_target }));
elf_step.dependOn(testIFuncFuncPtr(b, .{ .target = glibc_target }));
elf_step.dependOn(testIFuncNoPlt(b, .{ .target = glibc_target }));
// elf_step.dependOn(testIFuncDlopen(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testIFuncDso(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testIFuncDynamic(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testIFuncExport(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testIFuncFuncPtr(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testIFuncNoPlt(b, .{ .target = x86_64_gnu }));
// https://github.com/ziglang/zig/issues/17430 ??
// elf_step.dependOn(testIFuncStatic(b, .{ .target = glibc_target }));
// elf_step.dependOn(testIFuncStaticPie(b, .{ .target = glibc_target }));
elf_step.dependOn(testInitArrayOrder(b, .{ .target = glibc_target }));
elf_step.dependOn(testLargeAlignmentDso(b, .{ .target = glibc_target }));
elf_step.dependOn(testLargeAlignmentExe(b, .{ .target = glibc_target }));
elf_step.dependOn(testLargeBss(b, .{ .target = glibc_target }));
elf_step.dependOn(testLinkOrder(b, .{ .target = glibc_target }));
elf_step.dependOn(testLdScript(b, .{ .target = glibc_target }));
elf_step.dependOn(testLdScriptPathError(b, .{ .target = glibc_target }));
elf_step.dependOn(testLdScriptAllowUndefinedVersion(b, .{ .target = glibc_target, .use_lld = true }));
elf_step.dependOn(testLdScriptDisallowUndefinedVersion(b, .{ .target = glibc_target, .use_lld = true }));
elf_step.dependOn(testMismatchedCpuArchitectureError(b, .{ .target = glibc_target }));
// elf_step.dependOn(testIFuncStatic(b, .{ .target = x86_64_gnu }));
// elf_step.dependOn(testIFuncStaticPie(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testInitArrayOrder(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testLargeAlignmentDso(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testLargeAlignmentExe(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testLargeBss(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testLinkOrder(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testLdScript(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testLdScriptPathError(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testLdScriptAllowUndefinedVersion(b, .{ .target = x86_64_gnu, .use_lld = true }));
elf_step.dependOn(testLdScriptDisallowUndefinedVersion(b, .{ .target = x86_64_gnu, .use_lld = true }));
elf_step.dependOn(testMismatchedCpuArchitectureError(b, .{ .target = x86_64_gnu }));
// https://github.com/ziglang/zig/issues/17451
// elf_step.dependOn(testNoEhFrameHdr(b, .{ .target = glibc_target }));
elf_step.dependOn(testPie(b, .{ .target = glibc_target }));
elf_step.dependOn(testPltGot(b, .{ .target = glibc_target }));
elf_step.dependOn(testPreinitArray(b, .{ .target = glibc_target }));
elf_step.dependOn(testSharedAbsSymbol(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsDfStaticTls(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsDso(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsGd(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsGdNoPlt(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsGdToIe(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsIe(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsLargeAlignment(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsLargeTbss(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsLargeStaticImage(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsLd(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsLdDso(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsLdNoPlt(b, .{ .target = glibc_target }));
// elf_step.dependOn(testNoEhFrameHdr(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testPie(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testPltGot(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testPreinitArray(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testSharedAbsSymbol(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsDfStaticTls(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsDso(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsGd(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsGdNoPlt(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsGdToIe(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsIe(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsLargeAlignment(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsLargeTbss(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsLargeStaticImage(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsLd(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsLdDso(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsLdNoPlt(b, .{ .target = x86_64_gnu }));
// https://github.com/ziglang/zig/issues/17430
// elf_step.dependOn(testTlsNoPic(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsOffsetAlignment(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsPic(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsSmallAlignment(b, .{ .target = glibc_target }));
elf_step.dependOn(testUnknownFileTypeError(b, .{ .target = glibc_target }));
elf_step.dependOn(testUnresolvedError(b, .{ .target = glibc_target }));
elf_step.dependOn(testWeakExports(b, .{ .target = glibc_target }));
elf_step.dependOn(testWeakUndefsDso(b, .{ .target = glibc_target }));
elf_step.dependOn(testZNow(b, .{ .target = glibc_target }));
elf_step.dependOn(testZStackSize(b, .{ .target = glibc_target }));
elf_step.dependOn(testZText(b, .{ .target = glibc_target }));
// elf_step.dependOn(testTlsNoPic(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsOffsetAlignment(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsPic(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsSmallAlignment(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testUnknownFileTypeError(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testUnresolvedError(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testWeakExports(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testWeakUndefsDso(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testZNow(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testZStackSize(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testZText(b, .{ .target = x86_64_gnu }));
 
// aarch64 tests
elf_step.dependOn(testLinkingC(b, .{ .target = aarch64_musl }));
 
// riscv64 tests
elf_step.dependOn(testLinkingC(b, .{ .target = riscv64_musl }));
 
return elf_step;
}