@@ -1,10 +1,14 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const zlib = std.compress.zlib;
const hexLower = std.fmt.fmtSliceHexLower;
const PROT = std.posix.PROT;
const MAP_TYPE = std.os.linux.MAP_TYPE;
pub const Actor = @import("git/actor.zig");
pub const Agent = @import("git/agent.zig");
pub const Blob = @import("git/blob.zig");
pub const Commit = @import("git/commit.zig");
pub const Pack = @import("git/pack.zig");
pub const Tree = @import("git/tree.zig");
pub const Error = error{
ReadError,
@@ -28,7 +32,7 @@ const Types = enum {
tree,
};
const SHA = []const u8; // SUPERBAD, I'm sorry!
pub const SHA = []const u8; // SUPERBAD, I'm sorry!
pub fn shaToHex(sha: []const u8, hex: []u8) void {
std.debug.assert(sha.len == 20);
@@ -45,292 +49,6 @@ pub fn shaToBin(sha: []const u8, bin: []u8) void {
}
}
/// mmap-backed, read-only accessor for an on-disk git packfile (v2) and its
/// .idx index. Both files stay mapped for the lifetime of the Pack; call
/// raze() to unmap and close them.
const Pack = struct {
    name: SHA,
    pack: []u8,
    idx: []u8,
    pack_fd: std.fs.File,
    idx_fd: std.fs.File,

    pack_header: *Header = undefined,
    idx_header: *IdxHeader = undefined,
    objnames: []u8 = undefined,
    crc: []u32 = undefined,
    offsets: []u32 = undefined,
    hugeoffsets: ?[]u64 = null,

    /// Raw pack header: signature, version, object count
    /// (all big-endian on disk).
    const Header = extern struct {
        sig: u32 = 0,
        vnum: u32 = 0,
        onum: u32 = 0,
    };

    /// Packfile v2 support only at this time
    /// marked as extern to enable mmaping the header if useful
    const IdxHeader = extern struct {
        magic: u32,
        vnum: u32,
        fanout: [256]u32,
    };

    /// 3-bit object type from the pack object header.
    /// 5 is unused by git; 6/7 are deltified representations.
    const ObjType = enum(u3) {
        invalid = 0,
        commit = 1,
        tree = 2,
        blob = 3,
        tag = 4,
        ofs_delta = 6,
        ref_delta = 7,
    };

    /// Decoded per-object header: type tag and inflated size.
    const ObjHeader = struct {
        kind: ObjType,
        size: usize,
    };

    /// assumes name ownership
    pub fn init(dir: std.fs.Dir, name: []u8) !Pack {
        std.debug.assert(name.len <= 45);
        var filename: [50]u8 = undefined;
        const ifd = try dir.openFile(try std.fmt.bufPrint(&filename, "{s}.idx", .{name}), .{});
        const pfd = try dir.openFile(try std.fmt.bufPrint(&filename, "{s}.pack", .{name}), .{});
        var pack = Pack{
            .name = name,
            .pack = try mmap(pfd),
            .idx = try mmap(ifd),
            .pack_fd = pfd,
            .idx_fd = ifd,
        };
        try pack.prepare();
        return pack;
    }

    fn prepare(self: *Pack) !void {
        try self.prepareIdx();
        try self.preparePack();
    }

    /// Point idx_header/objnames/crc/offsets into the mmap'd idx.
    /// v2 layout: 8-byte header + 256*4 fanout, then `count` 20-byte names,
    /// `count` u32 crcs, `count` u32 offsets (all big-endian).
    fn prepareIdx(self: *Pack) !void {
        self.idx_header = @alignCast(@ptrCast(self.idx.ptr));
        const count = @byteSwap(self.idx_header.fanout[255]);
        self.objnames = self.idx[258 * 4 ..][0 .. 20 * count];
        self.crc.ptr = @alignCast(@ptrCast(self.idx[258 * 4 + 20 * count ..].ptr));
        self.crc.len = count;
        self.offsets.ptr = @alignCast(@ptrCast(self.idx[258 * 4 + 24 * count ..].ptr));
        self.offsets.len = count;

        self.hugeoffsets = null;
        // NOTE(review): the v2 large-offset (>2GiB) table is not parsed, so
        // hugeoffsets stays null — confirm packs here never exceed 2GiB.
    }

    fn preparePack(self: *Pack) !void {
        // FIX: this previously re-assigned idx_header from the idx mapping
        // (copy/paste from prepareIdx), leaving pack_header undefined.
        self.pack_header = @alignCast(@ptrCast(self.pack.ptr));
    }

    /// Map the whole file read-only/shared; the mapping is page-aligned.
    fn mmap(f: std.fs.File) ![]u8 {
        try f.seekFromEnd(0);
        const length = try f.getPos();
        const offset = 0;
        return std.posix.mmap(null, length, PROT.READ, .{ .TYPE = .SHARED }, f.handle, offset);
    }

    fn munmap(mem: []align(std.mem.page_size) const u8) void {
        std.posix.munmap(mem);
    }

    /// the packidx fanout is a 0xFF count table of u32 the sum count for that
    /// byte which translates the start position for that byte in the main table
    pub fn fanOut(self: Pack, i: u8) u32 {
        return @byteSwap(self.idx_header.fanout[i]);
    }

    /// Number of indexed objects whose first sha byte equals `i`.
    pub fn fanOutCount(self: Pack, i: u8) u32 {
        if (i == 0) return self.fanOut(i);
        return self.fanOut(i) - self.fanOut(i - 1);
    }

    /// Pack offset of the object with this exact 20-byte binary sha, or null.
    pub fn contains(self: Pack, sha: SHA) ?u32 {
        std.debug.assert(sha.len == 20);
        return self.containsPrefix(sha) catch unreachable;
    }

    /// Linear scan of the fanout bucket for `sha` (a binary sha or prefix).
    /// Returns error.AmbiguousRef when two neighbors share the prefix.
    pub fn containsPrefix(self: Pack, sha: SHA) !?u32 {
        const count: usize = self.fanOutCount(sha[0]);
        if (count == 0) return null;
        const start: usize = if (sha[0] > 0) self.fanOut(sha[0] - 1) else 0;
        const objnames = self.objnames[start * 20 ..][0 .. count * 20];
        for (0..count) |i| {
            const objname = objnames[i * 20 ..][0..20];
            if (std.mem.eql(u8, sha, objname[0..sha.len])) {
                // Names are sorted, so only the next entry can collide.
                if (objnames.len > i * 20 + 20 and std.mem.eql(u8, sha, objnames[i * 20 + 20 ..][0..sha.len])) {
                    return error.AmbiguousRef;
                }
                // offsets are big-endian on disk
                return @byteSwap(self.offsets[i + start]);
            }
        }
        return null;
    }

    pub fn getReaderOffset(self: Pack, offset: u32) !FBSReader {
        // NOTE(review): declares FBSReader but returns a single byte; this
        // cannot compile if ever referenced and needs a redesign (a reader
        // must borrow a stream that outlives this frame).
        if (offset > self.pack.len) return error.WTF;
        return self.pack[offset];
    }

    /// Decode the variable-length object header at the reader's position:
    /// low 4 bits of the first byte are size bits, bits 4-6 the type, and
    /// while bit 7 is set each following byte contributes 7 more size bits.
    fn parseObjHeader(reader: *FBSReader) Pack.ObjHeader {
        var byte: usize = 0;
        byte = reader.readByte() catch unreachable;
        var h = Pack.ObjHeader{
            .size = byte & 0b1111,
            .kind = @enumFromInt((byte & 0b01110000) >> 4),
        };
        var cont: bool = byte & 0x80 != 0;
        var shift: u6 = 4;
        while (cont) {
            byte = reader.readByte() catch unreachable;
            // FIX: mask off the continuation bit before shifting; OR'ing the
            // raw byte in corrupted every size spanning 2+ header bytes.
            h.size |= ((byte & 0x7f) << shift);
            shift += 7;
            cont = byte >= 0x80;
        }
        return h;
    }

    /// Inflate the zlib stream at the reader into a caller-owned buffer.
    fn loadBlob(a: Allocator, reader: *FBSReader) ![]u8 {
        var _zlib = zlib.decompressor(reader.*);
        var zr = _zlib.reader();
        return try zr.readAllAlloc(a, 0xffffff);
    }

    /// Git's "offset encoding" varint (big-endian 7-bit groups where each
    /// continuation adds 1), as used for ofs_delta base offsets.
    fn readVarInt(reader: *FBSReader) !usize {
        var byte: usize = try reader.readByte();
        var base: usize = byte & 0x7F;
        while (byte >= 0x80) {
            base += 1;
            byte = try reader.readByte();
            base = (base << 7) + (byte & 0x7F);
        }
        return base;
    }

    /// Execute one delta instruction (copy-from-base or insert-literal),
    /// appending the produced bytes to `writer`. Returns the byte count.
    fn deltaInst(reader: *FBSReader, writer: anytype, base: []u8) !usize {
        const readb: usize = try reader.readByte();
        if (readb == 0) {
            std.debug.print("INVALID INSTRUCTION 0x00\n", .{});
            @panic("Invalid state :<");
        }
        if (readb >= 0x80) {
            // Copy instruction: bits 0-3 select offset bytes, 4-6 size bytes.
            var offs: usize = 0;
            if (readb & 1 != 0) offs |= @as(usize, try reader.readByte()) << 0;
            if (readb & 2 != 0) offs |= @as(usize, try reader.readByte()) << 8;
            if (readb & 4 != 0) offs |= @as(usize, try reader.readByte()) << 16;
            if (readb & 8 != 0) offs |= @as(usize, try reader.readByte()) << 24;
            var size: usize = 0;
            if (readb & 16 != 0) size |= @as(usize, try reader.readByte()) << 0;
            if (readb & 32 != 0) size |= @as(usize, try reader.readByte()) << 8;
            if (readb & 64 != 0) size |= @as(usize, try reader.readByte()) << 16;
            // A zero copy-size encodes 0x10000 per the delta format.
            if (size == 0) size = 0x10000;
            if (size != try writer.write(base[offs..][0..size]))
                @panic("write failed");
            return size;
        } else {
            // Insert instruction: readb (< 0x80) literal bytes follow.
            var stage: [128]u8 = undefined;
            const s = stage[0..readb];
            _ = try reader.read(s);
            _ = try writer.write(s);
            return readb;
        }
    }

    /// Resolve a ref_delta object: read the 20-byte base sha, fetch the base
    /// from the repo, then apply the inflated delta instruction stream.
    /// Caller owns the returned buffer.
    fn loadRefDelta(_: Pack, a: Allocator, reader: *FBSReader, _: usize, repo: Repo) ![]u8 {
        var buf: [20]u8 = undefined;
        var hexy: [40]u8 = undefined;
        _ = try reader.read(&buf);
        shaToHex(&buf, &hexy);
        const basez = repo.findBlob(a, &buf) catch return error.BlobMissing;
        defer a.free(basez);

        var _zlib = zlib.decompressor(reader.*);
        var zr = _zlib.reader();
        const inst = zr.readAllAlloc(a, 0xffffff) catch return error.PackCorrupt;
        defer a.free(inst);
        var inst_fbs = std.io.fixedBufferStream(inst);
        var inst_reader = inst_fbs.reader();
        // We don't actually need these when zlib works :)
        _ = try readVarInt(&inst_reader);
        _ = try readVarInt(&inst_reader);
        var buffer = std.ArrayList(u8).init(a);
        while (true) {
            _ = deltaInst(&inst_reader, buffer.writer(), basez) catch {
                break;
            };
        }
        return try buffer.toOwnedSlice();
    }

    /// Resolve an ofs_delta object whose base lives `srclen` bytes before
    /// `offset` in this same pack. Caller owns the returned buffer.
    fn loadDelta(self: Pack, a: Allocator, reader: *FBSReader, offset: usize, repo: Repo) ![]u8 {
        // fd pos is offset + 2-ish because of the header read
        const srclen = try readVarInt(reader);

        var _zlib = zlib.decompressor(reader.*);
        var zr = _zlib.reader();
        const inst = zr.readAllAlloc(a, 0xffffff) catch return error.PackCorrupt;
        defer a.free(inst);
        var inst_fbs = std.io.fixedBufferStream(inst);
        var inst_reader = inst_fbs.reader();
        // We don't actually need these when zlib works :)
        _ = try readVarInt(&inst_reader);
        _ = try readVarInt(&inst_reader);

        const baseobj_offset = offset - srclen;
        const basez = try self.loadObj(a, baseobj_offset, repo);
        defer a.free(basez);

        var buffer = std.ArrayList(u8).init(a);
        while (true) {
            _ = deltaInst(&inst_reader, buffer.writer(), basez) catch {
                break;
            };
        }
        return try buffer.toOwnedSlice();
    }

    /// Load and fully resolve the object at `offset`, chasing delta chains.
    /// Caller owns the returned buffer.
    pub fn loadObj(self: Pack, a: Allocator, offset: usize, repo: Repo) Error![]u8 {
        var fbs = std.io.fixedBufferStream(self.pack[offset..]);
        var reader = fbs.reader();
        const h = parseObjHeader(&reader);
        switch (h.kind) {
            .commit, .tree, .blob => return loadBlob(a, &reader) catch return error.PackCorrupt,
            .ofs_delta => return try self.loadDelta(a, &reader, offset, repo),
            .ref_delta => return try self.loadRefDelta(a, &reader, offset, repo),
            else => {
                std.debug.print("obj type ({}) not implemented\n", .{h.kind});
                unreachable; // not implemented
            },
        }
        unreachable;
    }

    /// Close both fds, unmap both files, and free the owned name.
    pub fn raze(self: Pack, a: Allocator) void {
        self.pack_fd.close();
        self.idx_fd.close();
        munmap(@alignCast(self.pack));
        munmap(@alignCast(self.idx));
        a.free(self.name);
    }
};
const Object = struct {
ctx: std.io.FixedBufferStream([]u8),
kind: ?Kind = null,
@@ -378,8 +96,9 @@ const Object = struct {
}
};
const Reader = Object.Reader;
const FBSReader = std.io.FixedBufferStream([]u8).Reader;
// TODO AnyReader
pub const Reader = Object.Reader;
pub const FBSReader = std.io.FixedBufferStream([]u8).Reader;
const FsReader = std.fs.File.Reader;
pub const Repo = struct {
@@ -500,7 +219,7 @@ pub const Repo = struct {
return error.ObjectMissing;
}
fn findBlob(self: Repo, a: Allocator, sha: SHA) ![]u8 {
pub fn findBlob(self: Repo, a: Allocator, sha: SHA) ![]u8 {
std.debug.assert(sha.len == 20);
if (try self.findBlobPack(a, sha)) |pack| return pack;
if (try self.findBlobFile(a, sha)) |file| return file;
@@ -524,7 +243,7 @@ pub const Repo = struct {
}
/// TODO binary search lol
fn findObj(self: Repo, a: Allocator, in_sha: SHA) !Object {
pub fn findObj(self: Repo, a: Allocator, in_sha: SHA) !Object {
var shabin: [20]u8 = in_sha[0..20].*;
if (in_sha.len == 40) {
for (&shabin, 0..) |*s, i| {
@@ -773,236 +492,6 @@ pub const Ref = union(enum) {
missing: void,
};
/// Author/committer identity parsed from a git "Name <email> epoch tz" line.
/// All slices borrow from the parsed input; nothing is owned.
pub const Actor = struct {
    name: []const u8,
    email: []const u8,
    timestr: []const u8,
    tzstr: []const u8,
    timestamp: i64 = 0,
    /// TODO: This will not remain i64
    tz: i64 = 0,

    /// Parse `data` from the right: the last space-separated token is the
    /// timezone, the next the epoch, the next the email; whatever remains
    /// is the (possibly space-containing) name. Errors with ActorParse on
    /// any missing token or a non-numeric epoch.
    pub fn make(data: []const u8) !Actor {
        var itr = std.mem.splitBackwards(u8, data, " ");
        const tzstr = itr.next() orelse return error.ActorParse;
        const epoch = itr.next() orelse return error.ActorParse;
        // Iterator position after consuming the epoch token; timestr spans
        // from there to the end of data (the epoch/tz tail).
        // NOTE(review): relies on SplitBackwardsIterator.index semantics —
        // re-verify the exact bounds on std upgrades.
        const epstart = itr.index orelse return error.ActorParse;
        const email = itr.next() orelse return error.ActorParse;
        const name = itr.rest();

        return .{
            .name = name,
            .email = email,
            .timestr = data[epstart..data.len],
            .tzstr = tzstr,
            .timestamp = std.fmt.parseInt(i64, epoch, 10) catch return error.ActorParse,
        };
    }

    pub fn format(self: Actor, comptime _: []const u8, _: std.fmt.FormatOptions, out: anytype) !void {
        try out.print("Actor{{ name {s}, email {s} time {} }}", .{ self.name, self.email, self.timestamp });
    }
};
/// Parsed git commit object. `init`/`make` borrow `sha` and `data`;
/// `initAlloc` dupes both and records the allocator so `raze` can free them.
/// All header slices (tree, parents, actors) point into `blob`.
pub const Commit = struct {
    // TODO not currently implemented
    const GPGSig = struct {};

    // Set only by initAlloc; when present, raze() frees sha and blob.
    alloc: ?Allocator = null,
    // Raw commit object bytes; the slices below borrow from this.
    blob: []const u8,
    sha: SHA,
    tree: SHA,
    /// 9 ought to be enough for anyone... or at least robinli ... at least for a while
    /// TODO fix and make this dynamic
    parent: [9]?SHA,
    author: Actor,
    committer: Actor,
    /// Raw message including the title and body
    message: []const u8,
    title: []const u8,
    body: []const u8,
    repo: ?*const Repo = null,
    gpgsig: ?GPGSig,

    ptr_parent: ?*Commit = null, // TODO multiple parents

    /// Parse one "<name> <payload>" header line into self.
    /// Errors: UnknownHeader for unrecognized names, MalformedHeader when
    /// the line has no space separator.
    fn header(self: *Commit, data: []const u8) !void {
        if (std.mem.indexOf(u8, data, " ")) |brk| {
            const name = data[0..brk];
            const payload = data[brk..];
            if (std.mem.eql(u8, name, "commit")) {
                // Loose-object prefix form: "commit <size>\x00tree <sha>...".
                // NOTE(review): `nl` is an index into `data` but is used to
                // slice `payload` — looks off by `brk`; confirm this branch
                // is actually exercised.
                if (std.mem.indexOf(u8, data, "\x00")) |nl| {
                    self.tree = payload[nl..][0..40];
                } else unreachable;
            } else if (std.mem.eql(u8, name, "tree")) {
                // payload starts with the separator space: skip 1, take 40.
                self.tree = payload[1..41];
            } else if (std.mem.eql(u8, name, "parent")) {
                // Merge commits repeat the header; fill the first free slot.
                for (&self.parent) |*parr| {
                    if (parr.* == null) {
                        parr.* = payload[1..41];
                        return;
                    }
                }
            } else if (std.mem.eql(u8, name, "author")) {
                self.author = try Actor.make(payload);
            } else if (std.mem.eql(u8, name, "committer")) {
                self.committer = try Actor.make(payload);
            } else {
                std.debug.print("unknown header: {s}\n", .{name});
                return error.UnknownHeader;
            }
        } else return error.MalformedHeader;
    }

    /// TODO this
    /// Consumes lines until a PGP/SSH signature end marker; the signature
    /// content itself is discarded, not verified.
    fn gpgSig(_: *Commit, itr: *std.mem.SplitIterator(u8, .sequence)) !void {
        while (itr.next()) |line| {
            if (std.mem.indexOf(u8, line, "-----END PGP SIGNATURE-----") != null) return;
            if (std.mem.indexOf(u8, line, "-----END SSH SIGNATURE-----") != null) return;
        }
        return error.InvalidGpgsig;
    }

    /// Copying constructor: dupes sha and data so the Commit owns them and
    /// raze() will free them.
    pub fn initAlloc(a: Allocator, sha_in: SHA, data: []const u8) !Commit {
        const sha = try a.dupe(u8, sha_in);
        const blob = try a.dupe(u8, data);

        var self = try make(sha, blob, a);
        self.alloc = a;
        return self;
    }

    /// Borrowing constructor; caller keeps ownership of sha and data.
    pub fn init(sha: SHA, data: []const u8) !Commit {
        return make(sha, data, null);
    }

    /// Parse `data` into a Commit; all slices borrow from `data`.
    /// The allocator parameter is currently unused.
    pub fn make(sha: SHA, data: []const u8, a: ?Allocator) !Commit {
        _ = a;
        var lines = std.mem.split(u8, data, "\n");
        var self: Commit = undefined;
        self.repo = null;
        // I don't like it either, but... lazy
        self.parent = .{ null, null, null, null, null, null, null, null, null };
        self.blob = data;
        while (lines.next()) |line| {
            if (std.mem.startsWith(u8, line, "gpgsig")) {
                self.gpgSig(&lines) catch |e| {
                    std.debug.print("GPG sig failed {}\n", .{e});
                    std.debug.print("full stack '''\n{s}\n'''\n", .{data});
                    return e;
                };
                continue;
            }
            // A blank line ends the header block; everything after is the
            // message.
            if (line.len == 0) break;
            // Seen in GPG headers set by github... thanks github :<
            if (std.mem.trim(u8, line, " \t").len != line.len) continue;

            self.header(line) catch |e| {
                std.debug.print("header failed {} on {} '{s}'\n", .{ e, lines.index.?, line });
                std.debug.print("full stack '''\n{s}\n'''\n", .{data});
                return e;
            };
        }
        self.message = lines.rest();
        // Title is everything before the first blank line of the message.
        if (std.mem.indexOf(u8, self.message, "\n\n")) |nl| {
            self.title = self.message[0..nl];
            self.body = self.message[nl + 2 ..];
        } else {
            self.title = self.message;
            self.body = self.message[0..0];
        }
        self.sha = sha;
        return self;
    }

    /// Read a whole object (max 64KiB) from `reader` and parse it into
    /// allocator-owned memory.
    pub fn fromReader(a: Allocator, sha: SHA, reader: Reader) !Commit {
        var buffer: [0xFFFF]u8 = undefined;
        const len = try reader.readAll(&buffer);
        return try initAlloc(a, sha, buffer[0..len]);
    }

    /// Load parent number `idx` via the attached repo.
    /// Errors: NoParent when the slot is empty or out of range,
    /// DetachedCommit when no repo is attached.
    pub fn toParent(self: Commit, a: Allocator, idx: u8) !Commit {
        if (idx >= self.parent.len) return error.NoParent;
        if (self.parent[idx]) |parent| {
            if (self.repo) |repo| {
                var obj = try repo.findObj(a, parent);
                defer obj.raze(a);
                var cmt = try Commit.fromReader(a, parent, obj.reader());
                cmt.repo = repo;
                return cmt;
            }
            return error.DetachedCommit;
        }
        return error.NoParent;
    }

    /// Load this commit's root tree from the attached repo.
    pub fn mkTree(self: Commit, a: Allocator) !Tree {
        if (self.repo) |repo| {
            return try Tree.fromRepo(a, repo.*, self.tree);
        } else return error.DetachedCommit;
    }

    /// Walk `subpath` ("a/b/c") down from the root tree and return the
    /// subtree at the end; a null or empty path returns the root tree.
    /// Errors: PathNotFound when a component is missing.
    pub fn mkSubTree(self: Commit, a: Allocator, subpath: ?[]const u8) !Tree {
        const path = subpath orelse return self.mkTree(a);
        if (path.len == 0) return self.mkTree(a);

        var itr = std.mem.split(u8, path, "/");
        var root = try self.mkTree(a);
        root.path = try a.dupe(u8, path);
        iter: while (itr.next()) |p| {
            for (root.objects) |obj| {
                if (std.mem.eql(u8, obj.name, p)) {
                    if (itr.rest().len == 0) {
                        // Last component: free the intermediate tree and
                        // hand the target to the caller.
                        defer root.raze(a);
                        var out = try obj.toTree(a, self.repo.?.*);
                        out.path = try a.dupe(u8, path);
                        return out;
                    } else {
                        // Descend one level, releasing the old tree.
                        const tree = try obj.toTree(a, self.repo.?.*);
                        defer root = tree;
                        root.raze(a);
                        continue :iter;
                    }
                }
            } else return error.PathNotFound;
        }
        return root;
    }

    /// Warning; this function is probably unsafe
    pub fn raze(self: Commit) void {
        if (self.alloc) |a| {
            a.free(self.sha);
            a.free(self.blob);
        }
    }

    pub fn format(
        self: Commit,
        comptime _: []const u8,
        _: std.fmt.FormatOptions,
        out: anytype,
    ) !void {
        try out.print(
            \\Commit{{
            \\commit {s}
            \\tree {s}
            \\
        , .{ self.sha, self.tree });
        for (self.parent) |par| {
            if (par) |p|
                try out.print("parent {s}\n", .{p});
        }
        try out.print(
            \\author {}
            \\commiter {}
            \\
            \\{s}
            \\}}
        , .{ self.author, self.committer, self.message });
    }
};
/// TODO for commitish
/// direct
/// - [x] sha
@@ -1040,35 +529,6 @@ pub fn commitishRepo(rev: []const u8, repo: Repo) bool {
return false;
}
/// One entry of a git tree: mode digits, entry name, and 40-char hex sha.
pub const Blob = struct {
    mode: [6]u8,
    name: []const u8,
    hash: [40]u8,

    /// True when this entry is a regular file; directory modes are
    /// normalized to start with '0' (see Tree.make), files start '1'.
    pub fn isFile(self: Blob) bool {
        return self.mode[0] != '0';
    }

    /// Stub: resolving a file entry to an Object is not implemented yet.
    pub fn toObject(self: Blob, a: Allocator, repo: Repo) !Object {
        if (!self.isFile()) return error.NotAFile;
        _ = a;
        _ = repo;
        return error.NotImplemented;
    }

    /// Load the subtree this (directory) entry points at.
    pub fn toTree(self: Blob, a: Allocator, repo: Repo) !Tree {
        if (self.isFile()) return error.NotATree;
        return try Tree.fromRepo(a, repo, &self.hash);
    }

    pub fn format(self: Blob, comptime _: []const u8, _: std.fmt.FormatOptions, out: anytype) !void {
        const kind: []const u8 = if (self.isFile()) "File" else "Tree";
        try out.print("Blob{{ {s} {s} @ {s} }}", .{ kind, self.name, self.hash });
    }
};
pub const ChangeSet = struct {
name: []const u8,
sha: []const u8,
@@ -1095,318 +555,6 @@ pub const ChangeSet = struct {
}
};
/// A git tree object: decoded entry list (Blobs) plus the raw blob the
/// entry slices point into. Owns sha, path, objects, and blob; see raze().
pub const Tree = struct {
    sha: []const u8,
    path: ?[]const u8 = null,
    blob: []const u8,
    objects: []Blob,

    /// Append `path` to self.path as "old/new", duping the input and
    /// freeing the previous joined path.
    pub fn pushPath(self: *Tree, a: Allocator, path: []const u8) !void {
        const spath = self.path orelse {
            self.path = try a.dupe(u8, path);
            return;
        };

        self.path = try std.mem.join(a, "/", &[_][]const u8{ spath, path });
        a.free(spath);
    }

    /// Look `sha` up in repo `r` and parse the object body as a tree.
    pub fn fromRepo(a: Allocator, r: Repo, sha: SHA) !Tree {
        var blob = try r.findObj(a, sha);
        defer blob.raze(a);
        const b = try blob.reader().readAllAlloc(a, 0xffff);
        return try Tree.make(a, sha, b);
    }

    /// Parse raw tree bytes: repeated "<mode> <name>\x00<20-byte binary sha>"
    /// entries. Takes ownership of `blob`; dupes `sha`.
    pub fn make(a: Allocator, sha: SHA, blob: []const u8) !Tree {
        var self: Tree = .{
            .sha = try a.dupe(u8, sha),
            .blob = blob,
            // One NUL per entry, so count(NUL) slightly over-allocates when
            // a loose-object header is present; trimmed via resize below.
            .objects = try a.alloc(Blob, std.mem.count(u8, blob, "\x00")),
        };

        var i: usize = 0;
        // Skip a loose-object "tree <size>\x00" prefix when present.
        if (std.mem.indexOf(u8, blob, "tree ")) |tidx| {
            if (std.mem.indexOfScalarPos(u8, blob, i, 0)) |index| {
                // This is probably wrong for large trees, but #YOLO
                std.debug.assert(tidx == 0);
                std.debug.assert(std.mem.eql(u8, "tree ", blob[0..5]));
                i = index + 1;
            }
        }
        var obj_i: usize = 0;
        while (std.mem.indexOfScalarPos(u8, blob, i, 0)) |index| {
            var obj = &self.objects[obj_i];
            obj_i += 1;
            if (blob[i] == '1') {
                // File modes ("100644" etc.) are 6 ASCII digits.
                _ = try std.fmt.bufPrint(&obj.mode, "{s}", .{blob[i .. i + 6]});
                _ = try std.fmt.bufPrint(&obj.hash, "{}", .{hexLower(blob[index + 1 .. index + 21])});
                obj.name = blob[i + 7 .. index];
            } else if (blob[i] == '4') {
                // Directory mode "40000" is 5 digits; zero-pad to 6.
                _ = try std.fmt.bufPrint(&obj.mode, "0{s}", .{blob[i .. i + 5]});
                _ = try std.fmt.bufPrint(&obj.hash, "{}", .{hexLower(blob[index + 1 .. index + 21])});
                obj.name = blob[i + 6 .. index];
            } else std.debug.print("panic {s} ", .{blob[i..index]});
            // Advance past the NUL plus the 20-byte binary sha.
            i = index + 21;
        }
        // Shrink the over-allocated entry list in place when possible.
        if (a.resize(self.objects, obj_i)) {
            self.objects.len = obj_i;
        }
        return self;
    }

    /// Read a whole tree object (max 64KiB) from `reader` and parse it.
    pub fn fromReader(a: Allocator, sha: SHA, reader: Reader) !Tree {
        const buf = try reader.readAllAlloc(a, 0xffff);
        return try Tree.make(a, sha, buf);
    }

    /// For each entry of this tree, find the most recent commit that changed
    /// it by walking first parents from HEAD. Caller owns the returned
    /// ChangeSet slice.
    pub fn changedSet(self: Tree, a: Allocator, repo: *Repo) ![]ChangeSet {
        const cmtt = try repo.headCommit(a);
        defer cmtt.raze();
        // Entries still awaiting their "last changed" commit; resolved
        // slots are nulled out as they are attributed.
        const search_list: []?Blob = try a.alloc(?Blob, self.objects.len);
        for (self.objects, search_list) |src, *dst| {
            dst.* = src;
        }
        defer a.free(search_list);

        var par = try repo.headCommit(a);
        var ptree = try par.mkSubTree(a, self.path);

        var changed = try a.alloc(ChangeSet, self.objects.len);
        var old = par;
        var oldtree = ptree;
        var found: usize = 0;
        while (found < search_list.len) {
            old = par;
            oldtree = ptree;
            // At history's root, attribute everything unresolved to the
            // oldest commit seen and stop.
            par = par.toParent(a, 0) catch |err| switch (err) {
                error.NoParent => {
                    for (search_list, 0..) |search_ish, i| {
                        if (search_ish) |search| {
                            found += 1;
                            changed[i] = try ChangeSet.init(
                                a,
                                search.name,
                                old.sha,
                                old.message,
                                old.committer.timestamp,
                            );
                        }
                    }
                    old.raze();
                    oldtree.raze(a);
                    break;
                },
                else => |e| return e,
            };
            // Same attribution when the subtree path disappears further
            // back in history.
            ptree = par.mkSubTree(a, self.path) catch |err| switch (err) {
                error.PathNotFound => {
                    for (search_list, 0..) |search_ish, i| {
                        if (search_ish) |search| {
                            found += 1;
                            changed[i] = try ChangeSet.init(
                                a,
                                search.name,
                                old.sha,
                                old.message,
                                old.committer.timestamp,
                            );
                        }
                    }
                    old.raze();
                    oldtree.raze(a);
                    break;
                },
                else => |e| return e,
            };

            for (search_list, 0..) |*search_ish, i| {
                const search = search_ish.* orelse continue;
                // search.name points into this tree's blob; the 21 bytes
                // following it are the NUL + 20-byte binary sha (see make),
                // so extending then taking the tail recovers the entry's
                // binary sha without copying.
                var line = search.name;
                line.len += 21;
                line = line[line.len - 20 .. line.len];
                // If the parent's tree lacks this exact sha, the entry was
                // changed by `old` (the younger commit).
                if (std.mem.indexOf(u8, ptree.blob, line)) |_| {} else {
                    search_ish.* = null;
                    found += 1;
                    changed[i] = try ChangeSet.init(
                        a,
                        search.name,
                        old.sha,
                        old.message,
                        old.committer.timestamp,
                    );
                    continue;
                }
            }
            old.raze();
            oldtree.raze(a);
        }
        par.raze();
        ptree.raze(a);

        return changed;
    }

    /// Free everything this tree owns.
    pub fn raze(self: Tree, a: Allocator) void {
        a.free(self.sha);
        if (self.path) |p| a.free(p);
        a.free(self.objects);
        a.free(self.blob);
    }

    pub fn format(self: Tree, comptime _: []const u8, _: std.fmt.FormatOptions, out: anytype) !void {
        // Count files vs directories; mode[0] == 48 ('0') marks a directory
        // (normalized in make), consistent with Blob.isFile.
        var f: usize = 0;
        var d: usize = 0;
        for (self.objects) |obj| {
            if (obj.mode[0] == 48)
                d += 1
            else
                f += 1;
        }
        try out.print(
            \\Tree{{ {} Objects, {} files {} directories }}
        , .{ self.objects.len, f, d });
    }
};
const DEBUG_GIT_ACTIONS = false;
/// Thin wrapper around shelling out to the `git` binary.
/// Output buffers are allocated with `self.alloc`; unless noted otherwise
/// the caller owns returned stdout slices and must free them.
pub const Agent = struct {
    alloc: Allocator,
    repo: ?*const Repo = null,
    cwd: ?std.fs.Dir = null,

    /// Fetch the `upstream` remote and, only when HEAD is an ancestor of
    /// upstream/<branch>, fast-forward all local refs.
    /// Returns false when the update would not be a fast-forward.
    pub fn updateUpstream(self: Agent, branch: []const u8) !bool {
        const fetch = try self.exec(&[_][]const u8{
            "git",
            "fetch",
            "upstream",
            "-q",
        });
        if (fetch.len > 0) std.debug.print("fetch {s}\n", .{fetch});
        self.alloc.free(fetch);
        var buf: [512]u8 = undefined;
        const up_branch = try std.fmt.bufPrint(&buf, "upstream/{s}", .{branch});
        const pull = try self.execCustom(&[_][]const u8{
            "git",
            "merge-base",
            "--is-ancestor",
            "HEAD",
            up_branch,
        });
        defer self.alloc.free(pull.stdout);
        defer self.alloc.free(pull.stderr);
        // FIX: guard the Term union tag — reading .Exited when git was
        // killed by a signal would panic; treat non-exit termination as a
        // refusal, same as a non-zero exit.
        if (pull.term == .Exited and pull.term.Exited == 0) {
            const move = try self.exec(&[_][]const u8{
                "git",
                "fetch",
                "upstream",
                "*:*",
                "-q",
            });
            self.alloc.free(move);
            return true;
        } else {
            std.debug.print("refusing to move head non-ancestor\n", .{});
            return false;
        }
    }

    /// Push all refs to the `downstream` remote; always reports true.
    pub fn updateDownstream(self: Agent) !bool {
        const push = try self.exec(&[_][]const u8{
            "git",
            "push",
            "downstream",
            "*:*",
            "--porcelain",
        });
        std.debug.print("pushing downstream ->\n{s}\n", .{push});
        self.alloc.free(push);
        return true;
    }

    /// Bare-clone `uri` into `local_dir` with the remote named `upstream`.
    /// Caller owns the returned stdout.
    pub fn forkRemote(self: Agent, uri: []const u8, local_dir: []const u8) ![]u8 {
        return try self.exec(&[_][]const u8{
            "git",
            "clone",
            "--bare",
            "--origin",
            "upstream",
            uri,
            local_dir,
        });
    }

    /// Run `git init [--bare] <dir>`. Caller owns the returned stdout.
    pub fn initRepo(self: Agent, dir: []const u8, opt: struct { bare: bool = true }) ![]u8 {
        // FIX: the non-bare path previously passed an empty-string argv
        // element (""), which git receives as a bogus extra argument;
        // build the argv per mode instead.
        if (opt.bare) {
            return try self.exec(&[_][]const u8{ "git", "init", "--bare", dir });
        }
        return try self.exec(&[_][]const u8{ "git", "init", dir });
    }

    /// `git show` for a single sha with its diff. Caller owns stdout.
    pub fn show(self: Agent, sha: []const u8) ![]u8 {
        return try self.exec(&[_][]const u8{
            "git",
            "show",
            "--diff-merges=1",
            "-p",
            sha,
        });
    }

    /// Porcelain `git blame` for `name`. Caller owns stdout.
    pub fn blame(self: Agent, name: []const u8) ![]u8 {
        std.debug.print("{s}\n", .{name});
        return try self.exec(&[_][]const u8{
            "git",
            "blame",
            "--porcelain",
            name,
        });
    }

    /// Spawn git with `argv` and collect the full RunResult; caller frees
    /// both stdout and stderr. Failures are logged with the failing argv.
    fn execCustom(self: Agent, argv: []const []const u8) !std.ChildProcess.RunResult {
        std.debug.assert(std.mem.eql(u8, argv[0], "git"));
        // Only pass a cwd when one was configured and it differs from the
        // process-wide cwd.
        const cwd = if (self.cwd != null and self.cwd.?.fd != std.fs.cwd().fd) self.cwd else null;
        const child = std.ChildProcess.run(.{
            .cwd_dir = cwd,
            .allocator = self.alloc,
            .argv = argv,
            .max_output_bytes = 0x1FFFFF,
        }) catch |err| {
            const errstr =
                \\git agent error:
                \\error :: {}
                \\argv ::
            ;
            std.debug.print(errstr, .{err});
            for (argv) |arg| std.debug.print("{s} ", .{arg});
            std.debug.print("\n", .{});
            return err;
        };
        return child;
    }

    /// Run git, print any stderr, free it, and return stdout (caller owns).
    fn exec(self: Agent, argv: []const []const u8) ![]u8 {
        const child = try self.execCustom(argv);
        if (child.stderr.len > 0) std.debug.print("git Agent error\nstderr {s}\n", .{child.stderr});
        self.alloc.free(child.stderr);
        if (DEBUG_GIT_ACTIONS) std.debug.print(
            \\git action
            \\{s}
            \\'''
            \\{s}
            \\'''
            \\
        , .{ argv[1], child.stdout });
        return child.stdout;
    }
};
test "hex tranlations" {
var hexbuf: [40]u8 = undefined;
var binbuf: [20]u8 = undefined;