srctree

Gregory Mullen parent 4d70ba08 e1ecef54
major refactor of git

why do I hate myself
src/api/repo.zig added: 585, removed: 584, total 1
@@ -59,7 +59,7 @@ pub fn repo(ctx: *API.Context) API.Routes.Error!void {
return try ctx.sendJSON([0]Repo{});
},
};
defer gitrepo.raze(ctx.alloc);
defer gitrepo.raze();
 
const head = switch (gitrepo.HEAD(ctx.alloc) catch return error.Unknown) {
.branch => |b| b.sha,
@@ -69,7 +69,7 @@ pub fn repo(ctx: *API.Context) API.Routes.Error!void {
 
return try ctx.sendJSON([1]Repo{.{
.name = req.name,
.head = head,
.head = head.hex[0..],
.updated = "undefined",
}});
}
@@ -94,7 +94,7 @@ pub fn repoBranches(ctx: *API.Context) API.Routes.Error!void {
return try ctx.sendJSON([0]RepoBranches{});
},
};
defer gitrepo.raze(ctx.alloc);
defer gitrepo.raze();
 
return try ctx.sendJSON([1]RepoBranches{.{
.name = req.name,
@@ -125,9 +125,8 @@ pub fn repoTags(ctx: *API.Context) API.Routes.Error!void {
return try ctx.sendJSON([0]RepoTags{});
},
};
defer gitrepo.raze(ctx.alloc);
 
gitrepo.loadTags(ctx.alloc) catch return error.Unknown;
gitrepo.loadData(ctx.alloc) catch return error.Unknown;
defer gitrepo.raze();
 
const repotags = gitrepo.tags orelse return try ctx.sendJSON([1]RepoTags{.{
.name = req.name,
 
src/endpoints/commit-flex.zig added: 585, removed: 584, total 1
@@ -20,7 +20,7 @@ const Scribe = struct {
repo: []const u8,
title: []const u8,
date: DateTime,
sha: []const u8,
sha: Git.SHA,
 
pub fn toContext(self: Commit, a: Allocator) !Template.Context {
var jctx = Template.Context.init(a);
@@ -32,8 +32,8 @@ const Scribe = struct {
"<span>{Y-m-d}</span><span>{day}</span><span>{time}</span>",
.{ self.date, self.date, self.date },
));
try jctx.putSlice("ShaLong", self.sha);
try jctx.putSlice("Sha", self.sha[0..8]);
try jctx.putSlice("ShaLong", self.sha.hex[0..]);
try jctx.putSlice("Sha", self.sha.hex[0..8]);
return jctx;
}
};
@@ -56,7 +56,7 @@ const HeatMapSize = 366 + 6;
const HeatMapArray = [HeatMapSize]u16;
 
pub const HeatMap = struct {
sha: [40]u8,
sha: Git.SHA.Bin,
hits: HeatMapArray,
};
 
@@ -79,11 +79,12 @@ fn countAll(
seen: *std.BufSet,
until: i64,
root_cmt: Git.Commit,
repo: *const Git.Repo,
email: []const u8,
) !*HeatMapArray {
var commit = root_cmt;
while (true) {
if (seen.contains(commit.sha)) return hits;
if (seen.contains(commit.sha.bin[0..])) return hits;
var commit_time = commit.author.timestamp;
if (DateTime.tzToSec(commit.author.tzstr) catch @as(?i32, 0)) |tzs| {
commit_time += tzs;
@@ -95,14 +96,14 @@ fn countAll(
hits[day_off] += 1;
}
for (commit.parent[1..], 1..) |par, pidx| {
if (par) |_| {
seen.insert(par.?) catch unreachable;
const parent = try commit.toParent(a, @truncate(pidx));
if (par) |p| {
seen.insert(p.bin[0..]) catch unreachable;
const parent = try commit.toParent(a, @truncate(pidx), repo);
//defer parent.raze(a);
_ = try countAll(a, hits, seen, until, parent, email);
_ = try countAll(a, hits, seen, until, parent, repo, email);
}
}
commit = commit.toParent(a, 0) catch |err| switch (err) {
commit = commit.toParent(a, 0, repo) catch |err| switch (err) {
error.NoParent => {
return hits;
},
@@ -131,14 +132,14 @@ fn buildJournal(
const repo_dir = try std.fs.cwd().openDir(gitdir, .{});
var repo = try Git.Repo.init(repo_dir);
try repo.loadData(a);
defer repo.raze(a);
defer repo.raze();
 
var lseen = std.BufSet.init(a);
const until = (DateTime.fromEpoch(DateTime.now().timestamp - DAY * 90)).timestamp;
var commit = try repo.headCommit(a);
 
while (true) {
if (lseen.contains(commit.sha)) break;
if (lseen.contains(commit.sha.bin[0..])) break;
var commit_time = commit.author.timestamp;
if (DateTime.tzToSec(commit.author.tzstr) catch @as(?i32, 0)) |tzs| {
commit_time += tzs;
@@ -149,12 +150,12 @@ fn buildJournal(
.name = try Bleach.sanitizeAlloc(a, commit.author.name, .{}),
.title = try Bleach.sanitizeAlloc(a, commit.title, .{}),
.date = DateTime.fromEpoch(commit_time),
.sha = try a.dupe(u8, commit.sha),
.sha = commit.sha,
.repo = try a.dupe(u8, gitdir[8..]),
});
}
 
commit = commit.toParent(a, 0) catch |err| switch (err) {
commit = commit.toParent(a, 0, &repo) catch |err| switch (err) {
error.NoParent => break,
else => |e| return e,
};
@@ -165,7 +166,7 @@ fn buildCommitList(a: Allocator, seen: *std.BufSet, until: i64, gitdir: []const
const repo_dir = try std.fs.cwd().openDir(gitdir, .{});
var repo = try Git.Repo.init(repo_dir);
try repo.loadData(a);
defer repo.raze(a);
defer repo.raze();
 
// TODO return empty hits here
const commit = repo.headCommit(a) catch unreachable;
@@ -186,10 +187,10 @@ fn buildCommitList(a: Allocator, seen: *std.BufSet, until: i64, gitdir: []const
@memset(hits[0..], 0);
}
 
if (!std.mem.eql(u8, heatmap.sha[0..], commit.sha[0..40])) {
@memcpy(heatmap.sha[0..], commit.sha[0..40]);
if (!std.mem.eql(u8, heatmap.sha[0..], commit.sha.bin[0..])) {
@memcpy(heatmap.sha[0..], commit.sha.bin[0..]);
@memset(hits[0..], 0);
_ = try countAll(a, hits, seen, until, commit, email);
_ = try countAll(a, hits, seen, until, commit, &repo, email);
}
 
return hits;
 
src/endpoints/repos.zig added: 585, removed: 584, total 1
@@ -282,9 +282,9 @@ fn list(ctx: *Context) Error!void {
const rdir = idir.openDir(file.name, .{}) catch continue;
var rpo = Git.Repo.init(rdir) catch continue;
rpo.loadData(ctx.alloc) catch return error.Unknown;
defer rpo.raze();
rpo.repo_name = ctx.alloc.dupe(u8, file.name) catch null;
 
rpo.loadTags(ctx.alloc) catch return error.Unknown;
if (rpo.tags != null) {
std.sort.heap(Git.Tag, rpo.tags.?, {}, tagSorter);
}
@@ -305,7 +305,7 @@ fn list(ctx: *Context) Error!void {
 
const repos_compiled = try ctx.alloc.alloc(Template.Structs.Repolist, repos.items.len);
for (repos.items, repos_compiled) |*repo, *compiled| {
defer repo.raze(ctx.alloc);
defer repo.raze();
compiled.* = repoBlock(ctx.alloc, repo.repo_name orelse "unknown", repo.*) catch {
return error.Unknown;
};
@@ -361,7 +361,7 @@ fn treeBlob(ctx: *Context) Error!void {
const dir = cwd.openDir(filename, .{}) catch return error.Unknown;
var repo = Git.Repo.init(dir) catch return error.Unknown;
repo.loadData(ctx.alloc) catch return error.Unknown;
defer repo.raze(ctx.alloc);
defer repo.raze();
 
if (Repos.hasUpstream(ctx.alloc, repo) catch return error.Unknown) |up| {
var upstream = [_]Template.Context{
@@ -384,17 +384,17 @@ fn treeBlob(ctx: *Context) Error!void {
try ctx.putContext("OpenGraph", .{ .block = opengraph[0..] });
 
const cmt = repo.headCommit(ctx.alloc) catch return newRepo(ctx);
var files: Git.Tree = cmt.mkTree(ctx.alloc) catch return error.Unknown;
var files: Git.Tree = cmt.mkTree(ctx.alloc, &repo) catch return error.Unknown;
if (rd.verb) |blb| {
if (std.mem.eql(u8, blb, "blob")) {
return blob(ctx, &repo, files);
} else if (std.mem.eql(u8, blb, "tree")) {
files = mkTree(ctx.alloc, repo, &ctx.uri, files) catch return error.Unknown;
files = mkTree(ctx.alloc, &repo, &ctx.uri, files) catch return error.Unknown;
return tree(ctx, &repo, &files);
} else if (std.mem.eql(u8, blb, "")) { // There's a better way to do this
files = cmt.mkTree(ctx.alloc) catch return error.Unknown;
files = cmt.mkTree(ctx.alloc, &repo) catch return error.Unknown;
} else return error.InvalidURI;
} else files = cmt.mkTree(ctx.alloc) catch return error.Unknown;
} else files = cmt.mkTree(ctx.alloc, &repo) catch return error.Unknown;
return tree(ctx, &repo, &files);
}
 
@@ -485,7 +485,7 @@ fn blame(ctx: *Context) Error!void {
const fname = try aPrint(ctx.alloc, "./repos/{s}", .{rd.name});
const dir = cwd.openDir(fname, .{}) catch return error.Unknown;
var repo = Git.Repo.init(dir) catch return error.Unknown;
defer repo.raze(ctx.alloc);
defer repo.raze();
 
var actions = repo.getAgent(ctx.alloc);
actions.cwd = cwd.openDir(fname, .{}) catch unreachable;
@@ -591,33 +591,30 @@ fn blob(ctx: *Context, repo: *Git.Repo, pfiles: Git.Tree) Error!void {
 
var files = pfiles;
search: while (ctx.uri.next()) |bname| {
for (files.objects) |obj| {
for (files.blobs) |obj| {
if (std.mem.eql(u8, bname, obj.name)) {
blb = obj;
if (obj.isFile()) {
if (ctx.uri.next()) |_| return error.InvalidURI;
break :search;
}
files = Git.Tree.fromRepo(ctx.alloc, repo.*, &obj.hash) catch return error.Unknown;
const treeobj = repo.loadObject(ctx.alloc, obj.sha) catch return error.Unknown;
files = Git.Tree.initOwned(obj.sha, ctx.alloc, treeobj) catch return error.Unknown;
continue :search;
}
} else return error.InvalidURI;
}
 
var resolve = repo.blob(ctx.alloc, &blb.hash) catch return error.Unknown;
var reader = resolve.reader();
 
var resolve = repo.loadBlob(ctx.alloc, blb.sha) catch return error.Unknown;
if (!resolve.isFile()) return error.Unknown;
var formatted: []const u8 = undefined;
 
const d2 = reader.readAllAlloc(ctx.alloc, 0xffffff) catch unreachable;
 
if (Highlighting.Language.guessFromFilename(blb.name)) |lang| {
const pre = try Highlighting.highlight(ctx.alloc, lang, d2);
const pre = try Highlighting.highlight(ctx.alloc, lang, resolve.data.?);
formatted = pre[28..][0 .. pre.len - 38];
} else if (excludedExt(blb.name)) {
formatted = "This file type is currently unsupported";
} else {
formatted = Bleach.sanitizeAlloc(ctx.alloc, d2, .{}) catch return error.Unknown;
formatted = Bleach.sanitizeAlloc(ctx.alloc, resolve.data.?, .{}) catch return error.Unknown;
}
 
var dom = DOM.new(ctx.alloc);
@@ -651,11 +648,12 @@ fn blob(ctx: *Context, repo: *Git.Repo, pfiles: Git.Tree) Error!void {
try ctx.sendPage(&page);
}
 
fn mkTree(a: Allocator, repo: Git.Repo, uri: *UriIter, pfiles: Git.Tree) !Git.Tree {
fn mkTree(a: Allocator, repo: *const Git.Repo, uri: *UriIter, pfiles: Git.Tree) !Git.Tree {
var files: Git.Tree = pfiles;
if (uri.next()) |udir| for (files.objects) |obj| {
if (uri.next()) |udir| for (files.blobs) |obj| {
if (std.mem.eql(u8, udir, obj.name)) {
files = try Git.Tree.fromRepo(a, repo, &obj.hash);
const treeobj = try repo.loadObject(a, obj.sha);
files = try Git.Tree.initOwned(obj.sha, a, treeobj);
return try mkTree(a, repo, uri, files);
}
};
@@ -702,7 +700,7 @@ fn drawFileLine(
 
// I know... I KNOW!!!
dom = dom.open(HTML.div(null, null));
const commit_href = try aPrint(a, "/repo/{s}/commit/{s}", .{ rname, ch.sha[0..8] });
const commit_href = try aPrint(a, "/repo/{s}/commit/{s}", .{ rname, ch.sha.hex[0..8] });
dom.push(try HTML.aHrefAlloc(a, ch.commit_title, commit_href));
dom.dupe(HTML.span(try aPrint(a, "{}", .{Humanize.unix(ch.timestamp)}), null));
dom = dom.close();
@@ -738,7 +736,7 @@ const TreePage = Template.PageData("tree.html");
 
fn tree(ctx: *Context, repo: *Git.Repo, files: *Git.Tree) Error!void {
const head = if (repo.head) |h| switch (h) {
.sha => |s| s,
.sha => |s| s.hex[0..],
.branch => |b| b.name,
else => "unknown",
} else "unknown";
@@ -764,9 +762,9 @@ fn tree(ctx: *Context, repo: *Git.Repo, files: *Git.Tree) Error!void {
dom.push(HTML.span(c.title[0..@min(c.title.len, 50)], null));
const commit_time = try aPrint(ctx.alloc, " {}", .{Humanize.unix(c.committer.timestamp)});
dom = dom.open(HTML.span(null, &HTML.Attr.class("muted")));
const commit_href = try aPrint(ctx.alloc, "/repo/{s}/commit/{s}", .{ rd.name, c.sha[0..8] });
const commit_href = try aPrint(ctx.alloc, "/repo/{s}/commit/{s}", .{ rd.name, c.sha.hex[0..8] });
dom.push(HTML.text(commit_time));
dom.push(try HTML.aHrefAlloc(ctx.alloc, c.sha[0..8], commit_href));
dom.push(try HTML.aHrefAlloc(ctx.alloc, c.sha.hex[0..8], commit_href));
dom = dom.close();
dom = dom.close();
 
@@ -787,8 +785,8 @@ fn tree(ctx: *Context, repo: *Git.Repo, files: *Git.Tree) Error!void {
}
try files.pushPath(ctx.alloc, uri_base);
if (files.changedSet(ctx.alloc, repo)) |changed| {
std.sort.pdq(Git.Blob, files.objects, {}, typeSorter);
for (files.objects) |obj| {
std.sort.pdq(Git.Blob, files.blobs, {}, typeSorter);
for (files.blobs) |obj| {
for (changed) |ch| {
if (std.mem.eql(u8, ch.name, obj.name)) {
dom = try drawFileLine(ctx.alloc, dom, rd.name, uri_base, obj, ch);
@@ -808,12 +806,11 @@ fn tree(ctx: *Context, repo: *Git.Repo, files: *Git.Tree) Error!void {
const repo_data = dom.done();
 
var readme: ?[]const u8 = null;
for (files.objects) |obj| {
 
for (files.blobs) |obj| {
if (isReadme(obj.name)) {
var resolve = repo.blob(ctx.alloc, &obj.hash) catch return error.Unknown;
var reader = resolve.reader();
const readme_txt = reader.readAllAlloc(ctx.alloc, 0xffffff) catch unreachable;
const readme_html = htmlReadme(ctx.alloc, readme_txt) catch unreachable;
const resolve = repo.blob(ctx.alloc, obj.sha) catch return error.Unknown;
const readme_html = htmlReadme(ctx.alloc, resolve.data.?) catch unreachable;
readme = try std.fmt.allocPrint(ctx.alloc, "{pretty}", .{readme_html[0]});
break;
}
@@ -843,9 +840,8 @@ fn tagsList(ctx: *Context) Error!void {
const dir = cwd.openDir(filename, .{}) catch return error.Unknown;
var repo = Git.Repo.init(dir) catch return error.Unknown;
repo.loadData(ctx.alloc) catch return error.Unknown;
defer repo.raze(ctx.alloc);
defer repo.raze();
 
repo.loadTags(ctx.alloc) catch unreachable;
std.sort.heap(Git.Tag, repo.tags.?, {}, tagSorter);
 
const tstack = try ctx.alloc.alloc(Template.Structs.Tags, repo.tags.?.len);
 
src/endpoints/repos/commits.zig added: 585, removed: 584, total 1
@@ -74,11 +74,11 @@ fn commitHtml(ctx: *Context, sha: []const u8, repo_name: []const u8, repo: Git.R
return error.Abusive;
}
 
const current: Git.Commit = repo.commit(ctx.alloc, sha) catch cmt: {
const current: Git.Commit = repo.commit(ctx.alloc, Git.SHA.init(sha)) catch cmt: {
// TODO return 404
var fallback: Git.Commit = repo.headCommit(ctx.alloc) catch return error.Unknown;
while (!std.mem.startsWith(u8, fallback.sha, sha)) {
fallback = fallback.toParent(ctx.alloc, 0) catch return error.Unknown;
while (!std.mem.startsWith(u8, fallback.sha.hex[0..], sha)) {
fallback = fallback.toParent(ctx.alloc, 0, &repo) catch return error.Unknown;
}
break :cmt fallback;
};
@@ -165,8 +165,8 @@ pub fn commitPatch(ctx: *Context, sha: []const u8, repo: Git.Repo) Error!void {
var current: Git.Commit = repo.headCommit(ctx.alloc) catch return error.Unknown;
var acts = repo.getAgent(ctx.alloc);
if (std.mem.indexOf(u8, sha, ".patch")) |tail| {
while (!std.mem.startsWith(u8, current.sha, sha[0..tail])) {
current = current.toParent(ctx.alloc, 0) catch return error.Unknown;
while (!std.mem.startsWith(u8, current.sha.hex[0..], sha[0..tail])) {
current = current.toParent(ctx.alloc, 0, &repo) catch return error.Unknown;
}
 
var diff = acts.show(sha[0..tail]) catch return error.Unknown;
@@ -193,7 +193,7 @@ pub fn viewCommit(ctx: *Context) Error!void {
const dir = cwd.openDir(filename, .{}) catch return error.Unknown;
var repo = Git.Repo.init(dir) catch return error.Unknown;
repo.loadData(ctx.alloc) catch return error.Unknown;
defer repo.raze(ctx.alloc);
defer repo.raze();
 
if (std.mem.endsWith(u8, sha, ".patch"))
return commitPatch(ctx, sha, repo)
@@ -213,8 +213,8 @@ pub fn commitCtxParents(a: Allocator, c: Git.Commit, repo: []const u8) ![]Templa
// TODO leaks on err
if (par_cmt == null) continue;
par.* = .{
.parent_uri = try allocPrint(a, "/repo/{s}/commit/{s}", .{ repo, par_cmt.?[0..8] }),
.parent_sha_short = try a.dupe(u8, par_cmt.?[0..8]),
.parent_uri = try allocPrint(a, "/repo/{s}/commit/{s}", .{ repo, par_cmt.?.hex[0..8] }),
.parent_sha_short = try a.dupe(u8, par_cmt.?.hex[0..8]),
};
}
 
@@ -225,8 +225,8 @@ pub fn commitCtx(a: Allocator, c: Git.Commit, repo: []const u8) !Template.Struct
return .{
.author = Bleach.sanitizeAlloc(a, c.author.name, .{}) catch unreachable,
.parents = try commitCtxParents(a, c, repo),
.sha_uri = try allocPrint(a, "/repo/{s}/commit/{s}", .{ repo, c.sha[0..8] }),
.sha_short = try a.dupe(u8, c.sha[0..8]),
.sha_uri = try allocPrint(a, "/repo/{s}/commit/{s}", .{ repo, c.sha.hex[0..8] }),
.sha_short = try a.dupe(u8, c.sha.hex[0..8]),
//.sha = try a.dupe(u8, c.sha),
.title = Bleach.sanitizeAlloc(a, c.title, .{}) catch unreachable,
.body = Bleach.sanitizeAlloc(a, c.body, .{}) catch unreachable,
@@ -279,8 +279,8 @@ pub fn htmlCommit(a: Allocator, c: Git.Commit, repo: []const u8, comptime top: b
fn commitContext(a: Allocator, c: Git.Commit, repo: []const u8, comptime _: bool) !Template.Context {
var ctx = Template.Context.init(a);
 
try ctx.putSlice("Sha", c.sha[0..8]);
try ctx.putSlice("Uri", try std.fmt.allocPrint(a, "/repo/{s}/commit/{s}", .{ repo, c.sha[0..8] }));
try ctx.putSlice("Sha", c.sha.hex[0..8]);
try ctx.putSlice("Uri", try std.fmt.allocPrint(a, "/repo/{s}/commit/{s}", .{ repo, c.sha.hex[0..8] }));
// TODO handle error.NotImplemented
try ctx.putSlice("Msg_title", Bleach.sanitizeAlloc(a, c.title, .{}) catch unreachable);
try ctx.putSlice("Msg", Bleach.sanitizeAlloc(a, c.body, .{}) catch unreachable);
@@ -293,8 +293,12 @@ fn commitContext(a: Allocator, c: Git.Commit, repo: []const u8, comptime _: bool
var pctx = Template.Context.init(a);
if (par_cmt == null) continue;
 
try pctx.putSlice("Parent", par_cmt.?[0..8]);
try pctx.putSlice("Parent_uri", try std.fmt.allocPrint(a, "/repo/{s}/commit/{s}", .{ repo, par_cmt.?[0..8] }));
try pctx.putSlice("Parent", par_cmt.?.hex[0..8]);
try pctx.putSlice("Parent_uri", try std.fmt.allocPrint(
a,
"/repo/{s}/commit/{s}",
.{ repo, par_cmt.?.hex[0..8] },
));
par.* = pctx;
}
try ctx.putBlock("Parents", parents);
@@ -307,9 +311,9 @@ fn buildList(
name: []const u8,
before: ?[]const u8,
elms: []Template.Context,
sha: []u8,
outsha: *Git.SHA,
) ![]Template.Context {
return buildListBetween(a, repo, name, null, before, elms, sha);
return buildListBetween(a, repo, name, null, before, elms, outsha);
}
 
fn buildListBetween(
@@ -319,19 +323,19 @@ fn buildListBetween(
left: ?[]const u8,
right: ?[]const u8,
elms: []Template.Context,
sha: []u8,
outsha: *Git.SHA,
) ![]Template.Context {
var current: Git.Commit = repo.headCommit(a) catch return error.Unknown;
if (right) |r| {
std.debug.assert(r.len <= 40);
const min = @min(r.len, current.sha.len);
while (!std.mem.eql(u8, r, current.sha[0..min])) {
current = current.toParent(a, 0) catch {
const min = @min(r.len, current.sha.hex.len);
while (!std.mem.eql(u8, r, current.sha.hex[0..min])) {
current = current.toParent(a, 0, &repo) catch {
std.debug.print("unable to build commit history\n", .{});
return elms[0..0];
};
}
current = current.toParent(a, 0) catch {
current = current.toParent(a, 0, &repo) catch {
std.debug.print("unable to build commit history\n", .{});
return elms[0..0];
};
@@ -339,13 +343,13 @@ fn buildListBetween(
var count: usize = 0;
for (elms, 1..) |*c, i| {
count = i;
@memcpy(sha, current.sha[0..8]);
outsha.* = Git.SHA.init(current.sha.bin[0..]);
c.* = try commitContext(a, current, name, false);
if (left) |l| {
const min = @min(l.len, current.sha.len);
if (std.mem.eql(u8, l, current.sha[0..min])) break;
const min = @min(l.len, current.sha.hex.len);
if (std.mem.eql(u8, l, current.sha.hex[0..min])) break;
}
current = current.toParent(a, 0) catch {
current = current.toParent(a, 0, &repo) catch {
break;
};
}
@@ -387,13 +391,16 @@ pub fn commits(ctx: *Context) Error!void {
const dir = cwd.openDir(filename, .{}) catch return error.Unknown;
var repo = Git.Repo.init(dir) catch return error.Unknown;
repo.loadData(ctx.alloc) catch return error.Unknown;
defer repo.raze(ctx.alloc);
defer repo.raze();
 
const commits_b = try ctx.alloc.alloc(Template.Context, 50);
var last_sha: [8]u8 = undefined;
var last_sha: Git.SHA = undefined;
const cmts_list = try buildList(ctx.alloc, repo, rd.name, commitish, commits_b, &last_sha);
 
const before_txt = try std.fmt.allocPrint(ctx.alloc, "/repo/{s}/commits/before/{s}", .{ rd.name, last_sha });
const before_txt = try std.fmt.allocPrint(ctx.alloc, "/repo/{s}/commits/before/{s}", .{
rd.name,
last_sha.hex[0..8],
});
return sendCommits(ctx, cmts_list, before_txt);
}
 
 
src/git.zig added: 585, removed: 584, total 1
@@ -7,6 +7,8 @@ const indexOf = std.mem.indexOf;
const zlib = std.compress.zlib;
const hexLower = std.fmt.fmtSliceHexLower;
const bufPrint = std.fmt.bufPrint;
const parseInt = std.fmt.parseInt;
const allocPrint = std.fmt.allocPrint;
const AnyReader = std.io.AnyReader;
 
pub const Actor = @import("git/actor.zig");
@@ -21,10 +23,14 @@ pub const Error = error{
NotAGitRepo,
RefMissing,
CommitMissing,
InvalidCommit,
BlobMissing,
TreeMissing,
InvalidTree,
ObjectMissing,
IncompleteObject,
OutOfMemory,
NoSpaceLeft,
NotImplemented,
EndOfStream,
PackCorrupt,
@@ -32,82 +38,59 @@ pub const Error = error{
AmbiguousRef,
};
 
const Types = enum {
commit,
blob,
tree,
/// A git object id held in both representations at once: 20 raw bytes
/// (`bin`) and 40 lowercase hex characters (`hex`).
pub const SHA = struct {
    pub const Bin = [20]u8;
    pub const Hex = [40]u8;
    bin: Bin,
    hex: Hex,
    /// True when this SHA was constructed from an abbreviated prefix and
    /// only a leading portion of `bin`/`hex` is significant.
    partial: bool = false,

    /// Build a full SHA from either a 20-byte binary id or a 40-char hex id.
    /// Any other length is a caller bug (trapped by `unreachable`).
    pub fn init(sha: []const u8) SHA {
        if (sha.len == 20) {
            return .{
                .bin = sha[0..20].*,
                .hex = toHex(sha[0..20].*),
            };
        } else if (sha.len == 40) {
            return .{
                .bin = toBin(sha[0..40].*),
                .hex = sha[0..40].*,
            };
        } else unreachable;
    }

    /// Build a partial SHA from an abbreviated hex prefix (non-empty, even
    /// length, at most 40 chars). The unknown tail of `bin`/`hex` is
    /// zero-filled and `partial` is set so lookups treat only the prefix as
    /// significant.
    /// NOTE(review): the previous body was empty, which cannot compile for a
    /// `!SHA` return type; zero-padding semantics assumed — confirm against
    /// `Pack.containsPrefix` before relying on the tail bytes.
    pub fn initPartial(sha: []const u8) !SHA {
        if (sha.len == 0 or sha.len > 40 or sha.len % 2 != 0) return error.InvalidSha;
        var bin: Bin = [_]u8{0} ** 20;
        var hex: Hex = [_]u8{'0'} ** 40;
        for (0..sha.len / 2) |i| {
            bin[i] = parseInt(u8, sha[i * 2 ..][0..2], 16) catch return error.InvalidSha;
        }
        @memcpy(hex[0..sha.len], sha);
        return .{ .bin = bin, .hex = hex, .partial = true };
    }

    /// Lowercase-hex encode a 20-byte binary SHA.
    pub fn toHex(sha: Bin) Hex {
        var hex: Hex = undefined;
        _ = bufPrint(&hex, "{}", .{hexLower(sha[0..])}) catch unreachable;
        return hex;
    }

    /// Decode 40 hex chars into 20 raw bytes; panics on non-hex input.
    pub fn toBin(sha: Hex) Bin {
        var bin: Bin = undefined;
        for (0..20) |i| {
            bin[i] = parseInt(u8, sha[i * 2 .. (i + 1) * 2], 16) catch unreachable;
        }
        return bin;
    }
};
 
pub const SHA = []const u8; // SUPERBAD, I'm sorry!
 
/// Render a 20-byte binary SHA into `hex` as 40 lowercase hex characters.
/// Both buffer lengths are asserted; output is written in place.
pub fn shaToHex(sha: []const u8, hex: []u8) void {
    std.debug.assert(sha.len == 20);
    std.debug.assert(hex.len == 40);
    const digits = "0123456789abcdef";
    for (sha, 0..) |byte, i| {
        hex[i * 2] = digits[byte >> 4];
        hex[i * 2 + 1] = digits[byte & 0x0f];
    }
}
 
/// Decode a 40-char hex SHA in `sha` into the 20 raw bytes of `bin`.
/// Non-hex input panics (`catch unreachable`), matching the original contract.
pub fn shaToBin(sha: []const u8, bin: []u8) void {
    std.debug.assert(sha.len == 40);
    std.debug.assert(bin.len == 20);
    var i: usize = 0;
    while (i < 20) : (i += 1) {
        bin[i] = std.fmt.parseInt(u8, sha[i * 2 ..][0..2], 16) catch unreachable;
    }
}
 
const Object = struct {
ctx: std.io.FixedBufferStream([]u8),
kind: ?Kind = null,
 
pub const Object = struct {
pub const Kind = enum {
blob,
tree,
commit,
ref,
tag,
};
 
pub const thing = union(Kind) {
blob: Blob,
tree: Tree,
commit: Commit,
ref: Ref,
};
 
const FBS = std.io.fixedBufferStream;
 
pub fn init(data: []u8) Object {
return Object{ .ctx = FBS(data) };
}
 
pub const ReadError = error{
Unknown,
};
 
pub const Reader = std.io.Reader(*Object, ReadError, read);
 
fn read(self: *Object, dest: []u8) ReadError!usize {
return self.ctx.read(dest) catch return ReadError.Unknown;
}
 
pub fn reader(self: *Object) Object.Reader {
return .{ .context = self };
}
 
pub fn reset(self: *Object) void {
self.ctx.pos = 0;
}
 
pub fn raze(self: Object, a: Allocator) void {
a.free(self.ctx.buffer);
}
kind: Kind,
memory: []u8,
header: []u8,
body: []u8,
};
 
// TODO AnyReader
pub const Reader = Object.Reader;
pub const FBSReader = std.io.FixedBufferStream([]u8).Reader;
const FsReader = std.fs.File.Reader;
 
pub const Repo = struct {
alloc: ?Allocator = null,
bare: bool,
dir: std.fs.Dir,
packs: []Pack,
@@ -162,109 +145,146 @@ pub const Repo = struct {
}
 
pub fn loadData(self: *Repo, a: Allocator) !void {
if (self.packs.len == 0) try self.loadPacks(a);
try self.loadRefs(a);
if (self.alloc != null) unreachable;
self.alloc = a;
 
try self.loadPacks();
try self.loadRefs();
try self.loadTags();
_ = try self.HEAD(a);
}
 
const empty_sha = [_]u8{0} ** 20;
 
fn loadFileObj(self: Repo, in_sha: SHA) !std.fs.File {
var sha: [40]u8 = undefined;
if (in_sha.len == 20) {
shaToHex(in_sha, &sha);
} else if (in_sha.len > 40) {
unreachable;
} else {
@memcpy(&sha, in_sha);
}
fn loadFile(self: Repo, a: Allocator, sha: SHA) !Object {
var fb = [_]u8{0} ** 2048;
var filename = try std.fmt.bufPrint(&fb, "./objects/{s}/{s}", .{ sha[0..2], sha[2..] });
return self.dir.openFile(filename, .{}) catch {
filename = try std.fmt.bufPrint(&fb, "./objects/{s}", .{sha});
return self.dir.openFile(filename, .{}) catch |err| switch (err) {
error.FileNotFound => {
std.debug.print("unable to find commit '{s}'\n", .{sha});
return err;
},
else => return err,
};
const grouped = try bufPrint(&fb, "./objects/{s}/{s}", .{ sha.hex[0..2], sha.hex[2..] });
const compressed: []u8 = self.dir.readFileAlloc(a, grouped, 0xffffff) catch |err| switch (err) {
error.FileNotFound => data: {
const exact = try bufPrint(&fb, "./objects/{s}", .{sha.hex[0..]});
break :data self.dir.readFileAlloc(a, exact, 0xffffff) catch |err2| switch (err2) {
error.FileNotFound => {
std.debug.print("unable to find commit '{s}'\n", .{sha.hex[0..]});
return error.ObjectMissing;
},
else => return err2,
};
},
else => return err,
};
defer a.free(compressed);
var fbs = std.io.fixedBufferStream(compressed);
const fbsr = fbs.reader();
var decom = zlib.decompressor(fbsr);
const decomr = decom.reader();
const data = try decomr.readAllAlloc(a, 0xffffff);
errdefer a.free(data);
if (indexOf(u8, data, "\x00")) |i| {
return .{
.memory = data,
.header = data[0..i],
.body = data[i + 1 ..],
.kind = if (startsWith(u8, data, "blob "))
.blob
else if (startsWith(u8, data, "tree "))
.tree
else if (startsWith(u8, data, "commit "))
.commit
else if (startsWith(u8, data, "tag "))
.tag
else
return error.InvalidObject,
};
} else return error.InvalidObject;
}
 
fn findBlobPack(self: Repo, a: Allocator, sha: SHA) !?[]u8 {
fn loadPacked(self: Repo, a: Allocator, sha: SHA) !?Object {
for (self.packs) |pack| {
if (pack.contains(sha)) |offset| {
return try pack.loadObj(a, offset, self);
return try pack.resolveObject(a, offset, &self);
}
}
return null;
}
 
fn findBlobPackPartial(self: Repo, a: Allocator, sha: SHA) !?[]u8 {
fn loadPackedPartial(self: Repo, a: Allocator, sha: SHA) !?Object {
std.debug.assert(sha.partial == true);
for (self.packs) |pack| {
if (try pack.containsPrefix(sha)) |offset| {
return try pack.loadObj(a, offset, self);
if (try pack.containsPrefix(sha.bin[0..])) |offset| {
return try pack.resolveObject(a, offset, &self);
}
}
return null;
}
 
fn findBlobFile(self: Repo, a: Allocator, sha: SHA) !?[]u8 {
if (self.loadFileObj(sha)) |fd| {
defer fd.close();
/// Resolve an abbreviated SHA by searching the pack files only; loose
/// objects are not consulted. Errors with ObjectMissing when no pack
/// contains a matching prefix.
/// NOTE(review): the return type is `Pack.PackedObject` but the forwarded
/// value comes from `loadPackedPartial`, which yields an `Object` — confirm
/// these two types agree (or that this was meant to call `loadPartial` on
/// the pack layer).
fn loadPartial(self: Repo, a: Allocator, sha: SHA) !Pack.PackedObject {
    if (try self.loadPackedPartial(a, sha)) |pack| return pack;
    return error.ObjectMissing;
}
 
var decom = zlib.decompressor(fd.reader());
var reader = decom.reader();
return try reader.readAllAlloc(a, 0xffff);
} else |_| {}
/// Resolve an abbreviated SHA to an object from the pack files.
/// Returns null when no pack contains a matching prefix; `sha.partial`
/// must already be set by the caller.
fn loadObjPartial(self: Repo, a: Allocator, sha: SHA) !?Object {
    std.debug.assert(sha.partial);
    return try self.loadPackedPartial(a, sha);
}
 
fn findBlobPartial(self: Repo, a: Allocator, sha: SHA) ![]u8 {
if (try self.findBlobPackPartial(a, sha)) |pack| return pack;
//if (try self.findBlobFile(a, sha)) |file| return file;
return error.ObjectMissing;
}
 
pub fn findBlob(self: Repo, a: Allocator, sha: SHA) ![]u8 {
std.debug.assert(sha.len == 20);
if (try self.findBlobPack(a, sha)) |pack| return pack;
if (try self.findBlobFile(a, sha)) |file| return file;
 
return error.ObjectMissing;
}
 
fn findObjPartial(self: Repo, a: Allocator, sha: SHA) !Object {
std.debug.assert(sha.len % 2 == 0);
std.debug.assert(sha.len <= 40);
 
var shabuffer: [20]u8 = undefined;
 
for (shabuffer[0 .. sha.len / 2], 0..sha.len / 2) |*s, i| {
s.* = try std.fmt.parseInt(u8, sha[i * 2 ..][0..2], 16);
pub fn loadObjectOrDelta(self: Repo, a: Allocator, sha: SHA) !union(enum) {
pack: Pack.PackedObject,
file: Object,
} {
for (self.packs) |pack| {
if (pack.contains(sha)) |offset| {
return .{ .pack = try pack.loadData(a, offset, &self) };
}
}
const shabin = shabuffer[0 .. sha.len / 2];
if (try self.findBlobPackPartial(a, shabin)) |pack| return Object.init(pack);
//if (try self.findBlobFile(a, shabin)) |file| return Object.init(file);
return error.ObjectMissing;
return .{ .file = try self.loadFile(a, sha) };
}
 
/// TODO binary search lol
pub fn findObj(self: Repo, a: Allocator, in_sha: SHA) !Object {
var shabin: [20]u8 = in_sha[0..20].*;
if (in_sha.len == 40) {
for (&shabin, 0..) |*s, i| {
s.* = try std.fmt.parseInt(u8, in_sha[i * 2 .. (i + 1) * 2], 16);
}
}
 
if (try self.findBlobPack(a, &shabin)) |pack| return Object.init(pack);
if (try self.findBlobFile(a, &shabin)) |file| return Object.init(file);
return error.ObjectMissing;
/// Load an object by SHA, preferring the pack files and falling back to a
/// loose object file when no pack contains it.
pub fn loadObject(self: Repo, a: Allocator, sha: SHA) !Object {
    return (try self.loadPacked(a, sha)) orelse try self.loadFile(a, sha);
}
 
pub fn loadPacks(self: *Repo, a: Allocator) !void {
/// Load the object at `sha` and wrap it as a Blob, handing the object's
/// decompressed backing memory to the Blob.
/// The caller must know the SHA names a blob: any other object kind trips
/// `unreachable` (UB in release-fast builds).
/// NOTE(review): `mode` and `name` are left `undefined` — they are tree-entry
/// attributes not recoverable from the object alone; callers must not read
/// them on a Blob obtained this way.
pub fn loadBlob(self: Repo, a: Allocator, sha: SHA) !Blob {
    const obj = try self.loadObject(a, sha);
    switch (obj.kind) {
        .blob => {
            return Blob{
                .memory = obj.memory,
                .sha = sha,
                .mode = undefined,
                .name = undefined,
                .data = obj.body,
            };
        },
        .tree, .commit, .tag => unreachable,
    }
}
/// Load the object at `sha` and wrap it as a Tree. The caller must know the
/// SHA names a tree; any other object kind is a caller bug (`unreachable`).
pub fn loadTree(self: Repo, a: Allocator, sha: SHA) !Tree {
    const obj = try self.loadObject(a, sha);
    if (obj.kind != .tree) unreachable;
    return try Tree.initOwned(sha, a, obj);
}
/// Load the object at `sha` and wrap it as a Commit. The caller must know
/// the SHA names a commit; any other object kind is a caller bug.
pub fn loadCommit(self: Repo, a: Allocator, sha: SHA) !Commit {
    const obj = try self.loadObject(a, sha);
    return switch (obj.kind) {
        .commit => try Commit.initOwned(sha, a, obj),
        else => unreachable,
    };
}
 
/// Load the object at `sha` and parse it as an annotated Tag. The caller
/// must know the SHA names a tag; any other object kind is a caller bug.
fn loadTag(self: *Repo, a: Allocator, sha: SHA) !Tag {
    const obj = try self.loadObject(a, sha);
    if (obj.kind != .tag) unreachable;
    return try Tag.fromSlice(sha, obj.body);
}
 
pub fn loadPacks(self: *Repo) !void {
const a = self.alloc orelse unreachable;
var dir = try self.dir.openDir("./objects/pack", .{ .iterate = true });
defer dir.close();
var itr = dir.iterate();
@@ -279,14 +299,13 @@ pub const Repo = struct {
while (try itr.next()) |file| {
if (!std.mem.eql(u8, file.name[file.name.len - 4 ..], ".idx")) continue;
 
self.packs[i] = try Pack.init(dir, try a.dupe(u8, file.name[0 .. file.name.len - 4]));
self.packs[i] = try Pack.init(dir, file.name[0 .. file.name.len - 4]);
i += 1;
}
}
 
pub fn deref() Object {}
 
pub fn loadRefs(self: *Repo, a: Allocator) !void {
pub fn loadRefs(self: *Repo) !void {
const a = self.alloc orelse unreachable;
var list = std.ArrayList(Ref).init(a);
var idir = try self.dir.openDir("refs/heads", .{ .iterate = true });
defer idir.close();
@@ -303,7 +322,7 @@ pub const Repo = struct {
std.debug.assert(read == 40);
try list.append(Ref{ .branch = .{
.name = try a.dupe(u8, file.name),
.sha = try a.dupe(u8, &buf),
.sha = SHA.init(&buf),
.repo = self,
} });
}
@@ -317,7 +336,7 @@ pub const Repo = struct {
if (std.mem.indexOf(u8, line, "refs/heads")) |_| {
try list.append(Ref{ .branch = .{
.name = try a.dupe(u8, line[52..]),
.sha = try a.dupe(u8, line[0..40]),
.sha = SHA.init(line[0..40]),
.repo = self,
} });
}
@@ -365,14 +384,14 @@ pub const Repo = struct {
if (std.mem.eql(u8, head[0..5], "ref: ")) {
self.head = Ref{
.branch = Branch{
.sha = self.ref(head[16 .. head.len - 1]) catch &[_]u8{0} ** 20,
.sha = self.ref(head[16 .. head.len - 1]) catch SHA.init(&[_]u8{0} ** 20),
.name = try a.dupe(u8, head[5 .. head.len - 1]),
.repo = self,
},
};
} else if (head.len == 41 and head[40] == '\n') {
self.head = Ref{
.sha = try a.dupe(u8, head[0..40]), // We don't want that \n char
.sha = SHA.init(head[0..40]), // We don't want that \n char
};
} else {
std.debug.print("unexpected HEAD {s}\n", .{head});
@@ -381,18 +400,8 @@ pub const Repo = struct {
return self.head.?;
}
 
fn loadTag(self: *Repo, a: Allocator, lsha: SHA) !Tag {
var sha: [20]u8 = lsha[0..20].*;
if (lsha.len == 40) {
for (&sha, 0..) |*s, i| {
s.* = try std.fmt.parseInt(u8, lsha[i * 2 .. (i + 1) * 2], 16);
}
} else if (lsha.len != 20) return error.InvalidSha;
const tag_blob = try self.findBlob(a, sha[0..]);
return try Tag.fromSlice(lsha, tag_blob);
}
 
pub fn loadTags(self: *Repo, a: Allocator) !void {
fn loadTags(self: *Repo) !void {
const a = self.alloc orelse unreachable;
var tagdir = try self.dir.openDir("refs/tags", .{ .iterate = true });
const pk_refs: ?[]const u8 = self.dir.readFileAlloc(a, "packed-refs", 0xffff) catch |err| switch (err) {
error.FileNotFound => null,
@@ -420,7 +429,7 @@ pub const Repo = struct {
var lines = splitScalar(u8, pkrefs, '\n');
while (lines.next()) |line| {
if (indexOf(u8, line, "refs/tags/") != null) {
self.tags.?[index] = try self.loadTag(a, line[0..40]);
self.tags.?[index] = try self.loadTag(a, SHA.init(line[0..40]));
index += 1;
}
}
@@ -440,7 +449,7 @@ pub const Repo = struct {
std.debug.print("unexpected tag format for {s}\n", .{fname});
return error.InvalidTagFound;
}
self.tags.?[index] = try self.loadTag(a, contents[0..40]);
self.tags.?[index] = try self.loadTag(a, SHA.init(contents[0..40]));
index += 1;
}
if (index != self.tags.?.len) return error.UnexpectedError;
@@ -450,20 +459,17 @@ pub const Repo = struct {
return error.NotImplemented;
}
 
pub fn commit(self: *const Repo, a: Allocator, request: SHA) !Commit {
const target = request;
var obj = if (request.len == 40)
try self.findObj(a, target)
pub fn commit(self: *const Repo, a: Allocator, sha: SHA) !Commit {
const obj = if (sha.partial)
try self.loadObjPartial(a, sha)
else
try self.findObjPartial(a, target);
defer obj.raze(a);
var cmt = try Commit.fromReader(a, target, obj.reader());
cmt.repo = self;
return cmt;
try self.loadObject(a, sha);
if (obj == null) return error.CommitMissing;
return try Commit.initOwned(sha, a, obj.?);
}
 
pub fn headCommit(self: *const Repo, a: Allocator) !Commit {
const resolv = switch (self.head.?) {
const resolv: SHA = switch (self.head.?) {
.sha => |s| s,
.branch => |b| try self.ref(b.name["refs/heads/".len..]),
.tag => return error.CommitMissing,
@@ -472,13 +478,8 @@ pub const Repo = struct {
return try self.commit(a, resolv);
}
 
pub fn blob(self: Repo, a: Allocator, sha: SHA) !Object {
var obj = try self.findObj(a, sha);
 
if (std.mem.indexOf(u8, obj.ctx.buffer, "\x00")) |i| {
return Object.init(obj.ctx.buffer[i + 1 ..]);
}
return obj;
pub fn blob(self: Repo, a: Allocator, sha: SHA) !Blob {
return try self.loadBlob(a, sha);
}
 
pub fn description(self: Repo, a: Allocator) ![]u8 {
@@ -489,27 +490,27 @@ pub const Repo = struct {
return error.NoDescription;
}
 
pub fn raze(self: *Repo, a: Allocator) void {
self.dir.close();
for (self.packs) |pack| {
pack.raze(a);
}
a.free(self.packs);
for (self.refs) |r| switch (r) {
.branch => |b| {
a.free(b.name);
a.free(b.sha);
},
else => unreachable,
};
a.free(self.refs);
 
if (self.current) |c| a.free(c);
if (self.head) |h| switch (h) {
.branch => |b| a.free(b.name),
else => {}, //a.free(h);
};
pub fn raze(self: *Repo) void {
if (self.alloc) |a| {
self.dir.close();
for (self.packs) |pack| {
pack.raze();
}
a.free(self.packs);
for (self.refs) |r| switch (r) {
.branch => |b| {
a.free(b.name);
},
else => unreachable,
};
a.free(self.refs);
 
if (self.current) |c| a.free(c);
if (self.head) |h| switch (h) {
.branch => |b| a.free(b.name),
else => {}, //a.free(h);
};
} else unreachable;
// TODO self.tags leaks, badly
}
 
@@ -547,11 +548,8 @@ pub const Branch = struct {
 
pub fn toCommit(self: Branch, a: Allocator) !Commit {
const repo = self.repo orelse return error.NoConnectedRepo;
var obj = try repo.findObj(a, self.sha);
defer obj.raze(a);
var cmt = try Commit.fromReader(a, self.sha, obj.reader());
cmt.repo = repo;
return cmt;
const obj = try repo.loadObject(a, self.sha);
return Commit.initOwned(self.sha, a, obj);
}
};
 
@@ -602,7 +600,7 @@ pub const Tag = struct {
return .{
.name = "[lightweight tag]",
.sha = sha,
.object = sha,
.object = sha.hex[0..],
.type = .lightweight,
.tagger = actor orelse unreachable,
.message = "",
@@ -649,13 +647,6 @@ pub const Tag = struct {
};
}
 
/// LOL, don't use this
fn fromReader(sha: SHA, reader: AnyReader) !Tag {
var buffer: [0xFFFF]u8 = undefined;
const len = try reader.readAll(&buffer);
return try fromSlice(sha, buffer[0..len]);
}
 
test fromSlice {
const blob =
\\object 73751d1c0e9eaeaafbf38a938afd652d98ee9772
@@ -683,7 +674,7 @@ pub const Tag = struct {
\\
;
const t_msg = "Yet another bugfix release for 0.7.0, especially for Samsung phones.\n";
const t = try fromSlice("c66fba80f3351a94432a662b1ecc55a21898f830", blob);
const t = try fromSlice(SHA.init("c66fba80f3351a94432a662b1ecc55a21898f830"), blob);
try std.testing.expectEqualStrings("v0.7.3", t.name);
try std.testing.expectEqualStrings("73751d1c0e9eaeaafbf38a938afd652d98ee9772", t.object);
try std.testing.expectEqual(TagType.commit, t.type);
@@ -737,44 +728,43 @@ pub fn commitishRepo(rev: []const u8, repo: Repo) bool {
}
 
pub const ChangeSet = struct {
alloc: Allocator,
name: []const u8,
sha: []const u8,
sha: SHA,
// Index into commit slice
commit_title: []const u8,
commit: []const u8,
timestamp: i64,
 
pub fn init(a: Allocator, name: []const u8, sha: []const u8, msg: []const u8, ts: i64) !ChangeSet {
pub fn init(a: Allocator, name: []const u8, sha: SHA, msg: []const u8, ts: i64) !ChangeSet {
const commit = try a.dupe(u8, msg);
return ChangeSet{
.alloc = a,
.name = try a.dupe(u8, name),
.sha = try a.dupe(u8, sha),
.sha = sha,
.commit = commit,
.commit_title = if (std.mem.indexOf(u8, commit, "\n\n")) |i| commit[0..i] else commit,
.timestamp = ts,
};
}
 
pub fn raze(self: ChangeSet, a: Allocator) void {
a.free(self.name);
a.free(self.sha);
a.free(self.commit);
pub fn raze(self: ChangeSet) void {
self.alloc.free(self.name);
self.alloc.free(self.commit);
}
};
 
test "hex tranlations" {
var hexbuf: [40]u8 = undefined;
var binbuf: [20]u8 = undefined;
 
const one = "370303630b3fc631a0cb3942860fb6f77446e9c1";
shaToBin(one, &binbuf);
shaToHex(&binbuf, &hexbuf);
var binbuf: [20]u8 = SHA.toBin(one.*);
var hexbuf: [40]u8 = SHA.toHex(binbuf);
 
try std.testing.expectEqualStrings(&binbuf, "\x37\x03\x03\x63\x0b\x3f\xc6\x31\xa0\xcb\x39\x42\x86\x0f\xb6\xf7\x74\x46\xe9\xc1");
try std.testing.expectEqualStrings(&hexbuf, one);
 
const two = "0000000000000000000000000000000000000000";
shaToBin(two, &binbuf);
shaToHex(&binbuf, &hexbuf);
binbuf = SHA.toBin(two.*);
hexbuf = SHA.toHex(binbuf);
 
try std.testing.expectEqualStrings(&binbuf, "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00");
try std.testing.expectEqualStrings(&hexbuf, two);
@@ -788,10 +778,10 @@ test "read" {
var d = zlib.decompressor(file.reader());
const count = try d.read(&b);
//std.debug.print("{s}\n", .{b[0..count]});
const commit = try Commit.make("370303630b3fc631a0cb3942860fb6f77446e9c1", b[0..count], null);
const commit = try Commit.init(SHA.init("370303630b3fc631a0cb3942860fb6f77446e9c1"), b[11 .. count - 11]);
//std.debug.print("{}\n", .{commit});
try std.testing.expectEqualStrings("fcb6817b0efc397f1525ff7ee375e08703ed17a9", commit.tree);
try std.testing.expectEqualStrings("370303630b3fc631a0cb3942860fb6f77446e9c1", commit.sha);
try std.testing.expectEqualStrings("fcb6817b0efc397f1525ff7ee375e08703ed17a9", commit.tree.hex[0..]);
try std.testing.expectEqualStrings("370303630b3fc631a0cb3942860fb6f77446e9c1", commit.sha.hex[0..]);
}
 
test "file" {
@@ -801,20 +791,20 @@ test "file" {
var file = try cwd.openFile("./.git/objects/37/0303630b3fc631a0cb3942860fb6f77446e9c1", .{});
var d = zlib.decompressor(file.reader());
const dz = try d.reader().readAllAlloc(a, 0xffff);
var buffer = Object.init(dz);
defer buffer.raze(a);
const commit = try Commit.fromReader(a, "370303630b3fc631a0cb3942860fb6f77446e9c1", buffer.reader());
defer commit.raze();
defer a.free(dz);
const blob = dz[(indexOf(u8, dz, "\x00") orelse unreachable) + 1 ..];
var commit = try Commit.init(SHA.init("370303630b3fc631a0cb3942860fb6f77446e9c1"), blob);
//defer commit.raze();
//std.debug.print("{}\n", .{commit});
try std.testing.expectEqualStrings("fcb6817b0efc397f1525ff7ee375e08703ed17a9", commit.tree);
try std.testing.expectEqualStrings("370303630b3fc631a0cb3942860fb6f77446e9c1", commit.sha);
try std.testing.expectEqualStrings("fcb6817b0efc397f1525ff7ee375e08703ed17a9", commit.tree.hex[0..]);
try std.testing.expectEqualStrings("370303630b3fc631a0cb3942860fb6f77446e9c1", commit.sha.hex[0..]);
}
 
test "not gpg" {
const null_sha = "0000000000000000000000000000000000000000";
const null_sha = SHA.init("0000000000000000000000000000000000000000");
const blob_invalid_0 =
\\tree 0000bb21f5276fd4f3611a890d12312312415434
\\parent ffffff8bd96b1abaceaa3298374abo82f4239948
\\parent ffffff8bd96b1abaceaa3298374ab082f4239948
\\author Some Dude <some@email.com> 1687200000 -0700
\\committer Some Dude <some@email.com> 1687200000 -0700
\\gpgsig -----BEGIN SSH SIGNATURE-----
@@ -826,8 +816,8 @@ test "not gpg" {
\\
\\commit message
;
const commit = try Commit.make(null_sha, blob_invalid_0, null);
try std.testing.expect(commit.sha.ptr == null_sha.ptr);
const commit = try Commit.init(null_sha, blob_invalid_0);
try std.testing.expect(eql(u8, commit.sha.bin[0..], null_sha.bin[0..]));
}
 
test "toParent" {
@@ -835,7 +825,7 @@ test "toParent" {
 
const cwd = try std.fs.cwd().openDir(".", .{});
var repo = try Repo.init(cwd);
defer repo.raze(a);
defer repo.raze();
try repo.loadData(a);
var commit = try repo.headCommit(a);
 
@@ -843,8 +833,9 @@ test "toParent" {
while (true) {
count += 1;
if (commit.parent[0]) |_| {
const parent = try commit.toParent(a, 0);
const parent = try commit.toParent(a, 0, &repo);
commit.raze();
 
commit = parent;
} else break;
}
@@ -852,21 +843,6 @@ test "toParent" {
try std.testing.expect(count >= 31); // LOL SORRY!
}
 
test "tree" {
var a = std.testing.allocator;
 
var cwd = std.fs.cwd();
var file = try cwd.openFile("./.git/objects/37/0303630b3fc631a0cb3942860fb6f77446e9c1", .{});
var _zlib = zlib.decompressor(file.reader());
var reader = _zlib.reader();
const data = try reader.readAllAlloc(a, 0xffff);
defer a.free(data);
var buffer = Object.init(data);
const commit = try Commit.fromReader(a, "370303630b3fc631a0cb3942860fb6f77446e9c1", buffer.reader());
defer commit.raze();
//std.debug.print("tree {s}\n", .{commit.sha});
}
 
test "tree decom" {
var a = std.testing.allocator;
 
@@ -877,10 +853,13 @@ test "tree decom" {
var d = zlib.decompressor(file.reader());
const count = try d.read(&b);
const buf = try a.dupe(u8, b[0..count]);
var tree = try Tree.make(a, "5edabf724389ef87fa5a5ddb2ebe6dbd888885ae", buf);
defer tree.raze(a);
for (tree.objects) |obj| {
if (false) std.debug.print("{s} {s} {s}\n", .{ obj.mode, obj.hash, obj.name });
defer a.free(buf);
const blob = buf[(indexOf(u8, buf, "\x00") orelse unreachable) + 1 ..];
//std.debug.print("{s}\n", .{buf});
const tree = try Tree.init(SHA.init("5edabf724389ef87fa5a5ddb2ebe6dbd888885ae"), a, blob);
defer tree.raze();
for (tree.blobs) |tobj| {
if (false) std.debug.print("{s} {s} {s}\n", .{ tobj.mode, tobj.hash, tobj.name });
}
if (false) std.debug.print("{}\n", .{tree});
}
@@ -906,9 +885,9 @@ test "read pack" {
var cwd = std.fs.cwd();
const dir = try cwd.openDir("repos/hastur", .{});
var repo = try Repo.init(dir);
defer repo.raze(a);
defer repo.raze();
 
try repo.loadPacks(a);
try repo.loadData(a);
var lol: []u8 = "";
 
for (repo.packs, 0..) |pack, pi| {
@@ -922,11 +901,10 @@ test "read pack" {
}
}
}
var obj = try repo.findObj(a, lol);
defer obj.raze(a);
const commit = try Commit.fromReader(a, lol, obj.reader());
defer commit.raze();
if (false) std.debug.print("{}\n", .{commit});
const obj = try repo.loadObject(a, SHA.init(lol));
defer a.free(obj.memory);
try std.testing.expect(obj.kind == .commit);
if (false) std.debug.print("{}\n", .{obj});
}
 
test "pack contains" {
@@ -934,30 +912,26 @@ test "pack contains" {
var cwd = std.fs.cwd();
const dir = try cwd.openDir("repos/srctree", .{});
var repo = try Repo.init(dir);
try repo.loadPacks(a);
defer repo.raze(a);
try repo.loadData(a);
defer repo.raze();
 
const sha = "7d4786ded56e1ee6cfe72c7986218e234961d03c";
var shabin: [20]u8 = undefined;
for (&shabin, 0..) |*s, i| {
s.* = try std.fmt.parseInt(u8, sha[i * 2 ..][0..2], 16);
}
const sha = SHA.init("7d4786ded56e1ee6cfe72c7986218e234961d03c");
 
var found: bool = false;
for (repo.packs) |pack| {
found = pack.contains(shabin[0..20]) != null;
found = pack.contains(sha) != null;
if (found) break;
}
try std.testing.expect(found);
 
found = false;
for (repo.packs) |pack| {
found = try pack.containsPrefix(shabin[0..10]) != null;
found = try pack.containsPrefix(sha.bin[0..10]) != null;
if (found) break;
}
try std.testing.expect(found);
 
const err = repo.packs[0].containsPrefix(shabin[0..1]);
const err = repo.packs[0].containsPrefix(sha.bin[0..1]);
try std.testing.expectError(error.AmbiguousRef, err);
 
//var long_obj = try repo.findObj(a, lol);
@@ -968,18 +942,16 @@ test "hopefully a delta" {
var cwd = std.fs.cwd();
const dir = try cwd.openDir("repos/hastur", .{});
var repo = try Repo.init(dir);
defer repo.raze(a);
 
try repo.loadData(a);
defer repo.raze();
 
var head = try repo.headCommit(a);
defer head.raze();
//std.debug.print("{}\n", .{head});
if (false) std.debug.print("{}\n", .{head});
 
var obj = try repo.findObj(a, head.tree);
defer obj.raze(a);
const tree = try Tree.fromReader(a, head.tree, obj.reader());
tree.raze(a);
const obj = try repo.loadPacked(a, head.tree);
const tree = try Tree.initOwned(head.tree, a, obj.?);
tree.raze();
if (false) std.debug.print("{}\n", .{tree});
}
 
@@ -987,14 +959,14 @@ test "commit to tree" {
const a = std.testing.allocator;
const cwd = try std.fs.cwd().openDir(".", .{});
var repo = try Repo.init(cwd);
defer repo.raze(a);
defer repo.raze();
 
try repo.loadData(a);
 
const cmt = try repo.headCommit(a);
defer cmt.raze();
const tree = try cmt.mkTree(a);
defer tree.raze(a);
const tree = try cmt.mkTree(a, &repo);
defer tree.raze();
if (false) std.debug.print("tree {}\n", .{tree});
if (false) for (tree.objects) |obj| std.debug.print(" {}\n", .{obj});
}
@@ -1004,21 +976,20 @@ test "blob to commit" {
 
const cwd = try std.fs.cwd().openDir(".", .{});
var repo = try Repo.init(cwd);
defer repo.raze(a);
 
try repo.loadData(a);
defer repo.raze();
 
const cmtt = try repo.headCommit(a);
defer cmtt.raze();
 
const tree = try cmtt.mkTree(a);
defer tree.raze(a);
const tree = try cmtt.mkTree(a, &repo);
defer tree.raze();
 
var timer = try std.time.Timer.start();
var lap = timer.lap();
const found = try tree.changedSet(a, &repo);
if (false) std.debug.print("found {any}\n", .{found});
for (found) |f| f.raze(a);
for (found) |f| f.raze();
a.free(found);
lap = timer.lap();
if (false) std.debug.print("timer {}\n", .{lap});
@@ -1029,26 +1000,26 @@ test "mk sub tree" {
 
const cwd = try std.fs.cwd().openDir(".", .{});
var repo = try Repo.init(cwd);
defer repo.raze(a);
defer repo.raze();
 
try repo.loadData(a);
 
const cmtt = try repo.headCommit(a);
defer cmtt.raze();
 
const tree = try cmtt.mkTree(a);
defer tree.raze(a);
var tree = try cmtt.mkTree(a, &repo);
defer tree.raze();
 
var blob: Blob = blb: for (tree.objects) |obj| {
var blob: Blob = blb: for (tree.blobs) |obj| {
if (std.mem.eql(u8, obj.name, "src")) break :blb obj;
} else return error.ExpectedBlobMissing;
var subtree = try blob.toTree(a, repo);
var subtree = try blob.toTree(a, &repo);
if (false) std.debug.print("{any}\n", .{subtree});
for (subtree.objects) |obj| {
for (subtree.blobs) |obj| {
if (false) std.debug.print("{any}\n", .{obj});
}
 
subtree.raze(a);
subtree.raze();
}
 
test "commit mk sub tree" {
@@ -1056,40 +1027,40 @@ test "commit mk sub tree" {
 
const cwd = try std.fs.cwd().openDir(".", .{});
var repo = try Repo.init(cwd);
defer repo.raze(a);
defer repo.raze();
 
try repo.loadData(a);
 
const cmtt = try repo.headCommit(a);
defer cmtt.raze();
 
const tree = try cmtt.mkTree(a);
defer tree.raze(a);
var tree = try cmtt.mkTree(a, &repo);
defer tree.raze();
 
var blob: Blob = blb: for (tree.objects) |obj| {
var blob: Blob = blb: for (tree.blobs) |obj| {
if (std.mem.eql(u8, obj.name, "src")) break :blb obj;
} else return error.ExpectedBlobMissing;
var subtree = try blob.toTree(a, repo);
var subtree = try blob.toTree(a, &repo);
if (false) std.debug.print("{any}\n", .{subtree});
for (subtree.objects) |obj| {
for (subtree.blobs) |obj| {
if (false) std.debug.print("{any}\n", .{obj});
}
defer subtree.raze(a);
defer subtree.raze();
 
const csubtree = try cmtt.mkSubTree(a, "src");
const csubtree = try cmtt.mkSubTree(a, "src", &repo);
if (false) std.debug.print("{any}\n", .{csubtree});
csubtree.raze(a);
csubtree.raze();
 
const csubtree2 = try cmtt.mkSubTree(a, "src/endpoints");
const csubtree2 = try cmtt.mkSubTree(a, "src/endpoints", &repo);
if (false) std.debug.print("{any}\n", .{csubtree2});
if (false) for (csubtree2.objects) |obj|
std.debug.print("{any}\n", .{obj});
defer csubtree2.raze(a);
defer csubtree2.raze();
 
const changed = try csubtree2.changedSet(a, &repo);
for (csubtree2.objects, changed) |o, c| {
for (csubtree2.blobs, changed) |o, c| {
if (false) std.debug.print("{s} {s}\n", .{ o.name, c.sha });
c.raze(a);
c.raze();
}
a.free(changed);
}
@@ -1102,7 +1073,7 @@ test "considering optimizing blob to commit" {
 
////var repo = try Repo.init(cwd);
//var timer = try std.time.Timer.start();
//defer repo.raze(a);
//defer repo.raze();
 
//try repo.loadPacks(a);
 
@@ -1209,21 +1180,21 @@ test "ref delta" {
const dir = cwd.openDir("repos/hastur", .{}) catch return error.skip;
 
var repo = try Repo.init(dir);
defer repo.raze(a);
defer repo.raze();
 
try repo.loadData(a);
 
const cmtt = try repo.headCommit(a);
defer cmtt.raze();
 
const tree = try cmtt.mkTree(a);
defer tree.raze(a);
const tree = try cmtt.mkTree(a, &repo);
defer tree.raze();
 
var timer = try std.time.Timer.start();
var lap = timer.lap();
const found = try tree.changedSet(a, &repo);
if (false) std.debug.print("found {any}\n", .{found});
for (found) |f| f.raze(a);
for (found) |f| f.raze();
a.free(found);
lap = timer.lap();
if (false) std.debug.print("timer {}\n", .{lap});
@@ -1252,7 +1223,7 @@ test "new repo" {
var new_repo = try Repo.createNew(a, tdir.dir, "new_repo");
_ = try tdir.dir.openDir("new_repo", .{});
try new_repo.loadData(a);
defer new_repo.raze(a);
defer new_repo.raze();
}
 
test "updated at" {
@@ -1260,7 +1231,7 @@ test "updated at" {
 
const cwd = try std.fs.cwd().openDir(".", .{});
var repo = try Repo.init(cwd);
defer repo.raze(a);
defer repo.raze();
 
try repo.loadData(a);
const oldest = try repo.updatedAt(a);
 
src/git/blob.zig added: 585, removed: 584, total 1
@@ -8,9 +8,11 @@ const Tree = @import("tree.zig");
 
pub const Blob = @This();
 
memory: ?[]u8 = null,
sha: Git.SHA,
mode: [6]u8,
name: []const u8,
hash: [40]u8,
data: ?[]u8 = null,
 
pub fn isFile(self: Blob) bool {
return self.mode[0] != 48;
@@ -23,14 +25,18 @@ pub fn toObject(self: Blob, a: Allocator, repo: Repo) !Object {
return error.NotImplemented;
}
 
pub fn toTree(self: Blob, a: Allocator, repo: Repo) !Tree {
pub fn toTree(self: Blob, a: Allocator, repo: *const Repo) !Tree {
if (self.isFile()) return error.NotATree;
const tree = try Tree.fromRepo(a, repo, &self.hash);
return tree;
const obj = try repo.loadObject(a, self.sha);
return try Tree.initOwned(self.sha, a, obj);
}
 
pub fn raze(self: Blob, a: Allocator) void {
if (self.memory) |mem| a.free(mem);
}
 
pub fn format(self: Blob, comptime _: []const u8, _: std.fmt.FormatOptions, out: anytype) !void {
try out.print("Blob{{ ", .{});
try if (self.isFile()) out.print("File", .{}) else out.print("Tree", .{});
try out.print(" {s} @ {s} }}", .{ self.name, self.hash });
try out.print(" {s} @ {s} }}", .{ self.name, self.sha });
}
 
src/git/commit.zig added: 585, removed: 584, total 1
@@ -1,5 +1,6 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const AnyReader = std.io.AnyReader;
 
const Git = @import("../git.zig");
const SHA = Git.SHA;
@@ -13,7 +14,7 @@ pub const Commit = @This();
pub const GPGSig = struct {};
 
alloc: ?Allocator = null,
blob: []const u8,
memory: ?[]const u8 = null,
sha: SHA,
tree: SHA,
/// 9 ought to be enough for anyone... or at least robinli ... at least for a while
@@ -25,7 +26,6 @@ committer: Actor,
message: []const u8,
title: []const u8,
body: []const u8,
repo: ?*const Repo = null,
gpgsig: ?GPGSig,
 
ptr_parent: ?*Commit = null, // TODO: multiple parents
@@ -33,17 +33,13 @@ ptr_parent: ?*Commit = null, // TOOO multiple parents
fn header(self: *Commit, data: []const u8) !void {
if (std.mem.indexOf(u8, data, " ")) |brk| {
const name = data[0..brk];
const payload = data[brk..];
if (std.mem.eql(u8, name, "commit")) {
if (std.mem.indexOf(u8, data, "\x00")) |nl| {
self.tree = payload[nl..][0..40];
} else unreachable;
} else if (std.mem.eql(u8, name, "tree")) {
self.tree = payload[1..41];
const payload = data[brk + 1 ..];
if (std.mem.eql(u8, name, "tree")) {
self.tree = SHA.init(payload[0..40]);
} else if (std.mem.eql(u8, name, "parent")) {
for (&self.parent) |*parr| {
if (parr.* == null) {
parr.* = payload[1..41];
parr.* = SHA.init(payload[0..40]);
return;
}
}
@@ -52,7 +48,7 @@ fn header(self: *Commit, data: []const u8) !void {
} else if (std.mem.eql(u8, name, "committer")) {
self.committer = try Actor.make(payload);
} else {
std.debug.print("unknown header: {s}\n", .{name});
std.debug.print("unknown header: {any}\n", .{name});
return error.UnknownHeader;
}
} else return error.MalformedHeader;
@@ -67,27 +63,12 @@ fn gpgSig(_: *Commit, itr: *std.mem.SplitIterator(u8, .sequence)) !void {
return error.InvalidGpgsig;
}
 
pub fn initAlloc(a: Allocator, sha_in: SHA, data: []const u8) !Commit {
const sha = try a.dupe(u8, sha_in);
const blob = try a.dupe(u8, data);
 
var self = try make(sha, blob, a);
self.alloc = a;
return self;
}
 
pub fn init(sha: SHA, data: []const u8) !Commit {
return make(sha, data, null);
}
 
pub fn make(sha: SHA, data: []const u8, a: ?Allocator) !Commit {
_ = a;
if (std.mem.startsWith(u8, data, "commit")) unreachable;
var lines = std.mem.split(u8, data, "\n");
var self: Commit = undefined;
self.repo = null;
// I don't like it either, but... lazy
self.parent = .{ null, null, null, null, null, null, null, null, null };
self.blob = data;
while (lines.next()) |line| {
if (std.mem.startsWith(u8, line, "gpgsig")) {
self.gpgSig(&lines) catch |e| {
@@ -119,52 +100,46 @@ pub fn make(sha: SHA, data: []const u8, a: ?Allocator) !Commit {
return self;
}
 
pub fn fromReader(a: Allocator, sha: SHA, reader: Git.Reader) !Commit {
var buffer: [0xFFFF]u8 = undefined;
const len = try reader.readAll(&buffer);
return try initAlloc(a, sha, buffer[0..len]);
pub fn initOwned(sha: SHA, a: Allocator, object: Git.Object) !Commit {
var commit = try init(sha, object.body);
commit.alloc = a;
commit.memory = object.memory;
return commit;
}
 
pub fn toParent(self: Commit, a: Allocator, idx: u8) !Commit {
pub fn toParent(self: Commit, a: Allocator, idx: u8, repo: *const Repo) !Commit {
if (idx >= self.parent.len) return error.NoParent;
if (self.parent[idx]) |parent| {
if (self.repo) |repo| {
var obj = try repo.findObj(a, parent);
defer obj.raze(a);
var cmt = try Commit.fromReader(a, parent, obj.reader());
cmt.repo = repo;
return cmt;
}
return error.DetachedCommit;
const tmp = try repo.loadObject(a, parent);
return try initOwned(parent, a, tmp);
}
return error.NoParent;
}
 
pub fn mkTree(self: Commit, a: Allocator) !Tree {
if (self.repo) |repo| {
return try Tree.fromRepo(a, repo.*, self.tree);
} else return error.DetachedCommit;
pub fn mkTree(self: Commit, a: Allocator, repo: *const Repo) !Tree {
const tmp = try repo.loadObject(a, self.tree);
return try Tree.initOwned(self.tree, a, tmp);
}
 
pub fn mkSubTree(self: Commit, a: Allocator, subpath: ?[]const u8) !Tree {
const path = subpath orelse return self.mkTree(a);
if (path.len == 0) return self.mkTree(a);
pub fn mkSubTree(self: Commit, a: Allocator, subpath: ?[]const u8, repo: *const Repo) !Tree {
const rootpath = subpath orelse return self.mkTree(a, repo);
if (rootpath.len == 0) return self.mkTree(a, repo);
 
var itr = std.mem.split(u8, path, "/");
var root = try self.mkTree(a);
root.path = try a.dupe(u8, path);
iter: while (itr.next()) |p| {
for (root.objects) |obj| {
if (std.mem.eql(u8, obj.name, p)) {
var itr = std.mem.split(u8, rootpath, "/");
var root = try self.mkTree(a, repo);
root.path = try a.dupe(u8, rootpath);
iter: while (itr.next()) |path| {
for (root.blobs) |obj| {
if (std.mem.eql(u8, obj.name, path)) {
if (itr.rest().len == 0) {
defer root.raze(a);
var out = try obj.toTree(a, self.repo.?.*);
out.path = try a.dupe(u8, path);
defer root.raze();
var out = try obj.toTree(a, repo);
out.path = try a.dupe(u8, rootpath);
return out;
} else {
const tree = try obj.toTree(a, self.repo.?.*);
const tree = try obj.toTree(a, repo);
defer root = tree;
root.raze(a);
root.raze();
continue :iter;
}
}
@@ -175,10 +150,7 @@ pub fn mkSubTree(self: Commit, a: Allocator, subpath: ?[]const u8) !Tree {
 
/// Warning: this function is probably unsafe
pub fn raze(self: Commit) void {
if (self.alloc) |a| {
a.free(self.sha);
a.free(self.blob);
}
if (self.alloc) |a| a.free(self.memory.?);
}
 
pub fn format(
@@ -192,10 +164,10 @@ pub fn format(
\\commit {s}
\\tree {s}
\\
, .{ self.sha, self.tree });
, .{ self.sha.hex[0..], self.tree.hex[0..] });
for (self.parent) |par| {
if (par) |p|
try out.print("parent {s}\n", .{p});
try out.print("parent {s}\n", .{p.hex[0..]});
}
try out.print(
\\author {}
 
src/git/pack.zig added: 585, removed: 584, total 1
@@ -3,18 +3,17 @@ const Allocator = std.mem.Allocator;
const zlib = std.compress.zlib;
const PROT = std.posix.PROT;
const MAP_TYPE = std.os.linux.MAP_TYPE;
const AnyReader = std.io.AnyReader;
const bufPrint = std.fmt.bufPrint;
 
const Git = @import("../git.zig");
const Error = Git.Error;
const FBSReader = Git.FBSReader;
const Repo = Git.Repo;
const shaToHex = Git.shaToHex;
 
const SHA = Git.SHA;
 
pub const Pack = @This();
 
name: SHA,
pack: []u8,
idx: []u8,
pack_fd: std.fs.File,
@@ -41,7 +40,7 @@ const IdxHeader = extern struct {
fanout: [256]u32,
};
 
const ObjType = enum(u3) {
const PackedObjectTypes = enum(u3) {
invalid = 0,
commit = 1,
tree = 2,
@@ -51,19 +50,22 @@ const ObjType = enum(u3) {
ref_delta = 7,
};
 
const ObjHeader = struct {
kind: ObjType,
size: usize,
pub const PackedObject = struct {
const Header = struct {
kind: PackedObjectTypes,
size: usize,
};
header: PackedObject.Header,
data: []u8,
};
 
/// assumes name ownership
pub fn init(dir: std.fs.Dir, name: []u8) !Pack {
pub fn init(dir: std.fs.Dir, name: []const u8) !Pack {
std.debug.assert(name.len <= 45);
var filename: [50]u8 = undefined;
const ifd = try dir.openFile(try std.fmt.bufPrint(&filename, "{s}.idx", .{name}), .{});
const pfd = try dir.openFile(try std.fmt.bufPrint(&filename, "{s}.pack", .{name}), .{});
const ifd = try dir.openFile(try bufPrint(&filename, "{s}.idx", .{name}), .{});
const pfd = try dir.openFile(try bufPrint(&filename, "{s}.pack", .{name}), .{});
var pack = Pack{
.name = name,
.pack = try mmap(pfd),
.idx = try mmap(ifd),
.pack_fd = pfd,
@@ -117,11 +119,11 @@ pub fn fanOutCount(self: Pack, i: u8) u32 {
}
 
pub fn contains(self: Pack, sha: SHA) ?u32 {
std.debug.assert(sha.len == 20);
return self.containsPrefix(sha) catch unreachable;
return self.containsPrefix(sha.bin[0..]) catch unreachable;
}
 
pub fn containsPrefix(self: Pack, sha: SHA) !?u32 {
pub fn containsPrefix(self: Pack, sha: []const u8) !?u32 {
std.debug.assert(sha.len <= 20);
const count: usize = self.fanOutCount(sha[0]);
if (count == 0) return null;
 
@@ -140,15 +142,15 @@ pub fn containsPrefix(self: Pack, sha: SHA) !?u32 {
return null;
}
 
pub fn getReaderOffset(self: Pack, offset: u32) !FBSReader {
pub fn getReaderOffset(self: Pack, offset: u32) !AnyReader {
if (offset > self.pack.len) return error.WTF;
return self.pack[offset];
}
 
fn parseObjHeader(reader: *FBSReader) Pack.ObjHeader {
fn parseObjHeader(reader: *AnyReader) PackedObject.Header {
var byte: usize = 0;
byte = reader.readByte() catch unreachable;
var h = Pack.ObjHeader{
var h = PackedObject.Header{
.size = byte & 0b1111,
.kind = @enumFromInt((byte & 0b01110000) >> 4),
};
@@ -163,25 +165,24 @@ fn parseObjHeader(reader: *FBSReader) Pack.ObjHeader {
return h;
}
 
fn loadBlob(a: Allocator, reader: *FBSReader) ![]u8 {
var _zlib = zlib.decompressor(reader.*);
var zr = _zlib.reader();
return try zr.readAllAlloc(a, 0xffffff);
fn loadBlob(a: Allocator, reader: *AnyReader) ![]u8 {
var zlib_ = zlib.decompressor(reader.*);
return try zlib_.reader().readAllAlloc(a, 0xffffff);
}
 
fn readVarInt(reader: *FBSReader) !usize {
var byte: usize = try reader.readByte();
fn readVarInt(reader: *AnyReader) error{ReadError}!usize {
var byte: usize = reader.readByte() catch return error.ReadError;
var base: usize = byte & 0x7F;
while (byte >= 0x80) {
base += 1;
byte = try reader.readByte();
byte = reader.readByte() catch return error.ReadError;
base = (base << 7) + (byte & 0x7F);
}
//std.debug.print("varint = {}\n", .{base});
return base;
}
 
fn deltaInst(reader: *FBSReader, writer: anytype, base: []u8) !usize {
fn deltaInst(reader: *AnyReader, writer: anytype, base: []u8) !usize {
const readb: usize = try reader.readByte();
if (readb == 0) {
std.debug.print("INVALID INSTRUCTION 0x00\n", .{});
@@ -219,81 +220,131 @@ fn deltaInst(reader: *FBSReader, writer: anytype, base: []u8) !usize {
}
}
 
fn loadRefDelta(_: Pack, a: Allocator, reader: *FBSReader, _: usize, repo: Repo) ![]u8 {
fn loadRefDelta(_: Pack, a: Allocator, reader: *AnyReader, _: usize, repo: *const Repo) Error!PackedObject {
var buf: [20]u8 = undefined;
var hexy: [40]u8 = undefined;
 
_ = try reader.read(&buf);
shaToHex(&buf, &hexy);
const basez = repo.findBlob(a, &buf) catch return error.BlobMissing;
defer a.free(basez);
if (reader.read(&buf)) |count| {
if (count != 20) return error.PackCorrupt;
} else |_| return error.ReadError;
const sha = SHA.init(buf[0..]);
// I hate it too... but I need a break
var basefree: []u8 = undefined;
var basedata: []u8 = undefined;
var basetype: PackedObjectTypes = undefined;
switch (repo.loadObjectOrDelta(a, sha) catch return error.BlobMissing) {
.pack => |pk| {
basefree = pk.data;
basedata = pk.data;
basetype = pk.header.kind;
},
.file => |fdata| {
basefree = fdata.memory;
basedata = fdata.body;
basetype = switch (fdata.kind) {
.blob => .blob,
.tree => .tree,
.commit => .commit,
.tag => .tag,
};
},
}
defer a.free(basefree);
 
var _zlib = zlib.decompressor(reader.*);
var zr = _zlib.reader();
const inst = zr.readAllAlloc(a, 0xffffff) catch return error.PackCorrupt;
var zlib_ = zlib.decompressor(reader.*);
const inst = zlib_.reader().readAllAlloc(a, 0xffffff) catch return error.PackCorrupt;
defer a.free(inst);
var inst_fbs = std.io.fixedBufferStream(inst);
var inst_reader = inst_fbs.reader();
var inst_reader = inst_fbs.reader().any();
// We don't actually need these when zlib works :)
_ = try readVarInt(&inst_reader);
_ = try readVarInt(&inst_reader);
var buffer = std.ArrayList(u8).init(a);
while (true) {
_ = deltaInst(&inst_reader, buffer.writer(), basez) catch {
_ = deltaInst(&inst_reader, buffer.writer(), basedata) catch {
break;
};
}
return try buffer.toOwnedSlice();
return .{
.header = .{
.size = 0,
.kind = basetype,
},
.data = try buffer.toOwnedSlice(),
};
}
 
fn loadDelta(self: Pack, a: Allocator, reader: *FBSReader, offset: usize, repo: Repo) ![]u8 {
fn loadDelta(self: Pack, a: Allocator, reader: *AnyReader, offset: usize, repo: *const Repo) Error!PackedObject {
// fd pos is offset + 2-ish because of the header read
const srclen = try readVarInt(reader);
 
var _zlib = zlib.decompressor(reader.*);
var zr = _zlib.reader();
const inst = zr.readAllAlloc(a, 0xffffff) catch return error.PackCorrupt;
var zlib_ = zlib.decompressor(reader.*);
const inst = zlib_.reader().readAllAlloc(a, 0xffffff) catch return error.PackCorrupt;
defer a.free(inst);
var inst_fbs = std.io.fixedBufferStream(inst);
var inst_reader = inst_fbs.reader();
var inst_reader = inst_fbs.reader().any();
// We don't actually need these when zlib works :)
_ = try readVarInt(&inst_reader);
_ = try readVarInt(&inst_reader);
 
const baseobj_offset = offset - srclen;
const basez = try self.loadObj(a, baseobj_offset, repo);
defer a.free(basez);
const baseobj = try self.loadData(a, baseobj_offset, repo);
defer a.free(baseobj.data);
 
var buffer = std.ArrayList(u8).init(a);
while (true) {
_ = deltaInst(&inst_reader, buffer.writer(), basez) catch {
_ = deltaInst(&inst_reader, buffer.writer(), baseobj.data) catch {
break;
};
}
return try buffer.toOwnedSlice();
return .{
.header = baseobj.header,
.data = try buffer.toOwnedSlice(),
};
}
 
pub fn loadObj(self: Pack, a: Allocator, offset: usize, repo: Repo) Error![]u8 {
pub fn loadData(self: Pack, a: Allocator, offset: usize, repo: *const Repo) Error!PackedObject {
var fbs = std.io.fixedBufferStream(self.pack[offset..]);
var reader = fbs.reader();
var reader = fbs.reader().any();
const h = parseObjHeader(&reader);
 
switch (h.kind) {
.commit, .tree, .blob, .tag => return loadBlob(a, &reader) catch return error.PackCorrupt,
.ofs_delta => return try self.loadDelta(a, &reader, offset, repo),
.ref_delta => return try self.loadRefDelta(a, &reader, offset, repo),
.invalid => {
std.debug.print("obj type ({}) not implemened\n", .{h.kind});
unreachable; // not implemented
return .{
.header = h,
.data = switch (h.kind) {
.commit, .tree, .blob, .tag => loadBlob(a, &reader) catch return error.PackCorrupt,
.ofs_delta => return try self.loadDelta(a, &reader, offset, repo),
.ref_delta => return try self.loadRefDelta(a, &reader, offset, repo),
.invalid => {
std.debug.print("obj type ({}) not implemened\n", .{h.kind});
@panic("not implemented");
},
},
}
unreachable;
};
}
 
pub fn raze(self: Pack, a: Allocator) void {
pub fn resolveObject(self: Pack, a: Allocator, offset: usize, repo: *const Repo) Error!Git.Object {
const resolved = try self.loadData(a, offset, repo);
errdefer a.free(resolved.data);
 
return switch (resolved.header.kind) {
.blob, .tree, .commit, .tag => |kind| .{
.kind = switch (kind) {
.blob => .blob,
.tree => .tree,
.commit => .commit,
.tag => .tag,
else => unreachable,
},
.memory = resolved.data,
.header = resolved.data[0..0],
.body = resolved.data,
},
else => return error.IncompleteObject,
};
}
 
pub fn raze(self: Pack) void {
self.pack_fd.close();
self.idx_fd.close();
munmap(@alignCast(self.pack));
munmap(@alignCast(self.idx));
a.free(self.name);
}
 
src/git/tree.zig added: 585, removed: 584, total 1
@@ -1,6 +1,7 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const hexLower = std.fmt.fmtSliceHexLower;
const bufPrint = std.fmt.bufPrint;
 
const Git = @import("../git.zig");
const SHA = Git.SHA;
@@ -9,10 +10,12 @@ const Blob = @import("blob.zig");
 
const Tree = @This();
 
sha: []const u8,
alloc: Allocator,
memory: ?[]u8 = null,
sha: SHA,
path: ?[]const u8 = null,
blob: []const u8,
objects: []Blob,
blobs: []Blob,
 
pub fn pushPath(self: *Tree, a: Allocator, path: []const u8) !void {
const spath = self.path orelse {
@@ -24,18 +27,12 @@ pub fn pushPath(self: *Tree, a: Allocator, path: []const u8) !void {
a.free(spath);
}
 
pub fn fromRepo(a: Allocator, r: Repo, sha: SHA) !Tree {
var blob = try r.findObj(a, sha);
defer blob.raze(a);
const b = try blob.reader().readAllAlloc(a, 0xffff);
return try Tree.make(a, sha, b);
}
 
pub fn make(a: Allocator, sha: SHA, blob: []const u8) !Tree {
pub fn init(sha: SHA, a: Allocator, blob: []const u8) !Tree {
var self: Tree = .{
.sha = try a.dupe(u8, sha),
.alloc = a,
.sha = sha,
.blob = blob,
.objects = try a.alloc(Blob, std.mem.count(u8, blob, "\x00")),
.blobs = try a.alloc(Blob, std.mem.count(u8, blob, "\x00")),
};
 
var i: usize = 0;
@@ -49,52 +46,54 @@ pub fn make(a: Allocator, sha: SHA, blob: []const u8) !Tree {
}
var obj_i: usize = 0;
while (std.mem.indexOfScalarPos(u8, blob, i, 0)) |index| {
var obj = &self.objects[obj_i];
var obj = &self.blobs[obj_i];
 
obj_i += 1;
if (blob[i] == '1') {
_ = try std.fmt.bufPrint(&obj.mode, "{s}", .{blob[i .. i + 6]});
_ = try std.fmt.bufPrint(&obj.hash, "{}", .{hexLower(blob[index + 1 .. index + 21])});
_ = try bufPrint(&obj.mode, "{s}", .{blob[i .. i + 6]});
obj.sha = SHA.init(blob[index + 1 .. index + 21]);
obj.name = blob[i + 7 .. index];
} else if (blob[i] == '4') {
_ = try std.fmt.bufPrint(&obj.mode, "0{s}", .{blob[i .. i + 5]});
_ = try std.fmt.bufPrint(&obj.hash, "{}", .{hexLower(blob[index + 1 .. index + 21])});
_ = try bufPrint(&obj.mode, "0{s}", .{blob[i .. i + 5]});
obj.sha = SHA.init(blob[index + 1 .. index + 21]);
obj.name = blob[i + 6 .. index];
} else std.debug.print("panic {s} ", .{blob[i..index]});
 
i = index + 21;
}
if (a.resize(self.objects, obj_i)) {
self.objects.len = obj_i;
if (a.resize(self.blobs, obj_i)) {
self.blobs.len = obj_i;
}
return self;
}
 
pub fn fromReader(a: Allocator, sha: SHA, reader: Git.Reader) !Tree {
const buf = try reader.readAllAlloc(a, 0xffff);
return try Tree.make(a, sha, buf);
pub fn initOwned(sha: SHA, a: Allocator, obj: Git.Object) !Tree {
var tree = try init(sha, a, obj.body);
tree.memory = obj.memory;
return tree;
}
 
pub fn changedSet(self: Tree, a: Allocator, repo: *Repo) ![]Git.ChangeSet {
pub fn changedSet(self: Tree, a: Allocator, repo: *const Repo) ![]Git.ChangeSet {
const cmtt = try repo.headCommit(a);
defer cmtt.raze();
const search_list: []?Blob = try a.alloc(?Blob, self.objects.len);
for (self.objects, search_list) |src, *dst| {
const search_list: []?Blob = try a.alloc(?Blob, self.blobs.len);
for (self.blobs, search_list) |src, *dst| {
dst.* = src;
}
defer a.free(search_list);
 
var par = try repo.headCommit(a);
var ptree = try par.mkSubTree(a, self.path);
var ptree = try par.mkSubTree(a, self.path, repo);
 
var changed = try a.alloc(Git.ChangeSet, self.objects.len);
var changed = try a.alloc(Git.ChangeSet, self.blobs.len);
var old = par;
var oldtree = ptree;
var found: usize = 0;
while (found < search_list.len) {
old = par;
oldtree = ptree;
par = par.toParent(a, 0) catch |err| switch (err) {
error.NoParent => {
par = par.toParent(a, 0, repo) catch |err| switch (err) {
error.NoParent, error.IncompleteObject => {
for (search_list, 0..) |search_ish, i| {
if (search_ish) |search| {
found += 1;
@@ -108,13 +107,13 @@ pub fn changedSet(self: Tree, a: Allocator, repo: *Repo) ![]Git.ChangeSet {
}
}
old.raze();
oldtree.raze(a);
oldtree.raze();
break;
},
else => |e| return e,
};
ptree = par.mkSubTree(a, self.path) catch |err| switch (err) {
error.PathNotFound => {
ptree = par.mkSubTree(a, self.path, repo) catch |err| switch (err) {
error.PathNotFound, error.IncompleteObject => {
for (search_list, 0..) |search_ish, i| {
if (search_ish) |search| {
found += 1;
@@ -128,7 +127,7 @@ pub fn changedSet(self: Tree, a: Allocator, repo: *Repo) ![]Git.ChangeSet {
}
}
old.raze();
oldtree.raze(a);
oldtree.raze();
break;
},
else => |e| return e,
@@ -152,19 +151,18 @@ pub fn changedSet(self: Tree, a: Allocator, repo: *Repo) ![]Git.ChangeSet {
}
}
old.raze();
oldtree.raze(a);
oldtree.raze();
}
 
par.raze();
ptree.raze(a);
ptree.raze();
return changed;
}
 
pub fn raze(self: Tree, a: Allocator) void {
a.free(self.sha);
if (self.path) |p| a.free(p);
a.free(self.objects);
a.free(self.blob);
pub fn raze(self: Tree) void {
if (self.path) |p| self.alloc.free(p);
if (self.memory) |m| self.alloc.free(m);
self.alloc.free(self.blobs);
}
 
pub fn format(self: Tree, comptime _: []const u8, _: std.fmt.FormatOptions, out: anytype) !void {
 
src/repos.zig added: 585, removed: 584, total 1
@@ -125,7 +125,7 @@ pub fn updateThread(cfg: *AgentConfig) void {
const dirname = std.fmt.bufPrint(&name_buffer, "repos/{s}", .{rname}) catch return;
const dir = std.fs.cwd().openDir(dirname, .{}) catch continue;
var repo = Git.Repo.init(dir) catch continue;
defer repo.raze(a);
defer repo.raze();
repo.loadData(a) catch {
std.debug.print("Warning, unable to load data for repo {s}\n", .{rname});
};