
Andrew Kelley parent 91d56e67 3c847752
implement DB saving and loading

README.md
@@ -56,8 +56,9 @@ This extension makes it easy to debug Zig WebAssembly code:
 
## Roadmap
 
* when scanning the file system, detect files that are in the database but have been deleted from the file system
* playback UI element that shows waveform, duration, position, title
* DB saving and loading
* prev, next, pause, play, stop
* seeking
* apply loudness information to playback
 
client/main.zig
@@ -89,9 +89,16 @@ fn updateDb(
albums: []const Db.Album,
string_bytes: []const u8,
) !void {
try db.files.appendSlice(gpa, files);
try db.directories.appendSlice(gpa, directories);
try db.string_bytes.appendSlice(gpa, string_bytes);
 
try db.files.entries.resize(gpa, files.len);
@memcpy(db.files.entries.items(.key), files);
try db.files.reIndex(gpa);
 
try db.directories.entries.resize(gpa, directories.len);
@memcpy(db.directories.entries.items(.key), directories);
try db.directories.reIndex(gpa);
 
try db.albums.entries.resize(gpa, albums.len);
@memcpy(db.albums.entries.items(.key), albums);
try db.albums.reIndex(gpa);
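The new client-side `updateDb` rebuilds each table it receives from the server the same way: resize the map's backing entry list to the incoming length, `@memcpy` the raw keys into place, then call `reIndex` so the hash index matches the copied entries. A minimal sketch of that pattern, assuming a simplified stand-in key type (`Key` below is hypothetical, standing in for `Db.File`):

```zig
const std = @import("std");

// Hypothetical key type standing in for Db.File; only the pattern matters.
const Key = struct { id: u32 };

test "bulk-load keys, then rebuild the hash index" {
    const gpa = std.testing.allocator;
    var map: std.AutoArrayHashMapUnmanaged(Key, void) = .{};
    defer map.deinit(gpa);

    const incoming = [_]Key{ .{ .id = 1 }, .{ .id = 2 }, .{ .id = 3 } };

    // Resize the backing MultiArrayList, copy the raw keys in, then rebuild
    // the hash index so lookups can find the copied entries.
    try map.entries.resize(gpa, incoming.len);
    @memcpy(map.entries.items(.key), &incoming);
    try map.reIndex(gpa);

    try std.testing.expect(map.contains(.{ .id = 2 }));
}
```

Without the `reIndex` call the entries would be present in the array but unreachable through hash lookups, since `resize` and `@memcpy` bypass the map's index.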
 
player/root.zig
@@ -28,7 +28,7 @@ pub const InitError = error{
 
pub fn init() InitError!void {
switch (std.options.log_level) {
.debug => av.LOG.DEBUG.set_level(),
.debug => av.LOG.WARNING.set_level(),
else => av.LOG.QUIET.set_level(),
}
 
 
server/main.zig
@@ -9,6 +9,7 @@ const Db = @import("shared").Db;
const protocol = @import("shared").protocol;
const WebSocket = @import("WebSocket.zig");
const StaticHttpFileServer = @import("StaticHttpFileServer");
const Directory = std.Build.Cache.Directory;
 
const usage =
\\Usage: groovebasin [options]
@@ -44,7 +45,7 @@ pub fn main() anyerror!noreturn {
std.log.info("Output device: {s}", .{device.name});
 
var opt_config_zon_path: ?[]const u8 = null;
var opt_install_directory: ?std.Build.Cache.Directory = null;
var opt_install_directory: ?Directory = null;
 
{
var i: usize = 1;
@@ -70,7 +71,7 @@ pub fn main() anyerror!noreturn {
}
}
 
const install_directory: std.Build.Cache.Directory = opt_install_directory orelse b: {
const install_directory: Directory = opt_install_directory orelse b: {
const path_name = std.fs.selfExeDirPathAlloc(arena) catch |err|
fatal("unable to determine installation directory: {s}", .{@errorName(err)});
break :b .{
@@ -96,8 +97,7 @@ pub fn main() anyerror!noreturn {
var db = Db.empty;
defer db.deinit(gpa);
 
// Reserve string index 0 for an empty string.
assert((try db.getOrPutString(gpa, "")) == .empty);
try loadDb(gpa, &db, install_directory, config.db_path);
 
{
var it = config.music_directory.handle.iterate();
@@ -112,9 +112,9 @@ pub fn main() anyerror!noreturn {
);
}
 
//try saveDb(&db, config.db_path);
try saveDb(&db, install_directory, config.db_path);
 
std.log.debug("loaded {d} files", .{db.files.items.len});
std.log.debug("loaded {d} files", .{db.files.entries.len});
 
var static_http_file_server = s: {
const sub_path = "www";
@@ -243,14 +243,14 @@ pub const Server = struct {
}
 
fn websocketSendLoop(s: *Server, ws: *WebSocket) !void {
const files = std.mem.sliceAsBytes(s.db.files.items);
const directories = std.mem.sliceAsBytes(s.db.directories.items);
const files = std.mem.sliceAsBytes(s.db.files.keys());
const directories = std.mem.sliceAsBytes(s.db.directories.keys());
const albums = std.mem.sliceAsBytes(s.db.albums.keys());
const string_bytes = s.db.string_bytes.items;
const Header = protocol.Header;
const header: Header = .{
.files_len = @intCast(s.db.files.items.len),
.directories_len = @intCast(s.db.directories.items.len),
.files_len = @intCast(s.db.files.entries.len),
.directories_len = @intCast(s.db.directories.entries.len),
.albums_len = @intCast(s.db.albums.entries.len),
.string_bytes_len = @intCast(s.db.string_bytes.items.len),
};
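As the code above shows, a snapshot message is a fixed-size header carrying the four lengths, followed by the raw bytes of the files, directories, and albums keys and then the string table, in that order. A sketch of how the receiving side can size that payload; `SnapshotHeader` is a local stand-in (an assumption about `protocol.Header`, using the field names assigned above), and the element sizes are passed in rather than taken from the real `Db` types:

```zig
const std = @import("std");

// Stand-in for protocol.Header; field names mirror the assignments above.
const SnapshotHeader = extern struct {
    files_len: u32,
    directories_len: u32,
    albums_len: u32,
    string_bytes_len: u32,
};

// Number of payload bytes that follow the header, given per-element sizes.
fn payloadSize(h: SnapshotHeader, file_size: usize, dir_size: usize, album_size: usize) usize {
    return h.files_len * file_size +
        h.directories_len * dir_size +
        h.albums_len * album_size +
        h.string_bytes_len;
}

test payloadSize {
    const h: SnapshotHeader = .{
        .files_len = 2,
        .directories_len = 1,
        .albums_len = 1,
        .string_bytes_len = 16,
    };
    try std.testing.expectEqual(@as(usize, 2 * 32 + 24 + 24 + 16), payloadSize(h, 32, 24, 24));
}
```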
@@ -287,7 +287,7 @@ pub const Server = struct {
try track_list.ensureUnusedCapacity(gpa, 1);
var path_buf: [std.fs.max_path_bytes]u8 = undefined;
 
for (s.db.files.items, 0..) |*db_file, i| {
for (s.db.files.keys(), 0..) |*db_file, i| {
if (db_file.album == album_index) {
const fs_path = s.db.musicDirPath(db_file, &path_buf);
std.log.debug("opening '{s}'", .{fs_path});
@@ -304,7 +304,8 @@ pub const Server = struct {
pub fn lessThan(ctx: @This(), a: usize, b: usize) bool {
const a_index = @intFromEnum(ctx.tracks[a]);
const b_index = @intFromEnum(ctx.tracks[b]);
return ctx.db.files.items[a_index].track_number < ctx.db.files.items[b_index].track_number;
const files = ctx.db.files.keys();
return files[a_index].track_number < files[b_index].track_number;
}
};
track_list.sortUnstable(AlbumSortContext{
@@ -347,8 +348,22 @@ fn scanDir(db: *Db, gpa: Allocator, db_dir: Db.Path.Index, it: *std.fs.Dir.Itera
},
.file => {
const basename = try db.getOrPutString(gpa, entry.name);
const file_gop = try db.files.getOrPut(gpa, .{
.directory = db_dir,
.basename = basename,
.title = undefined,
.artist = undefined,
.album = undefined,
.track_number = undefined,
.composer = undefined,
.performer = undefined,
});
if (file_gop.found_existing) continue;
db.files.lockPointers();
defer db.files.unlockPointers();
 
const basename_z = db.stringToSlice(basename);
std.log.debug("opening '{s}'", .{basename_z});
std.log.debug("opening new file '{s}'", .{basename_z});
const file = player.File.open(it.dir, basename_z, basename_z) catch |err| {
std.log.warn("unable to open '{s}': {s}", .{ entry.name, @errorName(err) });
continue;
@@ -369,7 +384,7 @@ fn scanDir(db: *Db, gpa: Allocator, db_dir: Db.Path.Index, it: *std.fs.Dir.Itera
break :a @enumFromInt(gop.index);
};
 
_ = try db.addFile(gpa, .{
file_gop.key_ptr.* = .{
.directory = db_dir,
.basename = basename,
.title = metadata.title.unwrap() orelse basename,
@@ -378,7 +393,7 @@ fn scanDir(db: *Db, gpa: Allocator, db_dir: Db.Path.Index, it: *std.fs.Dir.Itera
.track_number = metadata.track_number,
.composer = metadata.composer,
.performer = metadata.performer,
});
};
},
else => {
std.log.info("ignoring {s} '{/}{s}'", .{
@@ -582,3 +597,101 @@ test parseTrackTuple {
try expectEqual(@as(i16, 10), parseTrackTuple("10").numerator);
try expectEqual(@as(i16, -1), parseTrackTuple("10").denominator);
}
 
pub const DbFileHeader = extern struct {
magic: Magic,
files_len: u32,
directories_len: u32,
albums_len: u32,
string_bytes_len: u32,
 
pub const Magic = enum(u32) {
v1 = 0x1f3686b5,
_,
};
};
 
fn saveDb(db: *const Db, install_directory: Directory, db_path: []const u8) !void {
const files = std.mem.sliceAsBytes(db.files.keys());
const directories = std.mem.sliceAsBytes(db.directories.keys());
const albums = std.mem.sliceAsBytes(db.albums.keys());
const string_bytes = db.string_bytes.items;
const header: DbFileHeader = .{
.magic = .v1,
.files_len = @intCast(db.files.entries.len),
.directories_len = @intCast(db.directories.entries.len),
.albums_len = @intCast(db.albums.entries.len),
.string_bytes_len = @intCast(db.string_bytes.items.len),
};
 
var iovecs = [_]std.posix.iovec_const{
.{ .iov_base = std.mem.asBytes(&header), .iov_len = @sizeOf(DbFileHeader) },
.{ .iov_base = files.ptr, .iov_len = files.len },
.{ .iov_base = directories.ptr, .iov_len = directories.len },
.{ .iov_base = albums.ptr, .iov_len = albums.len },
.{ .iov_base = string_bytes.ptr, .iov_len = string_bytes.len },
};
 
var atomic_file = try install_directory.handle.atomicFile(db_path, .{});
defer atomic_file.deinit();
try atomic_file.file.writevAll(&iovecs);
try atomic_file.finish();
}
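`saveDb` gathers the header and the four slices into one iovec array and writes them with a single `writevAll` through an `AtomicFile`, so a crash or failed write can never replace an existing database with a truncated one; the data only takes the real name when `finish()` renames it into place. A small sketch of that write-then-rename pattern on a scratch directory (the file name and payload here are made up, and the `std.fs` calls are assumed to match the standard library version this commit builds against):

```zig
const std = @import("std");

test "atomic write, then rename into place" {
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();

    // All writes go to a temporary file; finish() renames it to the final
    // name, so readers never observe a partially written database.
    var atomic_file = try tmp.dir.atomicFile("example.db", .{});
    defer atomic_file.deinit();
    try atomic_file.file.writeAll("payload");
    try atomic_file.finish();

    const stat = try tmp.dir.statFile("example.db");
    try std.testing.expectEqual(@as(u64, "payload".len), stat.size);
}
```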
 
fn loadDb(gpa: Allocator, db: *Db, install_directory: Directory, db_path: []const u8) !void {
var file = install_directory.handle.openFile(db_path, .{}) catch |err| switch (err) {
error.FileNotFound => return initEmptyDb(gpa, db),
else => |e| fatal("unable to load database '{}{s}: {s}'", .{ install_directory, db_path, @errorName(e) }),
};
defer file.close();
 
const header = file.reader().readStruct(DbFileHeader) catch |err| switch (err) {
error.EndOfStream => return initEmptyDb(gpa, db),
else => |e| return e,
};
 
if (header.magic != .v1)
fatal("invalid database header magic={x}", .{@intFromEnum(header.magic)});
 
try db.files.entries.resize(gpa, header.files_len);
try db.directories.entries.resize(gpa, header.directories_len);
try db.albums.entries.resize(gpa, header.albums_len);
try db.string_bytes.resize(gpa, header.string_bytes_len);
 
const files = std.mem.sliceAsBytes(db.files.keys());
const directories = std.mem.sliceAsBytes(db.directories.keys());
const albums = std.mem.sliceAsBytes(db.albums.keys());
const string_bytes = db.string_bytes.items;
 
var iovecs = [_]std.posix.iovec{
.{ .iov_base = files.ptr, .iov_len = files.len },
.{ .iov_base = directories.ptr, .iov_len = directories.len },
.{ .iov_base = albums.ptr, .iov_len = albums.len },
.{ .iov_base = string_bytes.ptr, .iov_len = string_bytes.len },
};
const amt_read = try file.readvAll(&iovecs);
const amt_expected = files.len + directories.len + albums.len + string_bytes.len;
if (amt_read != amt_expected) return error.UnexpectedFileSize;
 
try db.files.reIndexContext(gpa, .{});
try db.directories.reIndex(gpa);
try db.albums.reIndex(gpa);
 
{
// Build the string table map.
if (string_bytes.len < 2 or string_bytes[0] != 0 or string_bytes[string_bytes.len - 1] != 0)
return error.InvalidStringTable;
var i: usize = 0;
while (i < string_bytes.len) {
try db.string_table.putContext(gpa, @intCast(i), {}, .{
.bytes = &db.string_bytes,
});
i = std.mem.indexOfScalarPos(u8, string_bytes, i, 0).? + 1;
}
}
}
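After reading the raw arrays back, `loadDb` rebuilds the hash indexes with `reIndex` and then reconstructs the string table by walking `string_bytes`: every interned string is NUL-terminated, so the byte after each terminator is the start of the next entry, and the offsets visited are exactly the values re-inserted into `db.string_table`. The same walk on a plain buffer, as a self-contained sketch (`collectStringOffsets` is hypothetical, not a helper in this tree):

```zig
const std = @import("std");

// Collect the start offset of every NUL-terminated string in the buffer.
// Requires the buffer to end with a NUL, as loadDb's validation guarantees.
fn collectStringOffsets(gpa: std.mem.Allocator, string_bytes: []const u8) ![]u32 {
    var offsets: std.ArrayListUnmanaged(u32) = .{};
    errdefer offsets.deinit(gpa);
    var i: usize = 0;
    while (i < string_bytes.len) {
        try offsets.append(gpa, @intCast(i));
        i = std.mem.indexOfScalarPos(u8, string_bytes, i, 0).? + 1;
    }
    return offsets.toOwnedSlice(gpa);
}

test collectStringOffsets {
    const gpa = std.testing.allocator;
    const offsets = try collectStringOffsets(gpa, "\x00foo\x00bar\x00");
    defer gpa.free(offsets);
    // Offset 0 is the reserved empty string, then "foo" and "bar".
    try std.testing.expectEqualSlices(u32, &.{ 0, 1, 5 }, offsets);
}
```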
 
fn initEmptyDb(gpa: Allocator, db: *Db) !void {
// Reserve string index 0 for an empty string.
assert((try db.getOrPutString(gpa, "")) == .empty);
}
 
shared/Db.zig
@@ -1,9 +1,10 @@
const std = @import("std");
const Db = @This();
const Allocator = std.mem.Allocator;
const Hash = std.hash.Wyhash;
 
files: std.ArrayListUnmanaged(File),
directories: std.ArrayListUnmanaged(Path),
files: std.ArrayHashMapUnmanaged(File, void, File.Hasher, false),
directories: std.AutoArrayHashMapUnmanaged(Path, void),
albums: std.AutoArrayHashMapUnmanaged(Album, void),
string_bytes: std.ArrayListUnmanaged(u8),
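The field changes above are the core of this commit's data-model change: `files`, `directories`, and `albums` move from plain array lists to array hash maps keyed by the record itself, with a `void` value. That keeps the property the index types (`File.Index`, `Path.Index`) rely on, a stable insertion order, while `getOrPut` now also deduplicates records. A small sketch of that set-with-stable-indices behaviour:

```zig
const std = @import("std");

test "array hash map with a void value acts as an index-stable set" {
    const gpa = std.testing.allocator;
    var set: std.AutoArrayHashMapUnmanaged(u64, void) = .{};
    defer set.deinit(gpa);

    const first = try set.getOrPut(gpa, 123);
    const again = try set.getOrPut(gpa, 123);

    try std.testing.expect(again.found_existing); // deduplicated
    try std.testing.expectEqual(first.index, again.index); // index is stable
    try std.testing.expectEqual(@as(u64, 123), set.keys()[first.index]);
}
```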
 
@@ -151,6 +152,19 @@ pub const File = extern struct {
pub const Index = enum(u16) {
_,
};
 
pub const Hasher = struct {
pub fn hash(h: Hasher, a: File) u32 {
_ = h;
return @truncate(Hash.hash(@intFromEnum(a.basename), std.mem.asBytes(&a.directory)));
}
 
pub fn eql(h: Hasher, a: File, b: File, b_index: usize) bool {
_ = h;
_ = b_index;
return a.basename == b.basename and a.directory == b.directory;
}
};
};
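`File.Hasher` hashes and compares only the identity fields, `directory` and `basename`, so two `File` records describing the same path land on the same map entry even when their metadata differs; that is what lets `scanDir` call `getOrPut` with `undefined` metadata and fill in the key afterwards. A self-contained sketch of the same idea with a made-up `Entry` type and context:

```zig
const std = @import("std");

// Made-up record: dir and name are the identity; title is payload that the
// hash context deliberately ignores, mirroring File.Hasher above.
const Entry = struct {
    dir: u32,
    name: u32,
    title: u32,
};

const EntryContext = struct {
    pub fn hash(_: EntryContext, e: Entry) u32 {
        return @truncate(std.hash.Wyhash.hash(e.name, std.mem.asBytes(&e.dir)));
    }
    pub fn eql(_: EntryContext, a: Entry, b: Entry, _: usize) bool {
        return a.dir == b.dir and a.name == b.name;
    }
};

test "identity-only hashing ignores payload fields" {
    const gpa = std.testing.allocator;
    var map: std.ArrayHashMapUnmanaged(Entry, void, EntryContext, false) = .{};
    defer map.deinit(gpa);

    _ = try map.getOrPut(gpa, .{ .dir = 1, .name = 2, .title = 10 });
    const gop = try map.getOrPut(gpa, .{ .dir = 1, .name = 2, .title = 99 });

    try std.testing.expect(gop.found_existing); // same identity, different title
    try std.testing.expectEqual(@as(u32, 10), map.keys()[gop.index].title);
}
```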
 
pub fn deinit(db: *Db, gpa: Allocator) void {
@@ -208,17 +222,12 @@ pub fn optStringToSlice(db: *const Db, optional_string: OptionalString) ?[:0]con
}
 
pub fn addDirectory(db: *Db, gpa: Allocator, path: Path) Allocator.Error!Path.Index {
try db.directories.append(gpa, path);
return @enumFromInt(db.directories.items.len - 1);
const gop = try db.directories.getOrPut(gpa, path);
return @enumFromInt(gop.index);
}
 
pub fn directory(db: *const Db, i: Path.Index) Path {
return db.directories.items[@intFromEnum(i)];
}
 
pub fn addFile(db: *Db, gpa: Allocator, new_file: File) Allocator.Error!File.Index {
try db.files.append(gpa, new_file);
return @enumFromInt(db.directories.items.len - 1);
return db.directories.keys()[@intFromEnum(i)];
}
 
pub fn fmtPath(db: *const Db, p: Path) std.fmt.Formatter(Path.format) {