
Andrew Kelley parent 794dc694 d4911794 b798aaf4
Merge pull request #17687

frontend: rework @embedFile for incremental compilation
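
For context, `@embedFile` embeds a file's contents at compile time as a zero-terminated byte array; this change reworks how that result is represented (an interned value plus a stat-based change check) so incremental compilation can track it. A minimal sketch of the user-facing behavior, mirroring the `file_to_embed.txt` test touched by this diff and assuming that file contains "hello zig\n":

const std = @import("std");

// `@embedFile` resolves the path relative to this file's module and yields a
// pointer to a comptime-known, zero-terminated array of the file's bytes.
const contents = @embedFile("file_to_embed.txt");
const expected = "hello zig\n";

test "embedded bytes are comptime-known" {
    // The exact length and the 0 sentinel are part of the result type.
    try std.testing.expect(*const [expected.len:0]u8 == @TypeOf(contents));
    try std.testing.expectEqualStrings(expected, contents);
}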

src/Compilation.zig
@@ -267,10 +267,6 @@ const Job = union(enum) {
/// It may have already be analyzed, or it may have been determined
/// to be outdated; in this case perform semantic analysis again.
analyze_decl: Module.Decl.Index,
/// The file that was loaded with `@embedFile` has changed on disk
/// and has been re-loaded into memory. All Decls that depend on it
/// need to be re-analyzed.
update_embed_file: *Module.EmbedFile,
/// The source file containing the Decl has been updated, and so the
/// Decl may need its line number information updated in the debug info.
update_line_number: Module.Decl.Index,
@@ -3374,9 +3370,6 @@ pub fn performAllTheWork(
var win32_resource_prog_node = main_progress_node.start("Compile Win32 Resources", comp.rc_source_files.len);
defer win32_resource_prog_node.end();
 
var embed_file_prog_node = main_progress_node.start("Detect @embedFile updates", comp.embed_file_work_queue.count);
defer embed_file_prog_node.end();
 
comp.work_queue_wait_group.reset();
defer comp.work_queue_wait_group.wait();
 
@@ -3412,7 +3405,7 @@ pub fn performAllTheWork(
while (comp.embed_file_work_queue.readItem()) |embed_file| {
comp.astgen_wait_group.start();
try comp.thread_pool.spawn(workerCheckEmbedFile, .{
comp, embed_file, &embed_file_prog_node, &comp.astgen_wait_group,
comp, embed_file, &comp.astgen_wait_group,
});
}
 
@@ -3602,16 +3595,6 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v
try module.ensureFuncBodyAnalysisQueued(decl.val.toIntern());
}
},
.update_embed_file => |embed_file| {
const named_frame = tracy.namedFrame("update_embed_file");
defer named_frame.end();
 
const module = comp.bin_file.options.module.?;
module.updateEmbedFile(embed_file) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => return,
};
},
.update_line_number => |decl_index| {
const named_frame = tracy.namedFrame("update_line_number");
defer named_frame.end();
@@ -3921,17 +3904,11 @@ fn workerUpdateBuiltinZigFile(
fn workerCheckEmbedFile(
comp: *Compilation,
embed_file: *Module.EmbedFile,
prog_node: *std.Progress.Node,
wg: *WaitGroup,
) void {
defer wg.finish();
 
var child_prog_node = prog_node.start(embed_file.sub_file_path, 0);
child_prog_node.activate();
defer child_prog_node.end();
 
const mod = comp.bin_file.options.module.?;
mod.detectEmbedFileUpdate(embed_file) catch |err| {
comp.detectEmbedFileUpdate(embed_file) catch |err| {
comp.reportRetryableEmbedFileError(embed_file, err) catch |oom| switch (oom) {
// Swallowing this error is OK because it's implied to be OOM when
// there is a missing `failed_embed_files` error message.
@@ -3941,6 +3918,25 @@ fn workerCheckEmbedFile(
};
}
 
fn detectEmbedFileUpdate(comp: *Compilation, embed_file: *Module.EmbedFile) !void {
const mod = comp.bin_file.options.module.?;
const ip = &mod.intern_pool;
const sub_file_path = ip.stringToSlice(embed_file.sub_file_path);
var file = try embed_file.owner.root.openFile(sub_file_path, .{});
defer file.close();
 
const stat = try file.stat();
 
const unchanged_metadata =
stat.size == embed_file.stat.size and
stat.mtime == embed_file.stat.mtime and
stat.inode == embed_file.stat.inode;
 
if (unchanged_metadata) return;
 
@panic("TODO: handle embed file incremental update");
}
 
pub fn obtainCObjectCacheManifest(comp: *const Compilation) Cache.Manifest {
var man = comp.cache_parent.obtain();
 
@@ -4298,11 +4294,12 @@ fn reportRetryableEmbedFileError(
) error{OutOfMemory}!void {
const mod = comp.bin_file.options.module.?;
const gpa = mod.gpa;
 
const src_loc: Module.SrcLoc = mod.declPtr(embed_file.owner_decl).srcLoc(mod);
 
const src_loc = embed_file.src_loc;
const ip = &mod.intern_pool;
const err_msg = try Module.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{
embed_file.mod.root, embed_file.sub_file_path, @errorName(err),
embed_file.owner.root,
ip.stringToSlice(embed_file.sub_file_path),
@errorName(err),
});
 
errdefer err_msg.destroy(gpa);
 
src/InternPool.zig
@@ -5059,13 +5059,13 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
assert(child == .u8_type);
if (bytes.len != len) {
assert(bytes.len == len_including_sentinel);
assert(bytes[@as(usize, @intCast(len))] == ip.indexToKey(sentinel).int.storage.u64);
assert(bytes[@intCast(len)] == ip.indexToKey(sentinel).int.storage.u64);
}
},
.elems => |elems| {
if (elems.len != len) {
assert(elems.len == len_including_sentinel);
assert(elems[@as(usize, @intCast(len))] == sentinel);
assert(elems[@intCast(len)] == sentinel);
}
},
.repeated_elem => |elem| {
@@ -5168,7 +5168,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
 
if (child == .u8_type) bytes: {
const string_bytes_index = ip.string_bytes.items.len;
try ip.string_bytes.ensureUnusedCapacity(gpa, @as(usize, @intCast(len_including_sentinel + 1)));
try ip.string_bytes.ensureUnusedCapacity(gpa, @intCast(len_including_sentinel + 1));
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len);
switch (aggregate.storage) {
.bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes[0..@intCast(len)]),
@@ -5178,15 +5178,15 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
break :bytes;
},
.int => |int| ip.string_bytes.appendAssumeCapacity(
@as(u8, @intCast(int.storage.u64)),
@intCast(int.storage.u64),
),
else => unreachable,
},
.repeated_elem => |elem| switch (ip.indexToKey(elem)) {
.undef => break :bytes,
.int => |int| @memset(
ip.string_bytes.addManyAsSliceAssumeCapacity(@as(usize, @intCast(len))),
@as(u8, @intCast(int.storage.u64)),
ip.string_bytes.addManyAsSliceAssumeCapacity(@intCast(len)),
@intCast(int.storage.u64),
),
else => unreachable,
},
@@ -5194,12 +5194,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
const has_internal_null =
std.mem.indexOfScalar(u8, ip.string_bytes.items[string_bytes_index..], 0) != null;
if (sentinel != .none) ip.string_bytes.appendAssumeCapacity(
@as(u8, @intCast(ip.indexToKey(sentinel).int.storage.u64)),
@intCast(ip.indexToKey(sentinel).int.storage.u64),
);
const string = if (has_internal_null)
@as(String, @enumFromInt(string_bytes_index))
const string: String = if (has_internal_null)
@enumFromInt(string_bytes_index)
else
(try ip.getOrPutTrailingString(gpa, @as(usize, @intCast(len_including_sentinel)))).toString();
(try ip.getOrPutTrailingString(gpa, @intCast(len_including_sentinel))).toString();
ip.items.appendAssumeCapacity(.{
.tag = .bytes,
.data = ip.addExtraAssumeCapacity(Bytes{
@@ -7557,7 +7557,7 @@ pub fn getOrPutStringFmt(
args: anytype,
) Allocator.Error!NullTerminatedString {
// ensure that references to string_bytes in args do not get invalidated
const len = @as(usize, @intCast(std.fmt.count(format, args) + 1));
const len: usize = @intCast(std.fmt.count(format, args) + 1);
try ip.string_bytes.ensureUnusedCapacity(gpa, len);
ip.string_bytes.writer(undefined).print(format, args) catch unreachable;
ip.string_bytes.appendAssumeCapacity(0);
@@ -7581,7 +7581,7 @@ pub fn getOrPutTrailingString(
len: usize,
) Allocator.Error!NullTerminatedString {
const string_bytes = &ip.string_bytes;
const str_index = @as(u32, @intCast(string_bytes.items.len - len));
const str_index: u32 = @intCast(string_bytes.items.len - len);
if (len > 0 and string_bytes.getLast() == 0) {
_ = string_bytes.pop();
} else {
@@ -7603,6 +7603,33 @@ pub fn getOrPutTrailingString(
}
}
 
/// Uses the last len bytes of ip.string_bytes as the key.
pub fn getTrailingAggregate(
ip: *InternPool,
gpa: Allocator,
ty: Index,
len: usize,
) Allocator.Error!Index {
try ip.items.ensureUnusedCapacity(gpa, 1);
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len);
const str: String = @enumFromInt(@intFromEnum(try getOrPutTrailingString(ip, gpa, len)));
const adapter: KeyAdapter = .{ .intern_pool = ip };
const gop = try ip.map.getOrPutAdapted(gpa, Key{ .aggregate = .{
.ty = ty,
.storage = .{ .bytes = ip.string_bytes.items[@intFromEnum(str)..] },
} }, adapter);
if (gop.found_existing) return @enumFromInt(gop.index);
 
ip.items.appendAssumeCapacity(.{
.tag = .bytes,
.data = ip.addExtraAssumeCapacity(Bytes{
.ty = ty,
.bytes = str,
}),
});
return @enumFromInt(ip.items.len - 1);
}
 
pub fn getString(ip: *InternPool, s: []const u8) OptionalNullTerminatedString {
if (ip.string_table.getKeyAdapted(s, std.hash_map.StringIndexAdapter{
.bytes = &ip.string_bytes,
 
src/Module.zig
@@ -1214,26 +1214,14 @@ pub const File = struct {
}
};
 
/// Represents the contents of a file loaded with `@embedFile`.
pub const EmbedFile = struct {
/// Relative to the owning package's root_src_dir.
/// Memory is stored in gpa, owned by EmbedFile.
sub_file_path: []const u8,
bytes: [:0]const u8,
/// Relative to the owning module's root directory.
sub_file_path: InternPool.NullTerminatedString,
/// Module that this file is a part of, managed externally.
owner: *Package.Module,
stat: Cache.File.Stat,
/// Package that this file is a part of, managed externally.
mod: *Package.Module,
/// The Decl that was created from the `@embedFile` to own this resource.
/// This is how zig knows what other Decl objects to invalidate if the file
/// changes on disk.
owner_decl: Decl.Index,
 
fn destroy(embed_file: *EmbedFile, mod: *Module) void {
const gpa = mod.gpa;
gpa.free(embed_file.sub_file_path);
gpa.free(embed_file.bytes);
gpa.destroy(embed_file);
}
val: InternPool.Index,
src_loc: SrcLoc,
};
 
/// This struct holds data necessary to construct API-facing `AllErrors.Message`.
@@ -2532,7 +2520,8 @@ pub fn deinit(mod: *Module) void {
var it = mod.embed_table.iterator();
while (it.next()) |entry| {
gpa.free(entry.key_ptr.*);
entry.value_ptr.*.destroy(mod);
const ef: *EmbedFile = entry.value_ptr.*;
gpa.destroy(ef);
}
mod.embed_table.deinit(gpa);
}
@@ -3543,35 +3532,6 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index)
func.analysis(ip).state = .queued;
}
 
pub fn updateEmbedFile(mod: *Module, embed_file: *EmbedFile) SemaError!void {
const tracy = trace(@src());
defer tracy.end();
 
// TODO we can potentially relax this if we store some more information along
// with decl dependency edges
const owner_decl = mod.declPtr(embed_file.owner_decl);
for (owner_decl.dependants.keys()) |dep_index| {
const dep = mod.declPtr(dep_index);
switch (dep.analysis) {
.unreferenced => unreachable,
.in_progress => continue, // already doing analysis, ok
.outdated => continue, // already queued for update
 
.file_failure,
.dependency_failure,
.sema_failure,
.sema_failure_retryable,
.liveness_failure,
.codegen_failure,
.codegen_failure_retryable,
.complete,
=> if (dep.generation != mod.generation) {
try mod.markOutdatedDecl(dep_index);
},
}
}
}
 
/// https://github.com/ziglang/zig/issues/14307
pub fn semaPkg(mod: *Module, pkg: *Package.Module) !void {
const file = (try mod.importPkg(pkg)).file;
@@ -4153,7 +4113,12 @@ pub fn importFile(
};
}
 
pub fn embedFile(mod: *Module, cur_file: *File, import_string: []const u8) !*EmbedFile {
pub fn embedFile(
mod: *Module,
cur_file: *File,
import_string: []const u8,
src_loc: SrcLoc,
) !InternPool.Index {
const gpa = mod.gpa;
 
if (cur_file.mod.deps.get(import_string)) |pkg| {
@@ -4166,13 +4131,17 @@ pub fn embedFile(mod: *Module, cur_file: *File, import_string: []const u8) !*Emb
defer if (!keep_resolved_path) gpa.free(resolved_path);
 
const gop = try mod.embed_table.getOrPut(gpa, resolved_path);
errdefer assert(mod.embed_table.remove(resolved_path));
if (gop.found_existing) return gop.value_ptr.*;
errdefer {
assert(mod.embed_table.remove(resolved_path));
keep_resolved_path = false;
}
if (gop.found_existing) return gop.value_ptr.*.val;
keep_resolved_path = true;
 
const sub_file_path = try gpa.dupe(u8, pkg.root_src_path);
errdefer gpa.free(sub_file_path);
 
return newEmbedFile(mod, pkg, sub_file_path, resolved_path, &keep_resolved_path, gop);
return newEmbedFile(mod, pkg, sub_file_path, resolved_path, gop, src_loc);
}
 
// The resolved path is used as the key in the table, to detect if a file
@@ -4189,8 +4158,12 @@ pub fn embedFile(mod: *Module, cur_file: *File, import_string: []const u8) !*Emb
defer if (!keep_resolved_path) gpa.free(resolved_path);
 
const gop = try mod.embed_table.getOrPut(gpa, resolved_path);
errdefer assert(mod.embed_table.remove(resolved_path));
if (gop.found_existing) return gop.value_ptr.*;
errdefer {
assert(mod.embed_table.remove(resolved_path));
keep_resolved_path = false;
}
if (gop.found_existing) return gop.value_ptr.*.val;
keep_resolved_path = true;
 
const resolved_root_path = try std.fs.path.resolve(gpa, &.{
cur_file.mod.root.root_dir.path orelse ".",
@@ -4213,7 +4186,7 @@ pub fn embedFile(mod: *Module, cur_file: *File, import_string: []const u8) !*Emb
};
errdefer gpa.free(sub_file_path);
 
return newEmbedFile(mod, cur_file.mod, sub_file_path, resolved_path, &keep_resolved_path, gop);
return newEmbedFile(mod, cur_file.mod, sub_file_path, resolved_path, gop, src_loc);
}
 
/// https://github.com/ziglang/zig/issues/14307
@@ -4222,9 +4195,9 @@ fn newEmbedFile(
pkg: *Package.Module,
sub_file_path: []const u8,
resolved_path: []const u8,
keep_resolved_path: *bool,
gop: std.StringHashMapUnmanaged(*EmbedFile).GetOrPutResult,
) !*EmbedFile {
src_loc: SrcLoc,
) !InternPool.Index {
const gpa = mod.gpa;
 
const new_file = try gpa.create(EmbedFile);
@@ -4239,57 +4212,54 @@ fn newEmbedFile(
.inode = actual_stat.inode,
.mtime = actual_stat.mtime,
};
const size_usize = std.math.cast(usize, actual_stat.size) orelse return error.Overflow;
const bytes = try file.readToEndAllocOptions(gpa, std.math.maxInt(u32), size_usize, 1, 0);
errdefer gpa.free(bytes);
const size = std.math.cast(usize, actual_stat.size) orelse return error.Overflow;
const ip = &mod.intern_pool;
 
const ptr = try ip.string_bytes.addManyAsSlice(gpa, size);
const actual_read = try file.readAll(ptr);
if (actual_read != size) return error.UnexpectedEndOfFile;
 
if (mod.comp.whole_cache_manifest) |whole_cache_manifest| {
const copied_resolved_path = try gpa.dupe(u8, resolved_path);
errdefer gpa.free(copied_resolved_path);
mod.comp.whole_cache_manifest_mutex.lock();
defer mod.comp.whole_cache_manifest_mutex.unlock();
try whole_cache_manifest.addFilePostContents(copied_resolved_path, bytes, stat);
try whole_cache_manifest.addFilePostContents(copied_resolved_path, ptr, stat);
}
 
keep_resolved_path.* = true; // It's now owned by embed_table.
const array_ty = try ip.get(gpa, .{ .array_type = .{
.len = size,
.sentinel = .zero_u8,
.child = .u8_type,
} });
const array_val = try ip.getTrailingAggregate(gpa, array_ty, size);
 
const ptr_ty = (try mod.ptrType(.{
.child = array_ty,
.flags = .{
.alignment = .none,
.is_const = true,
.address_space = .generic,
},
})).toIntern();
 
const ptr_val = try ip.get(gpa, .{ .ptr = .{
.ty = ptr_ty,
.addr = .{ .anon_decl = .{
.val = array_val,
.orig_ty = ptr_ty,
} },
} });
 
gop.value_ptr.* = new_file;
new_file.* = .{
.sub_file_path = sub_file_path,
.bytes = bytes,
.sub_file_path = try ip.getOrPutString(gpa, sub_file_path),
.owner = pkg,
.stat = stat,
.mod = pkg,
.owner_decl = undefined, // Set by Sema immediately after this function returns.
.val = ptr_val,
.src_loc = src_loc,
};
return new_file;
}
 
pub fn detectEmbedFileUpdate(mod: *Module, embed_file: *EmbedFile) !void {
var file = try embed_file.mod.root.openFile(embed_file.sub_file_path, .{});
defer file.close();
 
const stat = try file.stat();
 
const unchanged_metadata =
stat.size == embed_file.stat.size and
stat.mtime == embed_file.stat.mtime and
stat.inode == embed_file.stat.inode;
 
if (unchanged_metadata) return;
 
const gpa = mod.gpa;
const size_usize = std.math.cast(usize, stat.size) orelse return error.Overflow;
const bytes = try file.readToEndAllocOptions(gpa, std.math.maxInt(u32), size_usize, 1, 0);
gpa.free(embed_file.bytes);
embed_file.bytes = bytes;
embed_file.stat = .{
.size = stat.size,
.mtime = stat.mtime,
.inode = stat.inode,
};
 
mod.comp.mutex.lock();
defer mod.comp.mutex.unlock();
try mod.comp.work_queue.writeItem(.{ .update_embed_file = embed_file });
return ptr_val;
}
 
pub fn scanNamespace(
 
src/Sema.zig
@@ -3739,7 +3739,7 @@ fn resolveComptimeKnownAllocValue(sema: *Sema, block: *Block, alloc: Air.Inst.Re
// The simple strategy failed: we must create a mutable comptime alloc and
// perform all of the runtime store operations at comptime.
 
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(); // TODO: comptime value mutation without Decl
defer anon_decl.deinit();
const decl_index = try anon_decl.finish(elem_ty, try mod.undefValue(elem_ty), ptr_info.flags.alignment);
 
@@ -5454,7 +5454,7 @@ fn storeToInferredAllocComptime(
// The alloc will turn into a Decl.
if (try sema.resolveMaybeUndefValAllowVariables(operand)) |operand_val| store: {
if (operand_val.getVariable(sema.mod) != null) break :store;
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(); // TODO: comptime value mutation without Decl
defer anon_decl.deinit();
iac.decl_index = try anon_decl.finish(operand_ty, operand_val, iac.alignment);
try sema.comptime_mutable_decls.append(iac.decl_index);
@@ -6113,7 +6113,7 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
else => |e| return e,
};
const decl_index = if (operand.val.getFunction(sema.mod)) |function| function.owner_decl else blk: {
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(); // TODO: export value without Decl
defer anon_decl.deinit();
break :blk try anon_decl.finish(operand.ty, operand.val, .none);
};
@@ -13155,7 +13155,8 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
return sema.fail(block, operand_src, "file path name cannot be empty", .{});
}
 
const embed_file = mod.embedFile(block.getFileScope(mod), name) catch |err| switch (err) {
const src_loc = operand_src.toSrcLoc(mod.declPtr(block.src_decl), mod);
const val = mod.embedFile(block.getFileScope(mod), name, src_loc) catch |err| switch (err) {
error.ImportOutsideModulePath => {
return sema.fail(block, operand_src, "embed of file outside package path: '{s}'", .{name});
},
@@ -13166,30 +13167,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
},
};
 
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
 
// TODO instead of using `.bytes`, create a new value tag for pointing at
// a `*Module.EmbedFile`. The purpose of this would be:
// - If only the length is read and the bytes are not inspected by comptime code,
// there can be an optimization where the codegen backend does a copy_file_range
// into the final binary, and never loads the data into memory.
// - When a Decl is destroyed, it can free the `*Module.EmbedFile`.
const ty = try mod.arrayType(.{
.len = embed_file.bytes.len,
.sentinel = .zero_u8,
.child = .u8_type,
});
embed_file.owner_decl = try anon_decl.finish(
ty,
(try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .bytes = embed_file.bytes },
} })).toValue(),
.none, // default alignment
);
 
return sema.analyzeDeclRef(embed_file.owner_decl);
return Air.internedToRef(val);
}
 
fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
 
src/main.zig
@@ -607,16 +607,6 @@ const usage_build_generic =
\\
;
 
const repl_help =
\\Commands:
\\ update Detect changes to source files and update output files.
\\ run Execute the output file, if it is an executable or test.
\\ update-and-run Perform an `update` followed by `run`.
\\ help Print this text
\\ exit Quit this repl
\\
;
 
const SOName = union(enum) {
no,
yes_default_value,
 
test/behavior.zig
@@ -55,7 +55,6 @@ test {
_ = @import("behavior/bugs/3384.zig");
_ = @import("behavior/bugs/3586.zig");
_ = @import("behavior/bugs/3742.zig");
_ = @import("behavior/bugs/3779.zig");
_ = @import("behavior/bugs/4328.zig");
_ = @import("behavior/bugs/4560.zig");
_ = @import("behavior/bugs/4769_a.zig");
@@ -209,6 +208,7 @@ test {
_ = @import("behavior/slice.zig");
_ = @import("behavior/slice_sentinel_comptime.zig");
_ = @import("behavior/src.zig");
_ = @import("behavior/string_literals.zig");
_ = @import("behavior/struct.zig");
_ = @import("behavior/struct_contains_null_ptr_itself.zig");
_ = @import("behavior/struct_contains_slice_of_itself.zig");
 
filename was Deleted
 
test/behavior/bugs/3779.zig
@@ -36,11 +36,11 @@ test "@typeName() returns a string literal" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
try std.testing.expect(*const [type_name.len:0]u8 == @TypeOf(type_name));
try std.testing.expect(std.mem.eql(u8, "behavior.bugs.3779.TestType", type_name));
try std.testing.expect(std.mem.eql(u8, "behavior.bugs.3779.TestType", ptr_type_name[0..type_name.len]));
try std.testing.expect(std.mem.eql(u8, "behavior.string_literals.TestType", type_name));
try std.testing.expect(std.mem.eql(u8, "behavior.string_literals.TestType", ptr_type_name[0..type_name.len]));
}
 
const actual_contents = @embedFile("3779_file_to_embed.txt");
const actual_contents = @embedFile("file_to_embed.txt");
const ptr_actual_contents: [*:0]const u8 = actual_contents;
const expected_contents = "hello zig\n";
 
@@ -64,7 +64,7 @@ test "@src() returns a struct containing 0-terminated string slices" {
 
const src = testFnForSrc();
try std.testing.expect([:0]const u8 == @TypeOf(src.file));
try std.testing.expect(std.mem.endsWith(u8, src.file, "3779.zig"));
try std.testing.expect(std.mem.endsWith(u8, src.file, "string_literals.zig"));
try std.testing.expect([:0]const u8 == @TypeOf(src.fn_name));
try std.testing.expect(std.mem.endsWith(u8, src.fn_name, "testFnForSrc"));