1 //! Zig Compilation Unit
3 //! Compilation of all Zig source code is represented by one `Zcu`.
5 //! Each `Compilation` has exactly one `Zcu` or none, depending on whether or not
6 //! there is any Zig source code.
8 const std = @import("std");
9 const builtin = @import("builtin");
11 const Allocator = std.mem.Allocator;
12 const assert = std.debug.assert;
13 const log = std.log.scoped(.zcu);
14 const BigIntConst = std.math.big.int.Const;
15 const BigIntMutable = std.math.big.int.Mutable;
16 const Target = std.Target;
17 const Ast = std.zig.Ast;
20 const Compilation = @import("Compilation.zig");
21 const Cache = std.Build.Cache;
22 pub const Value = @import("Value.zig");
23 pub const Type = @import("Type.zig");
24 const Package = @import("Package.zig");
25 const link = @import("link.zig");
26 const Air = @import("Air.zig");
27 const Zir = std.zig.Zir;
28 const trace = @import("tracy.zig").trace;
29 const AstGen = std.zig.AstGen;
30 const Sema = @import("Sema.zig");
31 const target_util = @import("target.zig");
32 const build_options = @import("build_options");
33 const Liveness = @import("Liveness.zig");
34 const isUpDir = @import("introspect.zig").isUpDir;
35 const clang = @import("clang.zig");
36 const InternPool = @import("InternPool.zig");
37 const Alignment = InternPool.Alignment;
38 const AnalUnit = InternPool.AnalUnit;
39 const BuiltinFn = std.zig.BuiltinFn;
40 const LlvmObject = @import("codegen/llvm.zig").Object;
41 const dev = @import("dev.zig");
42 const Zoir = std.zig.Zoir;
43 const ZonGen = std.zig.ZonGen;
46 @setEvalBranchQuota(4000);
48 @typeInfo(Zir.Inst.Ref).@"enum".fields,
49 @typeInfo(Air.Inst.Ref).@"enum".fields,
50 @typeInfo(InternPool.Index).@"enum".fields,
51 ) |zir_field, air_field, ip_field| {
52 assert(mem.eql(u8, zir_field.name, ip_field.name));
53 assert(mem.eql(u8, air_field.name, ip_field.name));
57 /// General-purpose allocator. Used for both temporary and long-term storage.
60 /// Usually, the LlvmObject is managed by linker code; however, in the case
61 /// that -fno-emit-bin is specified, the linker code never executes, so we
62 /// store the LlvmObject here.
63 llvm_object: ?LlvmObject.Ptr,
65 /// Pointer to externally managed resource.
66 root_mod: *Package.Module,
67 /// Normally, `main_mod` and `root_mod` are the same. The exception is `zig test`, in which
68 /// `root_mod` is the test runner, and `main_mod` is the user's source file which has the tests.
69 main_mod: *Package.Module,
70 std_mod: *Package.Module,
71 sema_prog_node: std.Progress.Node = std.Progress.Node.none,
72 codegen_prog_node: std.Progress.Node = std.Progress.Node.none,
74 /// Used by AstGen worker to load and store ZIR cache.
75 global_zir_cache: Compilation.Directory,
76 /// Used by AstGen worker to load and store ZIR cache.
77 local_zir_cache: Compilation.Directory,
79 /// This is where all `Export` values are stored. Not all values here are necessarily valid exports;
80 /// to enumerate all exports, `single_exports` and `multi_exports` must be consulted.
81 all_exports: std.ArrayListUnmanaged(Export) = .empty,
82 /// This is a list of free indices in `all_exports`. These indices may be reused by exports from
83 /// future semantic analysis.
84 free_exports: std.ArrayListUnmanaged(Export.Index) = .empty,
85 /// Maps from an `AnalUnit` which performs a single export, to the index into `all_exports` of
86 /// the export it performs. Note that the key is not the `Decl` being exported, but the `AnalUnit`
87 /// whose analysis triggered the export.
88 single_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, Export.Index) = .empty,
89 /// Like `single_exports`, but for `AnalUnit`s which perform multiple exports.
90 /// The exports are `all_exports.items[index..][0..len]`.
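/// For example (illustrative), enumerating the exports performed by a unit `u` means first
/// checking `single_exports.get(u)` and, failing that, slicing
/// `all_exports.items[entry.index..][0..entry.len]` using the entry stored in this map.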
91 multi_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct {
96 /// The set of all the Zig source files in the Zig Compilation Unit. Tracked in
97 /// order to iterate over it and check which source files have been modified on
98 /// the file system when an update is requested, as well as to cache `@import`
101 /// Keys are fully resolved file paths. This table owns the keys and values.
103 /// Protected by Compilation's mutex.
105 /// Not serialized. This state is reconstructed during the first call to
106 /// `Compilation.update` of the process for a given `Compilation`.
108 /// Indexes correspond 1:1 to `files`.
109 import_table: std.StringArrayHashMapUnmanaged(File.Index) = .empty,
111 /// The set of all the files which have been loaded with `@embedFile` in the Module.
112 /// We keep track of this in order to iterate over it and check which files have been
113 /// modified on the file system when an update is requested, as well as to cache
114 /// `@embedFile` results.
115 /// Keys are fully resolved file paths. This table owns the keys and values.
116 embed_table: std.StringArrayHashMapUnmanaged(*EmbedFile) = .empty,
118 /// Stores all Type and Value objects.
119 /// The idea is that this will be periodically garbage-collected, but such logic
120 /// is not yet implemented.
121 intern_pool: InternPool = .empty,
123 analysis_in_progress: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .empty,
124 /// The ErrorMsg memory is owned by the `AnalUnit`, using Module's general purpose allocator.
125 failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, *ErrorMsg) = .empty,
126 /// This `AnalUnit` failed semantic analysis because it required analysis of another `AnalUnit` which itself failed.
127 transitive_failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .empty,
128 /// This `Nav` succeeded analysis, but failed codegen.
129 /// This may be a simple "value" `Nav`, or it may be a function.
130 /// The ErrorMsg memory is owned by the `AnalUnit`, using Module's general purpose allocator.
131 failed_codegen: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, *ErrorMsg) = .empty,
132 failed_types: std.AutoArrayHashMapUnmanaged(InternPool.Index, *ErrorMsg) = .empty,
133 /// Keep track of one `@compileLog` callsite per `AnalUnit`.
134 /// The value is the source location of the `@compileLog` call, convertible to a `LazySrcLoc`.
135 compile_log_sources: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct {
136 base_node_inst: InternPool.TrackedInst.Index,
138 pub fn src(self: @This()) LazySrcLoc {
140 .base_node_inst = self.base_node_inst,
141 .offset = LazySrcLoc.Offset.nodeOffset(self.node_offset),
145 /// Using a map here for consistency with the other fields.
146 /// The ErrorMsg memory is owned by the `File`, using Module's general purpose allocator.
147 failed_files: std.AutoArrayHashMapUnmanaged(*File, ?*ErrorMsg) = .empty,
148 failed_exports: std.AutoArrayHashMapUnmanaged(Export.Index, *ErrorMsg) = .empty,
149 /// If analysis failed due to a cimport error, the corresponding Clang errors
151 cimport_errors: std.AutoArrayHashMapUnmanaged(AnalUnit, std.zig.ErrorBundle) = .empty,
153 /// Maximum number of distinct error values, set by --error-limit
154 error_limit: ErrorInt,
156 /// Value is the number of PO dependencies of this AnalUnit.
157 /// This value will decrease as we perform semantic analysis to learn what is outdated.
158 /// If any of these PO deps is outdated, the entry will be moved to `outdated`.
159 potentially_outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty,
160 /// Value is the number of PO dependencies of this AnalUnit.
161 /// Once this value drops to 0, the AnalUnit is a candidate for re-analysis.
162 outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty,
163 /// This contains all `AnalUnit`s in `outdated` whose PO dependency count is 0.
164 /// Such `AnalUnit`s are ready for immediate re-analysis.
165 /// See `findOutdatedToAnalyze` for details.
166 outdated_ready: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .empty,
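// Illustrative sketch (not the actual implementation) of how a unit `u` moves between the
// three maps above when one of its PO dependencies is confirmed outdated:
//
//   if (zcu.potentially_outdated.fetchSwapRemove(u)) |kv| {
//       try zcu.outdated.put(gpa, u, kv.value - 1);
//       if (kv.value - 1 == 0) try zcu.outdated_ready.put(gpa, u, {});
//   }
//
// Per the field docs above, a unit already in `outdated` instead has its PO count
// decremented, joining `outdated_ready` once that count reaches zero.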
167 /// This contains a list of AnalUnit whose analysis or codegen failed, but the
168 /// failure was something like running out of disk space, and trying again may
169 /// succeed. On the next update, we will flush this list, marking all members of
171 retryable_failures: std.ArrayListUnmanaged(AnalUnit) = .empty,
173 func_body_analysis_queued: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .empty,
174 nav_val_analysis_queued: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void) = .empty,
176 /// These are the modules which we initially queue for analysis in `Compilation.update`.
177 /// `resolveReferences` will use these as the roots of its reachability traversal.
178 analysis_roots: std.BoundedArray(*Package.Module, 3) = .{},
179 /// This is the cached result of `Zcu.resolveReferences`. It is computed on-demand, and
180 /// reset to `null` when any semantic analysis occurs (since this invalidates the data).
181 /// Allocated into `gpa`.
182 resolved_references: ?std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) = null,
184 skip_analysis_errors: bool = false,
186 stage1_flags: packed struct {
187 have_winmain: bool = false,
188 have_wwinmain: bool = false,
189 have_winmain_crt_startup: bool = false,
190 have_wwinmain_crt_startup: bool = false,
191 have_dllmain_crt_startup: bool = false,
192 have_c_main: bool = false,
196 compile_log_text: std.ArrayListUnmanaged(u8) = .empty,
198 test_functions: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void) = .empty,
200 global_assembly: std.AutoArrayHashMapUnmanaged(AnalUnit, []u8) = .empty,
202 /// Key is the `AnalUnit` *performing* the reference. This representation allows
203 /// incremental updates to quickly delete references caused by a specific `AnalUnit`.
204 /// Value is index into `all_references` of the first reference triggered by the unit.
205 /// The `next` field on the `Reference` forms a linked list of all references
206 /// triggered by the key `AnalUnit`.
207 reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty,
208 all_references: std.ArrayListUnmanaged(Reference) = .empty,
209 /// Freelist of indices in `all_references`.
210 free_references: std.ArrayListUnmanaged(u32) = .empty,
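// Illustrative sketch (not part of the actual API): walking every `Reference` made by a
// unit `u`, following the intrusive list described above:
//
//   var i = zcu.reference_table.get(u) orelse return;
//   while (i != std.math.maxInt(u32)) : (i = zcu.all_references.items[i].next) {
//       // `zcu.all_references.items[i].referenced` is the unit being referenced.
//   }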
212 /// Key is the `AnalUnit` *performing* the reference. This representation allows
213 /// incremental updates to quickly delete references caused by a specific `AnalUnit`.
214 /// Value is index into `all_type_references` of the first reference triggered by the unit.
215 /// The `next` field on the `TypeReference` forms a linked list of all type references
216 /// triggered by the key `AnalUnit`.
217 type_reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty,
218 all_type_references: std.ArrayListUnmanaged(TypeReference) = .empty,
219 /// Freelist of indices in `all_type_references`.
220 free_type_references: std.ArrayListUnmanaged(u32) = .empty,
222 /// Populated by analysis of `AnalUnit.wrap(.{ .memoized_state = s })`, where `s` depends on the element.
223 builtin_decl_values: BuiltinDecl.Memoized = .initFill(.none),
227 pub const PerThread = @import("Zcu/PerThread.zig");
229 /// Names of declarations in `std.builtin` whose values are memoized in a `BuiltinDecl.Memoized`.
230 /// The name must exactly match the declaration name, as comptime logic is used to compute the namespace accesses.
231 /// Parent namespaces must be before their children in this enum. For instance, `.Type` must be before `.@"Type.Fn"`.
232 /// Additionally, parent namespaces must be resolved in the same stage as their children; see `BuiltinDecl.stage`.
233 pub const BuiltinDecl = enum {
256 @"Type.Pointer.Size",
268 @"Type.ContainerLayout",
274 @"panic.sentinelMismatch",
275 @"panic.unwrapError",
276 @"panic.outOfBounds",
277 @"panic.startGreaterThanEnd",
278 @"panic.inactiveUnionField",
279 @"panic.reachedUnreachable",
282 @"panic.incorrectAlignment",
283 @"panic.invalidErrorCode",
284 @"panic.castTruncatedData",
285 @"panic.negativeToUnsigned",
286 @"panic.integerOverflow",
287 @"panic.shlOverflow",
288 @"panic.shrOverflow",
289 @"panic.divideByZero",
290 @"panic.exactDivisionRemainder",
291 @"panic.integerPartOutOfBounds",
292 @"panic.corruptSwitch",
293 @"panic.shiftRhsTooBig",
294 @"panic.invalidEnumValue",
295 @"panic.forLenMismatch",
296 @"panic.memcpyLenMismatch",
297 @"panic.memcpyAlias",
298 @"panic.noreturnReturned",
302 /// Determines what kind of validation will be done to the decl's value.
303 pub fn kind(decl: BuiltinDecl) enum { type, func, string } {
304 return switch (decl) {
305 .returnError => .func,
330 .@"Type.Pointer.Size",
341 .@"Type.StructField",
342 .@"Type.ContainerLayout",
344 .@"Type.Declaration",
350 .@"panic.sentinelMismatch",
351 .@"panic.unwrapError",
352 .@"panic.outOfBounds",
353 .@"panic.startGreaterThanEnd",
354 .@"panic.inactiveUnionField",
355 .@"panic.reachedUnreachable",
356 .@"panic.unwrapNull",
357 .@"panic.castToNull",
358 .@"panic.incorrectAlignment",
359 .@"panic.invalidErrorCode",
360 .@"panic.castTruncatedData",
361 .@"panic.negativeToUnsigned",
362 .@"panic.integerOverflow",
363 .@"panic.shlOverflow",
364 .@"panic.shrOverflow",
365 .@"panic.divideByZero",
366 .@"panic.exactDivisionRemainder",
367 .@"panic.integerPartOutOfBounds",
368 .@"panic.corruptSwitch",
369 .@"panic.shiftRhsTooBig",
370 .@"panic.invalidEnumValue",
371 .@"panic.forLenMismatch",
372 .@"panic.memcpyLenMismatch",
373 .@"panic.memcpyAlias",
374 .@"panic.noreturnReturned",
379 /// Resolution of these values is done in three distinct stages:
380 /// * Resolution of `std.builtin.Panic` and everything under it
381 /// * Resolution of `VaList`
382 /// * Everything else
384 /// Panics are separated because they are provided by the user, so must be able to use
385 /// things like reification.
387 /// `VaList` is separate because its value depends on the target, so it needs some reflection
388 /// machinery to work; additionally, it is `@compileError` on some targets, so must be referenced
390 pub fn stage(decl: BuiltinDecl) InternPool.MemoizedStateStage {
391 if (decl == .VaList) return .va_list;
393 if (@intFromEnum(decl) <= @intFromEnum(BuiltinDecl.@"Type.Declaration")) {
400 /// Based on the tag name, determines how to access this decl; either as a direct child of the
401 /// `std.builtin` namespace, or as a child of some preceding `BuiltinDecl` value.
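/// For example (illustrative): `.@"panic.outOfBounds"` is accessed as the child `"outOfBounds"`
/// of `.panic`, whereas `.Type` is accessed directly from `std.builtin` as `"Type"`.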
402 pub fn access(decl: BuiltinDecl) union(enum) {
404 nested: struct { BuiltinDecl, []const u8 },
406 @setEvalBranchQuota(2000);
407 return switch (decl) {
408 inline else => |tag| {
409 const name = @tagName(tag);
410 const split = (comptime std.mem.lastIndexOfScalar(u8, name, '.')) orelse return .{ .direct = name };
411 const parent = @field(BuiltinDecl, name[0..split]);
412 comptime assert(@intFromEnum(parent) < @intFromEnum(tag)); // dependencies ordered correctly
413 return .{ .nested = .{ parent, name[split + 1 ..] } };
418 const Memoized = std.enums.EnumArray(BuiltinDecl, InternPool.Index);
421 pub const SimplePanicId = enum {
428 negative_to_unsigned,
433 exact_division_remainder,
434 integer_part_out_of_bounds,
443 pub fn toBuiltin(id: SimplePanicId) BuiltinDecl {
446 .reached_unreachable => .@"panic.reachedUnreachable",
447 .unwrap_null => .@"panic.unwrapNull",
448 .cast_to_null => .@"panic.castToNull",
449 .incorrect_alignment => .@"panic.incorrectAlignment",
450 .invalid_error_code => .@"panic.invalidErrorCode",
451 .cast_truncated_data => .@"panic.castTruncatedData",
452 .negative_to_unsigned => .@"panic.negativeToUnsigned",
453 .integer_overflow => .@"panic.integerOverflow",
454 .shl_overflow => .@"panic.shlOverflow",
455 .shr_overflow => .@"panic.shrOverflow",
456 .divide_by_zero => .@"panic.divideByZero",
457 .exact_division_remainder => .@"panic.exactDivisionRemainder",
458 .integer_part_out_of_bounds => .@"panic.integerPartOutOfBounds",
459 .corrupt_switch => .@"panic.corruptSwitch",
460 .shift_rhs_too_big => .@"panic.shiftRhsTooBig",
461 .invalid_enum_value => .@"panic.invalidEnumValue",
462 .for_len_mismatch => .@"panic.forLenMismatch",
463 .memcpy_len_mismatch => .@"panic.memcpyLenMismatch",
464 .memcpy_alias => .@"panic.memcpyAlias",
465 .noreturn_returned => .@"panic.noreturnReturned",
471 pub const GlobalErrorSet = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void);
473 pub const CImportError = struct {
478 source_line: ?[*:0]u8,
481 pub fn deinit(err: CImportError, gpa: Allocator) void {
482 if (err.path) |some| gpa.free(std.mem.span(some));
483 if (err.source_line) |some| gpa.free(std.mem.span(some));
484 gpa.free(std.mem.span(err.msg));
488 pub const ErrorInt = u32;
490 pub const Exported = union(enum) {
491 /// The Nav being exported. Note this is *not* the Nav corresponding to the AnalUnit performing the export.
492 nav: InternPool.Nav.Index,
493 /// Constant value being exported.
494 uav: InternPool.Index,
496 pub fn getValue(exported: Exported, zcu: *Zcu) Value {
497 return switch (exported) {
498 .nav => |nav| zcu.navValue(nav),
499 .uav => |uav| Value.fromInterned(uav),
503 pub fn getAlign(exported: Exported, zcu: *Zcu) Alignment {
504 return switch (exported) {
505 .nav => |nav| switch (zcu.intern_pool.getNav(nav).status) {
506 .unresolved => unreachable,
507 .type_resolved => |r| r.alignment,
508 .fully_resolved => |r| r.alignment,
515 pub const Export = struct {
522 /// Indicates that the failure was due to a temporary issue, such as an I/O error
523 /// when writing to the output file. Retrying the export may succeed.
528 pub const Options = struct {
529 name: InternPool.NullTerminatedString,
530 linkage: std.builtin.GlobalLinkage = .strong,
531 section: InternPool.OptionalNullTerminatedString = .none,
532 visibility: std.builtin.SymbolVisibility = .default,
535 /// Index into `all_exports`.
536 pub const Index = enum(u32) {
539 pub fn ptr(i: Index, zcu: *const Zcu) *Export {
540 return &zcu.all_exports.items[@intFromEnum(i)];
545 pub const Reference = struct {
546 /// The `AnalUnit` whose semantic analysis was triggered by this reference.
547 referenced: AnalUnit,
548 /// Index into `all_references` of the next `Reference` triggered by the same `AnalUnit`.
549 /// `std.math.maxInt(u32)` is the sentinel.
551 /// The source location of the reference.
555 pub const TypeReference = struct {
556 /// The container type which was referenced.
557 referenced: InternPool.Index,
558 /// Index into `all_type_references` of the next `TypeReference` triggered by the same `AnalUnit`.
559 /// `std.math.maxInt(u32)` is the sentinel.
561 /// The source location of the reference.
565 /// The container that structs, enums, unions, and opaques have.
566 pub const Namespace = struct {
567 parent: OptionalIndex,
568 file_scope: File.Index,
570 /// Will be a struct, enum, union, or opaque.
571 owner_type: InternPool.Index,
572 /// Members of the namespace which are marked `pub`.
573 pub_decls: std.ArrayHashMapUnmanaged(InternPool.Nav.Index, void, NavNameContext, true) = .empty,
574 /// Members of the namespace which are *not* marked `pub`.
575 priv_decls: std.ArrayHashMapUnmanaged(InternPool.Nav.Index, void, NavNameContext, true) = .empty,
576 /// All `usingnamespace` declarations in this namespace which are marked `pub`.
577 pub_usingnamespace: std.ArrayListUnmanaged(InternPool.Nav.Index) = .empty,
578 /// All `usingnamespace` declarations in this namespace which are *not* marked `pub`.
579 priv_usingnamespace: std.ArrayListUnmanaged(InternPool.Nav.Index) = .empty,
580 /// All `comptime` declarations in this namespace. We store these purely so that incremental
581 /// compilation can re-use the existing `ComptimeUnit`s when a namespace changes.
582 comptime_decls: std.ArrayListUnmanaged(InternPool.ComptimeUnit.Id) = .empty,
583 /// All `test` declarations in this namespace. We store these purely so that incremental
584 /// compilation can re-use the existing `Nav`s when a namespace changes.
585 test_decls: std.ArrayListUnmanaged(InternPool.Nav.Index) = .empty,
587 pub const Index = InternPool.NamespaceIndex;
588 pub const OptionalIndex = InternPool.OptionalNamespaceIndex;
590 const NavNameContext = struct {
593 pub fn hash(ctx: NavNameContext, nav: InternPool.Nav.Index) u32 {
594 const name = ctx.zcu.intern_pool.getNav(nav).name;
595 return std.hash.uint32(@intFromEnum(name));
598 pub fn eql(ctx: NavNameContext, a_nav: InternPool.Nav.Index, b_nav: InternPool.Nav.Index, b_index: usize) bool {
600 const a_name = ctx.zcu.intern_pool.getNav(a_nav).name;
601 const b_name = ctx.zcu.intern_pool.getNav(b_nav).name;
602 return a_name == b_name;
606 pub const NameAdapter = struct {
609 pub fn hash(ctx: NameAdapter, s: InternPool.NullTerminatedString) u32 {
611 return std.hash.uint32(@intFromEnum(s));
614 pub fn eql(ctx: NameAdapter, a: InternPool.NullTerminatedString, b_nav: InternPool.Nav.Index, b_index: usize) bool {
616 return a == ctx.zcu.intern_pool.getNav(b_nav).name;
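// Illustrative usage sketch (assumes the adapted-lookup API of `std.ArrayHashMapUnmanaged`):
// looking up a namespace member by name might look like
// `ns.pub_decls.getKeyAdapted(name, NameAdapter{ .zcu = zcu })`.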
620 pub fn fileScope(ns: Namespace, zcu: *Zcu) *File {
621 return zcu.fileByIndex(ns.file_scope);
624 pub fn fileScopeIp(ns: Namespace, ip: *InternPool) *File {
625 return ip.filePtr(ns.file_scope);
628 /// This renders e.g. "std/fs.zig:Dir.OpenOptions"
629 pub fn renderFullyQualifiedDebugName(
632 name: InternPool.NullTerminatedString,
634 ) @TypeOf(writer).Error!void {
635 const sep: u8 = if (ns.parent.unwrap()) |parent| sep: {
636 try zcu.namespacePtr(parent).renderFullyQualifiedDebugName(
638 zcu.declPtr(ns.decl_index).name,
643 try ns.fileScope(zcu).renderFullyQualifiedDebugName(writer);
646 if (name != .empty) try writer.print("{c}{}", .{ sep, name.fmt(&zcu.intern_pool) });
649 pub fn internFullyQualifiedName(
653 tid: Zcu.PerThread.Id,
654 name: InternPool.NullTerminatedString,
655 ) !InternPool.NullTerminatedString {
656 const ns_name = Type.fromInterned(ns.owner_type).containerTypeName(ip);
657 if (name == .empty) return ns_name;
658 return ip.getOrPutStringFmt(gpa, tid, "{}.{}", .{ ns_name.fmt(ip), name.fmt(ip) }, .no_embedded_nulls);
662 pub const File = struct {
663 /// Relative to the owning package's root source directory.
664 /// Memory is stored in gpa, owned by File.
665 sub_file_path: []const u8,
668 /// We have not yet attempted to load this file.
669 /// `stat` is not populated and may be `undefined`.
671 /// A filesystem access failed. It should be retried on the next update.
672 /// There is a `failed_files` entry containing a non-`null` message.
673 /// `stat` is not populated and may be `undefined`.
675 /// Parsing/AstGen/ZonGen of this file has failed.
676 /// There is an error in `zir` or `zoir`.
677 /// There is a `failed_files` entry (with a `null` message).
678 /// `stat` is populated.
680 /// Parsing and AstGen/ZonGen of this file have succeeded.
681 /// `stat` is populated.
684 /// Whether this is populated depends on `status`.
685 stat: Cache.File.Stat,
687 source: ?[:0]const u8,
692 /// Module that this file is a part of, managed externally.
693 mod: *Package.Module,
694 /// Whether this file is a part of multiple packages. This is an error condition which will be reported after AstGen.
695 multi_pkg: bool = false,
696 /// List of references to this file, used for multi-package errors.
697 references: std.ArrayListUnmanaged(File.Reference) = .empty,
699 /// The ZIR for this file from the last update with no file failures. As such, this ZIR is never
700 /// failed (although it may have compile errors).
702 /// Because updates with file failures do not perform ZIR mapping or semantic analysis, we keep
703 /// this around so we have the "old" ZIR to map when an update is ready to do so. Once such an
704 /// update occurs, this field is unloaded, since it is no longer necessary.
706 /// In other words, if `TrackedInst`s are tied to ZIR other than what's in the `zir` field, this
707 /// field is populated with that old ZIR.
708 prev_zir: ?*Zir = null,
710 /// This field serves a similar purpose to `prev_zir`, but for ZOIR. However, since we do not
711 /// need to map old ZOIR to new ZOIR -- instead only invalidating dependencies if the ZOIR
712 /// changed -- this field is just a simple boolean.
714 /// When `zoir` is updated, this field is set to `true`. In `updateZirRefs`, if this is `true`,
715 /// we invalidate the corresponding `zon_file` dependency, and reset it to `false`.
716 zoir_invalidated: bool = false,
718 /// A single reference to a file.
719 pub const Reference = union(enum) {
720 /// The file is imported directly (i.e. not as a package) with @import.
723 token: Ast.TokenIndex,
725 /// The file is the root of a module.
726 root: *Package.Module,
729 pub fn getMode(self: File) Ast.Mode {
730 if (std.mem.endsWith(u8, self.sub_file_path, ".zon")) {
732 } else if (std.mem.endsWith(u8, self.sub_file_path, ".zig")) {
735 // `Module.importFile` rejects all other extensions
740 pub fn unload(file: *File, gpa: Allocator) void {
741 if (file.zoir) |zoir| zoir.deinit(gpa);
742 file.unloadTree(gpa);
743 file.unloadSource(gpa);
747 pub fn unloadTree(file: *File, gpa: Allocator) void {
748 if (file.tree) |*tree| {
754 pub fn unloadSource(file: *File, gpa: Allocator) void {
755 if (file.source) |source| {
761 pub fn unloadZir(file: *File, gpa: Allocator) void {
762 if (file.zir) |*zir| {
768 pub const Source = struct {
770 stat: Cache.File.Stat,
773 pub fn getSource(file: *File, gpa: Allocator) !Source {
774 if (file.source) |source| return .{
779 // Keep track of inode, file size, mtime, hash so we can detect which files
780 // have been modified when an incremental update is requested.
781 var f = try file.mod.root.openFile(file.sub_file_path, .{});
784 const stat = try f.stat();
786 if (stat.size > std.math.maxInt(u32))
787 return error.FileTooBig;
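// Source size is capped at `maxInt(u32)` presumably because byte offsets throughout the
// AST and ZIR are stored as `u32`.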
789 const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0);
790 errdefer gpa.free(source);
792 const amt = try f.readAll(source);
793 if (amt != stat.size)
794 return error.UnexpectedEndOfFile;
796 // Here we do not modify stat fields because this function is the one
797 // used for error reporting. We need to keep the stat fields stale so that
798 // updateFile can know to regenerate ZIR.
800 file.source = source;
801 errdefer comptime unreachable; // don't error after populating `source`
813 pub fn getTree(file: *File, gpa: Allocator) !*const Ast {
814 if (file.tree) |*tree| return tree;
816 const source = try file.getSource(gpa);
817 file.tree = try .parse(gpa, source.bytes, file.getMode());
821 pub fn getZoir(file: *File, zcu: *Zcu) !*const Zoir {
822 if (file.zoir) |*zoir| return zoir;
824 const tree = file.tree.?;
825 assert(tree.mode == .zon);
827 file.zoir = try ZonGen.generate(zcu.gpa, tree, .{});
828 if (file.zoir.?.hasCompileErrors()) {
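// Storing a `null` message indicates that the errors live in `zoir` itself; see the
// `failed_files` documentation.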
829 try zcu.failed_files.putNoClobber(zcu.gpa, file, null);
830 return error.AnalysisFail;
835 pub fn fullyQualifiedNameLen(file: File) usize {
836 const ext = std.fs.path.extension(file.sub_file_path);
837 return file.sub_file_path.len - ext.len;
840 pub fn renderFullyQualifiedName(file: File, writer: anytype) !void {
841 // Convert all the slashes into dots and truncate the extension.
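// e.g. (illustrative) "std/fs/path.zig" renders as "std.fs.path".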
842 const ext = std.fs.path.extension(file.sub_file_path);
843 const noext = file.sub_file_path[0 .. file.sub_file_path.len - ext.len];
844 for (noext) |byte| switch (byte) {
845 '/', '\\' => try writer.writeByte('.'),
846 else => try writer.writeByte(byte),
850 pub fn renderFullyQualifiedDebugName(file: File, writer: anytype) !void {
851 for (file.sub_file_path) |byte| switch (byte) {
852 '/', '\\' => try writer.writeByte('/'),
853 else => try writer.writeByte(byte),
857 pub fn internFullyQualifiedName(file: File, pt: Zcu.PerThread) !InternPool.NullTerminatedString {
858 const gpa = pt.zcu.gpa;
859 const ip = &pt.zcu.intern_pool;
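// The name is rendered directly into this thread's intern pool string buffer and then
// interned in place via `getOrPutTrailingString`, avoiding a temporary allocation.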
860 const strings = ip.getLocal(pt.tid).getMutableStrings(gpa);
861 const slice = try strings.addManyAsSlice(file.fullyQualifiedNameLen());
862 var fbs = std.io.fixedBufferStream(slice[0]);
863 file.renderFullyQualifiedName(fbs.writer()) catch unreachable;
864 assert(fbs.pos == slice[0].len);
865 return ip.getOrPutTrailingString(gpa, pt.tid, @intCast(slice[0].len), .no_embedded_nulls);
868 pub fn fullPath(file: File, ally: Allocator) ![]u8 {
869 return file.mod.root.joinString(ally, file.sub_file_path);
872 pub fn dumpSrc(file: *File, src: LazySrcLoc) void {
873 const loc = std.zig.findLineColumn(file.source.bytes, src);
874 std.debug.print("{s}:{d}:{d}\n", .{ file.sub_file_path, loc.line + 1, loc.column + 1 });
877 /// Add a reference to this file during AstGen.
878 pub fn addReference(file: *File, zcu: *Zcu, ref: File.Reference) !void {
879 // Don't add the same module root twice. Note that since we always add module roots at the
880 // front of the references array (see below), this loop is actually O(1) on valid code.
882 for (file.references.items) |other| {
884 .root => |r| if (ref.root == r) return,
885 else => break, // reached the end of the "is-root" references
891 // We put root references at the front of the list both to make the above loop fast and
892 // to make multi-module errors more helpful (since "root-of" notes are generally more
893 // informative than "imported-from" notes). This path is hit very rarely, so the speed
894 // of the insert operation doesn't matter too much.
895 .root => try file.references.insert(zcu.gpa, 0, ref),
897 // Other references we'll just put at the end.
898 else => try file.references.append(zcu.gpa, ref),
901 const mod = switch (ref) {
902 .import => |import| zcu.fileByIndex(import.file).mod,
905 if (mod != file.mod) file.multi_pkg = true;
908 /// Mark this file and every file referenced by it as multi_pkg and report an
909 /// astgen_failure error for them. AstGen must have completed in its entirety.
910 pub fn recursiveMarkMultiPkg(file: *File, pt: Zcu.PerThread) void {
911 file.multi_pkg = true;
912 file.status = .astgen_failure;
914 // We can only mark children as failed if the ZIR is loaded, which may not
915 // be the case if there were other astgen failures in this file
916 if (file.zir == null) return;
918 const imports_index = file.zir.?.extra[@intFromEnum(Zir.ExtraIndex.imports)];
919 if (imports_index == 0) return;
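// Walk the import items recorded in this file's ZIR `extra` array and recurse into each
// imported file (other than the special "builtin" module).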
920 const extra = file.zir.?.extraData(Zir.Inst.Imports, imports_index);
922 var extra_index = extra.end;
923 for (0..extra.data.imports_len) |_| {
924 const item = file.zir.?.extraData(Zir.Inst.Imports.Item, extra_index);
925 extra_index = item.end;
927 const import_path = file.zir.?.nullTerminatedString(item.data.name);
928 if (mem.eql(u8, import_path, "builtin")) continue;
930 const res = pt.importFile(file, import_path) catch continue;
931 if (!res.is_pkg and !res.file.multi_pkg) {
932 res.file.recursiveMarkMultiPkg(pt);
937 pub const Index = InternPool.FileIndex;
940 /// Represents the contents of a file loaded with `@embedFile`.
941 pub const EmbedFile = struct {
942 /// Module that this file is a part of, managed externally.
943 owner: *Package.Module,
944 /// Relative to the owning module's root directory.
945 sub_file_path: InternPool.NullTerminatedString,
947 /// `.none` means the file was not loaded, so `stat` is undefined.
948 val: InternPool.Index,
949 /// If this is `null` and `val` is `.none`, the file has never been loaded.
950 err: ?(std.fs.File.OpenError || std.fs.File.StatError || std.fs.File.ReadError || error{UnexpectedEof}),
951 stat: Cache.File.Stat,
953 pub const Index = enum(u32) {
955 pub fn get(idx: Index, zcu: *const Zcu) *EmbedFile {
956 return zcu.embed_table.values()[@intFromEnum(idx)];
961 /// This struct holds data necessary to construct API-facing `AllErrors.Message`.
962 /// Its memory is managed with the general purpose allocator so that messages
963 /// can be created and destroyed in response to incremental updates.
964 pub const ErrorMsg = struct {
967 notes: []ErrorMsg = &.{},
968 reference_trace_root: AnalUnit.Optional = .none,
973 comptime format: []const u8,
976 assert(src_loc.offset != .unneeded);
977 const err_msg = try gpa.create(ErrorMsg);
978 errdefer gpa.destroy(err_msg);
979 err_msg.* = try ErrorMsg.init(gpa, src_loc, format, args);
983 /// Assumes the ErrorMsg struct, its msg string, and all notes
984 /// were allocated with `gpa`.
985 pub fn destroy(err_msg: *ErrorMsg, gpa: Allocator) void {
987 gpa.destroy(err_msg);
993 comptime format: []const u8,
998 .msg = try std.fmt.allocPrint(gpa, format, args),
1002 pub fn deinit(err_msg: *ErrorMsg, gpa: Allocator) void {
1003 for (err_msg.notes) |*note| {
1006 gpa.free(err_msg.notes);
1007 gpa.free(err_msg.msg);
1008 err_msg.* = undefined;
1012 pub const AstGenSrc = union(enum) {
1015 importing_file: Zcu.File.Index,
1016 import_tok: std.zig.Ast.TokenIndex,
1020 /// Canonical reference to a position within a source file.
1021 pub const SrcLoc = struct {
1023 base_node: Ast.Node.Index,
1024 /// Relative to `base_node`.
1025 lazy: LazySrcLoc.Offset,
1027 pub fn baseSrcToken(src_loc: SrcLoc) Ast.TokenIndex {
1028 const tree = src_loc.file_scope.tree.?;
1029 return tree.firstToken(src_loc.base_node);
1032 pub fn relativeToNodeIndex(src_loc: SrcLoc, offset: i32) Ast.Node.Index {
1033 return @bitCast(offset + @as(i32, @bitCast(src_loc.base_node)));
1036 pub const Span = Ast.Span;
1038 pub fn span(src_loc: SrcLoc, gpa: Allocator) !Span {
1039 switch (src_loc.lazy) {
1040 .unneeded => unreachable,
1041 .entire_file => return Span{ .start = 0, .end = 1, .main = 0 },
1043 .byte_abs => |byte_index| return Span{ .start = byte_index, .end = byte_index + 1, .main = byte_index },
1045 .token_abs => |tok_index| {
1046 const tree = try src_loc.file_scope.getTree(gpa);
1047 const start = tree.tokens.items(.start)[tok_index];
1048 const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
1049 return Span{ .start = start, .end = end, .main = start };
1051 .node_abs => |node| {
1052 const tree = try src_loc.file_scope.getTree(gpa);
1053 return tree.nodeToSpan(node);
1055 .byte_offset => |byte_off| {
1056 const tree = try src_loc.file_scope.getTree(gpa);
1057 const tok_index = src_loc.baseSrcToken();
1058 const start = tree.tokens.items(.start)[tok_index] + byte_off;
1059 const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
1060 return Span{ .start = start, .end = end, .main = start };
1062 .token_offset => |tok_off| {
1063 const tree = try src_loc.file_scope.getTree(gpa);
1064 const tok_index = src_loc.baseSrcToken() + tok_off;
1065 const start = tree.tokens.items(.start)[tok_index];
1066 const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
1067 return Span{ .start = start, .end = end, .main = start };
1069 .node_offset => |traced_off| {
1070 const node_off = traced_off.x;
1071 const tree = try src_loc.file_scope.getTree(gpa);
1072 const node = src_loc.relativeToNodeIndex(node_off);
1073 return tree.nodeToSpan(node);
1075 .node_offset_main_token => |node_off| {
1076 const tree = try src_loc.file_scope.getTree(gpa);
1077 const node = src_loc.relativeToNodeIndex(node_off);
1078 const main_token = tree.nodes.items(.main_token)[node];
1079 return tree.tokensToSpan(main_token, main_token, main_token);
1081 .node_offset_bin_op => |node_off| {
1082 const tree = try src_loc.file_scope.getTree(gpa);
1083 const node = src_loc.relativeToNodeIndex(node_off);
1084 return tree.nodeToSpan(node);
1086 .node_offset_initializer => |node_off| {
1087 const tree = try src_loc.file_scope.getTree(gpa);
1088 const node = src_loc.relativeToNodeIndex(node_off);
1089 return tree.tokensToSpan(
1090 tree.firstToken(node) - 3,
1091 tree.lastToken(node),
1092 tree.nodes.items(.main_token)[node] - 2,
1095 .node_offset_var_decl_ty => |node_off| {
1096 const tree = try src_loc.file_scope.getTree(gpa);
1097 const node = src_loc.relativeToNodeIndex(node_off);
1098 const node_tags = tree.nodes.items(.tag);
1099 const full = switch (node_tags[node]) {
1104 => tree.fullVarDecl(node).?,
1105 .@"usingnamespace" => {
1106 const node_data = tree.nodes.items(.data);
1107 return tree.nodeToSpan(node_data[node].lhs);
1109 else => unreachable,
1111 if (full.ast.type_node != 0) {
1112 return tree.nodeToSpan(full.ast.type_node);
1114 const tok_index = full.ast.mut_token + 1; // the name token
1115 const start = tree.tokens.items(.start)[tok_index];
1116 const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
1117 return Span{ .start = start, .end = end, .main = start };
1119 .node_offset_var_decl_align => |node_off| {
1120 const tree = try src_loc.file_scope.getTree(gpa);
1121 const node = src_loc.relativeToNodeIndex(node_off);
1122 var buf: [1]Ast.Node.Index = undefined;
1123 const align_node = if (tree.fullVarDecl(node)) |v|
1125 else if (tree.fullFnProto(&buf, node)) |f|
1129 return tree.nodeToSpan(align_node);
1131 .node_offset_var_decl_section => |node_off| {
1132 const tree = try src_loc.file_scope.getTree(gpa);
1133 const node = src_loc.relativeToNodeIndex(node_off);
1134 var buf: [1]Ast.Node.Index = undefined;
1135 const section_node = if (tree.fullVarDecl(node)) |v|
1137 else if (tree.fullFnProto(&buf, node)) |f|
1141 return tree.nodeToSpan(section_node);
1143 .node_offset_var_decl_addrspace => |node_off| {
1144 const tree = try src_loc.file_scope.getTree(gpa);
1145 const node = src_loc.relativeToNodeIndex(node_off);
1146 var buf: [1]Ast.Node.Index = undefined;
1147 const addrspace_node = if (tree.fullVarDecl(node)) |v|
1148 v.ast.addrspace_node
1149 else if (tree.fullFnProto(&buf, node)) |f|
1150 f.ast.addrspace_expr
1153 return tree.nodeToSpan(addrspace_node);
1155 .node_offset_var_decl_init => |node_off| {
1156 const tree = try src_loc.file_scope.getTree(gpa);
1157 const node = src_loc.relativeToNodeIndex(node_off);
1158 const full = tree.fullVarDecl(node).?;
1159 return tree.nodeToSpan(full.ast.init_node);
1161 .node_offset_builtin_call_arg => |builtin_arg| {
1162 const tree = try src_loc.file_scope.getTree(gpa);
1163 const node_datas = tree.nodes.items(.data);
1164 const node_tags = tree.nodes.items(.tag);
1165 const node = src_loc.relativeToNodeIndex(builtin_arg.builtin_call_node);
1166 const param = switch (node_tags[node]) {
1167 .builtin_call_two, .builtin_call_two_comma => switch (builtin_arg.arg_index) {
1168 0 => node_datas[node].lhs,
1169 1 => node_datas[node].rhs,
1170 else => unreachable,
1172 .builtin_call, .builtin_call_comma => tree.extra_data[node_datas[node].lhs + builtin_arg.arg_index],
1173 else => unreachable,
1175 return tree.nodeToSpan(param);
1177 .node_offset_ptrcast_operand => |node_off| {
1178 const tree = try src_loc.file_scope.getTree(gpa);
1179 const main_tokens = tree.nodes.items(.main_token);
1180 const node_datas = tree.nodes.items(.data);
1181 const node_tags = tree.nodes.items(.tag);
1183 var node = src_loc.relativeToNodeIndex(node_off);
1185 switch (node_tags[node]) {
1186 .builtin_call_two, .builtin_call_two_comma => {},
1190 if (node_datas[node].lhs == 0) break; // 0 args
1191 if (node_datas[node].rhs != 0) break; // 2 args
1193 const builtin_token = main_tokens[node];
1194 const builtin_name = tree.tokenSlice(builtin_token);
1195 const info = BuiltinFn.list.get(builtin_name) orelse break;
1207 node = node_datas[node].lhs;
1210 return tree.nodeToSpan(node);
1212 .node_offset_array_access_index => |node_off| {
1213 const tree = try src_loc.file_scope.getTree(gpa);
1214 const node_datas = tree.nodes.items(.data);
1215 const node = src_loc.relativeToNodeIndex(node_off);
1216 return tree.nodeToSpan(node_datas[node].rhs);
1218 .node_offset_slice_ptr,
1219 .node_offset_slice_start,
1220 .node_offset_slice_end,
1221 .node_offset_slice_sentinel,
1223 const tree = try src_loc.file_scope.getTree(gpa);
1224 const node = src_loc.relativeToNodeIndex(node_off);
1225 const full = tree.fullSlice(node).?;
1226 const part_node = switch (src_loc.lazy) {
1227 .node_offset_slice_ptr => full.ast.sliced,
1228 .node_offset_slice_start => full.ast.start,
1229 .node_offset_slice_end => full.ast.end,
1230 .node_offset_slice_sentinel => full.ast.sentinel,
1231 else => unreachable,
1233 return tree.nodeToSpan(part_node);
1235 .node_offset_call_func => |node_off| {
1236 const tree = try src_loc.file_scope.getTree(gpa);
1237 const node = src_loc.relativeToNodeIndex(node_off);
1238 var buf: [1]Ast.Node.Index = undefined;
1239 const full = tree.fullCall(&buf, node).?;
1240 return tree.nodeToSpan(full.ast.fn_expr);
1242 .node_offset_field_name => |node_off| {
1243 const tree = try src_loc.file_scope.getTree(gpa);
1244 const node_datas = tree.nodes.items(.data);
1245 const node_tags = tree.nodes.items(.tag);
1246 const node = src_loc.relativeToNodeIndex(node_off);
1247 var buf: [1]Ast.Node.Index = undefined;
1248 const tok_index = switch (node_tags[node]) {
1249 .field_access => node_datas[node].rhs,
1253 .async_call_one_comma,
1259 const full = tree.fullCall(&buf, node).?;
1260 break :blk tree.lastToken(full.ast.fn_expr);
1262 else => tree.firstToken(node) - 2,
1264 const start = tree.tokens.items(.start)[tok_index];
1265 const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
1266 return Span{ .start = start, .end = end, .main = start };
1268 .node_offset_field_name_init => |node_off| {
1269 const tree = try src_loc.file_scope.getTree(gpa);
1270 const node = src_loc.relativeToNodeIndex(node_off);
1271 const tok_index = tree.firstToken(node) - 2;
1272 const start = tree.tokens.items(.start)[tok_index];
1273 const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
1274 return Span{ .start = start, .end = end, .main = start };
1276 .node_offset_deref_ptr => |node_off| {
1277 const tree = try src_loc.file_scope.getTree(gpa);
1278 const node = src_loc.relativeToNodeIndex(node_off);
1279 return tree.nodeToSpan(node);
1281 .node_offset_asm_source => |node_off| {
1282 const tree = try src_loc.file_scope.getTree(gpa);
1283 const node = src_loc.relativeToNodeIndex(node_off);
1284 const full = tree.fullAsm(node).?;
1285 return tree.nodeToSpan(full.ast.template);
1287 .node_offset_asm_ret_ty => |node_off| {
1288 const tree = try src_loc.file_scope.getTree(gpa);
1289 const node = src_loc.relativeToNodeIndex(node_off);
1290 const full = tree.fullAsm(node).?;
1291 const asm_output = full.outputs[0];
1292 const node_datas = tree.nodes.items(.data);
1293 return tree.nodeToSpan(node_datas[asm_output].lhs);
1296 .node_offset_if_cond => |node_off| {
1297 const tree = try src_loc.file_scope.getTree(gpa);
1298 const node = src_loc.relativeToNodeIndex(node_off);
1299 const node_tags = tree.nodes.items(.tag);
1300 const src_node = switch (node_tags[node]) {
1303 => tree.fullIf(node).?.ast.cond_expr,
1308 => tree.fullWhile(node).?.ast.cond_expr,
1313 const inputs = tree.fullFor(node).?.ast.inputs;
1314 const start = tree.firstToken(inputs[0]);
1315 const end = tree.lastToken(inputs[inputs.len - 1]);
1316 return tree.tokensToSpan(start, end, start);
1321 else => unreachable,
1323 return tree.nodeToSpan(src_node);
1325 .for_input => |for_input| {
1326 const tree = try src_loc.file_scope.getTree(gpa);
1327 const node = src_loc.relativeToNodeIndex(for_input.for_node_offset);
1328 const for_full = tree.fullFor(node).?;
1329 const src_node = for_full.ast.inputs[for_input.input_index];
1330 return tree.nodeToSpan(src_node);
1332 .for_capture_from_input => |node_off| {
1333 const tree = try src_loc.file_scope.getTree(gpa);
1334 const token_tags = tree.tokens.items(.tag);
1335 const input_node = src_loc.relativeToNodeIndex(node_off);
1336 // We have to linearly scan the whole AST to find the for loop
1337 // that contains this input.
1338 const node_tags = tree.nodes.items(.tag);
1339 for (node_tags, 0..) |node_tag, node_usize| {
1340 const node = @as(Ast.Node.Index, @intCast(node_usize));
1342 .for_simple, .@"for" => {
1343 const for_full = tree.fullFor(node).?;
1344 for (for_full.ast.inputs, 0..) |input, input_index| {
1345 if (input_node == input) {
1346 var count = input_index;
1347 var tok = for_full.payload_token;
1349 switch (token_tags[tok]) {
1356 return tree.tokensToSpan(tok, tok + 1, tok);
1361 return tree.tokensToSpan(tok, tok + 2, tok);
1364 else => unreachable,
1374 .call_arg => |call_arg| {
1375 const tree = try src_loc.file_scope.getTree(gpa);
1376 const node = src_loc.relativeToNodeIndex(call_arg.call_node_offset);
1377 var buf: [2]Ast.Node.Index = undefined;
1378 const call_full = tree.fullCall(buf[0..1], node) orelse {
1379 const node_tags = tree.nodes.items(.tag);
1380 assert(node_tags[node] == .builtin_call);
1381 const call_args_node = tree.extra_data[tree.nodes.items(.data)[node].rhs - 1];
1382 switch (node_tags[call_args_node]) {
1384 .array_init_one_comma,
1385 .array_init_dot_two,
1386 .array_init_dot_two_comma,
1388 .array_init_dot_comma,
1392 const full = tree.fullArrayInit(&buf, call_args_node).?.ast.elements;
1393 return tree.nodeToSpan(full[call_arg.arg_index]);
1396 .struct_init_one_comma,
1397 .struct_init_dot_two,
1398 .struct_init_dot_two_comma,
1400 .struct_init_dot_comma,
1404 const full = tree.fullStructInit(&buf, call_args_node).?.ast.fields;
1405 return tree.nodeToSpan(full[call_arg.arg_index]);
1407 else => return tree.nodeToSpan(call_args_node),
1410 return tree.nodeToSpan(call_full.ast.params[call_arg.arg_index]);
1412 .fn_proto_param, .fn_proto_param_type => |fn_proto_param| {
1413 const tree = try src_loc.file_scope.getTree(gpa);
1414 const node = src_loc.relativeToNodeIndex(fn_proto_param.fn_proto_node_offset);
1415 var buf: [1]Ast.Node.Index = undefined;
1416 const full = tree.fullFnProto(&buf, node).?;
1417 var it = full.iterate(tree);
1419 while (it.next()) |param| : (i += 1) {
1420 if (i != fn_proto_param.param_index) continue;
1422 switch (src_loc.lazy) {
1423 .fn_proto_param_type => if (param.anytype_ellipsis3) |tok| {
1424 return tree.tokenToSpan(tok);
1426 return tree.nodeToSpan(param.type_expr);
1428 .fn_proto_param => if (param.anytype_ellipsis3) |tok| {
1429 const first = param.comptime_noalias orelse param.name_token orelse tok;
1430 return tree.tokensToSpan(first, tok, first);
1432 const first = param.comptime_noalias orelse param.name_token orelse tree.firstToken(param.type_expr);
1433 return tree.tokensToSpan(first, tree.lastToken(param.type_expr), first);
1435 else => unreachable,
1440 .node_offset_bin_lhs => |node_off| {
1441 const tree = try src_loc.file_scope.getTree(gpa);
1442 const node = src_loc.relativeToNodeIndex(node_off);
1443 const node_datas = tree.nodes.items(.data);
1444 return tree.nodeToSpan(node_datas[node].lhs);
1446 .node_offset_bin_rhs => |node_off| {
1447 const tree = try src_loc.file_scope.getTree(gpa);
1448 const node = src_loc.relativeToNodeIndex(node_off);
1449 const node_datas = tree.nodes.items(.data);
1450 return tree.nodeToSpan(node_datas[node].rhs);
1452 .array_cat_lhs, .array_cat_rhs => |cat| {
1453 const tree = try src_loc.file_scope.getTree(gpa);
1454 const node = src_loc.relativeToNodeIndex(cat.array_cat_offset);
1455 const node_datas = tree.nodes.items(.data);
1456 const arr_node = if (src_loc.lazy == .array_cat_lhs)
1457 node_datas[node].lhs
1459 node_datas[node].rhs;
1461 const node_tags = tree.nodes.items(.tag);
1462 var buf: [2]Ast.Node.Index = undefined;
1463 switch (node_tags[arr_node]) {
1465 .array_init_one_comma,
1466 .array_init_dot_two,
1467 .array_init_dot_two_comma,
1469 .array_init_dot_comma,
1473 const full = tree.fullArrayInit(&buf, arr_node).?.ast.elements;
1474 return tree.nodeToSpan(full[cat.elem_index]);
1476 else => return tree.nodeToSpan(arr_node),
1480 .node_offset_switch_operand => |node_off| {
1481 const tree = try src_loc.file_scope.getTree(gpa);
1482 const node = src_loc.relativeToNodeIndex(node_off);
1483 const node_datas = tree.nodes.items(.data);
1484 return tree.nodeToSpan(node_datas[node].lhs);
1487 .node_offset_switch_special_prong => |node_off| {
1488 const tree = try src_loc.file_scope.getTree(gpa);
1489 const switch_node = src_loc.relativeToNodeIndex(node_off);
1490 const node_datas = tree.nodes.items(.data);
1491 const node_tags = tree.nodes.items(.tag);
1492 const main_tokens = tree.nodes.items(.main_token);
1493 const extra = tree.extraData(node_datas[switch_node].rhs, Ast.Node.SubRange);
1494 const case_nodes = tree.extra_data[extra.start..extra.end];
1495 for (case_nodes) |case_node| {
1496 const case = tree.fullSwitchCase(case_node).?;
1497 const is_special = (case.ast.values.len == 0) or
1498 (case.ast.values.len == 1 and
1499 node_tags[case.ast.values[0]] == .identifier and
1500 mem.eql(u8, tree.tokenSlice(main_tokens[case.ast.values[0]]), "_"));
1501 if (!is_special) continue;
1503 return tree.nodeToSpan(case_node);
1507 .node_offset_switch_range => |node_off| {
1508 const tree = try src_loc.file_scope.getTree(gpa);
1509 const switch_node = src_loc.relativeToNodeIndex(node_off);
1510 const node_datas = tree.nodes.items(.data);
1511 const node_tags = tree.nodes.items(.tag);
1512 const main_tokens = tree.nodes.items(.main_token);
1513 const extra = tree.extraData(node_datas[switch_node].rhs, Ast.Node.SubRange);
1514 const case_nodes = tree.extra_data[extra.start..extra.end];
1515 for (case_nodes) |case_node| {
1516 const case = tree.fullSwitchCase(case_node).?;
1517 const is_special = (case.ast.values.len == 0) or
1518 (case.ast.values.len == 1 and
1519 node_tags[case.ast.values[0]] == .identifier and
1520 mem.eql(u8, tree.tokenSlice(main_tokens[case.ast.values[0]]), "_"));
1521 if (is_special) continue;
1523 for (case.ast.values) |item_node| {
1524 if (node_tags[item_node] == .switch_range) {
1525 return tree.nodeToSpan(item_node);
1530 .node_offset_fn_type_align => |node_off| {
1531 const tree = try src_loc.file_scope.getTree(gpa);
1532 const node = src_loc.relativeToNodeIndex(node_off);
1533 var buf: [1]Ast.Node.Index = undefined;
1534 const full = tree.fullFnProto(&buf, node).?;
1535 return tree.nodeToSpan(full.ast.align_expr);
1537 .node_offset_fn_type_addrspace => |node_off| {
1538 const tree = try src_loc.file_scope.getTree(gpa);
1539 const node = src_loc.relativeToNodeIndex(node_off);
1540 var buf: [1]Ast.Node.Index = undefined;
1541 const full = tree.fullFnProto(&buf, node).?;
1542 return tree.nodeToSpan(full.ast.addrspace_expr);
1544 .node_offset_fn_type_section => |node_off| {
1545 const tree = try src_loc.file_scope.getTree(gpa);
1546 const node = src_loc.relativeToNodeIndex(node_off);
1547 var buf: [1]Ast.Node.Index = undefined;
1548 const full = tree.fullFnProto(&buf, node).?;
1549 return tree.nodeToSpan(full.ast.section_expr);
1551 .node_offset_fn_type_cc => |node_off| {
1552 const tree = try src_loc.file_scope.getTree(gpa);
1553 const node = src_loc.relativeToNodeIndex(node_off);
1554 var buf: [1]Ast.Node.Index = undefined;
1555 const full = tree.fullFnProto(&buf, node).?;
1556 return tree.nodeToSpan(full.ast.callconv_expr);
1559 .node_offset_fn_type_ret_ty => |node_off| {
1560 const tree = try src_loc.file_scope.getTree(gpa);
1561 const node = src_loc.relativeToNodeIndex(node_off);
1562 var buf: [1]Ast.Node.Index = undefined;
1563 const full = tree.fullFnProto(&buf, node).?;
1564 return tree.nodeToSpan(full.ast.return_type);
1566 .node_offset_param => |node_off| {
1567 const tree = try src_loc.file_scope.getTree(gpa);
1568 const token_tags = tree.tokens.items(.tag);
1569 const node = src_loc.relativeToNodeIndex(node_off);
1571 var first_tok = tree.firstToken(node);
1572 while (true) switch (token_tags[first_tok - 1]) {
1573 .colon, .identifier, .keyword_comptime, .keyword_noalias => first_tok -= 1,
1576 return tree.tokensToSpan(
1578 tree.lastToken(node),
1582 .token_offset_param => |token_off| {
1583 const tree = try src_loc.file_scope.getTree(gpa);
1584 const token_tags = tree.tokens.items(.tag);
1585 const main_token = tree.nodes.items(.main_token)[src_loc.base_node];
1586 const tok_index = @as(Ast.TokenIndex, @bitCast(token_off + @as(i32, @bitCast(main_token))));
1588 var first_tok = tok_index;
1589 while (true) switch (token_tags[first_tok - 1]) {
1590 .colon, .identifier, .keyword_comptime, .keyword_noalias => first_tok -= 1,
1593 return tree.tokensToSpan(
1600 .node_offset_anyframe_type => |node_off| {
1601 const tree = try src_loc.file_scope.getTree(gpa);
1602 const node_datas = tree.nodes.items(.data);
1603 const parent_node = src_loc.relativeToNodeIndex(node_off);
1604 return tree.nodeToSpan(node_datas[parent_node].rhs);
1607 .node_offset_lib_name => |node_off| {
1608 const tree = try src_loc.file_scope.getTree(gpa);
1609 const parent_node = src_loc.relativeToNodeIndex(node_off);
1610 var buf: [1]Ast.Node.Index = undefined;
1611 const full = tree.fullFnProto(&buf, parent_node).?;
1612 const tok_index = full.lib_name.?;
1613 const start = tree.tokens.items(.start)[tok_index];
1614 const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
1615 return Span{ .start = start, .end = end, .main = start };
1618 .node_offset_array_type_len => |node_off| {
1619 const tree = try src_loc.file_scope.getTree(gpa);
1620 const parent_node = src_loc.relativeToNodeIndex(node_off);
1622 const full = tree.fullArrayType(parent_node).?;
1623 return tree.nodeToSpan(full.ast.elem_count);
1625 .node_offset_array_type_sentinel => |node_off| {
1626 const tree = try src_loc.file_scope.getTree(gpa);
1627 const parent_node = src_loc.relativeToNodeIndex(node_off);
1629 const full = tree.fullArrayType(parent_node).?;
1630 return tree.nodeToSpan(full.ast.sentinel);
1632 .node_offset_array_type_elem => |node_off| {
1633 const tree = try src_loc.file_scope.getTree(gpa);
1634 const parent_node = src_loc.relativeToNodeIndex(node_off);
1636 const full = tree.fullArrayType(parent_node).?;
1637 return tree.nodeToSpan(full.ast.elem_type);
1639 .node_offset_un_op => |node_off| {
1640 const tree = try src_loc.file_scope.getTree(gpa);
1641 const node_datas = tree.nodes.items(.data);
1642 const node = src_loc.relativeToNodeIndex(node_off);
1644 return tree.nodeToSpan(node_datas[node].lhs);
1646 .node_offset_ptr_elem => |node_off| {
1647 const tree = try src_loc.file_scope.getTree(gpa);
1648 const parent_node = src_loc.relativeToNodeIndex(node_off);
1650 const full = tree.fullPtrType(parent_node).?;
1651 return tree.nodeToSpan(full.ast.child_type);
1653 .node_offset_ptr_sentinel => |node_off| {
1654 const tree = try src_loc.file_scope.getTree(gpa);
1655 const parent_node = src_loc.relativeToNodeIndex(node_off);
1657 const full = tree.fullPtrType(parent_node).?;
1658 return tree.nodeToSpan(full.ast.sentinel);
1660 .node_offset_ptr_align => |node_off| {
1661 const tree = try src_loc.file_scope.getTree(gpa);
1662 const parent_node = src_loc.relativeToNodeIndex(node_off);
1664 const full = tree.fullPtrType(parent_node).?;
1665 return tree.nodeToSpan(full.ast.align_node);
1667 .node_offset_ptr_addrspace => |node_off| {
1668 const tree = try src_loc.file_scope.getTree(gpa);
1669 const parent_node = src_loc.relativeToNodeIndex(node_off);
1671 const full = tree.fullPtrType(parent_node).?;
1672 return tree.nodeToSpan(full.ast.addrspace_node);
1674 .node_offset_ptr_bitoffset => |node_off| {
1675 const tree = try src_loc.file_scope.getTree(gpa);
1676 const parent_node = src_loc.relativeToNodeIndex(node_off);
1678 const full = tree.fullPtrType(parent_node).?;
1679 return tree.nodeToSpan(full.ast.bit_range_start);
1681 .node_offset_ptr_hostsize => |node_off| {
1682 const tree = try src_loc.file_scope.getTree(gpa);
1683 const parent_node = src_loc.relativeToNodeIndex(node_off);
1685 const full = tree.fullPtrType(parent_node).?;
1686 return tree.nodeToSpan(full.ast.bit_range_end);
1688 .node_offset_container_tag => |node_off| {
1689 const tree = try src_loc.file_scope.getTree(gpa);
1690 const node_tags = tree.nodes.items(.tag);
1691 const parent_node = src_loc.relativeToNodeIndex(node_off);
1693 switch (node_tags[parent_node]) {
1694 .container_decl_arg, .container_decl_arg_trailing => {
1695 const full = tree.containerDeclArg(parent_node);
1696 return tree.nodeToSpan(full.ast.arg);
1698 .tagged_union_enum_tag, .tagged_union_enum_tag_trailing => {
1699 const full = tree.taggedUnionEnumTag(parent_node);
1701 return tree.tokensToSpan(
1702 tree.firstToken(full.ast.arg) - 2,
1703 tree.lastToken(full.ast.arg) + 1,
1704 tree.nodes.items(.main_token)[full.ast.arg],
1707 else => unreachable,
1710 .node_offset_field_default => |node_off| {
1711 const tree = try src_loc.file_scope.getTree(gpa);
1712 const node_tags = tree.nodes.items(.tag);
1713 const parent_node = src_loc.relativeToNodeIndex(node_off);
1715 const full: Ast.full.ContainerField = switch (node_tags[parent_node]) {
1716 .container_field => tree.containerField(parent_node),
1717 .container_field_init => tree.containerFieldInit(parent_node),
1718 else => unreachable,
1720 return tree.nodeToSpan(full.ast.value_expr);
1722 .node_offset_init_ty => |node_off| {
1723 const tree = try src_loc.file_scope.getTree(gpa);
1724 const parent_node = src_loc.relativeToNodeIndex(node_off);
1726 var buf: [2]Ast.Node.Index = undefined;
1727 const type_expr = if (tree.fullArrayInit(&buf, parent_node)) |array_init|
1728 array_init.ast.type_expr
1730 tree.fullStructInit(&buf, parent_node).?.ast.type_expr;
1731 return tree.nodeToSpan(type_expr);
1733 .node_offset_store_ptr => |node_off| {
1734 const tree = try src_loc.file_scope.getTree(gpa);
1735 const node_tags = tree.nodes.items(.tag);
1736 const node_datas = tree.nodes.items(.data);
1737 const node = src_loc.relativeToNodeIndex(node_off);
1739 switch (node_tags[node]) {
1741 return tree.nodeToSpan(node_datas[node].lhs);
1743 else => return tree.nodeToSpan(node),
1746 .node_offset_store_operand => |node_off| {
1747 const tree = try src_loc.file_scope.getTree(gpa);
1748 const node_tags = tree.nodes.items(.tag);
1749 const node_datas = tree.nodes.items(.data);
1750 const node = src_loc.relativeToNodeIndex(node_off);
1752 switch (node_tags[node]) {
1754 return tree.nodeToSpan(node_datas[node].rhs);
1756 else => return tree.nodeToSpan(node),
1759 .node_offset_return_operand => |node_off| {
1760 const tree = try src_loc.file_scope.getTree(gpa);
1761 const node = src_loc.relativeToNodeIndex(node_off);
1762 const node_tags = tree.nodes.items(.tag);
1763 const node_datas = tree.nodes.items(.data);
1764 if (node_tags[node] == .@"return" and node_datas[node].lhs != 0) {
1765 return tree.nodeToSpan(node_datas[node].lhs);
1767 return tree.nodeToSpan(node);
1769 .container_field_name,
1770 .container_field_value,
1771 .container_field_type,
1772 .container_field_align,
1774 const tree = try src_loc.file_scope.getTree(gpa);
1775 const node = src_loc.relativeToNodeIndex(0);
1776 var buf: [2]Ast.Node.Index = undefined;
1777 const container_decl = tree.fullContainerDecl(&buf, node) orelse
1778 return tree.nodeToSpan(node);
1780 var cur_field_idx: usize = 0;
1781 for (container_decl.ast.members) |member_node| {
1782 const field = tree.fullContainerField(member_node) orelse continue;
1783 if (cur_field_idx < field_idx) {
1787 const field_component_node = switch (src_loc.lazy) {
1788 .container_field_name => 0,
1789 .container_field_value => field.ast.value_expr,
1790 .container_field_type => field.ast.type_expr,
1791 .container_field_align => field.ast.align_expr,
1792 else => unreachable,
1794 if (field_component_node == 0) {
1795 return tree.tokenToSpan(field.ast.main_token);
1797 return tree.nodeToSpan(field_component_node);
1801 .tuple_field_type, .tuple_field_init => |field_info| {
1802 const tree = try src_loc.file_scope.getTree(gpa);
1803 const node = src_loc.relativeToNodeIndex(0);
1804 var buf: [2]Ast.Node.Index = undefined;
1805 const container_decl = tree.fullContainerDecl(&buf, node) orelse
1806 return tree.nodeToSpan(node);
1808 const field = tree.fullContainerField(container_decl.ast.members[field_info.elem_index]).?;
1809 return tree.nodeToSpan(switch (src_loc.lazy) {
1810 .tuple_field_type => field.ast.type_expr,
1811 .tuple_field_init => field.ast.value_expr,
1812 else => unreachable,
1815 .init_elem => |init_elem| {
1816 const tree = try src_loc.file_scope.getTree(gpa);
1817 const init_node = src_loc.relativeToNodeIndex(init_elem.init_node_offset);
1818 var buf: [2]Ast.Node.Index = undefined;
1819 if (tree.fullArrayInit(&buf, init_node)) |full| {
1820 const elem_node = full.ast.elements[init_elem.elem_index];
1821 return tree.nodeToSpan(elem_node);
1822 } else if (tree.fullStructInit(&buf, init_node)) |full| {
1823 const field_node = full.ast.fields[init_elem.elem_index];
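// Include the leading `.name = ` tokens of the field initializer in the span.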
1824 return tree.tokensToSpan(
1825 tree.firstToken(field_node) - 3,
1826 tree.lastToken(field_node),
1827 tree.nodes.items(.main_token)[field_node] - 2,
1832 .init_field_linkage,
1833 .init_field_section,
1834 .init_field_visibility,
1836 .init_field_locality,
1838 .init_field_library,
1839 .init_field_thread_local,
1840 .init_field_dll_import,
1841 => |builtin_call_node| {
1842 const wanted = switch (src_loc.lazy) {
1843 .init_field_name => "name",
1844 .init_field_linkage => "linkage",
1845 .init_field_section => "section",
1846 .init_field_visibility => "visibility",
1847 .init_field_rw => "rw",
1848 .init_field_locality => "locality",
1849 .init_field_cache => "cache",
1850 .init_field_library => "library",
1851 .init_field_thread_local => "thread_local",
1852 .init_field_dll_import => "dll_import",
1853 else => unreachable,
1855 const tree = try src_loc.file_scope.getTree(gpa);
1856 const node_datas = tree.nodes.items(.data);
1857 const node_tags = tree.nodes.items(.tag);
1858 const node = src_loc.relativeToNodeIndex(builtin_call_node);
1859 const arg_node = switch (node_tags[node]) {
1860 .builtin_call_two, .builtin_call_two_comma => node_datas[node].rhs,
1861 .builtin_call, .builtin_call_comma => tree.extra_data[node_datas[node].lhs + 1],
1862 else => unreachable,
1864 var buf: [2]Ast.Node.Index = undefined;
1865 const full = tree.fullStructInit(&buf, arg_node) orelse
1866 return tree.nodeToSpan(arg_node);
1867 for (full.ast.fields) |field_node| {
1868 // . IDENTIFIER = field_node
1869 const name_token = tree.firstToken(field_node) - 2;
1870 const name = tree.tokenSlice(name_token);
1871 if (std.mem.eql(u8, name, wanted)) {
1872 return tree.tokensToSpan(
1874 tree.lastToken(field_node),
1875 tree.nodes.items(.main_token)[field_node] - 2,
1879 return tree.nodeToSpan(arg_node);
1882 .switch_case_item_range_first,
1883 .switch_case_item_range_last,
1885 .switch_tag_capture,
1887 const switch_node_offset, const want_case_idx = switch (src_loc.lazy) {
1889 .switch_case_item_range_first,
1890 .switch_case_item_range_last,
1891 => |x| .{ x.switch_node_offset, x.case_idx },
1893 .switch_tag_capture,
1894 => |x| .{ x.switch_node_offset, x.case_idx },
1895 else => unreachable,
1898 const tree = try src_loc.file_scope.getTree(gpa);
1899 const node_datas = tree.nodes.items(.data);
1900 const node_tags = tree.nodes.items(.tag);
1901 const main_tokens = tree.nodes.items(.main_token);
1902 const switch_node = src_loc.relativeToNodeIndex(switch_node_offset);
1903 const extra = tree.extraData(node_datas[switch_node].rhs, Ast.Node.SubRange);
1904 const case_nodes = tree.extra_data[extra.start..extra.end];
1906 var multi_i: u32 = 0;
1907 var scalar_i: u32 = 0;
1908 const case = for (case_nodes) |case_node| {
1909 const case = tree.fullSwitchCase(case_node).?;
1910 const is_special = special: {
1911 if (case.ast.values.len == 0) break :special true;
1912 if (case.ast.values.len == 1 and node_tags[case.ast.values[0]] == .identifier) {
1913 break :special mem.eql(u8, tree.tokenSlice(main_tokens[case.ast.values[0]]), "_");
1915 break :special false;
1918 if (want_case_idx.isSpecial()) {
1924 const is_multi = case.ast.values.len != 1 or
1925 node_tags[case.ast.values[0]] == .switch_range;
1927 switch (want_case_idx.kind) {
1928 .scalar => if (!is_multi and want_case_idx.index == scalar_i) break case,
1929 .multi => if (is_multi and want_case_idx.index == multi_i) break case,
1939 const want_item = switch (src_loc.lazy) {
1941 .switch_case_item_range_first,
1942 .switch_case_item_range_last,
1944 .switch_capture, .switch_tag_capture => {
1945 const token_tags = tree.tokens.items(.tag);
1946 const start = switch (src_loc.lazy) {
1947 .switch_capture => case.payload_token.?,
1948 .switch_tag_capture => tok: {
1949 var tok = case.payload_token.?;
1950 if (token_tags[tok] == .asterisk) tok += 1;
1951 tok += 2; // skip over the capture name and the comma
1954 else => unreachable,
1956 const end = switch (token_tags[start]) {
1957 .asterisk => start + 1,
1960 return tree.tokensToSpan(start, end, start);
1962 else => unreachable,
1965 switch (want_item.kind) {
1967 var item_i: u32 = 0;
1968 for (case.ast.values) |item_node| {
1969 if (node_tags[item_node] == .switch_range) continue;
1970 if (item_i != want_item.index) {
1974 return tree.nodeToSpan(item_node);
1978 var range_i: u32 = 0;
1979 for (case.ast.values) |item_node| {
1980 if (node_tags[item_node] != .switch_range) continue;
1981 if (range_i != want_item.index) {
1985 return switch (src_loc.lazy) {
1986 .switch_case_item => tree.nodeToSpan(item_node),
1987 .switch_case_item_range_first => tree.nodeToSpan(node_datas[item_node].lhs),
1988 .switch_case_item_range_last => tree.nodeToSpan(node_datas[item_node].rhs),
1989 else => unreachable,
1995 .func_decl_param_comptime => |param_idx| {
1996 const tree = try src_loc.file_scope.getTree(gpa);
1997 var buf: [1]Ast.Node.Index = undefined;
1998 const full = tree.fullFnProto(&buf, src_loc.base_node).?;
1999 var param_it = full.iterate(tree);
2000 for (0..param_idx) |_| assert(param_it.next() != null);
2001 const param = param_it.next().?;
2002 return tree.tokenToSpan(param.comptime_noalias.?);
2004 .func_decl_param_ty => |param_idx| {
2005 const tree = try src_loc.file_scope.getTree(gpa);
2006 var buf: [1]Ast.Node.Index = undefined;
2007 const full = tree.fullFnProto(&buf, src_loc.base_node).?;
2008 var param_it = full.iterate(tree);
2009 for (0..param_idx) |_| assert(param_it.next() != null);
2010 const param = param_it.next().?;
2011 return tree.nodeToSpan(param.type_expr);
2017 pub const LazySrcLoc = struct {
2018 /// This instruction provides the source node that locations are resolved relative to.
2019 /// It is a `declaration`, `struct_decl`, `union_decl`, `enum_decl`, or `opaque_decl`.
2020 /// This must be valid even if `offset` is an absolute value, since it is required to
2021 /// determine the file which the `LazySrcLoc` refers to.
2022 base_node_inst: InternPool.TrackedInst.Index,
2023 /// This field determines the source location relative to `base_node_inst`.
2026 pub const Offset = union(enum) {
2027 /// When this tag is set, the code that constructed this `LazySrcLoc` is asserting
2028 /// that all code paths which would need to resolve the source location are
2029 /// unreachable. If you are debugging a case where this tag is incorrectly set,
2030 /// look into using reverse-continue with a memory watchpoint to see where the
2031 /// value is being set to this tag.
2032 /// `base_node_inst` is unused.
2034 /// Means the source location points to an entire file; not any particular
2035 /// location within the file. `file_scope` union field will be active.
2037 /// The source location points to a byte offset within a source file,
2038 /// offset from 0. The source file is determined contextually.
2040 /// The source location points to a token within a source file,
2041 /// offset from 0. The source file is determined contextually.
2043 /// The source location points to an AST node within a source file,
2044 /// offset from 0. The source file is determined contextually.
2046 /// The source location points to a byte offset within a source file,
2047 /// offset from the byte offset of the base node within the file.
2049 /// This data is the offset into the token list from the base node's first token.
2051 /// The source location points to an AST node, which is this value offset
2052 /// from its containing base node AST index.
2053 node_offset: TracedOffset,
2054 /// The source location points to the main token of an AST node, found
2055 /// by taking this AST node index offset from the containing base node.
2056 node_offset_main_token: i32,
2057 /// The source location points to the beginning of a struct initializer.
2058 node_offset_initializer: i32,
2059 /// The source location points to a variable declaration type expression,
2060 /// found by taking this AST node index offset from the containing
2061 /// base node, which points to a variable declaration AST node. Next, navigate
2062 /// to the type expression.
2063 node_offset_var_decl_ty: i32,
2064 /// The source location points to the alignment expression of a var decl.
2065 node_offset_var_decl_align: i32,
2066 /// The source location points to the linksection expression of a var decl.
2067 node_offset_var_decl_section: i32,
2068 /// The source location points to the addrspace expression of a var decl.
2069 node_offset_var_decl_addrspace: i32,
2070 /// The source location points to the initializer of a var decl.
2071 node_offset_var_decl_init: i32,
2072 /// The source location points to the given argument of a builtin function call.
2073 /// `builtin_call_node` points to the builtin call.
2074 /// `arg_index` is the index of the argument which the source location refers to.
2075 node_offset_builtin_call_arg: struct {
2076 builtin_call_node: i32,
2079 /// Like `node_offset_builtin_call_arg` but recurses through arbitrarily many calls
2080 /// to pointer cast builtins (taking the first argument of the most nested).
2081 node_offset_ptrcast_operand: i32,
2082 /// The source location points to the index expression of an array access
2083 /// expression, found by taking this AST node index offset from the containing
2084 /// base node, which points to an array access AST node. Next, navigate
2085 /// to the index expression.
2086 node_offset_array_access_index: i32,
2087 /// The source location points to the pointer operand (LHS) of a slice
2088 /// expression, found by taking this AST node index offset from the containing
2089 /// base node, which points to a slice AST node. Next, navigate
2090 /// to the pointer expression.
2091 node_offset_slice_ptr: i32,
2092 /// The source location points to the start expression of a slice
2093 /// expression, found by taking this AST node index offset from the containing
2094 /// base node, which points to a slice AST node. Next, navigate
2095 /// to the start expression.
2096 node_offset_slice_start: i32,
2097 /// The source location points to the end expression of a slice
2098 /// expression, found by taking this AST node index offset from the containing
2099 /// base node, which points to a slice AST node. Next, navigate
2100 /// to the end expression.
2101 node_offset_slice_end: i32,
2102 /// The source location points to the sentinel expression of a slice
2103 /// expression, found by taking this AST node index offset from the containing
2104 /// base node, which points to a slice AST node. Next, navigate
2105 /// to the sentinel expression.
2106 node_offset_slice_sentinel: i32,
2107 /// The source location points to the callee expression of a function
2108 /// call expression, found by taking this AST node index offset from the containing
2109 /// base node, which points to a function call AST node. Next, navigate
2110 /// to the callee expression.
2111 node_offset_call_func: i32,
2112 /// The payload is offset from the containing base node.
2113 /// The source location points to the field name of:
2114 /// * a field access expression (`a.b`), or
2115 /// * the callee of a method call (`a.b()`)
2116 node_offset_field_name: i32,
2117 /// The payload is offset from the containing base node.
2118 /// The source location points to the field name of the operand ("b" node)
2119 /// of a field initialization expression (`.a = b`)
2120 node_offset_field_name_init: i32,
2121 /// The source location points to the pointer of a pointer deref expression,
2122 /// found by taking this AST node index offset from the containing
2123 /// base node, which points to a pointer deref AST node. Next, navigate
2124 /// to the pointer expression.
2125 node_offset_deref_ptr: i32,
2126 /// The source location points to the assembly source code of an inline assembly
2127 /// expression, found by taking this AST node index offset from the containing
2128 /// base node, which points to an inline assembly AST node. Next, navigate
2129 /// to the asm template source code.
2130 node_offset_asm_source: i32,
2131 /// The source location points to the return type of an inline assembly
2132 /// expression, found by taking this AST node index offset from the containing
2133 /// base node, which points to an inline assembly AST node. Next, navigate
2134 /// to the return type expression.
2135 node_offset_asm_ret_ty: i32,
2136 /// The source location points to the condition expression of an if
2137 /// expression, found by taking this AST node index offset from the containing
2138 /// base node, which points to an if expression AST node. Next, navigate
2139 /// to the condition expression.
2140 node_offset_if_cond: i32,
2141 /// The source location points to a binary expression, such as `a + b`, found
2142 /// by taking this AST node index offset from the containing base node.
2143 node_offset_bin_op: i32,
2144 /// The source location points to the LHS of a binary expression, found
2145 /// by taking this AST node index offset from the containing base node,
2146 /// which points to a binary expression AST node. Next, navigate to the LHS.
2147 node_offset_bin_lhs: i32,
2148 /// The source location points to the RHS of a binary expression, found
2149 /// by taking this AST node index offset from the containing base node,
2150 /// which points to a binary expression AST node. Next, navigate to the RHS.
2151 node_offset_bin_rhs: i32,
2152 /// The source location points to the operand of a switch expression, found
2153 /// by taking this AST node index offset from the containing base node,
2154 /// which points to a switch expression AST node. Next, navigate to the operand.
2155 node_offset_switch_operand: i32,
2156 /// The source location points to the else/`_` prong of a switch expression, found
2157 /// by taking this AST node index offset from the containing base node,
2158 /// which points to a switch expression AST node. Next, navigate to the else/`_` prong.
2159 node_offset_switch_special_prong: i32,
2160 /// The source location points to all the ranges of a switch expression, found
2161 /// by taking this AST node index offset from the containing base node,
2162 /// which points to a switch expression AST node. Next, navigate to any of the
2163 /// range nodes. The error applies to all of them.
2164 node_offset_switch_range: i32,
2165 /// The source location points to the align expr of a function type
2166 /// expression, found by taking this AST node index offset from the containing
2167 /// base node, which points to a function type AST node. Next, navigate to
2168 /// the align expression node.
2169 node_offset_fn_type_align: i32,
2170 /// The source location points to the addrspace expr of a function type
2171 /// expression, found by taking this AST node index offset from the containing
2172 /// base node, which points to a function type AST node. Next, navigate to
2173 /// the addrspace expression node.
2174 node_offset_fn_type_addrspace: i32,
2175 /// The source location points to the linksection expr of a function type
2176 /// expression, found by taking this AST node index offset from the containing
2177 /// base node, which points to a function type AST node. Next, navigate to
2178 /// the linksection expression node.
2179 node_offset_fn_type_section: i32,
2180 /// The source location points to the calling convention of a function type
2181 /// expression, found by taking this AST node index offset from the containing
2182 /// base node, which points to a function type AST node. Next, navigate to
2183 /// the calling convention node.
2184 node_offset_fn_type_cc: i32,
2185 /// The source location points to the return type of a function type
2186 /// expression, found by taking this AST node index offset from the containing
2187 /// base node, which points to a function type AST node. Next, navigate to
2188 /// the return type node.
2189 node_offset_fn_type_ret_ty: i32,
2190 node_offset_param: i32,
2191 token_offset_param: i32,
2192 /// The source location points to the type expression of an `anyframe->T`
2193 /// expression, found by taking this AST node index offset from the containing
2194 /// base node, which points to an `anyframe->T` expression AST node. Next, navigate
2195 /// to the type expression.
2196 node_offset_anyframe_type: i32,
2197 /// The source location points to the string literal of `extern "foo"`, found
2198 /// by taking this AST node index offset from the containing
2199 /// base node, which points to a function prototype or variable declaration
2200 /// expression AST node. Next, navigate to the string literal of the `extern "foo"`.
2201 node_offset_lib_name: i32,
2202 /// The source location points to the len expression of an `[N:S]T`
2203 /// expression, found by taking this AST node index offset from the containing
2204 /// base node, which points to an `[N:S]T` expression AST node. Next, navigate
2205 /// to the len expression.
2206 node_offset_array_type_len: i32,
2207 /// The source location points to the sentinel expression of an `[N:S]T`
2208 /// expression, found by taking this AST node index offset from the containing
2209 /// base node, which points to an `[N:S]T` expression AST node. Next, navigate
2210 /// to the sentinel expression.
2211 node_offset_array_type_sentinel: i32,
2212 /// The source location points to the elem expression of an `[N:S]T`
2213 /// expression, found by taking this AST node index offset from the containing
2214 /// base node, which points to an `[N:S]T` expression AST node. Next, navigate
2215 /// to the elem expression.
2216 node_offset_array_type_elem: i32,
2217 /// The source location points to the operand of a unary expression.
2218 node_offset_un_op: i32,
2219 /// The source location points to the elem type of a pointer.
2220 node_offset_ptr_elem: i32,
2221 /// The source location points to the sentinel of a pointer.
2222 node_offset_ptr_sentinel: i32,
2223 /// The source location points to the align expr of a pointer.
2224 node_offset_ptr_align: i32,
2225 /// The source location points to the addrspace expr of a pointer.
2226 node_offset_ptr_addrspace: i32,
2227 /// The source location points to the bit-offset of a pointer.
2228 node_offset_ptr_bitoffset: i32,
2229 /// The source location points to the host size of a pointer.
2230 node_offset_ptr_hostsize: i32,
2231 /// The source location points to the tag type of a union or an enum.
2232 node_offset_container_tag: i32,
2233 /// The source location points to the default value of a field.
2234 node_offset_field_default: i32,
2235 /// The source location points to the type of an array or struct initializer.
2236 node_offset_init_ty: i32,
2237 /// The source location points to the LHS of an assignment.
2238 node_offset_store_ptr: i32,
2239 /// The source location points to the RHS of an assignment.
2240 node_offset_store_operand: i32,
2241 /// The source location points to the operand of a `return` statement, or
2242 /// the `return` itself if there is no explicit operand.
2243 node_offset_return_operand: i32,
2244 /// The source location points to a for loop input.
2246 /// Points to the for loop AST node.
2247 for_node_offset: i32,
2248 /// Picks one of the inputs from the condition.
2251 /// The source location points to one of the captures of a for loop, found
2252 /// by taking this AST node index offset from the containing
2253 /// base node, which points to one of the input nodes of a for loop.
2254 /// Next, navigate to the corresponding capture.
2255 for_capture_from_input: i32,
2256 /// The source location points to the argument node of a function call.
2258 /// Points to the function call AST node.
2259 call_node_offset: i32,
2260 /// The index of the argument the source location points to.
2263 fn_proto_param: FnProtoParam,
2264 fn_proto_param_type: FnProtoParam,
2265 array_cat_lhs: ArrayCat,
2266 array_cat_rhs: ArrayCat,
2267 /// The source location points to the name of the field at the given index
2268 /// of the container type declaration at the base node.
2269 container_field_name: u32,
2270 /// Like `container_field_name`, but points at the field's default value.
2271 container_field_value: u32,
2272 /// Like `container_field_name`, but points at the field's type.
2273 container_field_type: u32,
2274 /// Like `container_field_name`, but points at the field's alignment.
2275 container_field_align: u32,
2276 /// The source location points to the type of the field at the given index
2277 /// of the tuple type declaration at `tuple_decl_node_offset`.
2278 tuple_field_type: TupleField,
2279 /// The source location points to the default init of the field at the given index
2280 /// of the tuple type declaration at `tuple_decl_node_offset`.
2281 tuple_field_init: TupleField,
2282 /// The source location points to the given element/field of a struct or
2283 /// array initialization expression.
2285 /// Points to the AST node of the initialization expression.
2286 init_node_offset: i32,
2287 /// The index of the field/element the source location points to.
2290 // The following source locations are like `init_elem`, but refer to a
2291 // field with a specific name. If such a field is not given, the entire
2292 // initialization expression is used instead.
2293 // The `i32` points to the AST node of a builtin call, whose *second*
2294 // argument is the init expression.
2295 init_field_name: i32,
2296 init_field_linkage: i32,
2297 init_field_section: i32,
2298 init_field_visibility: i32,
2300 init_field_locality: i32,
2301 init_field_cache: i32,
2302 init_field_library: i32,
2303 init_field_thread_local: i32,
2304 init_field_dll_import: i32,
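// Illustrative example (not taken from real compiler input): for a call like
// `@export(&foo, .{ .name = "bar", .linkage = .strong });`, the second builtin
// argument is the init expression, so an `init_field_linkage` location points at
// the `.linkage = .strong` field of that struct literal, falling back to the whole
// `.{ ... }` expression if no such field is present.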
2305 /// The source location points to the value of an item in a specific
2306 /// case of a `switch`.
2307 switch_case_item: SwitchItem,
2308 /// The source location points to the "first" value of a range item in
2309 /// a specific case of a `switch`.
2310 switch_case_item_range_first: SwitchItem,
2311 /// The source location points to the "last" value of a range item in
2312 /// a specific case of a `switch`.
2313 switch_case_item_range_last: SwitchItem,
2314 /// The source location points to the main capture of a specific case of
2316 switch_capture: SwitchCapture,
2317 /// The source location points to the "tag" capture (second capture) of
2318 /// a specific case of a `switch`.
2319 switch_tag_capture: SwitchCapture,
2320 /// The source location points to the `comptime` token on the given comptime parameter,
2321 /// where the base node is a function declaration. The value is the parameter index.
2322 func_decl_param_comptime: u32,
2323 /// The source location points to the type annotation on the given function parameter,
2324 /// where the base node is a function declaration. The value is the parameter index.
2325 func_decl_param_ty: u32,
2327 pub const FnProtoParam = struct {
2328 /// The offset of the function prototype AST node.
2329 fn_proto_node_offset: i32,
2330 /// The index of the parameter the source location points to.
2334 pub const SwitchItem = struct {
2335 /// The offset of the switch AST node.
2336 switch_node_offset: i32,
2337 /// The index of the case to point to within this switch.
2338 case_idx: SwitchCaseIndex,
2339 /// The index of the item to point to within this case.
2340 item_idx: SwitchItemIndex,
2343 pub const SwitchCapture = struct {
2344 /// The offset of the switch AST node.
2345 switch_node_offset: i32,
2346 /// The index of the case whose capture to point to.
2347 case_idx: SwitchCaseIndex,
2350 pub const SwitchCaseIndex = packed struct(u32) {
2351 kind: enum(u1) { scalar, multi },
2354 pub const special: SwitchCaseIndex = @bitCast(@as(u32, std.math.maxInt(u32)));
2355 pub fn isSpecial(idx: SwitchCaseIndex) bool {
2356 return @as(u32, @bitCast(idx)) == @as(u32, @bitCast(special));
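// For example (illustrative): `.{ .kind = .scalar, .index = 1 }` refers to the second
// single-item (non-range, non-multi) prong of the switch, while `SwitchCaseIndex.special`
// refers to the `else`/`_` prong.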
2360 pub const SwitchItemIndex = packed struct(u32) {
2361 kind: enum(u1) { single, range },
2365 pub const ArrayCat = struct {
2366 /// Points to the array concat AST node.
2367 array_cat_offset: i32,
2368 /// The index of the element the source location points to.
2372 pub const TupleField = struct {
2373 /// Points to the AST node of the tuple type declaration.
2374 tuple_decl_node_offset: i32,
2375 /// The index of the tuple field the source location points to.
2379 pub const nodeOffset = if (TracedOffset.want_tracing) nodeOffsetDebug else nodeOffsetRelease;
2381 noinline fn nodeOffsetDebug(node_offset: i32) Offset {
2382 var result: LazySrcLoc = .{ .node_offset = .{ .x = node_offset } };
2383 result.node_offset.trace.addAddr(@returnAddress(), "init");
2387 fn nodeOffsetRelease(node_offset: i32) Offset {
2388 return .{ .node_offset = .{ .x = node_offset } };
2391 /// This wraps a simple integer in debug builds so that later on we can find out
2392 /// where in semantic analysis the value got set.
2393 pub const TracedOffset = struct {
2395 trace: std.debug.Trace = std.debug.Trace.init,
2397 const want_tracing = false;
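// Note: flipping `want_tracing` to `true` selects the `nodeOffsetDebug` variant above,
// which records the caller's return address in `trace` via `std.debug.Trace.addAddr`,
// making it possible to find out where a given node offset was constructed.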
2401 pub const unneeded: LazySrcLoc = .{
2402 .base_node_inst = undefined,
2403 .offset = .unneeded,
2406 /// Returns `null` if the ZIR instruction has been lost across incremental updates.
2407 pub fn resolveBaseNode(base_node_inst: InternPool.TrackedInst.Index, zcu: *Zcu) ?struct { *File, Ast.Node.Index } {
2408 comptime assert(Zir.inst_tracking_version == 0);
2410 const ip = &zcu.intern_pool;
2411 const file_index, const zir_inst = inst: {
2412 const info = base_node_inst.resolveFull(ip) orelse return null;
2413 break :inst .{ info.file, info.inst };
2415 const file = zcu.fileByIndex(file_index);
2417 // If we're relative to .main_struct_inst, we know the ast node is the root and don't need to resolve the ZIR,
2418 // which may not exist e.g. in the case of errors in ZON files.
2419 if (zir_inst == .main_struct_inst) return .{ file, 0 };
2421 // Otherwise, make sure ZIR is loaded.
2422 const zir = file.zir.?;
2424 const inst = zir.instructions.get(@intFromEnum(zir_inst));
2425 const base_node: Ast.Node.Index = switch (inst.tag) {
2426 .declaration => inst.data.declaration.src_node,
2427 .struct_init, .struct_init_ref => zir.extraData(Zir.Inst.StructInit, inst.data.pl_node.payload_index).data.abs_node,
2428 .struct_init_anon => zir.extraData(Zir.Inst.StructInitAnon, inst.data.pl_node.payload_index).data.abs_node,
2429 .extended => switch (inst.data.extended.opcode) {
2430 .struct_decl => zir.extraData(Zir.Inst.StructDecl, inst.data.extended.operand).data.src_node,
2431 .union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_node,
2432 .enum_decl => zir.extraData(Zir.Inst.EnumDecl, inst.data.extended.operand).data.src_node,
2433 .opaque_decl => zir.extraData(Zir.Inst.OpaqueDecl, inst.data.extended.operand).data.src_node,
2434 .reify => zir.extraData(Zir.Inst.Reify, inst.data.extended.operand).data.node,
2435 else => unreachable,
2437 else => unreachable,
2439 return .{ file, base_node };
2442 /// Resolve the file and AST node of `base_node_inst` to get a resolved `SrcLoc`.
2443 /// The resulting `SrcLoc` should only be used ephemerally, as it is not correct across incremental updates.
2444 pub fn upgrade(lazy: LazySrcLoc, zcu: *Zcu) SrcLoc {
2445 return lazy.upgradeOrLost(zcu).?;
2448 /// Like `upgrade`, but returns `null` if the source location has been lost across incremental updates.
2449 pub fn upgradeOrLost(lazy: LazySrcLoc, zcu: *Zcu) ?SrcLoc {
2450 const file, const base_node: Ast.Node.Index = if (lazy.offset == .entire_file) .{
2451 zcu.fileByIndex(lazy.base_node_inst.resolveFile(&zcu.intern_pool)),
2453 } else resolveBaseNode(lazy.base_node_inst, zcu) orelse return null;
2456 .base_node = base_node,
2457 .lazy = lazy.offset,
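// Illustrative use (assuming a `*Zcu` named `zcu`, an allocator `gpa`, and that the
// byte-span resolution implemented by the large switch earlier in this file is the
// `SrcLoc.span` method):
//     const src_loc = lazy.upgrade(zcu);
//     const span = try src_loc.span(gpa);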
2462 pub const SemaError = error{ OutOfMemory, AnalysisFail };
2463 pub const CompileError = error{
2465 /// When this is returned, the compile error for the failure has already been recorded.
2467 /// In a comptime scope, a return instruction was encountered. This error is only seen when
2468 /// doing a comptime function call.
2470 /// In a comptime scope, a break instruction was encountered. This error is only seen when
2471 /// evaluating a comptime block.
2475 pub fn init(zcu: *Zcu, thread_count: usize) !void {
2476 const gpa = zcu.gpa;
2477 try zcu.intern_pool.init(gpa, thread_count);
2480 pub fn deinit(zcu: *Zcu) void {
2481 const gpa = zcu.gpa;
2483 const pt: Zcu.PerThread = .activate(zcu, .main);
2484 defer pt.deactivate();
2486 if (zcu.llvm_object) |llvm_object| llvm_object.deinit();
2488 for (zcu.import_table.keys()) |key| {
2491 for (zcu.import_table.values()) |file_index| {
2492 pt.destroyFile(file_index);
2494 zcu.import_table.deinit(gpa);
2496 for (zcu.embed_table.keys(), zcu.embed_table.values()) |path, embed_file| {
2498 gpa.destroy(embed_file);
2500 zcu.embed_table.deinit(gpa);
2502 zcu.compile_log_text.deinit(gpa);
2504 zcu.local_zir_cache.handle.close();
2505 zcu.global_zir_cache.handle.close();
2507 for (zcu.failed_analysis.values()) |value| value.destroy(gpa);
2508 for (zcu.failed_codegen.values()) |value| value.destroy(gpa);
2509 for (zcu.failed_types.values()) |value| value.destroy(gpa);
2510 zcu.analysis_in_progress.deinit(gpa);
2511 zcu.failed_analysis.deinit(gpa);
2512 zcu.transitive_failed_analysis.deinit(gpa);
2513 zcu.failed_codegen.deinit(gpa);
2514 zcu.failed_types.deinit(gpa);
2516 for (zcu.failed_files.values()) |value| {
2517 if (value) |msg| msg.destroy(gpa);
2519 zcu.failed_files.deinit(gpa);
2521 for (zcu.failed_exports.values()) |value| {
2524 zcu.failed_exports.deinit(gpa);
2526 for (zcu.cimport_errors.values()) |*errs| {
2529 zcu.cimport_errors.deinit(gpa);
2531 zcu.compile_log_sources.deinit(gpa);
2533 zcu.all_exports.deinit(gpa);
2534 zcu.free_exports.deinit(gpa);
2535 zcu.single_exports.deinit(gpa);
2536 zcu.multi_exports.deinit(gpa);
2538 zcu.potentially_outdated.deinit(gpa);
2539 zcu.outdated.deinit(gpa);
2540 zcu.outdated_ready.deinit(gpa);
2541 zcu.retryable_failures.deinit(gpa);
2543 zcu.func_body_analysis_queued.deinit(gpa);
2544 zcu.nav_val_analysis_queued.deinit(gpa);
2546 zcu.test_functions.deinit(gpa);
2548 for (zcu.global_assembly.values()) |s| {
2551 zcu.global_assembly.deinit(gpa);
2553 zcu.reference_table.deinit(gpa);
2554 zcu.all_references.deinit(gpa);
2555 zcu.free_references.deinit(gpa);
2557 zcu.type_reference_table.deinit(gpa);
2558 zcu.all_type_references.deinit(gpa);
2559 zcu.free_type_references.deinit(gpa);
2561 if (zcu.resolved_references) |*r| r.deinit(gpa);
2563 zcu.intern_pool.deinit(gpa);
2566 pub fn namespacePtr(zcu: *Zcu, index: Namespace.Index) *Namespace {
2567 return zcu.intern_pool.namespacePtr(index);
2570 pub fn namespacePtrUnwrap(zcu: *Zcu, index: Namespace.OptionalIndex) ?*Namespace {
2571 return zcu.namespacePtr(index.unwrap() orelse return null);
2574 // TODO https://github.com/ziglang/zig/issues/8643
2575 pub const data_has_safety_tag = @sizeOf(Zir.Inst.Data) != 8;
2576 pub const HackDataLayout = extern struct {
2577 data: [8]u8 align(@alignOf(Zir.Inst.Data)),
2581 if (data_has_safety_tag) {
2582 assert(@sizeOf(HackDataLayout) == @sizeOf(Zir.Inst.Data));
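// In safe build modes the otherwise-untagged `Zir.Inst.Data` union carries a hidden
// safety tag, making it wider than the 8 payload bytes stored per instruction in the
// ZIR cache; `HackDataLayout` lets `loadZirCacheBody`/`saveZirCache` copy just those
// 8 payload bytes in and out of the cache file.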
2586 pub fn loadZirCache(gpa: Allocator, cache_file: std.fs.File) !Zir {
2587 return loadZirCacheBody(gpa, try cache_file.reader().readStruct(Zir.Header), cache_file);
2590 pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) !Zir {
2591 var instructions: std.MultiArrayList(Zir.Inst) = .{};
2592 errdefer instructions.deinit(gpa);
2594 try instructions.setCapacity(gpa, header.instructions_len);
2595 instructions.len = header.instructions_len;
2598 .instructions = instructions.toOwnedSlice(),
2599 .string_bytes = &.{},
2602 errdefer zir.deinit(gpa);
2604 zir.string_bytes = try gpa.alloc(u8, header.string_bytes_len);
2605 zir.extra = try gpa.alloc(u32, header.extra_len);
2607 const safety_buffer = if (data_has_safety_tag)
2608 try gpa.alloc([8]u8, header.instructions_len)
2611 defer if (data_has_safety_tag) gpa.free(safety_buffer);
2613 const data_ptr = if (data_has_safety_tag)
2614 @as([*]u8, @ptrCast(safety_buffer.ptr))
2616 @as([*]u8, @ptrCast(zir.instructions.items(.data).ptr));
2618 var iovecs = [_]std.posix.iovec{
2620 .base = @as([*]u8, @ptrCast(zir.instructions.items(.tag).ptr)),
2621 .len = header.instructions_len,
2625 .len = header.instructions_len * 8,
2628 .base = zir.string_bytes.ptr,
2629 .len = header.string_bytes_len,
2632 .base = @as([*]u8, @ptrCast(zir.extra.ptr)),
2633 .len = header.extra_len * 4,
2636 const amt_read = try cache_file.readvAll(&iovecs);
2637 const amt_expected = zir.instructions.len * 9 +
2638 zir.string_bytes.len +
2640 if (amt_read != amt_expected) return error.UnexpectedFileSize;
2641 if (data_has_safety_tag) {
2642 const tags = zir.instructions.items(.tag);
2643 for (zir.instructions.items(.data), 0..) |*data, i| {
2644 const union_tag = Zir.Inst.Tag.data_tags[@intFromEnum(tags[i])];
2645 const as_struct = @as(*HackDataLayout, @ptrCast(data));
2647 .safety_tag = @intFromEnum(union_tag),
2648 .data = safety_buffer[i],
2656 pub fn saveZirCache(gpa: Allocator, cache_file: std.fs.File, stat: std.fs.File.Stat, zir: Zir) (std.fs.File.WriteError || Allocator.Error)!void {
2657 const safety_buffer = if (data_has_safety_tag)
2658 try gpa.alloc([8]u8, zir.instructions.len)
2661 defer if (data_has_safety_tag) gpa.free(safety_buffer);
2663 const data_ptr: [*]const u8 = if (data_has_safety_tag)
2664 if (zir.instructions.len == 0)
2667 @ptrCast(safety_buffer.ptr)
2669 @ptrCast(zir.instructions.items(.data).ptr);
2671 if (data_has_safety_tag) {
2672 // The `Data` union has a safety tag but in the file format we store it without.
2673 for (zir.instructions.items(.data), 0..) |*data, i| {
2674 const as_struct: *const HackDataLayout = @ptrCast(data);
2675 safety_buffer[i] = as_struct.data;
2679 const header: Zir.Header = .{
2680 .instructions_len = @intCast(zir.instructions.len),
2681 .string_bytes_len = @intCast(zir.string_bytes.len),
2682 .extra_len = @intCast(zir.extra.len),
2684 .stat_size = stat.size,
2685 .stat_inode = stat.inode,
2686 .stat_mtime = stat.mtime,
2688 var iovecs: [5]std.posix.iovec_const = .{
2690 .base = @ptrCast(&header),
2691 .len = @sizeOf(Zir.Header),
2694 .base = @ptrCast(zir.instructions.items(.tag).ptr),
2695 .len = zir.instructions.len,
2699 .len = zir.instructions.len * 8,
2702 .base = zir.string_bytes.ptr,
2703 .len = zir.string_bytes.len,
2706 .base = @ptrCast(zir.extra.ptr),
2707 .len = zir.extra.len * 4,
2710 try cache_file.writevAll(&iovecs);
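// The resulting on-disk layout is, in order: `Zir.Header`, the instruction tags
// (1 byte each), the untagged instruction data (8 bytes each), the string bytes,
// and the `extra` array (4 bytes per element), matching the iovecs above and the
// reads performed by `loadZirCacheBody`.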
2713 pub fn saveZoirCache(cache_file: std.fs.File, stat: std.fs.File.Stat, zoir: Zoir) std.fs.File.WriteError!void {
2714 const header: Zoir.Header = .{
2715 .nodes_len = @intCast(zoir.nodes.len),
2716 .extra_len = @intCast(zoir.extra.len),
2717 .limbs_len = @intCast(zoir.limbs.len),
2718 .string_bytes_len = @intCast(zoir.string_bytes.len),
2719 .compile_errors_len = @intCast(zoir.compile_errors.len),
2720 .error_notes_len = @intCast(zoir.error_notes.len),
2722 .stat_size = stat.size,
2723 .stat_inode = stat.inode,
2724 .stat_mtime = stat.mtime,
2726 var iovecs: [9]std.posix.iovec_const = .{
2728 .base = @ptrCast(&header),
2729 .len = @sizeOf(Zoir.Header),
2732 .base = @ptrCast(zoir.nodes.items(.tag)),
2733 .len = zoir.nodes.len * @sizeOf(Zoir.Node.Repr.Tag),
2736 .base = @ptrCast(zoir.nodes.items(.data)),
2737 .len = zoir.nodes.len * 4,
2740 .base = @ptrCast(zoir.nodes.items(.ast_node)),
2741 .len = zoir.nodes.len * 4,
2744 .base = @ptrCast(zoir.extra),
2745 .len = zoir.extra.len * 4,
2748 .base = @ptrCast(zoir.limbs),
2749 .len = zoir.limbs.len * 4,
2752 .base = zoir.string_bytes.ptr,
2753 .len = zoir.string_bytes.len,
2756 .base = @ptrCast(zoir.compile_errors),
2757 .len = zoir.compile_errors.len * @sizeOf(Zoir.CompileError),
2760 .base = @ptrCast(zoir.error_notes),
2761 .len = zoir.error_notes.len * @sizeOf(Zoir.CompileError.Note),
2764 try cache_file.writevAll(&iovecs);
2767 pub fn loadZoirCacheBody(gpa: Allocator, header: Zoir.Header, cache_file: std.fs.File) !Zoir {
2772 .string_bytes = &.{},
2773 .compile_errors = &.{},
2774 .error_notes = &.{},
2776 errdefer zoir.deinit(gpa);
2778 zoir.nodes = nodes: {
2779 var nodes: std.MultiArrayList(Zoir.Node.Repr) = .empty;
2780 defer nodes.deinit(gpa);
2781 try nodes.setCapacity(gpa, header.nodes_len);
2782 nodes.len = header.nodes_len;
2783 break :nodes nodes.toOwnedSlice();
2786 zoir.extra = try gpa.alloc(u32, header.extra_len);
2787 zoir.limbs = try gpa.alloc(std.math.big.Limb, header.limbs_len);
2788 zoir.string_bytes = try gpa.alloc(u8, header.string_bytes_len);
2790 zoir.compile_errors = try gpa.alloc(Zoir.CompileError, header.compile_errors_len);
2791 zoir.error_notes = try gpa.alloc(Zoir.CompileError.Note, header.error_notes_len);
2793 var iovecs: [8]std.posix.iovec = .{
2795 .base = @ptrCast(zoir.nodes.items(.tag)),
2796 .len = header.nodes_len * @sizeOf(Zoir.Node.Repr.Tag),
2799 .base = @ptrCast(zoir.nodes.items(.data)),
2800 .len = header.nodes_len * 4,
2803 .base = @ptrCast(zoir.nodes.items(.ast_node)),
2804 .len = header.nodes_len * 4,
2807 .base = @ptrCast(zoir.extra),
2808 .len = header.extra_len * 4,
2811 .base = @ptrCast(zoir.limbs),
2812 .len = header.limbs_len * @sizeOf(std.math.big.Limb),
2815 .base = zoir.string_bytes.ptr,
2816 .len = header.string_bytes_len,
2819 .base = @ptrCast(zoir.compile_errors),
2820 .len = header.compile_errors_len * @sizeOf(Zoir.CompileError),
2823 .base = @ptrCast(zoir.error_notes),
2824 .len = header.error_notes_len * @sizeOf(Zoir.CompileError.Note),
2828 const bytes_expected = expected: {
2830 for (iovecs) |v| n += v.len;
2834 const bytes_read = try cache_file.readvAll(&iovecs);
2835 if (bytes_read != bytes_expected) return error.UnexpectedFileSize;
2839 pub fn markDependeeOutdated(
2841 /// When we are diffing ZIR and marking things as outdated, we won't yet have marked the dependencies as PO.
2842 /// However, when we discover during analysis that something was outdated, the `Dependee` was already
2843 /// marked as PO, so we need to decrement the PO dep count for each depender.
2844 marked_po: enum { not_marked_po, marked_po },
2845 dependee: InternPool.Dependee,
2847 log.debug("outdated dependee: {}", .{zcu.fmtDependee(dependee)});
2848 var it = zcu.intern_pool.dependencyIterator(dependee);
2849 while (it.next()) |depender| {
2850 if (zcu.outdated.getPtr(depender)) |po_dep_count| {
2851 switch (marked_po) {
2852 .not_marked_po => {},
2854 po_dep_count.* -= 1;
2855 log.debug("outdated {} => already outdated {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
2856 if (po_dep_count.* == 0) {
2857 log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)});
2858 try zcu.outdated_ready.put(zcu.gpa, depender, {});
2864 const opt_po_entry = zcu.potentially_outdated.fetchSwapRemove(depender);
2865 const new_po_dep_count = switch (marked_po) {
2866 .not_marked_po => if (opt_po_entry) |e| e.value else 0,
2867 .marked_po => if (opt_po_entry) |e| e.value - 1 else {
2868 // This `AnalUnit` has already been re-analyzed this update, and registered a dependency
2869 // on this thing, but already has sufficiently up-to-date information. Nothing to do.
2873 try zcu.outdated.putNoClobber(
2878 log.debug("outdated {} => new outdated {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), new_po_dep_count });
2879 if (new_po_dep_count == 0) {
2880 log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)});
2881 try zcu.outdated_ready.put(zcu.gpa, depender, {});
2883 // If this is a Decl and was not previously PO, we must recursively
2884 // mark dependencies on its tyval as PO.
2885 if (opt_po_entry == null) {
2886 assert(marked_po == .not_marked_po);
2887 try zcu.markTransitiveDependersPotentiallyOutdated(depender);
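// In summary: each depender of `dependee` ends up in `outdated` (unless it has already
// been re-analyzed this update), with its entry counting how many of its own dependencies
// are still potentially outdated; once that count reaches zero, the unit is added to
// `outdated_ready` for re-analysis.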
2892 pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
2893 log.debug("up-to-date dependee: {}", .{zcu.fmtDependee(dependee)});
2894 var it = zcu.intern_pool.dependencyIterator(dependee);
2895 while (it.next()) |depender| {
2896 if (zcu.outdated.getPtr(depender)) |po_dep_count| {
2897 // This depender is already outdated, but it now has one
2898 // less PO dependency!
2899 po_dep_count.* -= 1;
2900 log.debug("up-to-date {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
2901 if (po_dep_count.* == 0) {
2902 log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)});
2903 try zcu.outdated_ready.put(zcu.gpa, depender, {});
2907 // This depender is definitely at least PO, because this Decl was just analyzed
2908 // due to being outdated.
2909 const ptr = zcu.potentially_outdated.getPtr(depender) orelse {
2910 // This dependency has been registered during in-progress analysis, but the unit is
2911 // not in `potentially_outdated` because analysis is in-progress. Nothing to do.
2916 log.debug("up-to-date {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), ptr.* });
2920 log.debug("up-to-date {} => {} po_deps=0 (up-to-date)", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender) });
2922 // This dependency is no longer PO, i.e. is known to be up-to-date.
2923 assert(zcu.potentially_outdated.swapRemove(depender));
2924 // If this is a Decl, we must recursively mark dependencies on its tyval
2926 switch (depender.unwrap()) {
2928 .nav_val => |nav| try zcu.markPoDependeeUpToDate(.{ .nav_val = nav }),
2929 .nav_ty => |nav| try zcu.markPoDependeeUpToDate(.{ .nav_ty = nav }),
2930 .type => |ty| try zcu.markPoDependeeUpToDate(.{ .interned = ty }),
2931 .func => |func| try zcu.markPoDependeeUpToDate(.{ .interned = func }),
2932 .memoized_state => |stage| try zcu.markPoDependeeUpToDate(.{ .memoized_state = stage }),
2937 /// Given an `AnalUnit` which is newly outdated or PO, mark all AnalUnits which may
2938 /// in turn be PO, due to a dependency on the original AnalUnit's tyval or IES.
2939 fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUnit) !void {
2940 const ip = &zcu.intern_pool;
2941 const dependee: InternPool.Dependee = switch (maybe_outdated.unwrap()) {
2942 .@"comptime" => return, // analysis of a comptime decl can't outdate any dependencies
2943 .nav_val => |nav| .{ .nav_val = nav },
2944 .nav_ty => |nav| .{ .nav_ty = nav },
2945 .type => |ty| .{ .interned = ty },
2946 .func => |func_index| .{ .interned = func_index }, // IES
2947 .memoized_state => |stage| .{ .memoized_state = stage },
2949 log.debug("potentially outdated dependee: {}", .{zcu.fmtDependee(dependee)});
2950 var it = ip.dependencyIterator(dependee);
2951 while (it.next()) |po| {
2952 if (zcu.outdated.getPtr(po)) |po_dep_count| {
2953 // This dependency is already outdated, but it now has one more PO
2955 if (po_dep_count.* == 0) {
2956 _ = zcu.outdated_ready.swapRemove(po);
2958 po_dep_count.* += 1;
2959 log.debug("po {} => {} [outdated] po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), po_dep_count.* });
2962 if (zcu.potentially_outdated.getPtr(po)) |n| {
2963 // There is now one more PO dependency.
2965 log.debug("po {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), n.* });
2968 try zcu.potentially_outdated.putNoClobber(zcu.gpa, po, 1);
2969 log.debug("po {} => {} po_deps=1", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po) });
2970 // This AnalUnit was not already PO, so we must recursively mark its dependers as also PO.
2971 try zcu.markTransitiveDependersPotentiallyOutdated(po);
2975 pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {
2976 if (!zcu.comp.incremental) return null;
2978 if (zcu.outdated.count() == 0) {
2979 // Any units in `potentially_outdated` must just be stuck in loops with one another: none of those
2980 // units have had any outdated dependencies so far, and all of their remaining PO deps are triggered
2981 // by other units in `potentially_outdated`. So, we can safely assume those units are up-to-date.
2982 zcu.potentially_outdated.clearRetainingCapacity();
2983 log.debug("findOutdatedToAnalyze: no outdated depender", .{});
2987 // Our goal is to find an outdated AnalUnit which itself has no outdated or
2988 // PO dependencies. Most of the time, such an AnalUnit will exist - we track
2989 // them in the `outdated_ready` set for efficiency. However, this is not
2990 // necessarily the case, since the Decl dependency graph may contain loops
2991 // via mutually recursive definitions:
2992 // pub const A = struct { b: *B };
2993 // pub const B = struct { b: *A };
2994 // In this case, we must defer to more complex logic below.
2996 if (zcu.outdated_ready.count() > 0) {
2997 const unit = zcu.outdated_ready.keys()[0];
2998 log.debug("findOutdatedToAnalyze: trivial {}", .{zcu.fmtAnalUnit(unit)});
3002 // There is no single AnalUnit which is ready for re-analysis. Instead, we must assume that some
3003 // AnalUnit with PO dependencies is outdated -- e.g. in the above example we arbitrarily pick one of
3004 // A or B. We should definitely not select a function, since a function can't be responsible for the
3005 // loop (IES dependencies can't have loops). We should also, of course, not select a `comptime`
3006 // declaration, since you can't depend on those!
3008 // The choice of this unit could have a big impact on how much total analysis we perform, since
3009 // if analysis concludes any dependencies on its result are up-to-date, then other PO AnalUnits
3010 // may be resolved as up-to-date. To hopefully avoid doing too much work, let's find a unit
3011 // which the most things depend on - the idea is that this will resolve a lot of loops (but this
3012 // is only a heuristic).
3014 log.debug("findOutdatedToAnalyze: no trivial ready, using heuristic; {d} outdated, {d} PO", .{
3015 zcu.outdated.count(),
3016 zcu.potentially_outdated.count(),
3019 const ip = &zcu.intern_pool;
3021 var chosen_unit: ?AnalUnit = null;
3022 var chosen_unit_dependers: u32 = undefined;
3024 inline for (.{ zcu.outdated.keys(), zcu.potentially_outdated.keys() }) |outdated_units| {
3025 for (outdated_units) |unit| {
3027 var it = ip.dependencyIterator(switch (unit.unwrap()) {
3028 .func => continue, // a `func` definitely can't be causing the loop so it is a bad choice
3029 .@"comptime" => continue, // a `comptime` block can't even be depended on so it is a terrible choice
3030 .type => |ty| .{ .interned = ty },
3031 .nav_val => |nav| .{ .nav_val = nav },
3032 .nav_ty => |nav| .{ .nav_ty = nav },
3033 .memoized_state => {
3034 // If we've hit a loop and some `.memoized_state` is outdated, we should make that choice eagerly.
3035 // In general, it's good to resolve this early on, since -- for instance -- almost every function
3036 // references the panic handler.
3040 while (it.next()) |_| n += 1;
3042 if (chosen_unit == null or n > chosen_unit_dependers) {
3044 chosen_unit_dependers = n;
3049 log.debug("findOutdatedToAnalyze: heuristic returned '{}' ({d} dependers)", .{
3050 zcu.fmtAnalUnit(chosen_unit.?),
3051 chosen_unit_dependers,
3054 return chosen_unit.?;
3057 /// During an incremental update, before semantic analysis, call this to flush all values from
3058 /// `retryable_failures` and mark them as outdated so they get re-analyzed.
3059 pub fn flushRetryableFailures(zcu: *Zcu) !void {
3060 const gpa = zcu.gpa;
3061 for (zcu.retryable_failures.items) |depender| {
3062 if (zcu.outdated.contains(depender)) continue;
3063 if (zcu.potentially_outdated.fetchSwapRemove(depender)) |kv| {
3064 // This AnalUnit was already PO, but we now consider it outdated.
3065 // Any transitive dependencies are already marked PO.
3066 try zcu.outdated.put(gpa, depender, kv.value);
3069 // This AnalUnit was not marked PO, but is now outdated. Mark it as
3070 // such, then recursively mark transitive dependencies as PO.
3071 try zcu.outdated.put(gpa, depender, 0);
3072 try zcu.markTransitiveDependersPotentiallyOutdated(depender);
3074 zcu.retryable_failures.clearRetainingCapacity();
3077 pub fn mapOldZirToNew(
3081 inst_map: *std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index),
3082 ) Allocator.Error!void {
3083 // Contains ZIR indexes of namespace declaration instructions, e.g. struct_decl, union_decl, etc.
3084 // Not `declaration`, as this does not create a namespace.
3085 const MatchedZirDecl = struct {
3086 old_inst: Zir.Inst.Index,
3087 new_inst: Zir.Inst.Index,
3089 var match_stack: std.ArrayListUnmanaged(MatchedZirDecl) = .empty;
3090 defer match_stack.deinit(gpa);
3092 // Used as temporary buffers for namespace declaration instructions
3093 var old_contents: Zir.DeclContents = .init;
3094 defer old_contents.deinit(gpa);
3095 var new_contents: Zir.DeclContents = .init;
3096 defer new_contents.deinit(gpa);
3098 // Map the main struct inst (and anything in its fields)
3100 try old_zir.findTrackableRoot(gpa, &old_contents);
3101 try new_zir.findTrackableRoot(gpa, &new_contents);
3103 assert(old_contents.explicit_types.items[0] == .main_struct_inst);
3104 assert(new_contents.explicit_types.items[0] == .main_struct_inst);
3106 assert(old_contents.func_decl == null);
3107 assert(new_contents.func_decl == null);
3109 // We don't have any smart way of matching up these instructions, so we correlate them based on source order
3110 // in their respective arrays.
3112 const num_explicit_types = @min(old_contents.explicit_types.items.len, new_contents.explicit_types.items.len);
3113 try match_stack.ensureUnusedCapacity(gpa, @intCast(num_explicit_types));
3115 old_contents.explicit_types.items[0..num_explicit_types],
3116 new_contents.explicit_types.items[0..num_explicit_types],
3117 ) |old_inst, new_inst| {
3118 // Here we use `match_stack`, so that we will recursively consider declarations on these types.
3119 match_stack.appendAssumeCapacity(.{ .old_inst = old_inst, .new_inst = new_inst });
3122 const num_other = @min(old_contents.other.items.len, new_contents.other.items.len);
3123 try inst_map.ensureUnusedCapacity(gpa, @intCast(num_other));
3125 old_contents.other.items[0..num_other],
3126 new_contents.other.items[0..num_other],
3127 ) |old_inst, new_inst| {
3128 // These instructions don't have declarations, so we just modify `inst_map` directly.
3129 inst_map.putAssumeCapacity(old_inst, new_inst);
3133 while (match_stack.popOrNull()) |match_item| {
3134 // First, a check: if the number of captures of this type has changed, we can't map it, because
3135 // we wouldn't know how to correlate type information with the last update.
3136 // Synchronizes with logic in `Zcu.PerThread.recreateStructType` etc.
3137 if (old_zir.typeCapturesLen(match_item.old_inst) != new_zir.typeCapturesLen(match_item.new_inst)) {
3138 // Don't map this type or anything within it.
3142 // Match the namespace declaration itself
3143 try inst_map.put(gpa, match_item.old_inst, match_item.new_inst);
3145 // Maps decl name to `declaration` instruction.
3146 var named_decls: std.StringHashMapUnmanaged(Zir.Inst.Index) = .empty;
3147 defer named_decls.deinit(gpa);
3148 // Maps test name to `declaration` instruction.
3149 var named_tests: std.StringHashMapUnmanaged(Zir.Inst.Index) = .empty;
3150 defer named_tests.deinit(gpa);
3151 // Maps decltest name to `declaration` instruction.
3152 var named_decltests: std.StringHashMapUnmanaged(Zir.Inst.Index) = .empty;
3153 defer named_decltests.deinit(gpa);
3154 // All unnamed tests, in order, for a best-effort match.
3155 var unnamed_tests: std.ArrayListUnmanaged(Zir.Inst.Index) = .empty;
3156 defer unnamed_tests.deinit(gpa);
3157 // All comptime declarations, in order, for a best-effort match.
3158 var comptime_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .empty;
3159 defer comptime_decls.deinit(gpa);
3160 // All usingnamespace declarations, in order, for a best-effort match.
3161 var usingnamespace_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .empty;
3162 defer usingnamespace_decls.deinit(gpa);
3165 var old_decl_it = old_zir.declIterator(match_item.old_inst);
3166 while (old_decl_it.next()) |old_decl_inst| {
3167 const old_decl = old_zir.getDeclaration(old_decl_inst);
3168 switch (old_decl.kind) {
3169 .@"comptime" => try comptime_decls.append(gpa, old_decl_inst),
3170 .@"usingnamespace" => try usingnamespace_decls.append(gpa, old_decl_inst),
3171 .unnamed_test => try unnamed_tests.append(gpa, old_decl_inst),
3172 .@"test" => try named_tests.put(gpa, old_zir.nullTerminatedString(old_decl.name), old_decl_inst),
3173 .decltest => try named_decltests.put(gpa, old_zir.nullTerminatedString(old_decl.name), old_decl_inst),
3174 .@"const", .@"var" => try named_decls.put(gpa, old_zir.nullTerminatedString(old_decl.name), old_decl_inst),
3179 var unnamed_test_idx: u32 = 0;
3180 var comptime_decl_idx: u32 = 0;
3181 var usingnamespace_decl_idx: u32 = 0;
3183 var new_decl_it = new_zir.declIterator(match_item.new_inst);
3184 while (new_decl_it.next()) |new_decl_inst| {
3185 const new_decl = new_zir.getDeclaration(new_decl_inst);
3186 // Attempt to match this to a declaration in the old ZIR:
3187 // * For named declarations (`const`/`var`/`fn`), we match based on name.
3188 // * For named tests (`test "foo"`) and decltests (`test foo`), we also match based on name.
3189 // * For unnamed tests, we match based on order.
3190 // * For comptime blocks, we match based on order.
3191 // * For usingnamespace decls, we match based on order.
3192 // If we cannot match this declaration, we can't match anything nested inside of it either, so we just `continue`.
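// For example (illustrative): renaming `pub const foo = ...;` to
// `pub const bar = ...;` between updates produces no name match, so the old
// `foo` declaration (and everything nested inside it) is left unmapped and
// `bar` is analyzed as a brand-new declaration.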
3193 const old_decl_inst = switch (new_decl.kind) {
3194 .@"comptime" => inst: {
3195 if (comptime_decl_idx == comptime_decls.items.len) continue;
3196 defer comptime_decl_idx += 1;
3197 break :inst comptime_decls.items[comptime_decl_idx];
3199 .@"usingnamespace" => inst: {
3200 if (usingnamespace_decl_idx == usingnamespace_decls.items.len) continue;
3201 defer usingnamespace_decl_idx += 1;
3202 break :inst usingnamespace_decls.items[usingnamespace_decl_idx];
3204 .unnamed_test => inst: {
3205 if (unnamed_test_idx == unnamed_tests.items.len) continue;
3206 defer unnamed_test_idx += 1;
3207 break :inst unnamed_tests.items[unnamed_test_idx];
3210 const name = new_zir.nullTerminatedString(new_decl.name);
3211 break :inst named_tests.get(name) orelse continue;
3213 .decltest => inst: {
3214 const name = new_zir.nullTerminatedString(new_decl.name);
3215 break :inst named_decltests.get(name) orelse continue;
3217 .@"const", .@"var" => inst: {
3218 const name = new_zir.nullTerminatedString(new_decl.name);
3219 break :inst named_decls.get(name) orelse continue;
3223 // Match the `declaration` instruction
3224 try inst_map.put(gpa, old_decl_inst, new_decl_inst);
3226 // Find trackable instructions within this declaration
3227 try old_zir.findTrackable(gpa, &old_contents, old_decl_inst);
3228 try new_zir.findTrackable(gpa, &new_contents, new_decl_inst);
3230 // We don't have any smart way of matching up these instructions, so we correlate them based on source order
3231 // in their respective arrays.
3233 const num_explicit_types = @min(old_contents.explicit_types.items.len, new_contents.explicit_types.items.len);
3234 try match_stack.ensureUnusedCapacity(gpa, @intCast(num_explicit_types));
3236 old_contents.explicit_types.items[0..num_explicit_types],
3237 new_contents.explicit_types.items[0..num_explicit_types],
3238 ) |old_inst, new_inst| {
3239 // Here we use `match_stack`, so that we will recursively consider declarations on these types.
3240 match_stack.appendAssumeCapacity(.{ .old_inst = old_inst, .new_inst = new_inst });
3243 const num_other = @min(old_contents.other.items.len, new_contents.other.items.len);
3244 try inst_map.ensureUnusedCapacity(gpa, @intCast(num_other));
3246 old_contents.other.items[0..num_other],
3247 new_contents.other.items[0..num_other],
3248 ) |old_inst, new_inst| {
3249 // These instructions don't have declarations, so we just modify `inst_map` directly.
3250 inst_map.putAssumeCapacity(old_inst, new_inst);
3253 if (old_contents.func_decl) |old_func_inst| {
3254 if (new_contents.func_decl) |new_func_inst| {
3255 // There are no declarations on a function either, so again, we just directly add it to `inst_map`.
3256 try inst_map.put(gpa, old_func_inst, new_func_inst);
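// Editor's illustrative sketch (not part of the original source): a hypothetical example of
// the matching strategy implemented above. Given an old container
//     { const a; test "t"; test; test; }
// and a new version
//     { const b; test "t"; test; }
// the named test "t" maps by name, the first unnamed test maps by order, and `const b` has
// no entry in `named_decls`, so it (and anything nested inside it) is left unmapped and will
// simply be re-analyzed from scratch.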
3263 /// Ensure this function's body is or will be analyzed and emitted. This should
3264 /// be called whenever a potential runtime call of a function is seen.
3266 /// The caller is responsible for ensuring the function decl itself is already
3267 /// analyzed, and for ensuring it can exist at runtime (see
3268 /// `Type.fnHasRuntimeBitsSema`). This function does *not* guarantee that the body
3269 /// will be analyzed when it returns: for that, see `ensureFuncBodyAnalyzed`.
3270 pub fn ensureFuncBodyAnalysisQueued(zcu: *Zcu, func_index: InternPool.Index) !void {
3271 const ip = &zcu.intern_pool;
3272 const func = zcu.funcInfo(func_index);
3274 if (zcu.func_body_analysis_queued.contains(func_index)) return;
3276 if (func.analysisUnordered(ip).is_analyzed) {
3277 if (!zcu.outdated.contains(.wrap(.{ .func = func_index })) and
3278 !zcu.potentially_outdated.contains(.wrap(.{ .func = func_index })))
3280 // This function has been analyzed before and is definitely up-to-date.
3285 try zcu.func_body_analysis_queued.ensureUnusedCapacity(zcu.gpa, 1);
3286 try zcu.comp.queueJob(.{ .analyze_func = func_index });
3287 zcu.func_body_analysis_queued.putAssumeCapacityNoClobber(func_index, {});
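// Hypothetical caller sketch (editor's addition, not from this file; surrounding names are
// assumed): once Sema has resolved a callee to a concrete function value and confirmed it
// can exist at runtime via `Type.fnHasRuntimeBitsSema`, it would queue the body like so:
//     try zcu.ensureFuncBodyAnalysisQueued(callee_func_index);
// This only enqueues an `analyze_func` job; the body itself is analyzed later by a worker.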
3290 pub fn ensureNavValAnalysisQueued(zcu: *Zcu, nav_id: InternPool.Nav.Index) !void {
3291 const ip = &zcu.intern_pool;
3293 if (zcu.nav_val_analysis_queued.contains(nav_id)) return;
3295 if (ip.getNav(nav_id).status == .fully_resolved) {
3296 if (!zcu.outdated.contains(.wrap(.{ .nav_val = nav_id })) and
3297 !zcu.potentially_outdated.contains(.wrap(.{ .nav_val = nav_id })))
3299 // This `Nav` has been analyzed before and is definitely up-to-date.
3304 try zcu.nav_val_analysis_queued.ensureUnusedCapacity(zcu.gpa, 1);
3305 try zcu.comp.queueJob(.{ .analyze_comptime_unit = .wrap(.{ .nav_val = nav_id }) });
3306 zcu.nav_val_analysis_queued.putAssumeCapacityNoClobber(nav_id, {});
3309 pub const ImportFileResult = struct {
3311 file_index: File.Index,
3316 pub fn computePathDigest(zcu: *Zcu, mod: *Package.Module, sub_file_path: []const u8) Cache.BinDigest {
3317 const want_local_cache = mod == zcu.main_mod;
3318 var path_hash: Cache.HashHelper = .{};
3319 path_hash.addBytes(build_options.version);
3320 path_hash.add(builtin.zig_backend);
3321 if (!want_local_cache) {
3322 path_hash.addOptionalBytes(mod.root.root_dir.path);
3323 path_hash.addBytes(mod.root.sub_path);
3325 path_hash.addBytes(sub_file_path);
3326 var bin: Cache.BinDigest = undefined;
3327 path_hash.hasher.final(&bin);
3331 /// Delete all the Export objects that are caused by this `AnalUnit`. Re-analysis of
3332 /// this `AnalUnit` will cause them to be re-created (or not).
3333 pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void {
3334 const gpa = zcu.gpa;
3336 const exports_base, const exports_len = if (zcu.single_exports.fetchSwapRemove(anal_unit)) |kv|
3337 .{ @intFromEnum(kv.value), 1 }
3338 else if (zcu.multi_exports.fetchSwapRemove(anal_unit)) |info|
3339 .{ info.value.index, info.value.len }
3343 const exports = zcu.all_exports.items[exports_base..][0..exports_len];
3345 // In an only-c build, we're guaranteed to never use incremental compilation, so there are
3346 // guaranteed not to be any exports in the output file that need deleting (since we only call
3347 // `updateExports` on flush).
3348 // This case is needed because in some rare edge cases, `Sema` wants to add and delete exports
3349 // within a single update.
3350 if (dev.env.supports(.incremental)) {
3351 for (exports, exports_base..) |exp, export_index_usize| {
3352 const export_idx: Export.Index = @enumFromInt(export_index_usize);
3353 if (zcu.comp.bin_file) |lf| {
3354 lf.deleteExport(exp.exported, exp.opts.name);
3356 if (zcu.failed_exports.fetchSwapRemove(export_idx)) |failed_kv| {
3357 failed_kv.value.destroy(gpa);
3362 zcu.free_exports.ensureUnusedCapacity(gpa, exports_len) catch {
3363 // This space will be reused eventually, so we need not propagate this error.
3364 // Just leak the slots for now; the memory is reclaimed when the `Zcu` is deinitialized.
3367 for (exports_base..exports_base + exports_len) |export_idx| {
3368 zcu.free_exports.appendAssumeCapacity(@enumFromInt(export_idx));
3372 /// Delete all references in `reference_table` and `type_reference_table` which are caused by this `AnalUnit`.
3373 /// Re-analysis of the `AnalUnit` will cause appropriate references to be recreated.
3374 pub fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void {
3375 const gpa = zcu.gpa;
3377 zcu.clearCachedResolvedReferences();
3380 const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse break :unit_refs;
3383 while (idx != std.math.maxInt(u32)) {
3384 zcu.free_references.append(gpa, idx) catch {
3385 // This space will be reused eventually, so we need not propagate this error.
3386 // Just leak the remaining slots for now; the memory is reclaimed when the `Zcu` is deinitialized.
3389 idx = zcu.all_references.items[idx].next;
3394 const kv = zcu.type_reference_table.fetchSwapRemove(anal_unit) orelse break :type_refs;
3397 while (idx != std.math.maxInt(u32)) {
3398 zcu.free_type_references.append(gpa, idx) catch {
3399 // This space will be reused eventually, so we need not propagate this error.
3400 // Just leak the remaining slots for now; the memory is reclaimed when the `Zcu` is deinitialized.
3403 idx = zcu.all_type_references.items[idx].next;
3408 pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit, ref_src: LazySrcLoc) Allocator.Error!void {
3409 const gpa = zcu.gpa;
3411 zcu.clearCachedResolvedReferences();
3413 try zcu.reference_table.ensureUnusedCapacity(gpa, 1);
3415 const ref_idx = zcu.free_references.popOrNull() orelse idx: {
3416 _ = try zcu.all_references.addOne(gpa);
3417 break :idx zcu.all_references.items.len - 1;
3420 errdefer comptime unreachable;
3422 const gop = zcu.reference_table.getOrPutAssumeCapacity(src_unit);
3424 zcu.all_references.items[ref_idx] = .{
3425 .referenced = referenced_unit,
3426 .next = if (gop.found_existing) gop.value_ptr.* else std.math.maxInt(u32),
3430 gop.value_ptr.* = @intCast(ref_idx);
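// Editor's sketch of the data structure maintained above (illustrative, not part of the
// original source): `reference_table` maps each referencing `AnalUnit` to the head of a
// singly linked list stored in `all_references`, terminated by `std.math.maxInt(u32)`.
// After unit U records references to A and then B (assuming fresh indices 0 and 1):
//     reference_table[U] == 1
//     all_references[1]  == .{ .referenced = B, .next = 0 }
//     all_references[0]  == .{ .referenced = A, .next = maxInt(u32) }
// `deleteUnitReferences` walks the same list to return the slots to `free_references`.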
3433 pub fn addTypeReference(zcu: *Zcu, src_unit: AnalUnit, referenced_type: InternPool.Index, ref_src: LazySrcLoc) Allocator.Error!void {
3434 const gpa = zcu.gpa;
3436 zcu.clearCachedResolvedReferences();
3438 try zcu.type_reference_table.ensureUnusedCapacity(gpa, 1);
3440 const ref_idx = zcu.free_type_references.popOrNull() orelse idx: {
3441 _ = try zcu.all_type_references.addOne(gpa);
3442 break :idx zcu.all_type_references.items.len - 1;
3445 errdefer comptime unreachable;
3447 const gop = zcu.type_reference_table.getOrPutAssumeCapacity(src_unit);
3449 zcu.all_type_references.items[ref_idx] = .{
3450 .referenced = referenced_type,
3451 .next = if (gop.found_existing) gop.value_ptr.* else std.math.maxInt(u32),
3455 gop.value_ptr.* = @intCast(ref_idx);
3458 fn clearCachedResolvedReferences(zcu: *Zcu) void {
3459 if (zcu.resolved_references) |*r| r.deinit(zcu.gpa);
3460 zcu.resolved_references = null;
3463 pub fn errorSetBits(zcu: *const Zcu) u16 {
3464 if (zcu.error_limit == 0) return 0;
3465 return @as(u16, std.math.log2_int(ErrorInt, zcu.error_limit)) + 1;
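// Worked example of the computation above (illustrative values, editor's addition): with
// `error_limit == 1`, `log2_int(1) + 1 == 1` bit suffices; with `error_limit == 256`,
// `log2_int(256) + 1 == 9` bits are needed, since the error integer must be able to
// represent every value up to and including `error_limit`.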
3470 src_loc: LazySrcLoc,
3472 comptime format: []const u8,
3474 ) error{OutOfMemory}!void {
3475 const msg = try std.fmt.allocPrint(zcu.gpa, format, args);
3476 errdefer zcu.gpa.free(msg);
3478 parent.notes = try zcu.gpa.realloc(parent.notes, parent.notes.len + 1);
3479 parent.notes[parent.notes.len - 1] = .{
3485 /// Deprecated. There is no global target for a Zig Compilation Unit. Instead,
3486 /// look up the target based on the Module that contains the source code being analyzed.
3488 pub fn getTarget(zcu: *const Zcu) Target {
3489 return zcu.root_mod.resolved_target.result;
3492 /// Deprecated. There is no global optimization mode for a Zig Compilation
3493 /// Unit. Instead, look up the optimization mode based on the Module that
3494 /// contains the source code being analyzed.
3495 pub fn optimizeMode(zcu: *const Zcu) std.builtin.OptimizeMode {
3496 return zcu.root_mod.optimize_mode;
3499 pub fn handleUpdateExports(
3501 export_indices: []const Export.Index,
3502 result: link.File.UpdateExportsError!void,
3503 ) Allocator.Error!void {
3504 const gpa = zcu.gpa;
3505 result catch |err| switch (err) {
3506 error.OutOfMemory => return error.OutOfMemory,
3507 error.AnalysisFail => {
3508 const export_idx = export_indices[0];
3509 const new_export = export_idx.ptr(zcu);
3510 new_export.status = .failed_retryable;
3511 try zcu.failed_exports.ensureUnusedCapacity(gpa, 1);
3512 const msg = try ErrorMsg.create(gpa, new_export.src, "unable to export: {s}", .{@errorName(err)});
3513 zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, msg);
3518 pub fn addGlobalAssembly(zcu: *Zcu, unit: AnalUnit, source: []const u8) !void {
3519 const gpa = zcu.gpa;
3520 const gop = try zcu.global_assembly.getOrPut(gpa, unit);
3521 if (gop.found_existing) {
3522 const new_value = try std.fmt.allocPrint(gpa, "{s}\n{s}", .{ gop.value_ptr.*, source });
3523 gpa.free(gop.value_ptr.*);
3524 gop.value_ptr.* = new_value;
3526 gop.value_ptr.* = try gpa.dupe(u8, source);
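// Illustrative behavior note (editor's addition, not from the original source): repeated
// calls for the same `AnalUnit` concatenate with a newline separator, so
//     try zcu.addGlobalAssembly(unit, ".globl foo");
//     try zcu.addGlobalAssembly(unit, "foo: ret");
// leaves `zcu.global_assembly.get(unit).?` equal to ".globl foo\nfoo: ret".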
3530 pub const Feature = enum {
3531 /// When this feature is enabled, Sema will emit calls to
3532 /// `std.builtin.panic` functions for things like safety checks and
3533 /// unreachables. Otherwise traps will be emitted.
3535 /// When this feature is enabled, Sema will insert tracer functions for gathering a stack
3536 /// trace for error returns.
3538 /// When this feature is enabled, Sema will emit the `is_named_enum_value` AIR instructions
3539 /// and use it to check for corrupt switches. Backends currently need to implement their own
3540 /// logic to determine whether an enum value is in the set of named values.
3541 is_named_enum_value,
3542 error_set_has_value,
3544 /// When this feature is supported, the backend supports the following AIR instructions:
3545 /// * `Air.Inst.Tag.add_safe`
3546 /// * `Air.Inst.Tag.sub_safe`
3547 /// * `Air.Inst.Tag.mul_safe`
3548 /// * `Air.Inst.Tag.intcast_safe`
3549 /// The motivation for this feature is that it makes AIR smaller, and makes it easier
3550 /// to generate better machine code in the backends. All backends should migrate to
3551 /// enabling this feature.
3552 safety_checked_instructions,
3553 /// If the backend supports running from another thread.
3555 /// If the backend supports the following AIR instructions with vector types:
3556 /// * `Air.Inst.Tag.bit_and`
3557 /// * `Air.Inst.Tag.bit_or`
3558 /// * `Air.Inst.Tag.bitcast`
3559 /// * `Air.Inst.Tag.float_from_int`
3560 /// * `Air.Inst.Tag.fptrunc`
3561 /// * `Air.Inst.Tag.int_from_float`
3562 /// If not supported, Sema will scalarize the operation.
3563 all_vector_instructions,
3566 pub fn backendSupportsFeature(zcu: *const Zcu, comptime feature: Feature) bool {
3567 const backend = target_util.zigBackend(zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
3568 return target_util.backendSupportsFeature(backend, feature);
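// Hypothetical usage sketch (editor's addition, not part of this file): Sema and codegen
// call sites typically branch on feature support before choosing a lowering, for example:
//     if (zcu.backendSupportsFeature(.safety_checked_instructions)) {
//         // emit `add_safe` / `sub_safe` / `mul_safe` AIR directly
//     } else {
//         // expand the overflow check into explicit compare-and-panic AIR
//     }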
3571 pub const AtomicPtrAlignmentError = error{
3578 pub const AtomicPtrAlignmentDiagnostics = struct {
3579 bits: u16 = undefined,
3580 max_bits: u16 = undefined,
3583 /// If the ABI alignment of `ty` is OK for atomic operations, returns `.none`.
3584 /// Otherwise returns the alignment required on a pointer for the target
3585 /// to perform atomic operations.
3586 // TODO this function does not take into account CPU features, which can affect
3587 // this value. Audit this!
3588 pub fn atomicPtrAlignment(
3591 diags: *AtomicPtrAlignmentDiagnostics,
3592 ) AtomicPtrAlignmentError!Alignment {
3593 const target = zcu.getTarget();
3594 const max_atomic_bits: u16 = switch (target.cpu.arch) {
3649 .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .cx16)) 128 else 64,
3652 if (ty.toIntern() == .bool_type) return .none;
3653 if (ty.isRuntimeFloat()) {
3654 const bit_count = ty.floatBits(target);
3655 if (bit_count > max_atomic_bits) {
3658 .max_bits = max_atomic_bits,
3660 return error.FloatTooBig;
3664 if (ty.isAbiInt(zcu)) {
3665 const bit_count = ty.intInfo(zcu).bits;
3666 if (bit_count > max_atomic_bits) {
3669 .max_bits = max_atomic_bits,
3671 return error.IntTooBig;
3675 if (ty.isPtrAtRuntime(zcu)) return .none;
3676 return error.BadType;
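// Hypothetical usage sketch (editor's addition, not from this file; `fail` stands in for
// the caller's error helper): callers pass a diagnostics struct so they can report the
// offending bit widths when an error is returned:
//     var diags: Zcu.AtomicPtrAlignmentDiagnostics = .{};
//     _ = zcu.atomicPtrAlignment(ty, &diags) catch |err| switch (err) {
//         error.IntTooBig, error.FloatTooBig => return fail(
//             "{d}-bit atomics exceed the target maximum of {d} bits",
//             .{ diags.bits, diags.max_bits },
//         ),
//         else => |e| return e,
//     };
// A successful return of `.none` means the type's ABI alignment is already sufficient.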
3679 /// Returns null if `ty` does not correspond to a struct type (for example, if it has no `InternPool` index).
3681 pub fn typeToStruct(zcu: *const Zcu, ty: Type) ?InternPool.LoadedStructType {
3682 if (ty.ip_index == .none) return null;
3683 const ip = &zcu.intern_pool;
3684 return switch (ip.indexToKey(ty.ip_index)) {
3685 .struct_type => ip.loadStructType(ty.ip_index),
3690 pub fn typeToPackedStruct(zcu: *const Zcu, ty: Type) ?InternPool.LoadedStructType {
3691 const s = zcu.typeToStruct(ty) orelse return null;
3692 if (s.layout != .@"packed") return null;
3696 pub fn typeToUnion(zcu: *const Zcu, ty: Type) ?InternPool.LoadedUnionType {
3697 if (ty.ip_index == .none) return null;
3698 const ip = &zcu.intern_pool;
3699 return switch (ip.indexToKey(ty.ip_index)) {
3700 .union_type => ip.loadUnionType(ty.ip_index),
3705 pub fn typeToFunc(zcu: *const Zcu, ty: Type) ?InternPool.Key.FuncType {
3706 if (ty.ip_index == .none) return null;
3707 return zcu.intern_pool.indexToFuncType(ty.toIntern());
3710 pub fn iesFuncIndex(zcu: *const Zcu, ies_index: InternPool.Index) InternPool.Index {
3711 return zcu.intern_pool.iesFuncIndex(ies_index);
3714 pub fn funcInfo(zcu: *const Zcu, func_index: InternPool.Index) InternPool.Key.Func {
3715 return zcu.intern_pool.toFunc(func_index);
3718 pub const UnionLayout = struct {
3720 abi_align: Alignment,
3721 most_aligned_field: u32,
3722 most_aligned_field_size: u64,
3725 payload_align: Alignment,
3726 tag_align: Alignment,
3730 pub fn tagOffset(layout: UnionLayout) u64 {
3731 return if (layout.tag_align.compare(.lt, layout.payload_align)) layout.payload_size else 0;
3734 pub fn payloadOffset(layout: UnionLayout) u64 {
3735 return if (layout.tag_align.compare(.lt, layout.payload_align)) 0 else layout.tag_size;
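// Worked example of the two offset rules above (illustrative numbers, editor's addition):
// for a union whose payload needs 8 bytes at 8-byte alignment and whose tag is a 1-byte
// enum, the tag is less aligned than the payload, so the payload starts at offset 0 and
// the tag lives at `tagOffset() == payload_size == 8`. If the tag were instead the more
// aligned member, the tag would start at offset 0 and the payload at
// `payloadOffset() == tag_size`.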
3739 /// Returns the index of the active union field, given the current tag value.
3740 pub fn unionTagFieldIndex(zcu: *const Zcu, loaded_union: InternPool.LoadedUnionType, enum_tag: Value) ?u32 {
3741 const ip = &zcu.intern_pool;
3742 if (enum_tag.toIntern() == .none) return null;
3743 assert(ip.typeOf(enum_tag.toIntern()) == loaded_union.enum_tag_ty);
3744 return loaded_union.loadTagType(ip).tagValueIndex(ip, enum_tag.toIntern());
3747 pub const ResolvedReference = struct {
3748 referencer: AnalUnit,
3752 /// Returns a mapping from an `AnalUnit` to where it is referenced.
3753 /// If the value is `null`, the `AnalUnit` is a root of analysis.
3754 /// If an `AnalUnit` is not in the returned map, it is unreferenced.
3755 /// The returned hashmap is owned by the `Zcu`, so it must not be freed by the caller.
3756 /// This hashmap is cached, so repeated calls to this function are cheap.
3757 pub fn resolveReferences(zcu: *Zcu) !*const std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) {
3758 if (zcu.resolved_references == null) {
3759 zcu.resolved_references = try zcu.resolveReferencesInner();
3761 return &zcu.resolved_references.?;
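// Hypothetical usage sketch (editor's addition, not from the original source): incremental
// compilation and error reporting consult this map to decide whether a unit is still alive:
//     const refs = try zcu.resolveReferences();
//     if (!refs.contains(unit)) {
//         // `unit` is unreferenced; its errors need not be reported.
//     } else if (refs.get(unit).? == null) {
//         // `unit` is a root of analysis.
//     }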
3763 fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) {
3764 const gpa = zcu.gpa;
3765 const comp = zcu.comp;
3766 const ip = &zcu.intern_pool;
3768 var result: std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) = .empty;
3769 errdefer result.deinit(gpa);
3771 var checked_types: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .empty;
3772 var type_queue: std.AutoArrayHashMapUnmanaged(InternPool.Index, ?ResolvedReference) = .empty;
3773 var unit_queue: std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) = .empty;
3775 checked_types.deinit(gpa);
3776 type_queue.deinit(gpa);
3777 unit_queue.deinit(gpa);
3780 // This is not a sufficient size, but a lower bound.
3781 try result.ensureTotalCapacity(gpa, @intCast(zcu.reference_table.count()));
3783 try type_queue.ensureTotalCapacity(gpa, zcu.analysis_roots.len);
3784 for (zcu.analysis_roots.slice()) |mod| {
3785 // Logic ripped from `Zcu.PerThread.importPkg`.
3786 // TODO: this is silly, `Module` should just store a reference to its root `File`.
3787 const resolved_path = try std.fs.path.resolve(gpa, &.{
3788 mod.root.root_dir.path orelse ".",
3792 defer gpa.free(resolved_path);
3793 const file = zcu.import_table.get(resolved_path).?;
3794 const root_ty = zcu.fileRootType(file);
3795 if (root_ty == .none) continue;
3796 type_queue.putAssumeCapacityNoClobber(root_ty, null);
3800 if (type_queue.pop()) |kv| {
3802 const referencer = kv.value;
3803 try checked_types.putNoClobber(gpa, ty, {});
3805 log.debug("handle type '{}'", .{Type.fromInterned(ty).containerTypeName(ip).fmt(ip)});
3807 // If this type undergoes type resolution, the corresponding `AnalUnit` is automatically referenced.
3808 const has_resolution: bool = switch (ip.indexToKey(ty)) {
3809 .struct_type, .union_type => true,
3810 .enum_type => |k| k != .generated_tag,
3811 .opaque_type => false,
3812 else => unreachable,
3814 if (has_resolution) {
3815 // this should only be referenced by the type
3816 const unit: AnalUnit = .wrap(.{ .type = ty });
3817 assert(!result.contains(unit));
3818 try unit_queue.putNoClobber(gpa, unit, referencer);
3821 // If this is a union with a generated tag, its tag type is automatically referenced.
3822 // We don't add this reference for non-generated tags, as those will already be referenced via the union's type resolution, with a better source location.
3823 if (zcu.typeToUnion(Type.fromInterned(ty))) |union_obj| {
3824 const tag_ty = union_obj.enum_tag_ty;
3825 if (tag_ty != .none) {
3826 if (ip.indexToKey(tag_ty).enum_type == .generated_tag) {
3827 if (!checked_types.contains(tag_ty)) {
3828 try type_queue.put(gpa, tag_ty, referencer);
3834 // Queue any decls within this type which would be automatically analyzed.
3835 // Keep in sync with analysis queueing logic in `Zcu.PerThread.ScanDeclIter.scanDecl`.
3836 const ns = Type.fromInterned(ty).getNamespace(zcu).unwrap().?;
3837 for (zcu.namespacePtr(ns).comptime_decls.items) |cu| {
3838 // `comptime` decls are always analyzed.
3839 const unit: AnalUnit = .wrap(.{ .@"comptime" = cu });
3840 if (!result.contains(unit)) {
3841 log.debug("type '{}': ref comptime %{}", .{
3842 Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
3843 @intFromEnum(ip.getComptimeUnit(cu).zir_index.resolve(ip) orelse continue),
3845 try unit_queue.put(gpa, unit, referencer);
3848 for (zcu.namespacePtr(ns).test_decls.items) |nav_id| {
3849 const nav = ip.getNav(nav_id);
3850 // `test` declarations are analyzed depending on the test filter.
3851 const inst_info = nav.analysis.?.zir_index.resolveFull(ip) orelse continue;
3852 const file = zcu.fileByIndex(inst_info.file);
3853 const decl = file.zir.?.getDeclaration(inst_info.inst);
3855 if (!comp.config.is_test or file.mod != zcu.main_mod) continue;
3857 const want_analysis = switch (decl.kind) {
3858 .@"usingnamespace" => unreachable,
3859 .@"const", .@"var" => unreachable,
3860 .@"comptime" => unreachable,
3861 .unnamed_test => true,
3862 .@"test", .decltest => a: {
3863 const fqn_slice = nav.fqn.toSlice(ip);
3864 for (comp.test_filters) |test_filter| {
3865 if (std.mem.indexOf(u8, fqn_slice, test_filter) != null) break;
3866 } else break :a false;
3870 if (want_analysis) {
3871 log.debug("type '{}': ref test %{}", .{
3872 Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
3873 @intFromEnum(inst_info.inst),
3875 const unit: AnalUnit = .wrap(.{ .nav_val = nav_id });
3876 try unit_queue.put(gpa, unit, referencer);
3879 for (zcu.namespacePtr(ns).pub_decls.keys()) |nav| {
3880 // These are named declarations. They are analyzed only if marked `export`.
3881 const inst_info = ip.getNav(nav).analysis.?.zir_index.resolveFull(ip) orelse continue;
3882 const file = zcu.fileByIndex(inst_info.file);
3883 const decl = file.zir.?.getDeclaration(inst_info.inst);
3884 if (decl.linkage == .@"export") {
3885 const unit: AnalUnit = .wrap(.{ .nav_val = nav });
3886 if (!result.contains(unit)) {
3887 log.debug("type '{}': ref named %{}", .{
3888 Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
3889 @intFromEnum(inst_info.inst),
3891 try unit_queue.put(gpa, unit, referencer);
3895 for (zcu.namespacePtr(ns).priv_decls.keys()) |nav| {
3896 // These are named declarations. They are analyzed only if marked `export`.
3897 const inst_info = ip.getNav(nav).analysis.?.zir_index.resolveFull(ip) orelse continue;
3898 const file = zcu.fileByIndex(inst_info.file);
3899 const decl = file.zir.?.getDeclaration(inst_info.inst);
3900 if (decl.linkage == .@"export") {
3901 const unit: AnalUnit = .wrap(.{ .nav_val = nav });
3902 if (!result.contains(unit)) {
3903 log.debug("type '{}': ref named %{}", .{
3904 Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
3905 @intFromEnum(inst_info.inst),
3907 try unit_queue.put(gpa, unit, referencer);
3911 // Incremental compilation does not support `usingnamespace`.
3912 // These are only included to keep good reference traces in non-incremental updates.
3913 for (zcu.namespacePtr(ns).pub_usingnamespace.items) |nav| {
3914 const unit: AnalUnit = .wrap(.{ .nav_val = nav });
3915 if (!result.contains(unit)) try unit_queue.put(gpa, unit, referencer);
3917 for (zcu.namespacePtr(ns).priv_usingnamespace.items) |nav| {
3918 const unit: AnalUnit = .wrap(.{ .nav_val = nav });
3919 if (!result.contains(unit)) try unit_queue.put(gpa, unit, referencer);
3923 if (unit_queue.pop()) |kv| {
3924 const unit = kv.key;
3925 try result.putNoClobber(gpa, unit, kv.value);
3927 // `nav_val` and `nav_ty` reference each other *implicitly* to save memory.
3929 const other: AnalUnit = .wrap(switch (unit.unwrap()) {
3930 .nav_val => |n| .{ .nav_ty = n },
3931 .nav_ty => |n| .{ .nav_val = n },
3932 .@"comptime", .type, .func, .memoized_state => break :queue_paired,
3934 if (result.contains(other)) break :queue_paired;
3935 try unit_queue.put(gpa, other, kv.value); // same reference location
3938 log.debug("handle unit '{}'", .{zcu.fmtAnalUnit(unit)});
3940 if (zcu.reference_table.get(unit)) |first_ref_idx| {
3941 assert(first_ref_idx != std.math.maxInt(u32));
3942 var ref_idx = first_ref_idx;
3943 while (ref_idx != std.math.maxInt(u32)) {
3944 const ref = zcu.all_references.items[ref_idx];
3945 if (!result.contains(ref.referenced)) {
3946 log.debug("unit '{}': ref unit '{}'", .{
3947 zcu.fmtAnalUnit(unit),
3948 zcu.fmtAnalUnit(ref.referenced),
3950 try unit_queue.put(gpa, ref.referenced, .{
3958 if (zcu.type_reference_table.get(unit)) |first_ref_idx| {
3959 assert(first_ref_idx != std.math.maxInt(u32));
3960 var ref_idx = first_ref_idx;
3961 while (ref_idx != std.math.maxInt(u32)) {
3962 const ref = zcu.all_type_references.items[ref_idx];
3963 if (!checked_types.contains(ref.referenced)) {
3964 log.debug("unit '{}': ref type '{}'", .{
3965 zcu.fmtAnalUnit(unit),
3966 Type.fromInterned(ref.referenced).containerTypeName(ip).fmt(ip),
3968 try type_queue.put(gpa, ref.referenced, .{
3984 pub fn fileByIndex(zcu: *const Zcu, file_index: File.Index) *File {
3985 return zcu.intern_pool.filePtr(file_index);
3988 /// Returns the root struct type that represents this `File`.
3989 /// If that struct type has not yet been created, returns `.none`.
3990 pub fn fileRootType(zcu: *const Zcu, file_index: File.Index) InternPool.Index {
3991 const ip = &zcu.intern_pool;
3992 const file_index_unwrapped = file_index.unwrap(ip);
3993 const files = ip.getLocalShared(file_index_unwrapped.tid).files.acquire();
3994 return files.view().items(.root_type)[file_index_unwrapped.index];
3997 pub fn setFileRootType(zcu: *Zcu, file_index: File.Index, root_type: InternPool.Index) void {
3998 const ip = &zcu.intern_pool;
3999 const file_index_unwrapped = file_index.unwrap(ip);
4000 const files = ip.getLocalShared(file_index_unwrapped.tid).files.acquire();
4001 files.view().items(.root_type)[file_index_unwrapped.index] = root_type;
4004 pub fn filePathDigest(zcu: *const Zcu, file_index: File.Index) Cache.BinDigest {
4005 const ip = &zcu.intern_pool;
4006 const file_index_unwrapped = file_index.unwrap(ip);
4007 const files = ip.getLocalShared(file_index_unwrapped.tid).files.acquire();
4008 return files.view().items(.bin_digest)[file_index_unwrapped.index];
4011 pub fn navSrcLoc(zcu: *const Zcu, nav_index: InternPool.Nav.Index) LazySrcLoc {
4012 const ip = &zcu.intern_pool;
4014 .base_node_inst = ip.getNav(nav_index).srcInst(ip),
4015 .offset = LazySrcLoc.Offset.nodeOffset(0),
4019 pub fn typeSrcLoc(zcu: *const Zcu, ty_index: InternPool.Index) LazySrcLoc {
4025 pub fn typeFileScope(zcu: *Zcu, ty_index: InternPool.Index) *File {
4031 pub fn navSrcLine(zcu: *Zcu, nav_index: InternPool.Nav.Index) u32 {
4032 const ip = &zcu.intern_pool;
4033 const inst_info = ip.getNav(nav_index).srcInst(ip).resolveFull(ip).?;
4034 const zir = zcu.fileByIndex(inst_info.file).zir;
4035 return zir.?.getDeclaration(inst_info.inst).src_line;
4038 pub fn navValue(zcu: *const Zcu, nav_index: InternPool.Nav.Index) Value {
4039 return Value.fromInterned(zcu.intern_pool.getNav(nav_index).status.fully_resolved.val);
4042 pub fn navFileScopeIndex(zcu: *Zcu, nav: InternPool.Nav.Index) File.Index {
4043 const ip = &zcu.intern_pool;
4044 return ip.getNav(nav).srcInst(ip).resolveFile(ip);
4047 pub fn navFileScope(zcu: *Zcu, nav: InternPool.Nav.Index) *File {
4048 return zcu.fileByIndex(zcu.navFileScopeIndex(nav));
4051 pub fn fmtAnalUnit(zcu: *Zcu, unit: AnalUnit) std.fmt.Formatter(formatAnalUnit) {
4052 return .{ .data = .{ .unit = unit, .zcu = zcu } };
4054 pub fn fmtDependee(zcu: *Zcu, d: InternPool.Dependee) std.fmt.Formatter(formatDependee) {
4055 return .{ .data = .{ .dependee = d, .zcu = zcu } };
4058 fn formatAnalUnit(data: struct { unit: AnalUnit, zcu: *Zcu }, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
4059 _ = .{ fmt, options };
4060 const zcu = data.zcu;
4061 const ip = &zcu.intern_pool;
4062 switch (data.unit.unwrap()) {
4063 .@"comptime" => |cu_id| {
4064 const cu = ip.getComptimeUnit(cu_id);
4065 if (cu.zir_index.resolveFull(ip)) |resolved| {
4066 const file_path = zcu.fileByIndex(resolved.file).sub_file_path;
4067 return writer.print("comptime(inst=('{s}', %{}) [{}])", .{ file_path, @intFromEnum(resolved.inst), @intFromEnum(cu_id) });
4069 return writer.print("comptime(inst=<lost> [{}])", .{@intFromEnum(cu_id)});
4072 .nav_val => |nav| return writer.print("nav_val('{}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) }),
4073 .nav_ty => |nav| return writer.print("nav_ty('{}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) }),
4074 .type => |ty| return writer.print("ty('{}' [{}])", .{ Type.fromInterned(ty).containerTypeName(ip).fmt(ip), @intFromEnum(ty) }),
4076 const nav = zcu.funcInfo(func).owner_nav;
4077 return writer.print("func('{}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(func) });
4079 .memoized_state => return writer.writeAll("memoized_state"),
4082 fn formatDependee(data: struct { dependee: InternPool.Dependee, zcu: *Zcu }, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
4083 _ = .{ fmt, options };
4084 const zcu = data.zcu;
4085 const ip = &zcu.intern_pool;
4086 switch (data.dependee) {
4088 const info = ti.resolveFull(ip) orelse {
4089 return writer.writeAll("inst(<lost>)");
4091 const file_path = zcu.fileByIndex(info.file).sub_file_path;
4092 return writer.print("inst('{s}', %{d})", .{ file_path, @intFromEnum(info.inst) });
4095 const fqn = ip.getNav(nav).fqn;
4096 return writer.print("nav_val('{}')", .{fqn.fmt(ip)});
4099 const fqn = ip.getNav(nav).fqn;
4100 return writer.print("nav_ty('{}')", .{fqn.fmt(ip)});
4102 .interned => |ip_index| switch (ip.indexToKey(ip_index)) {
4103 .struct_type, .union_type, .enum_type => return writer.print("type('{}')", .{Type.fromInterned(ip_index).containerTypeName(ip).fmt(ip)}),
4104 .func => |f| return writer.print("ies('{}')", .{ip.getNav(f.owner_nav).fqn.fmt(ip)}),
4105 else => unreachable,
4107 .zon_file => |file| {
4108 const file_path = zcu.fileByIndex(file).sub_file_path;
4109 return writer.print("zon_file('{s}')", .{file_path});
4111 .embed_file => |ef_idx| {
4112 const ef = ef_idx.get(zcu);
4113 return writer.print("embed_file('{s}')", .{std.fs.path.fmtJoin(&.{
4114 ef.owner.root.root_dir.path orelse "",
4115 ef.owner.root.sub_path,
4116 ef.sub_file_path.toSlice(ip),
4119 .namespace => |ti| {
4120 const info = ti.resolveFull(ip) orelse {
4121 return writer.writeAll("namespace(<lost>)");
4123 const file_path = zcu.fileByIndex(info.file).sub_file_path;
4124 return writer.print("namespace('{s}', %{d})", .{ file_path, @intFromEnum(info.inst) });
4126 .namespace_name => |k| {
4127 const info = k.namespace.resolveFull(ip) orelse {
4128 return writer.print("namespace(<lost>, '{}')", .{k.name.fmt(ip)});
4130 const file_path = zcu.fileByIndex(info.file).sub_file_path;
4131 return writer.print("namespace('{s}', %{d}, '{}')", .{ file_path, @intFromEnum(info.inst), k.name.fmt(ip) });
4133 .memoized_state => return writer.writeAll("memoized_state"),
4137 /// Given the `InternPool.Index` of a function, set its resolved IES to `.none` if it
4138 /// may be outdated. `Sema` should do this before ever loading a resolved IES.
4139 pub fn maybeUnresolveIes(zcu: *Zcu, func_index: InternPool.Index) !void {
4140 const unit = AnalUnit.wrap(.{ .func = func_index });
4141 if (zcu.outdated.contains(unit) or zcu.potentially_outdated.contains(unit)) {
4142 // We're consulting the resolved IES now, but the function is outdated, so its
4143 // IES may have changed. We have to assume the IES is outdated and set the resolved
4144 // set back to `.none`.
4146 // This will cause `PerThread.analyzeFnBody` to mark the IES as outdated when it's
4149 // Since the IES needs to be resolved, the function body will now definitely need
4150 // re-analysis (even if the IES turns out to be the same!), so mark it as
4151 // definitely-outdated if it's only PO.
4152 if (zcu.potentially_outdated.fetchSwapRemove(unit)) |kv| {
4153 const gpa = zcu.gpa;
4154 try zcu.outdated.putNoClobber(gpa, unit, kv.value);
4155 if (kv.value == 0) {
4156 try zcu.outdated_ready.put(gpa, unit, {});
4159 zcu.intern_pool.funcSetIesResolved(func_index, .none);
4163 pub fn callconvSupported(zcu: *Zcu, cc: std.builtin.CallingConvention) union(enum) {
4165 bad_arch: []const std.Target.Cpu.Arch, // value is allowed archs for cc
4166 bad_backend: std.builtin.CompilerBackend, // value is current backend
4168 const target = zcu.getTarget();
4169 const backend = target_util.zigBackend(target, zcu.comp.config.use_llvm);
4171 .auto, .@"inline" => return .ok,
4172 .@"async" => return .{ .bad_backend = backend }, // nothing supports async currently
4173 .naked => {}, // depends only on backend
4174 else => for (cc.archs()) |allowed_arch| {
4175 if (allowed_arch == target.cpu.arch) break;
4176 } else return .{ .bad_arch = cc.archs() },
4178 const backend_ok = switch (backend) {
4179 .stage1 => unreachable,
4180 .other => unreachable,
4183 .stage2_llvm => @import("codegen/llvm.zig").toLlvmCallConv(cc, target) != null,
4185 if (target.cCallingConvention()) |default_c| {
4186 if (cc.eql(default_c)) {
4190 break :ok switch (cc) {
4194 .x86_64_regcall_v3_sysv,
4195 .x86_64_regcall_v4_win,
4201 .x86_regcall_v4_win,
4211 => |opts| opts.incoming_stack_alignment == null,
4214 => |opts| opts.incoming_stack_alignment == null and target.os.tag != .watchos,
4216 => |opts| opts.incoming_stack_alignment == null and target.os.tag == .watchos,
4219 => |opts| opts.incoming_stack_alignment == null,
4223 => |opts| opts.incoming_stack_alignment == null,
4227 => |opts| opts.incoming_stack_alignment == null,
4232 => |opts| opts.incoming_stack_alignment == null and opts.register_params == 0,
4243 .stage2_wasm => switch (cc) {
4244 .wasm_watc => |opts| opts.incoming_stack_alignment == null,
4247 .stage2_arm => switch (cc) {
4248 .arm_aapcs => |opts| opts.incoming_stack_alignment == null,
4252 .stage2_x86_64 => switch (cc) {
4253 .x86_64_sysv, .x86_64_win, .naked => true, // incoming stack alignment supported
4256 .stage2_aarch64 => switch (cc) {
4258 .aarch64_aapcs_darwin,
4260 => |opts| opts.incoming_stack_alignment == null,
4264 .stage2_x86 => switch (cc) {
4267 => |opts| opts.incoming_stack_alignment == null and opts.register_params == 0,
4271 .stage2_riscv64 => switch (cc) {
4272 .riscv64_lp64 => |opts| opts.incoming_stack_alignment == null,
4276 .stage2_sparc64 => switch (cc) {
4277 .sparc64_sysv => |opts| opts.incoming_stack_alignment == null,
4281 .stage2_spirv64 => switch (cc) {
4282 .spirv_device, .spirv_kernel => true,
4283 .spirv_fragment, .spirv_vertex => target.os.tag == .vulkan,
4287 if (!backend_ok) return .{ .bad_backend = backend };
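// Hypothetical caller sketch (editor's addition, not part of this file; `fail` stands in
// for the caller's error helper): Sema would typically turn the result union into a
// compile error when validating a function type, e.g.:
//     switch (zcu.callconvSupported(cc)) {
//         .ok => {},
//         .bad_arch => |archs| return fail("calling convention unavailable on this architecture ({d} arch(s) allowed)", .{archs.len}),
//         .bad_backend => |backend| return fail("backend '{s}' does not support this calling convention", .{@tagName(backend)}),
//     }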
4291 /// Given that a `Nav` has value `val`, determine if a ref of that `Nav` gives a `const` pointer.
4292 pub fn navValIsConst(zcu: *const Zcu, val: InternPool.Index) bool {
4293 return switch (zcu.intern_pool.indexToKey(val)) {
4295 .@"extern" => |e| e.is_const,
4300 pub const CodegenFailError = error{
4301 /// Indicates the error message has already been stored in `Zcu.failed_codegen`.
4308 nav_index: InternPool.Nav.Index,
4309 comptime format: []const u8,
4311 ) CodegenFailError {
4312 const gpa = zcu.gpa;
4313 try zcu.failed_codegen.ensureUnusedCapacity(gpa, 1);
4314 const msg = try Zcu.ErrorMsg.create(gpa, zcu.navSrcLoc(nav_index), format, args);
4315 zcu.failed_codegen.putAssumeCapacityNoClobber(nav_index, msg);
4316 return error.CodegenFail;
4319 pub fn codegenFailMsg(zcu: *Zcu, nav_index: InternPool.Nav.Index, msg: *ErrorMsg) CodegenFailError {
4320 const gpa = zcu.gpa;
4322 errdefer msg.deinit(gpa);
4323 try zcu.failed_codegen.putNoClobber(gpa, nav_index, msg);
4325 return error.CodegenFail;
4328 pub fn codegenFailType(
4330 ty_index: InternPool.Index,
4331 comptime format: []const u8,
4333 ) CodegenFailError {
4334 const gpa = zcu.gpa;
4335 try zcu.failed_types.ensureUnusedCapacity(gpa, 1);
4336 const msg = try Zcu.ErrorMsg.create(gpa, zcu.typeSrcLoc(ty_index), format, args);
4337 zcu.failed_types.putAssumeCapacityNoClobber(ty_index, msg);
4338 return error.CodegenFail;
4341 pub fn codegenFailTypeMsg(zcu: *Zcu, ty_index: InternPool.Index, msg: *ErrorMsg) CodegenFailError {
4342 const gpa = zcu.gpa;
4344 errdefer msg.deinit(gpa);
4345 try zcu.failed_types.ensureUnusedCapacity(gpa, 1);
4347 zcu.failed_types.putAssumeCapacityNoClobber(ty_index, msg);
4348 return error.CodegenFail;