@@ -64,7 +64,7 @@ state: union(enum) {
     finished,
     /// The link thread is not running or queued, because it is waiting for this MIR to be populated.
     /// Once codegen completes, it must call `mirReady` which will restart the link thread.
-    wait_for_mir: *ZcuTask.LinkFunc.SharedMir,
+    wait_for_mir: InternPool.Index,
 },
 
 /// In the worst observed case, MIR is around 50 times as large as AIR. More typically, the ratio is
@@ -113,20 +113,20 @@ pub fn start(q: *Queue, comp: *Compilation) void {
 
 /// Called by codegen workers after they have populated a `ZcuTask.LinkFunc.SharedMir`. If the link
 /// thread was waiting for this MIR, it can resume.
-pub fn mirReady(q: *Queue, comp: *Compilation, mir: *ZcuTask.LinkFunc.SharedMir) void {
+pub fn mirReady(q: *Queue, comp: *Compilation, func_index: InternPool.Index, mir: *ZcuTask.LinkFunc.SharedMir) void {
     // We would like to assert that `mir` is not pending, but that would race with a worker thread
     // potentially freeing it.
     {
         q.mutex.lock();
         defer q.mutex.unlock();
         switch (q.state) {
             .finished, .running => return,
-            .wait_for_mir => |wait_for| if (wait_for != mir) return,
+            .wait_for_mir => |wait_for| if (wait_for != func_index) return,
         }
         // We were waiting for `mir`, so we will restart the linker thread.
         q.state = .running;
     }
-    assert(mir.status.load(.monotonic) != .pending);
+    assert(mir.status.load(.acquire) != .pending);
     comp.thread_pool.spawnWgId(&comp.link_task_wait_group, flushTaskQueue, .{ q, comp });
 }
 
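A note on the ordering change: `.monotonic` only makes the status load itself atomic, while `.acquire` additionally pairs with the release store that published the MIR, so every write the codegen worker made to the MIR before flipping the status is guaranteed visible to the link thread. A minimal, self-contained sketch of that pairing, using hypothetical names (`Shared`, `producer`, `payload`) rather than the compiler's real types, and assuming the publishing side uses a `.release` store:

const std = @import("std");

const Status = enum(u8) { pending, ready };

const Shared = struct {
    status: std.atomic.Value(Status) = std.atomic.Value(Status).init(.pending),
    payload: u32 = 0,
};

fn producer(s: *Shared) void {
    s.payload = 42; // plain write to the payload...
    s.status.store(.ready, .release); // ...published by the release store
}

pub fn main() !void {
    var s: Shared = .{};
    const t = try std.Thread.spawn(.{}, producer, .{&s});
    defer t.join();
    // Once the acquire load observes `.ready`, the write to `payload` made
    // before the release store is guaranteed visible here too; a `.monotonic`
    // load would provide no such guarantee.
    while (s.status.load(.acquire) == .pending) std.atomic.spinLoopHint();
    std.debug.assert(s.payload == 42);
}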
@@ -170,8 +170,8 @@ pub fn enqueueZcu(q: *Queue, comp: *Compilation, task: ZcuTask) Allocator.Error!
         .finished => if (q.pending_prelink_tasks != 0) return,
     }
     // Restart the linker thread, unless it would immediately be blocked
-    if (task == .link_func and task.link_func.mir.status.load(.monotonic) == .pending) {
-        q.state = .{ .wait_for_mir = task.link_func.mir };
+    if (task == .link_func and task.link_func.mir.status.load(.acquire) == .pending) {
+        q.state = .{ .wait_for_mir = task.link_func.func };
         return;
     }
     q.state = .running;
@@ -243,12 +243,12 @@ fn flushTaskQueue(tid: usize, q: *Queue, comp: *Compilation) void {
         if (task != .link_func) break :pending;
         const status_ptr = &task.link_func.mir.status;
         // First check without the mutex to optimize for the common case where MIR is ready.
-        if (status_ptr.load(.monotonic) != .pending) break :pending;
+        if (status_ptr.load(.acquire) != .pending) break :pending;
         q.mutex.lock();
         defer q.mutex.unlock();
-        if (status_ptr.load(.monotonic) != .pending) break :pending;
+        if (status_ptr.load(.acquire) != .pending) break :pending;
         // We will stop for now, and get restarted once this MIR is ready.
-        q.state = .{ .wait_for_mir = task.link_func.mir };
+        q.state = .{ .wait_for_mir = task.link_func.func };
         q.flush_safety.unlock();
         return;
     }
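The surrounding logic is a check/lock/recheck pattern: a lock-free load first for the common case where the MIR is already ready, then a second load under `q.mutex` so that a racing `mirReady` cannot be lost; once `.wait_for_mir` is recorded under the mutex, any later `mirReady` for that function will observe it and restart the thread. A self-contained sketch of the shape of this pattern, under hypothetical names (`tryConsume`, `publish`, `parked`) rather than the queue's real API:

const std = @import("std");

const Status = enum(u8) { pending, ready };

var status = std.atomic.Value(Status).init(.pending);
var mutex: std.Thread.Mutex = .{};
var parked: bool = false; // stand-in for `q.state = .{ .wait_for_mir = ... }`

// Consumer side: cheap lock-free check first; recheck under the mutex so a
// concurrent `publish` cannot land between the check and the decision to park.
fn tryConsume() bool {
    if (status.load(.acquire) != .pending) return true; // common case, no lock
    mutex.lock();
    defer mutex.unlock();
    if (status.load(.acquire) != .pending) return true; // recheck under lock
    parked = true; // recorded under the mutex; the producer will see it
    return false;
}

// Producer side: publish with release, then take the mutex to wake a parked consumer.
fn publish() void {
    status.store(.ready, .release);
    mutex.lock();
    defer mutex.unlock();
    if (parked) {
        parked = false;
        // ...restart the consumer here (spawn a task, signal, etc.)...
    }
}

pub fn main() void {
    std.debug.assert(!tryConsume()); // nothing published yet: we park
    publish();
    std.debug.assert(tryConsume()); // now succeeds on the lock-free fast path
}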
@@ -273,6 +273,7 @@ const std = @import("std");
 const assert = std.debug.assert;
 const Allocator = std.mem.Allocator;
 const Compilation = @import("../Compilation.zig");
+const InternPool = @import("../InternPool.zig");
 const link = @import("../link.zig");
 const PrelinkTask = link.PrelinkTask;
 const ZcuTask = link.ZcuTask;