
commit 5ce40e61 (parent cf4a2099)
Author: Michael Dusan
added: 21, removed: 21, total 0
bsd: debitrot AtomicOrder renames

  • complete std.builtin.AtomicOrder renames that were missed in 6067d39522f
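
For orientation, every hunk below is the same mechanical change: std.builtin.AtomicOrder values move from CamelCase to snake_case (.Unordered -> .unordered, .Monotonic -> .monotonic, .Acquire -> .acquire, .Release -> .release, .AcqRel -> .acq_rel, .SeqCst -> .seq_cst). A minimal before/after sketch of a call site, using a hypothetical counter variable (this note and the sketches between the file diffs are editorial, not part of the commit):

const std = @import("std");

var counter: usize = 0; // container-level var so @atomicLoad can take its address

test "AtomicOrder is now snake_case" {
    // pre-rename spelling, no longer compiles: @atomicLoad(usize, &counter, .Acquire)
    const v = @atomicLoad(usize, &counter, .acquire);
    try std.testing.expectEqual(@as(usize, 0), v);
}
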
lib/compiler_rt/emutls.zig
@@ -246,7 +246,7 @@ const emutls_control = extern struct {
         // Two threads could race against the same emutls_control.
 
         // Use atomic for reading coherent value lockless.
-        const index_lockless = @atomicLoad(usize, &self.object.index, .Acquire);
+        const index_lockless = @atomicLoad(usize, &self.object.index, .acquire);
 
         if (index_lockless != 0) {
             // index is already initialized, return it.
@@ -264,7 +264,7 @@ const emutls_control = extern struct {
         }
 
         // Store a new index atomically (for having coherent index_lockless reading).
-        @atomicStore(usize, &self.object.index, emutls_control.next_index, .Release);
+        @atomicStore(usize, &self.object.index, emutls_control.next_index, .release);
 
         // Increment the next available index
         emutls_control.next_index += 1;
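
The two hunks above are the halves of a lockless double-checked read: the acquire load pairs with the release store, so a thread that observes a nonzero index also observes the initialization that preceded the store. A condensed sketch of that pairing (global mutex and per-object bookkeeping elided; index and next_index stand in for self.object.index and emutls_control.next_index):

var index: usize = 0;
var next_index: usize = 1;

fn getIndex() usize {
    // Fast path: lockless acquire load, paired with the release store below.
    const cached = @atomicLoad(usize, &index, .acquire);
    if (cached != 0) return cached;

    // Slow path: in emutls this section runs under a global mutex.
    const new_index = next_index;
    @atomicStore(usize, &index, new_index, .release);
    next_index += 1;
    return new_index;
}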
 
lib/std/Thread/Futex.zig
@@ -801,7 +801,7 @@ const PosixImpl = struct {
             assert(std.c.pthread_mutex_lock(&bucket.mutex) == .SUCCESS);
             defer assert(std.c.pthread_mutex_unlock(&bucket.mutex) == .SUCCESS);
 
-            cancelled = ptr.load(.Monotonic) != expect;
+            cancelled = ptr.load(.monotonic) != expect;
             if (cancelled) {
                 return;
             }
@@ -855,14 +855,14 @@ const PosixImpl = struct {
         // The pending count increment in wait() must also now use seq_cst for the update + this pending load
         // to be in the same modification order as our load isn't using release/acquire to guarantee it.
         bucket.pending.fence(.seq_cst);
-        if (bucket.pending.load(.Monotonic) == 0) {
+        if (bucket.pending.load(.monotonic) == 0) {
             return;
         }
 
         // Keep a list of all the waiters notified and wake then up outside the mutex critical section.
         var notified = WaitList{};
         defer if (notified.len > 0) {
-            const pending = bucket.pending.fetchSub(notified.len, .Monotonic);
+            const pending = bucket.pending.fetchSub(notified.len, .monotonic);
             assert(pending >= notified.len);
 
             while (notified.pop()) |waiter| {
@@ -875,7 +875,7 @@ const PosixImpl = struct {
             defer assert(std.c.pthread_mutex_unlock(&bucket.mutex) == .SUCCESS);
 
             // Another pending check again to avoid the WaitQueue lookup if not necessary.
-            if (bucket.pending.load(.Monotonic) > 0) {
+            if (bucket.pending.load(.monotonic) > 0) {
                 notified = WaitQueue.remove(&bucket.treap, address, max_waiters);
             }
         }
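
The seq_cst comment in the second hunk is the subtle part: wake() wants to skip the WaitQueue lookup when nothing is pending, but its load is only monotonic, so a seq_cst fence (paired with a seq_cst increment in wait()) puts both accesses into one total order instead of relying on an acquire/release pair. A stripped-down sketch of that shape, with illustrative names rather than the Futex internals:

const std = @import("std");

var pending = std.atomic.Value(usize).init(0);

// wait() side: a seq_cst update makes the increment part of the single
// total order that the fence below participates in.
fn waiterSide() void {
    _ = pending.fetchAdd(1, .seq_cst);
}

// wake() side: fence + monotonic load instead of an acquire/release pair.
fn wakerSide() bool {
    pending.fence(.seq_cst);
    return pending.load(.monotonic) > 0;
}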
 
lib/std/Thread/RwLock.zig
@@ -375,5 +375,5 @@ test "concurrent access" {
 
     try testing.expectEqual(num_writes, runner.writes);
 
-    //std.debug.print("reads={}\n", .{ runner.reads.load(.Unordered)});
+    //std.debug.print("reads={}\n", .{ runner.reads.load(.unordered)});
 }
 
lib/std/atomic.zig
@@ -159,7 +159,7 @@ test Value {
                 // acquire ensures count decrement and code before
                 // previous unrefs()s happens-before we call dropFn
                 // below.
-                // Another alternative is to use .AcqRel on the
+                // Another alternative is to use .acq_rel on the
                 // fetchSub count decrement but it's extra barrier in
                 // possibly hot path.
                 rc.count.fence(.acquire);
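
The comment being touched here weighs an acquire fence against an .acq_rel decrement in a reference count. A trimmed sketch of the RefCount shape this test exercises (the real test also spawns threads; this is the shape only):

const std = @import("std");

const RefCount = struct {
    count: std.atomic.Value(usize),
    dropFn: *const fn (*RefCount) void,

    fn unref(rc: *RefCount) void {
        // .release: everything before unref() happens-before the decrement.
        if (rc.count.fetchSub(1, .release) == 1) {
            // An acquire fence here, rather than .acq_rel on the fetchSub,
            // keeps the extra barrier off the hot non-final path.
            rc.count.fence(.acquire);
            (rc.dropFn)(rc);
        }
    }
};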
 
src/Air.zig
@@ -727,11 +727,11 @@ pub const Inst = struct {
         /// Result type is always `void`.
         /// Uses the `bin_op` field. LHS is pointer, RHS is element.
         atomic_store_unordered,
-        /// Same as `atomic_store_unordered` but with `AtomicOrder.Monotonic`.
+        /// Same as `atomic_store_unordered` but with `AtomicOrder.monotonic`.
         atomic_store_monotonic,
-        /// Same as `atomic_store_unordered` but with `AtomicOrder.Release`.
+        /// Same as `atomic_store_unordered` but with `AtomicOrder.release`.
         atomic_store_release,
-        /// Same as `atomic_store_unordered` but with `AtomicOrder.SeqCst`.
+        /// Same as `atomic_store_unordered` but with `AtomicOrder.seq_cst`.
         atomic_store_seq_cst,
         /// Atomically read-modify-write via a pointer.
         /// Result type is the element type of the pointer.
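
These four tags bake the store's ordering into the instruction itself. A hypothetical helper (not present in the compiler) makes the mapping explicit; .acquire and .acq_rel are not legal orderings for a store, so they get no tag:

fn atomicStoreTag(order: std.builtin.AtomicOrder) Inst.Tag {
    return switch (order) {
        .unordered => .atomic_store_unordered,
        .monotonic => .atomic_store_monotonic,
        .release => .atomic_store_release,
        .seq_cst => .atomic_store_seq_cst,
        .acquire, .acq_rel => unreachable, // invalid store orderings
    };
}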
 
src/arch/sparc64/CodeGen.zig
@@ -647,10 +647,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
             .call_never_tail => try self.airCall(inst, .never_tail),
             .call_never_inline => try self.airCall(inst, .never_inline),
 
-            .atomic_store_unordered => @panic("TODO try self.airAtomicStore(inst, .Unordered)"),
-            .atomic_store_monotonic => @panic("TODO try self.airAtomicStore(inst, .Monotonic)"),
-            .atomic_store_release => @panic("TODO try self.airAtomicStore(inst, .Release)"),
-            .atomic_store_seq_cst => @panic("TODO try self.airAtomicStore(inst, .SeqCst)"),
+            .atomic_store_unordered => @panic("TODO try self.airAtomicStore(inst, .unordered)"),
+            .atomic_store_monotonic => @panic("TODO try self.airAtomicStore(inst, .monotonic)"),
+            .atomic_store_release => @panic("TODO try self.airAtomicStore(inst, .release)"),
+            .atomic_store_seq_cst => @panic("TODO try self.airAtomicStore(inst, .seq_cst)"),
 
             .struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
             .struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1),