Skip to content

Commit

Permalink
update to Zig after #18085 (rework std.atomic) and #18076 (absorb Ite…
Browse files Browse the repository at this point in the history
…rableDir into Dir) (#79)

Also fixes `never mutated var` in a benchmark missed earlier
  • Loading branch information
mitchellh authored Nov 24, 2023
2 parents a5f8b98 + 5da0bfd commit 2ee417a
Show file tree
Hide file tree
Showing 7 changed files with 35 additions and 35 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ jobs:
- name: Install zig
uses: goto-bus-stop/setup-zig@v2
with:
version: 0.12.0-dev.1245+a07f288eb
version: 0.12.0-dev.1710+2bffd8101

- name: test
run: zig build test --summary all
Expand Down
4 changes: 2 additions & 2 deletions build.zig
Original file line number Diff line number Diff line change
Expand Up @@ -201,7 +201,7 @@ fn benchTargets(

// Open the directory
const c_dir_path = (comptime thisDir()) ++ "/src/bench";
var c_dir = try std.fs.openIterableDirAbsolute(c_dir_path, .{});
var c_dir = try std.fs.openDirAbsolute(c_dir_path, .{ .iterate = true });
defer c_dir.close();

// Go through and add each as a step
Expand Down Expand Up @@ -258,7 +258,7 @@ fn exampleTargets(

// Open the directory
const c_dir_path = (comptime thisDir()) ++ "/examples";
var c_dir = try std.fs.openIterableDirAbsolute(c_dir_path, .{});
var c_dir = try std.fs.openDirAbsolute(c_dir_path, .{ .iterate = true });
defer c_dir.close();

// Go through and add each as a step
Expand Down
6 changes: 3 additions & 3 deletions flake.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

50 changes: 25 additions & 25 deletions src/ThreadPool.zig
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ const ThreadPool = @This();

const std = @import("std");
const assert = std.debug.assert;
const Atomic = std.atomic.Atomic;
const Atomic = std.atomic.Value;

stack_size: u32,
max_threads: u32,
Expand Down Expand Up @@ -192,7 +192,7 @@ noinline fn notifySlow(self: *ThreadPool, is_waking: bool) void {

// Release barrier synchronizes with Acquire in wait()
// to ensure pushes to run queues happen before observing a posted notification.
sync = @bitCast(self.sync.tryCompareAndSwap(
sync = @bitCast(self.sync.cmpxchgWeak(
@bitCast(sync),
@bitCast(new_sync),
.Release,
Expand Down Expand Up @@ -235,7 +235,7 @@ noinline fn wait(self: *ThreadPool, _is_waking: bool) error{Shutdown}!bool {

// Acquire barrier synchronizes with notify()
// to ensure that pushes to run queue are observed after wait() returns.
sync = @bitCast(self.sync.tryCompareAndSwap(
sync = @bitCast(self.sync.cmpxchgWeak(
@bitCast(sync),
@bitCast(new_sync),
.Acquire,
Expand All @@ -252,7 +252,7 @@ noinline fn wait(self: *ThreadPool, _is_waking: bool) error{Shutdown}!bool {
if (is_waking)
new_sync.state = .pending;

sync = @bitCast(self.sync.tryCompareAndSwap(
sync = @bitCast(self.sync.cmpxchgWeak(
@bitCast(sync),
@bitCast(new_sync),
.Monotonic,
Expand Down Expand Up @@ -282,7 +282,7 @@ pub noinline fn shutdown(self: *ThreadPool) void {
new_sync.idle = 0;

// Full barrier to synchronize with both wait() and notify()
sync = @bitCast(self.sync.tryCompareAndSwap(
sync = @bitCast(self.sync.cmpxchgWeak(
@bitCast(sync),
@bitCast(new_sync),
.AcqRel,
Expand All @@ -301,7 +301,7 @@ fn register(noalias self: *ThreadPool, noalias thread: *Thread) void {
var threads = self.threads.load(.Monotonic);
while (true) {
thread.next = threads;
threads = self.threads.tryCompareAndSwap(
threads = self.threads.cmpxchgWeak(
threads,
thread,
.Release,
Expand Down Expand Up @@ -455,14 +455,14 @@ const Event = struct {
// Acquire barrier to ensure operations before the shutdown() are seen after the wait().
// Shutdown is rare so it's better to have an Acquire barrier here instead of on CAS failure + load which are common.
if (state == SHUTDOWN) {
std.atomic.fence(.Acquire);
@fence(.Acquire);
return;
}

// Consume a notification when it pops up.
// Acquire barrier to ensure operations before the notify() appear after the wait().
if (state == NOTIFIED) {
state = self.state.tryCompareAndSwap(
state = self.state.cmpxchgWeak(
state,
acquire_with,
.Acquire,
Expand All @@ -473,7 +473,7 @@ const Event = struct {

// There is no notification to consume, we should wait on the event by ensuring its WAITING.
if (state != WAITING) blk: {
state = self.state.tryCompareAndSwap(
state = self.state.cmpxchgWeak(
state,
WAITING,
.Monotonic,
Expand Down Expand Up @@ -556,7 +556,7 @@ const Node = struct {
new_stack |= (stack & ~PTR_MASK);

// Push to the stack with a release barrier for the consumer to see the proper list links.
stack = self.stack.tryCompareAndSwap(
stack = self.stack.cmpxchgWeak(
stack,
new_stack,
.Release,
Expand All @@ -582,7 +582,7 @@ const Node = struct {

// Acquire barrier on getting the consumer to see cache/Node updates done by previous consumers
// and to ensure our cache/Node updates in pop() happen after that of previous consumers.
stack = self.stack.tryCompareAndSwap(
stack = self.stack.cmpxchgWeak(
stack,
new_stack,
.Acquire,
Expand Down Expand Up @@ -645,7 +645,7 @@ const Node = struct {

fn push(noalias self: *Buffer, noalias list: *List) error{Overflow}!void {
var head = self.head.load(.Monotonic);
var tail = self.tail.loadUnchecked(); // we're the only thread that can change this
var tail = self.tail.raw; // we're the only thread that can change this

while (true) {
var size = tail -% head;
Expand Down Expand Up @@ -677,22 +677,22 @@ const Node = struct {
// Migrating half amortizes the cost of stealing while requiring future pops to still use the buffer.
// Acquire barrier to ensure the linked list creation after the steal only happens after we successfully steal.
var migrate = size / 2;
head = self.head.tryCompareAndSwap(
head = self.head.cmpxchgWeak(
head,
head +% migrate,
.Acquire,
.Monotonic,
) orelse {
// Link the migrated Nodes together
const first = self.array[head % capacity].loadUnchecked();
const first = self.array[head % capacity].raw;
while (migrate > 0) : (migrate -= 1) {
const prev = self.array[head % capacity].loadUnchecked();
const prev = self.array[head % capacity].raw;
head +%= 1;
prev.next = self.array[head % capacity].loadUnchecked();
prev.next = self.array[head % capacity].raw;
}

// Append the list that was supposed to be pushed to the end of the migrated Nodes
const last = self.array[(head -% 1) % capacity].loadUnchecked();
const last = self.array[(head -% 1) % capacity].raw;
last.next = list.head;
list.tail.next = null;

Expand All @@ -705,7 +705,7 @@ const Node = struct {

fn pop(self: *Buffer) ?*Node {
var head = self.head.load(.Monotonic);
const tail = self.tail.loadUnchecked(); // we're the only thread that can change this
const tail = self.tail.raw; // we're the only thread that can change this

while (true) {
// Quick sanity check and return null when not empty
Expand All @@ -717,12 +717,12 @@ const Node = struct {

// Dequeue with an acquire barrier to ensure any writes done to the Node
// only happen after we successfully claim it from the array.
head = self.head.tryCompareAndSwap(
head = self.head.cmpxchgWeak(
head,
head +% 1,
.Acquire,
.Monotonic,
) orelse return self.array[head % capacity].loadUnchecked();
) orelse return self.array[head % capacity].raw;
}
}

Expand All @@ -736,7 +736,7 @@ const Node = struct {
defer queue.releaseConsumer(consumer);

const head = self.head.load(.Monotonic);
const tail = self.tail.loadUnchecked(); // we're the only thread that can change this
const tail = self.tail.raw; // we're the only thread that can change this

const size = tail -% head;
assert(size <= capacity);
Expand All @@ -755,7 +755,7 @@ const Node = struct {
const node = queue.pop(&consumer) orelse blk: {
if (pushed == 0) return null;
pushed -= 1;
break :blk self.array[(tail +% pushed) % capacity].loadUnchecked();
break :blk self.array[(tail +% pushed) % capacity].raw;
};

// Update the array tail with the nodes we pushed to it.
Expand All @@ -769,7 +769,7 @@ const Node = struct {

fn steal(noalias self: *Buffer, noalias buffer: *Buffer) ?Stole {
const head = self.head.load(.Monotonic);
const tail = self.tail.loadUnchecked(); // we're the only thread that can change this
const tail = self.tail.raw; // we're the only thread that can change this

const size = tail -% head;
assert(size <= capacity);
Expand Down Expand Up @@ -805,15 +805,15 @@ const Node = struct {
// - an Acquire barrier to ensure that we only interact with the stolen Nodes after the steal was committed.
// - a Release barrier to ensure that the Nodes are copied above prior to the committing of the steal
// because if they're copied after the steal, they could be getting rewritten by the target's push().
_ = buffer.head.compareAndSwap(
_ = buffer.head.cmpxchgStrong(
buffer_head,
buffer_head +% steal_size,
.AcqRel,
.Monotonic,
) orelse {
// Pop one from the nodes we stole as we'll be returning it
const pushed = steal_size - 1;
const node = self.array[(tail +% pushed) % capacity].loadUnchecked();
const node = self.array[(tail +% pushed) % capacity].raw;

// Update the array tail with the nodes we pushed to it.
// Release barrier to synchronize with Acquire barrier in steal()'s to see the written array Nodes.
Expand Down
2 changes: 1 addition & 1 deletion src/backend/iocp.zig
Original file line number Diff line number Diff line change
Expand Up @@ -1272,7 +1272,7 @@ pub const Operation = union(OperationType) {
},

async_wait: struct {
wakeup: std.atomic.Atomic(bool) = .{ .value = false },
wakeup: std.atomic.Value(bool) = .{ .raw = false },
},

job_object: struct {
Expand Down
2 changes: 1 addition & 1 deletion src/bench/async_pummel_1.zig
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ pub fn run(comptime thread_count: comptime_int) !void {
notifier = try xev.Async.init();
defer notifier.deinit();

var userdata: ?*void = null;
const userdata: ?*void = null;
var c: xev.Completion = undefined;
notifier.wait(&loop, &c, void, userdata, &asyncCallback);

Expand Down
4 changes: 2 additions & 2 deletions src/build/ScdocStep.zig
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ fn make(step: *std.build.Step, progress: *std.Progress.Node) !void {
}

// Find all our man pages which are in our src path ending with ".scd".
var dir = try fs.openIterableDirAbsolute(self.src_path, .{});
var dir = try fs.openDirAbsolute(self.src_path, .{ .iterate = true });
defer dir.close();

var iter = dir.iterate();
Expand Down Expand Up @@ -138,7 +138,7 @@ const InstallStep = struct {
}

// Find all our man pages which are in our src path ending with ".scd".
var dir = try fs.openIterableDirAbsolute(path, .{});
var dir = try fs.openDirAbsolute(path, .{ .iterate = true });
defer dir.close();
var iter = dir.iterate();
while (try iter.next()) |*entry| {
Expand Down

0 comments on commit 2ee417a

Please sign in to comment.