Added spinlock #2

Merged
zack merged 1 commit from spinlock into master 2026-03-25 23:49:09 +00:00

View File

@@ -2,6 +2,10 @@ package levsync
import "base:intrinsics"
// ---------------------------------------------------------------------------------------------------------------------
// ----- Atomic Float Ops ------------------------
// ---------------------------------------------------------------------------------------------------------------------
@(private)
Flop :: enum {
	Add,
@@ -96,6 +100,30 @@ atomic_div_float :: #force_inline proc "contextless" (
	return atomic_float_op_cas(dst, val, .Divide, ORDER)
}
// ---------------------------------------------------------------------------------------------------------------------
// ----- Spinlock ------------------------
// ---------------------------------------------------------------------------------------------------------------------
// Spinlock for when you need manual control over trying to acquire. If you always loop try_lock then
// Odin `core:sync -> Ticket_Mutex` is probably a better fit.
//
// false = unlocked, true = locked. Zero value is an unlocked lock.
Spinlock :: distinct bool

// Returns true if the lock was successfully acquired, false otherwise.
//
// Uses the *strong* compare-exchange on purpose: the weak variant is allowed to
// fail spuriously even when the lock is free, which would make a single
// (non-looping) try_lock call report "held" for an unlocked lock. Strong CAS
// guarantees a false return only happens when another thread actually holds it.
spinlock_try_lock :: #force_inline proc "contextless" (lock: ^Spinlock) -> bool {
	// Success ordering .Acq_Rel: the acquire half orders the critical section
	// after this CAS and pairs with the .Release store in spinlock_unlock.
	// Failure ordering .Relaxed: we publish nothing when we don't get the lock.
	// The previous value (first result) is not needed — only whether we won.
	_, lock_acquired := intrinsics.atomic_compare_exchange_strong_explicit(
		lock,
		false,
		true,
		.Acq_Rel,
		.Relaxed,
	)
	return lock_acquired
}
// Releases the lock.
//
// A plain store with .Release ordering suffices: it pairs with the acquire
// side of the CAS in spinlock_try_lock, making every write performed inside
// the critical section visible to the next thread that acquires the lock.
// NOTE(review): assumes the caller actually holds the lock — unlocking an
// unheld lock is not detected and would silently corrupt mutual exclusion.
spinlock_unlock :: #force_inline proc "contextless" (lock: ^Spinlock) {
	intrinsics.atomic_store_explicit(lock, false, .Release)
}
// ---------------------------------------------------------------------------------------------------------------------
// ----- Tests ------------------------
// ---------------------------------------------------------------------------------------------------------------------
@@ -389,3 +417,109 @@ test_atomic_release_acquire_publish_visibility :: proc(t: ^testing.T) {
		testing.expectf(t, rd.data_valid, "Reader %d saw flag but data was not visible (memory ordering bug)", i)
	}
}
@(test)
test_spinlock_try_lock_mutual_exclusion :: proc(t: ^testing.T) {
	// Stress test for spinlock_try_lock: N threads spin-acquire the lock and
	// perform a deliberate non-atomic read-modify-write on shared data.
	//
	// If mutual exclusion holds:
	//   - `counter` ends at exactly NUM_THREADS * ITERATIONS_PER_THREAD
	//   - `concurrent_holders` never exceeds 1
	//
	// A multi-step RMW (read → relax → write) widens the critical section so
	// any failure to exclude is virtually guaranteed to corrupt the counter.

	NUM_THREADS :: 8
	ITERATIONS_PER_THREAD :: 50_000

	Shared :: struct {
		lock: Spinlock,
		// Padding to avoid false sharing between the lock and the data it protects.
		_pad0: [64]u8,
		counter: int,
		// Tracks how many threads believe they hold the lock simultaneously.
		// Must never exceed 1.
		concurrent_holders: int,
		max_holders: int,
		_pad1: [64]u8,
	}

	Thread_Data :: struct {
		shared: ^Shared,
		barrier: ^sync.Barrier,
	}

	shared: Shared
	barrier: sync.Barrier
	sync.barrier_init(&barrier, NUM_THREADS)

	thread_proc :: proc(th: ^thread.Thread) {
		ctx := cast(^Thread_Data)th.data
		s := ctx.shared

		// All threads rendezvous here for maximum contention.
		sync.barrier_wait(ctx.barrier)

		for _ in 0 ..< ITERATIONS_PER_THREAD {
			// Spin on try_lock until we acquire it.
			for !spinlock_try_lock(&s.lock) {
				intrinsics.cpu_relax()
			}

			// --- critical section start ---

			// Atomically bump the holder count so we can detect overlapping holders.
			// atomic_add_explicit returns the PREVIOUS value, hence the +1 below.
			holders := intrinsics.atomic_add_explicit(&s.concurrent_holders, 1, .Relaxed)

			// Track the maximum we ever observed (relaxed is fine, this is
			// purely diagnostic and protected by the spinlock for writes).
			if holders + 1 > s.max_holders {
				s.max_holders = holders + 1
			}

			// Non-atomic RMW: read, spin a tiny bit, then write.
			// This deliberately creates a wide window where a second holder
			// would cause a lost update.
			val := s.counter
			intrinsics.cpu_relax()
			intrinsics.cpu_relax()
			s.counter = val + 1

			// Drop the holder count BEFORE unlocking, so a holder count > 1
			// can only ever mean two threads were inside the lock at once.
			intrinsics.atomic_sub_explicit(&s.concurrent_holders, 1, .Relaxed)
			// --- critical section end ---
			spinlock_unlock(&s.lock)
		}
	}

	// One shared Thread_Data is enough: threads only read it.
	td := Thread_Data{&shared, &barrier}
	threads: [NUM_THREADS]^thread.Thread
	for &th in threads {
		th = thread.create(thread_proc)
		th.data = &td
	}
	for th in threads {
		thread.start(th)
	}
	// Join before reading results: guarantees all workers finished and their
	// writes are visible to this (main) thread before the assertions below.
	for th in threads {
		thread.join(th)
		thread.destroy(th)
	}

	expected := NUM_THREADS * ITERATIONS_PER_THREAD
	testing.expectf(
		t,
		shared.counter == expected,
		"Counter mismatch: got %d, expected %d (mutual exclusion violated — lost updates)",
		shared.counter,
		expected,
	)
	testing.expectf(
		t,
		shared.max_holders == 1,
		"Max concurrent lock holders was %d (expected 1 — lock was held by multiple threads simultaneously)",
		shared.max_holders,
	)
}