In the beginning...

This commit is contained in:
Zachary Levy
2026-03-08 19:00:41 -07:00
commit f10bf7e3c3
21 changed files with 7536 additions and 0 deletions

7
.gitignore vendored Normal file
View File

@@ -0,0 +1,7 @@
# Executables
*.bin
*.exe
out
# Debug
/debug

12
.zed/debug.json Normal file
View File

@@ -0,0 +1,12 @@
[
{
"label": "Debug",
"program": "out/debug/debug",
"adapter": "CodeLLDB",
"request": "launch",
"build": {
"command": "odin build debug -debug -out=out/debug/debug"
},
"cwd": "$ZED_WORKTREE_ROOT"
}
]

41
.zed/tasks.json Normal file
View File

@@ -0,0 +1,41 @@
[
// ---------------------------------------------------------------------------------------------------------------------
// ----- Odin Tests ------------------------
// ---------------------------------------------------------------------------------------------------------------------
{
"label": "Test many_bits",
"command": "odin test many_bits -out=out/debug/test_many_bits",
"cwd": "$ZED_WORKTREE_ROOT"
},
{
"label": "Test ring",
"command": "odin test ring -out=out/debug/test_ring",
"cwd": "$ZED_WORKTREE_ROOT"
},
{
"label": "Test levsort",
"command": "odin test levsort -out=out/debug/test_levsort",
"cwd": "$ZED_WORKTREE_ROOT"
},
{
"label": "Test levsync",
"command": "odin test levsync -out=out/debug/test_levsync",
"cwd": "$ZED_WORKTREE_ROOT"
},
// ---------------------------------------------------------------------------------------------------------------------
// ----- LMDB Examples ------------------------
// ---------------------------------------------------------------------------------------------------------------------
{
"label": "Run lmdb example",
"command": "odin run vendor/lmdb/examples -debug -out=out/debug/lmdb-examples",
"cwd": "$ZED_WORKTREE_ROOT"
},
// ---------------------------------------------------------------------------------------------------------------------
// ----- Other ------------------------
// ---------------------------------------------------------------------------------------------------------------------
{
"label": "Run debug",
"command": "odin run debug -debug -out=out/debug/debug",
"cwd": "$ZED_WORKTREE_ROOT"
}
]

3
README.md Normal file
View File

@@ -0,0 +1,3 @@
# LevLib
Narya + BFPOWER unified Odin library collection.

63
basic/basic.odin Normal file
View File

@@ -0,0 +1,63 @@
package basic
import "base:runtime"
import "core:prof/spall"
//----- Spall ----------------------------------
SPALL_TRACE :: #config(SPALL_TRACE, false)
spall_ctx: spall.Context
@(thread_local)
spall_buffer: spall.Buffer
//----- Compile globals ----------------------------------
ODIN_BOUNDS_CHECK :: !ODIN_NO_BOUNDS_CHECK
INT_NUM_BITS :: size_of(int) * 8
//----- Array ----------------------------------
// Appends `arg` to `array` only if it already has capacity for it.
// With bounds checking enabled, a full array panics instead of silently
// reallocating; with -no-bounds-check the guard compiles away and this
// behaves exactly like `append_elem`.
append_elem_capped :: #force_inline proc(
	array: ^$T/[dynamic]$E,
	arg: E,
	loc := #caller_location,
) -> (
	n: int,
	err: runtime.Allocator_Error,
) #optional_allocator_error {
	when ODIN_BOUNDS_CHECK {
		// Appending to a full array would force the backing buffer to grow.
		if len(array) == cap(array) {
			panic("Array would have to expand to accommodate append.")
		}
	}
	return append_elem(array, arg, loc)
}
// #soa counterpart of `append_elem_capped`: appends `arg` only if `array`
// already has capacity. With bounds checking enabled, a full array panics
// instead of silently reallocating; with -no-bounds-check the guard compiles
// away and this behaves exactly like `append_soa_elem`.
append_soa_elem_capped :: proc(
	array: ^$T/#soa[dynamic]$E,
	arg: E,
	loc := #caller_location,
) -> (
	n: int,
	err: runtime.Allocator_Error,
) #optional_allocator_error {
	when ODIN_BOUNDS_CHECK {
		// Appending to a full array would force the backing buffers to grow.
		if len(array) == cap(array) {
			panic("Array would have to expand to accommodate append.")
		}
	}
	return append_soa_elem(array, arg, loc)
}
// Proc group: capacity-capped append for both regular and #soa dynamic arrays.
append_capped :: proc {
	append_elem_capped,
	append_soa_elem_capped,
}

2439
levsort/levsort.odin Normal file

File diff suppressed because it is too large Load Diff

391
levsync/levsync.odin Normal file
View File

@@ -0,0 +1,391 @@
package levsync
import "base:intrinsics"
// Floating-point operations supported by `atomic_float_op_cas`.
@(private)
Flop :: enum {
	Add,
	Subtract,
	Multiply,
	Divide,
}
// Returns the value at `dst` that was atomically replaced by the result of the operation.
//
// Implemented as a compare-and-swap loop on the float's bit pattern: the
// current value is loaded, the operation applied, and the result installed
// only if `dst` still holds the exact bits that were read; otherwise the
// loop retries with a fresh load.
@(private)
atomic_float_op_cas :: #force_inline proc "contextless" (
	dst: ^$FLOAT,
	val: FLOAT,
	$OP: Flop,
	$ORDER: intrinsics.Atomic_Memory_Order,
) -> FLOAT where intrinsics.type_is_float(FLOAT) {
	// View the destination as a same-sized unsigned integer so the CAS
	// compares raw bits rather than float values (NaN != NaN, -0.0 == 0.0).
	when FLOAT == f16 {
		dst_i := cast(^u16)(dst)
	} else when FLOAT == f32 {
		dst_i := cast(^u32)(dst)
	} else when FLOAT == f64 {
		dst_i := cast(^u64)(dst)
	} else {
		#panic("atomic_float_op only supports f16, f32, and f64")
	}
	for {
		old_f := intrinsics.atomic_load_explicit(dst, .Relaxed)
		// OP is a compile-time parameter, so exactly one branch is emitted.
		when OP == .Add {
			new_f := old_f + val
		} else when OP == .Subtract {
			new_f := old_f - val
		} else when OP == .Multiply {
			new_f := old_f * val
		} else when OP == .Divide {
			new_f := old_f / val
		} else {
			#panic("Flop support not yet added for operation. This should never happen.")
		}
		when FLOAT == f16 {
			old_i := transmute(u16)old_f
			new_i := transmute(u16)new_f
		} else when FLOAT == f32 {
			old_i := transmute(u32)old_f
			new_i := transmute(u32)new_f
		} else when FLOAT == f64 {
			old_i := transmute(u64)old_f
			new_i := transmute(u64)new_f
		}
		// Setting order of compare exchange success alone guarantees overall order of the flop.
		// (Failed attempts never publish anything, so they can stay Relaxed.)
		_, ok := intrinsics.atomic_compare_exchange_weak_explicit(dst_i, old_i, new_i, ORDER, .Relaxed)
		if ok do return old_f
	}
}
// Returns the value at `dst` that was atomically replaced by the result of the operation.
// The value stored is `old + val`; the value returned is `old`.
atomic_add_float :: #force_inline proc "contextless" (
	dst: ^$FLOAT,
	val: FLOAT,
	$ORDER: intrinsics.Atomic_Memory_Order,
) -> FLOAT where intrinsics.type_is_float(FLOAT) {
	return atomic_float_op_cas(dst, val, .Add, ORDER)
}
// Returns the value at `dst` that was atomically replaced by the result of the operation.
// The value stored is `old - val`; the value returned is `old`.
atomic_sub_float :: #force_inline proc "contextless" (
	dst: ^$FLOAT,
	val: FLOAT,
	$ORDER: intrinsics.Atomic_Memory_Order,
) -> FLOAT where intrinsics.type_is_float(FLOAT) {
	return atomic_float_op_cas(dst, val, .Subtract, ORDER)
}
// Returns the value at `dst` that was atomically replaced by the result of the operation.
// The value stored is `old * val`; the value returned is `old`.
atomic_mul_float :: #force_inline proc "contextless" (
	dst: ^$FLOAT,
	val: FLOAT,
	$ORDER: intrinsics.Atomic_Memory_Order,
) -> FLOAT where intrinsics.type_is_float(FLOAT) {
	return atomic_float_op_cas(dst, val, .Multiply, ORDER)
}
// Returns the value at `dst` that was atomically replaced by the result of the operation.
// The value stored is `old / val`; the value returned is `old`.
atomic_div_float :: #force_inline proc "contextless" (
	dst: ^$FLOAT,
	val: FLOAT,
	$ORDER: intrinsics.Atomic_Memory_Order,
) -> FLOAT where intrinsics.type_is_float(FLOAT) {
	return atomic_float_op_cas(dst, val, .Divide, ORDER)
}
// ---------------------------------------------------------------------------------------------------------------------
// ----- Tests ------------------------
// ---------------------------------------------------------------------------------------------------------------------
import "core:sync"
import "core:testing"
import "core:thread"
@(test)
test_concurrent_atomic_add_no_lost_updates :: proc(t: ^testing.T) {
	// Several threads each add 1.0 a fixed number of times; if the CAS loop
	// ever loses an update, the final sum falls short of the expected total.
	// (f64 represents these integer counts exactly, so arithmetic is lossless.)
	NUM_THREADS :: 8
	ITERATIONS_PER_THREAD :: 10_000
	Thread_Data :: struct {
		value:   ^f64,
		barrier: ^sync.Barrier,
	}
	thread_proc :: proc(th: ^thread.Thread) {
		data := cast(^Thread_Data)th.data
		// All workers start together so they actually contend.
		sync.barrier_wait(data.barrier)
		for _ in 0 ..< ITERATIONS_PER_THREAD {
			atomic_add_float(data.value, 1.0, .Relaxed)
		}
	}
	accumulator: f64
	start_gate: sync.Barrier
	sync.barrier_init(&start_gate, NUM_THREADS)
	shared := Thread_Data{&accumulator, &start_gate}
	workers: [NUM_THREADS]^thread.Thread
	for i in 0 ..< NUM_THREADS {
		workers[i] = thread.create(thread_proc)
		workers[i].data = &shared
	}
	for worker in workers do thread.start(worker)
	for worker in workers {
		thread.join(worker)
		thread.destroy(worker)
	}
	testing.expect_value(t, accumulator, f64(NUM_THREADS * ITERATIONS_PER_THREAD))
}
// Mirror of the add test: counts down instead of up.
@(test)
test_concurrent_atomic_sub_no_lost_updates :: proc(t: ^testing.T) {
	// Start with a known value, multiple threads subtract.
	// If any updates are lost due to race conditions, the final result will be wrong.
	// f64 represents these integer counts exactly, so the arithmetic is lossless.
	NUM_THREADS :: 8
	ITERATIONS_PER_THREAD :: 10_000
	shared_value: f64 = f64(NUM_THREADS * ITERATIONS_PER_THREAD)
	barrier: sync.Barrier
	sync.barrier_init(&barrier, NUM_THREADS)
	Thread_Data :: struct {
		value:   ^f64,
		barrier: ^sync.Barrier,
	}
	thread_proc :: proc(th: ^thread.Thread) {
		ctx := cast(^Thread_Data)th.data
		// Wait for all threads to be ready before starting
		sync.barrier_wait(ctx.barrier)
		for _ in 0 ..< ITERATIONS_PER_THREAD {
			atomic_sub_float(ctx.value, 1.0, .Relaxed)
		}
	}
	thread_data := Thread_Data{&shared_value, &barrier}
	threads: [NUM_THREADS]^thread.Thread
	for &th in threads {
		th = thread.create(thread_proc)
		th.data = &thread_data
	}
	for th in threads {
		thread.start(th)
	}
	for th in threads {
		thread.join(th)
		thread.destroy(th)
	}
	// Every subtraction must land exactly once for the counter to reach zero.
	testing.expect_value(t, shared_value, 0.0)
}
@(test)
test_concurrent_atomic_mul_div_round_trip :: proc(t: ^testing.T) {
	// Each thread multiplies by 2.0 then divides by 2.0.
	// Since these are inverses, the final value should equal the starting value
	// regardless of how operations interleave.
	// Multiplying/dividing by a power of two only adjusts the exponent, so each
	// step is exact in f64, and at most NUM_THREADS multiplies can be pending at
	// any instant (each thread has at most one un-divided multiply), so the
	// value stays far from overflow.
	NUM_THREADS :: 8
	ITERATIONS_PER_THREAD :: 10_000
	shared_value: f64 = 1000.0 // Start with a value that won't underflow/overflow
	barrier: sync.Barrier
	sync.barrier_init(&barrier, NUM_THREADS)
	Thread_Data :: struct {
		value:   ^f64,
		barrier: ^sync.Barrier,
	}
	thread_proc :: proc(th: ^thread.Thread) {
		ctx := cast(^Thread_Data)th.data
		// Wait for all threads to be ready before starting
		sync.barrier_wait(ctx.barrier)
		for _ in 0 ..< ITERATIONS_PER_THREAD {
			atomic_mul_float(ctx.value, 2.0, .Relaxed)
			atomic_div_float(ctx.value, 2.0, .Relaxed)
		}
	}
	thread_data := Thread_Data{&shared_value, &barrier}
	threads: [NUM_THREADS]^thread.Thread
	for &th in threads {
		th = thread.create(thread_proc)
		th.data = &thread_data
	}
	for th in threads {
		thread.start(th)
	}
	for th in threads {
		thread.join(th)
		thread.destroy(th)
	}
	testing.expect_value(t, shared_value, 1000.0)
}
@(test)
test_atomic_add_with_f32 :: proc(t: ^testing.T) {
	// Verify the f32 type dispatch works correctly under contention.
	// Same approach as the f64 add test but with f32.
	// f32 represents integers exactly up to 2^24 (16,777,216), well above the
	// 80,000 total, so any shortfall indicates a lost update rather than rounding.
	NUM_THREADS :: 8
	ITERATIONS_PER_THREAD :: 10_000
	shared_value: f32 = 0.0
	barrier: sync.Barrier
	sync.barrier_init(&barrier, NUM_THREADS)
	Thread_Data :: struct {
		value:   ^f32,
		barrier: ^sync.Barrier,
	}
	thread_proc :: proc(th: ^thread.Thread) {
		ctx := cast(^Thread_Data)th.data
		// Wait for all threads to be ready before starting
		sync.barrier_wait(ctx.barrier)
		for _ in 0 ..< ITERATIONS_PER_THREAD {
			atomic_add_float(ctx.value, 1.0, .Relaxed)
		}
	}
	thread_data := Thread_Data{&shared_value, &barrier}
	threads: [NUM_THREADS]^thread.Thread
	for &th in threads {
		th = thread.create(thread_proc)
		th.data = &thread_data
	}
	for th in threads {
		thread.start(th)
	}
	for th in threads {
		thread.join(th)
		thread.destroy(th)
	}
	expected := f32(NUM_THREADS * ITERATIONS_PER_THREAD)
	testing.expect_value(t, shared_value, expected)
}
@(test)
test_atomic_release_acquire_publish_visibility :: proc(t: ^testing.T) {
	// Tests that the memory order passed to atomic_float_op's CAS success condition
	// provides full ordering guarantees for the entire float operation.
	//
	// Both sides use atomic_add_float (not raw intrinsics) to verify:
	// - Release on CAS success publishes prior non-atomic writes
	// - Acquire on CAS success makes those writes visible to the reader
	//
	// NOTE: This test may pass even with Relaxed ordering on x86 due to its strong memory model.
	// On ARM or other weak-memory architectures, using Relaxed here would likely cause failures.
	NUM_READERS :: 4
	Shared_State :: struct {
		flag:           f64,
		// Padding to avoid false sharing between flag and data
		_padding:       [64]u8,
		published_data: [4]int,
	}
	shared: Shared_State
	barrier: sync.Barrier
	sync.barrier_init(&barrier, NUM_READERS + 1) // +1 for writer
	Reader_Data :: struct {
		shared:     ^Shared_State,
		barrier:    ^sync.Barrier,
		saw_data:   bool,
		data_valid: bool,
	}
	Writer_Data :: struct {
		shared:  ^Shared_State,
		barrier: ^sync.Barrier,
	}
	writer_proc :: proc(th: ^thread.Thread) {
		ctx := cast(^Writer_Data)th.data
		sync.barrier_wait(ctx.barrier)
		// Write data that readers will verify
		ctx.shared.published_data[0] = 42
		ctx.shared.published_data[1] = 43
		ctx.shared.published_data[2] = 44
		ctx.shared.published_data[3] = 45
		// Release via the float op: CAS success ordering must publish all writes above
		atomic_add_float(&ctx.shared.flag, 1.0, .Release)
	}
	reader_proc :: proc(th: ^thread.Thread) {
		ctx := cast(^Reader_Data)th.data
		sync.barrier_wait(ctx.barrier)
		// Spin using the float op with Acquire ordering.
		// Adding 0.0 is a no-op on the value but exercises the full CAS loop.
		// When the CAS succeeds with Acquire, all writes before the writer's Release must be visible.
		for {
			old := atomic_add_float(&ctx.shared.flag, 0.0, .Acquire)
			if old > 0.0 do break
			intrinsics.cpu_relax()
		}
		// If the CAS success ordering provides full guarantees, we MUST see all published data
		ctx.saw_data = true
		d0 := ctx.shared.published_data[0]
		d1 := ctx.shared.published_data[1]
		d2 := ctx.shared.published_data[2]
		d3 := ctx.shared.published_data[3]
		ctx.data_valid = (d0 == 42 && d1 == 43 && d2 == 44 && d3 == 45)
	}
	writer_data := Writer_Data{&shared, &barrier}
	reader_data: [NUM_READERS]Reader_Data
	for &rd in reader_data {
		rd = Reader_Data{&shared, &barrier, false, false}
	}
	writer_thread := thread.create(writer_proc)
	writer_thread.data = &writer_data
	reader_threads: [NUM_READERS]^thread.Thread
	for &th, i in reader_threads {
		th = thread.create(reader_proc)
		th.data = &reader_data[i]
	}
	thread.start(writer_thread)
	for th in reader_threads {
		thread.start(th)
	}
	thread.join(writer_thread)
	thread.destroy(writer_thread)
	for th in reader_threads {
		thread.join(th)
		thread.destroy(th)
	}
	// Verify all readers saw the data correctly
	for rd, i in reader_data {
		testing.expectf(t, rd.saw_data, "Reader %d didn't observe the flag", i)
		testing.expectf(t, rd.data_valid, "Reader %d saw flag but data was not visible (memory ordering bug)", i)
	}
}

986
many_bits/many_bits.odin Normal file
View File

@@ -0,0 +1,986 @@
package many_bits
import "base:builtin"
import "base:intrinsics"
import "core:fmt"
import "core:slice"
@(private)
ODIN_BOUNDS_CHECK :: !ODIN_NO_BOUNDS_CHECK
// Number of bits in system uint
UINT_NUM_BITS :: size_of(uint) * 8
// Largest value a uint can hold (all bits set); exact in constant arithmetic.
UINT_MAX: uint : 1 << UINT_NUM_BITS - 1
// Power to which 2 is raised to get the size of uint in bits
// For bitshift division which gives index of integer in int_bits_array
INDEX_SHIFT :: uint(intrinsics.count_trailing_zeros(UINT_NUM_BITS))
// Value to & overall index by to get bit position
BIT_POS_MASK :: UINT_NUM_BITS - 1
// One machine word viewed as a bit_set — the storage unit for `Bits`.
Int_Bits :: bit_set[0 ..< UINT_NUM_BITS;uint]
// Fixed-length bit vector backed by a slice of whole words.
// Use `core:container.Bit_Array` if dynamic length is needed.
// This has a more specific purpose.
Bits :: struct {
	int_array: []Int_Bits, // Backing words; bits past `length` in the last word are unused
	length:    int, // Total number of bits being stored
}
// Frees the backing word slice. Pass the same allocator that `make` used.
delete :: proc(using bits: Bits, allocator := context.allocator) {
	delete_slice(int_array, allocator)
}
// Allocates a Bits holding `length` bits, rounded up to whole words, all zero.
// Free with `delete` using the same allocator.
// NOTE(review): assumes length >= 0; a length of 0 yields an empty word slice —
// confirm callers never pass negative lengths.
make :: proc(#any_int length: int, allocator := context.allocator) -> Bits {
	return Bits {
		// ((length - 1) >> INDEX_SHIFT) + 1 == ceil(length / UINT_NUM_BITS) for length >= 1
		int_array = make_slice([]Int_Bits, ((length - 1) >> INDEX_SHIFT) + 1, allocator),
		length = length,
	}
}
// Sets all bits to 0 (false)
zero :: #force_inline proc(bits: Bits) {
slice.zero(bits.int_array)
}
// Sets the bit at `index` to `set_to` (true = 1, false = 0).
// Panics when `index` is out of range and bounds checking is enabled.
set :: #force_inline proc(bits: Bits, #any_int index: int, set_to: bool) {
	when ODIN_BOUNDS_CHECK {
		if index >= bits.length {
			panic(fmt.tprintf("Bit position %i out of bounds for length %i.", index, bits.length))
		}
	}
	// `index >> INDEX_SHIFT` selects the word; `index & BIT_POS_MASK` the bit within it.
	if set_to {
		bits.int_array[index >> INDEX_SHIFT] += {index & BIT_POS_MASK}
	} else {
		bits.int_array[index >> INDEX_SHIFT] -= {index & BIT_POS_MASK}
	}
}
// Sets the bit at `index` to 1.
// Panics when `index` is out of range and bounds checking is enabled.
set_true :: #force_inline proc(bits: Bits, #any_int index: int) {
	when ODIN_BOUNDS_CHECK {
		if index >= bits.length {
			panic(fmt.tprintf("Bit position %i out of bounds for length %i.", index, bits.length))
		}
	}
	// bit_set union-assign turns the bit on (no-op if already on).
	bits.int_array[index >> INDEX_SHIFT] += {index & BIT_POS_MASK}
}
set_one :: set_true
// Sets the bit at `index` to 0.
// Panics when `index` is out of range and bounds checking is enabled.
set_false :: #force_inline proc(bits: Bits, #any_int index: int) {
	when ODIN_BOUNDS_CHECK {
		if index >= bits.length {
			panic(fmt.tprintf("Bit position %i out of bounds for length %i.", index, bits.length))
		}
	}
	// bit_set difference-assign turns the bit off (no-op if already off).
	bits.int_array[index >> INDEX_SHIFT] -= {index & BIT_POS_MASK}
}
set_zero :: set_false
// Reads the bit at `index`; true when the bit is 1.
// Panics when `index` is out of range and bounds checking is enabled.
get :: #force_inline proc(bits: Bits, #any_int index: int) -> bool {
	when ODIN_BOUNDS_CHECK {
		if index >= bits.length {
			panic(fmt.tprintf("Bit position %i out of bounds for length %i.", index, bits.length))
		}
	}
	word := index >> INDEX_SHIFT
	bit := index & BIT_POS_MASK
	return bit in bits.int_array[word]
}
// Returns true if all bits in range [start, end) are set [start is inclusive, end is exclusive)
range_true :: proc(bits: Bits, #any_int start, end: int) -> bool {
	when ODIN_BOUNDS_CHECK {
		if start < 0 {
			panic(fmt.tprintf("Start %i is negative.", start))
		}
		if start > end {
			panic(fmt.tprintf("Start %i is greater than end %i.", start, end))
		}
		if end > bits.length {
			panic(fmt.tprintf("End %i out of bounds for length %i.", end, bits.length))
		}
	}
	// Empty range is vacuously true
	if start == end do return true
	start_u := uint(start)
	end_u := uint(end)
	start_word := start_u >> INDEX_SHIFT
	end_word := (end_u - 1) >> INDEX_SHIFT // word holding the last bit in range
	start_bit := start_u & BIT_POS_MASK
	end_bit := end_u & BIT_POS_MASK // end is exclusive; 0 means "to end of word"
	// Range is within a single word
	if start_word == end_word {
		word := transmute(uint)bits.int_array[start_word]
		low_mask: uint = (uint(1) << start_bit) - 1
		// Branchless: when end_bit == 0 the range runs to the end of the word,
		// so the high mask must be all ones.
		high_mask: uint = ((uint(1) << end_bit) - 1) | (UINT_MAX * uint(end_bit == 0))
		mask := high_mask & ~low_mask
		return word & mask == mask
	}
	// Range spans multiple words
	// First word: [start_bit, UINT_NUM_BITS)
	if start_bit != 0 {
		first_word := transmute(uint)bits.int_array[start_word]
		start_mask: uint = ~((uint(1) << start_bit) - 1)
		if first_word & start_mask != start_mask {
			return false
		}
		start_word += 1
	}
	// Last word: [0, end_bit)
	// If end_bit == 0, we need the whole last word, so include it in the middle scan.
	if end_bit != 0 {
		last_word := transmute(uint)bits.int_array[end_word]
		end_mask: uint = (uint(1) << end_bit) - 1
		if last_word & end_mask != end_mask {
			return false
		}
	} else {
		end_word += 1
	}
	// Middle words: all bits must be set
	for i := start_word; i < end_word; i += 1 {
		if transmute(uint)bits.int_array[i] != UINT_MAX {
			return false
		}
	}
	return true
}
range_ones :: range_true
// Returns true if every stored bit is 1. Unused bits past `length` in the
// final word are masked off and do not affect the result.
all_true :: proc(bits: Bits) -> bool {
	// Empty bit array is vacuously true
	if bits.length == 0 do return true
	bit_index := uint(bits.length - 1)
	int_index := bit_index >> INDEX_SHIFT
	// The last int needs special treatment because we only want to check part of it
	last_bit_pos := bit_index & BIT_POS_MASK
	// When last_bit_pos == UINT_NUM_BITS-1 the shift wraps to 0 and the mask
	// becomes UINT_MAX (Odin defines over-wide shifts as producing 0).
	last_bit_mask: uint = (1 << (last_bit_pos + 1)) - 1
	int_val := transmute(uint)bits.int_array[int_index]
	if int_val & last_bit_mask != last_bit_mask {
		return false
	}
	if int_index == 0 { // If there was only 1 int in the array
		return true
	}
	int_index -= 1
	// All other ints should be all 1s
	for {
		int_val := transmute(uint)bits.int_array[int_index]
		if int_val != UINT_MAX {
			return false
		}
		if int_index == 0 {
			return true
		}
		int_index -= 1
	}
}
all_ones :: all_true
// Returns ok = false if there are no 1 bits in the entire array.
// Finds the set bit closest to `index`; when a left and right candidate are
// equally distant, the left (lower) index wins. Scans outward one word per
// step from the word containing `index`.
nearest_true :: proc(bits: Bits, index: int) -> (nearest: int, ok: bool) {
	when ODIN_BOUNDS_CHECK {
		if index >= bits.length {
			panic(fmt.tprintf("Bit position %i out of bounds for length %i.", index, bits.length))
		}
	}
	bit_index := uint(index)
	word_index := bit_index >> INDEX_SHIFT
	bit_pos := bit_index & BIT_POS_MASK
	word_index_int := int(word_index)
	total_words := len(bits.int_array)
	// Furthest word offsets reachable to the left and right of the start word.
	max_left := word_index_int
	max_right := total_words - 1 - word_index_int
	max_offset := max(max_left, max_right)
	// Fast path: the word containing `index` has at least one set bit.
	word_val := transmute(uint)bits.int_array[word_index_int]
	if word_val != 0 {
		if (word_val & (uint(1) << bit_pos)) != 0 do return index, true
		// Bits at or below bit_pos.
		left_mask := (uint(1) << bit_pos) | ((uint(1) << bit_pos) - 1)
		left_bits_value := word_val & left_mask
		// Bits at or above bit_pos.
		right_mask := ~((uint(1) << bit_pos) - 1)
		right_bits_value := word_val & right_mask
		nearest_left := 0
		left_found := false
		if left_bits_value != 0 {
			// Highest set bit below index = closest on the left.
			left_offset_from_top := intrinsics.count_leading_zeros(left_bits_value)
			left_bit := (UINT_NUM_BITS - 1) - left_offset_from_top
			nearest_left = (word_index_int << INDEX_SHIFT) + int(left_bit)
			left_found = true
		}
		nearest_right := 0
		right_found := false
		if right_bits_value != 0 {
			// Lowest set bit above index = closest on the right.
			right_offset := intrinsics.count_trailing_zeros(right_bits_value)
			nearest_right = (word_index_int << INDEX_SHIFT) + int(right_offset)
			right_found = true
		}
		if left_found && right_found {
			left_dist := index - nearest_left
			right_dist := nearest_right - index
			if left_dist <= right_dist {
				return nearest_left, true
			} else {
				return nearest_right, true
			}
		} else if left_found {
			return nearest_left, true
		} else if right_found {
			return nearest_right, true
		}
	}
	// Widening scan: at offset k, inspect the word k to the right and the word
	// k to the left of the start word, returning whichever candidate is closer.
	for offset := 1; offset <= max_offset; offset += 1 {
		right_found := false
		left_found := false
		nearest_right := 0
		nearest_left := 0
		right_dist := 0
		left_dist := 0
		right_index := word_index_int + offset
		if right_index < total_words {
			word_val := transmute(uint)bits.int_array[right_index]
			if word_val != 0 {
				right_offset := intrinsics.count_trailing_zeros(word_val)
				nearest_right = (right_index << INDEX_SHIFT) + int(right_offset)
				right_found = true
				right_dist = nearest_right - index
			}
		}
		left_index := word_index_int - offset
		if left_index >= 0 {
			word_val := transmute(uint)bits.int_array[left_index]
			if word_val != 0 {
				left_offset_from_top := intrinsics.count_leading_zeros(word_val)
				left_bit := (UINT_NUM_BITS - 1) - left_offset_from_top
				nearest_left = (left_index << INDEX_SHIFT) + int(left_bit)
				left_found = true
				left_dist = index - nearest_left
			}
		}
		if left_found && right_found {
			if left_dist <= right_dist {
				return nearest_left, true
			} else {
				return nearest_right, true
			}
		} else if left_found {
			return nearest_left, true
		} else if right_found {
			return nearest_right, true
		}
	}
	return
}
nearest_one :: nearest_true
// Returns ok = false if there are no 0 bits in the entire array.
// Mirror of `nearest_true`, searching a complemented view of each word.
// The final word is masked so the unused storage bits past `length` are
// never reported as zeros.
nearest_false :: proc(bits: Bits, index: int) -> (nearest: int, ok: bool) {
	when ODIN_BOUNDS_CHECK {
		if index >= bits.length {
			panic(fmt.tprintf("Bit position %i out of bounds for length %i.", index, bits.length))
		}
	}
	bit_index := uint(index)
	word_index := bit_index >> INDEX_SHIFT
	bit_pos := bit_index & BIT_POS_MASK
	word_index_int := int(word_index)
	total_words := len(bits.int_array)
	// Furthest word offsets reachable to the left and right of the start word.
	max_left := word_index_int
	max_right := total_words - 1 - word_index_int
	max_offset := max(max_left, max_right)
	// Mask of the bits in the final word that are actually part of the array.
	last_bit_index := uint(bits.length - 1)
	last_word_index := int(last_bit_index >> INDEX_SHIFT)
	last_bit_pos := last_bit_index & BIT_POS_MASK
	valid_bits_mask: uint
	if last_bit_pos == UINT_NUM_BITS - 1 {
		valid_bits_mask = UINT_MAX
	} else {
		valid_bits_mask = (uint(1) << (last_bit_pos + 1)) - 1
	}
	// Fast path: search the complement of the word containing `index`.
	word_val := transmute(uint)bits.int_array[word_index_int]
	word_val_search := ~word_val
	if word_index_int == last_word_index {
		word_val_search &= valid_bits_mask
	}
	if word_val_search != 0 {
		if (word_val & (uint(1) << bit_pos)) == 0 do return index, true
		// Zero-bits at or below bit_pos.
		left_mask := (uint(1) << bit_pos) | ((uint(1) << bit_pos) - 1)
		left_bits_value := word_val_search & left_mask
		// Zero-bits at or above bit_pos.
		right_mask := ~((uint(1) << bit_pos) - 1)
		right_bits_value := word_val_search & right_mask
		nearest_left := 0
		left_found := false
		if left_bits_value != 0 {
			left_offset_from_top := intrinsics.count_leading_zeros(left_bits_value)
			left_bit := (UINT_NUM_BITS - 1) - left_offset_from_top
			nearest_left = (word_index_int << INDEX_SHIFT) + int(left_bit)
			left_found = true
		}
		nearest_right := 0
		right_found := false
		if right_bits_value != 0 {
			right_offset := intrinsics.count_trailing_zeros(right_bits_value)
			nearest_right = (word_index_int << INDEX_SHIFT) + int(right_offset)
			right_found = true
		}
		if left_found && right_found {
			left_dist := index - nearest_left
			right_dist := nearest_right - index
			if left_dist <= right_dist {
				return nearest_left, true
			} else {
				return nearest_right, true
			}
		} else if left_found {
			return nearest_left, true
		} else if right_found {
			return nearest_right, true
		}
	}
	// Widening scan: at offset k, inspect the word k to the right and the word
	// k to the left of the start word, returning whichever candidate is closer.
	for offset := 1; offset <= max_offset; offset += 1 {
		right_found := false
		left_found := false
		nearest_right := 0
		nearest_left := 0
		right_dist := 0
		left_dist := 0
		right_index := word_index_int + offset
		if right_index < total_words {
			word_val := transmute(uint)bits.int_array[right_index]
			word_val_search := ~word_val
			if right_index == last_word_index {
				word_val_search &= valid_bits_mask
			}
			if word_val_search != 0 {
				right_offset := intrinsics.count_trailing_zeros(word_val_search)
				nearest_right = (right_index << INDEX_SHIFT) + int(right_offset)
				right_found = true
				right_dist = nearest_right - index
			}
		}
		left_index := word_index_int - offset
		if left_index >= 0 {
			// Words to the left are never the final word here, so no masking needed.
			word_val := transmute(uint)bits.int_array[left_index]
			word_val_search := ~word_val
			if word_val_search != 0 {
				left_offset_from_top := intrinsics.count_leading_zeros(word_val_search)
				left_bit := (UINT_NUM_BITS - 1) - left_offset_from_top
				nearest_left = (left_index << INDEX_SHIFT) + int(left_bit)
				left_found = true
				left_dist = index - nearest_left
			}
		}
		if left_found && right_found {
			if left_dist <= right_dist {
				return nearest_left, true
			} else {
				return nearest_right, true
			}
		} else if left_found {
			return nearest_left, true
		} else if right_found {
			return nearest_right, true
		}
	}
	return
}
nearest_zero :: nearest_false
// Cursor over a Bits value; `word_idx`/`bit_idx` track the next bit to visit.
Iterator :: struct {
	bits:     ^Bits,
	word_idx: int,
	bit_idx:  uint,
}
// Creates an iterator positioned at bit 0 of `bits`.
iterator :: #force_inline proc(bits: ^Bits) -> Iterator {
	return {bits = bits}
}
// Advances the iterator by one bit, returning that bit's value, its overall
// index, and whether the iteration may continue.
iterate :: proc(iterator: ^Iterator) -> (is_true: bool, idx: int, cond: bool) {
	position := iterator.word_idx * UINT_NUM_BITS + int(iterator.bit_idx)
	if position >= iterator.bits.length do return false, 0, false
	current_word := transmute(uint)iterator.bits.int_array[iterator.word_idx]
	bit_value := current_word >> iterator.bit_idx & 1 == 1
	// Step to the next bit, rolling over into the next word when needed.
	iterator.bit_idx += 1
	if iterator.bit_idx == UINT_NUM_BITS {
		iterator.word_idx += 1
		iterator.bit_idx = 0
	}
	return bit_value, position, true
}
// Shared engine for iterate_true/iterate_false: jumps straight to the next
// 1 bit (or 0 bit when ITERATE_ZEROS) via count_trailing_zeros instead of
// stepping one bit at a time.
@(private = "file")
_iterate_kind :: #force_inline proc(iterator: ^Iterator, $ITERATE_ZEROS: bool) -> (idx: int, cond: bool) {
	for iterator.word_idx < len(iterator.bits.int_array) {
		word := transmute(uint)iterator.bits.int_array[iterator.word_idx]
		when ITERATE_ZEROS do word = ~word
		word >>= iterator.bit_idx // Mask out already-processed bits
		if word != 0 {
			// Found a bit - count_trailing_zeros gives position in shifted word
			iterator.bit_idx += uint(intrinsics.count_trailing_zeros(word))
			idx = iterator.word_idx * UINT_NUM_BITS + int(iterator.bit_idx)
			// Advance for next call
			iterator.bit_idx += 1
			if iterator.bit_idx >= UINT_NUM_BITS {
				iterator.bit_idx = 0
				iterator.word_idx += 1
			}
			// A hit past `length` lands in the unused tail of the final word:
			// report end-of-iteration instead of a phantom bit.
			return idx, idx < iterator.bits.length
		}
		// Word exhausted, move to next
		iterator.word_idx += 1
		iterator.bit_idx = 0
	}
	return 0, false
}
// Yields indices of set (1) bits in ascending order.
iterate_true :: proc(iterator: ^Iterator) -> (idx: int, cond: bool) {
	return _iterate_kind(iterator, ITERATE_ZEROS = false)
}
iterate_ones :: iterate_true
// Yields indices of clear (0) bits in ascending order.
iterate_false :: proc(iterator: ^Iterator) -> (idx: int, cond: bool) {
	return _iterate_kind(iterator, ITERATE_ZEROS = true)
}
iterate_zeros :: iterate_false
// ---------------------------------------------------------------------------------------------------------------------
// ----- Tests ------------------------
// ---------------------------------------------------------------------------------------------------------------------
import "core:testing"
// Verifies `set` flips exactly the expected bit, within and across word boundaries.
@(test)
test_set :: proc(t: ^testing.T) {
	bits := make(128)
	defer delete(bits)
	set(bits, 0, true)
	testing.expect_value(t, bits.int_array[0], Int_Bits{0})
	set(bits, 3, true)
	testing.expect_value(t, bits.int_array[0], Int_Bits{0, 3})
	// Bit 64 lands in the second word at position 0.
	set(bits, 64, true)
	testing.expect_value(t, bits.int_array[1], Int_Bits{0})
	set(bits, 127, true)
	testing.expect_value(t, bits.int_array[1], Int_Bits{0, 63})
	// Clearing restores the previous word contents.
	set(bits, 127, false)
	testing.expect_value(t, bits.int_array[1], Int_Bits{0})
}
// Verifies `get` reads back what `set` wrote, including default-zero state
// and neighbors of each set bit.
@(test)
test_get :: proc(t: ^testing.T) {
	bits := make(128)
	defer delete(bits)
	// Default is false
	testing.expect(t, !get(bits, 0))
	testing.expect(t, !get(bits, 63))
	testing.expect(t, !get(bits, 64))
	testing.expect(t, !get(bits, 127))
	// Set and verify within first uint
	set(bits, 0, true)
	testing.expect(t, get(bits, 0))
	testing.expect(t, !get(bits, 1))
	set(bits, 3, true)
	testing.expect(t, get(bits, 3))
	testing.expect(t, !get(bits, 2))
	testing.expect(t, !get(bits, 4))
	// Cross uint boundary
	set(bits, 64, true)
	testing.expect(t, get(bits, 64))
	testing.expect(t, !get(bits, 63))
	testing.expect(t, !get(bits, 65))
	// Last bit
	set(bits, 127, true)
	testing.expect(t, get(bits, 127))
	// Unset and verify
	set(bits, 127, false)
	testing.expect(t, !get(bits, 127))
}
// Verifies the dedicated set_true/set_false procs against both raw word
// contents and `get`, including boundary bits and a clearing no-op.
@(test)
test_set_true_set_false :: proc(t: ^testing.T) {
	bits := make(128)
	defer delete(bits)
	// set_true within first uint
	set_true(bits, 0)
	testing.expect_value(t, bits.int_array[0], Int_Bits{0})
	testing.expect(t, get(bits, 0))
	set_true(bits, 3)
	testing.expect_value(t, bits.int_array[0], Int_Bits{0, 3})
	testing.expect(t, get(bits, 3))
	// set_true across uint boundary
	set_true(bits, 64)
	testing.expect_value(t, bits.int_array[1], Int_Bits{0})
	testing.expect(t, get(bits, 64))
	testing.expect(t, !get(bits, 63))
	testing.expect(t, !get(bits, 65))
	// set_true on last bit
	set_true(bits, 127)
	testing.expect_value(t, bits.int_array[1], Int_Bits{0, 63})
	testing.expect(t, get(bits, 127))
	// set_false to clear bits
	set_false(bits, 127)
	testing.expect_value(t, bits.int_array[1], Int_Bits{0})
	testing.expect(t, !get(bits, 127))
	set_false(bits, 0)
	testing.expect_value(t, bits.int_array[0], Int_Bits{3})
	testing.expect(t, !get(bits, 0))
	testing.expect(t, get(bits, 3)) // bit 3 still set
	// set_false on already-false bit (should be no-op)
	set_false(bits, 1)
	testing.expect_value(t, bits.int_array[0], Int_Bits{3})
	testing.expect(t, !get(bits, 1))
}
// Verifies all_true masks the unused tail of the final word (length 132 uses
// only 4 bits of word 2) and handles a single-bit array.
@(test)
all_true_test :: proc(t: ^testing.T) {
	uint_max := UINT_MAX
	all_ones := transmute(Int_Bits)uint_max
	bits := make(132)
	defer delete(bits)
	bits.int_array[0] = all_ones
	bits.int_array[1] = all_ones
	// Only bits 0..3 of the final word are in range for length 132.
	bits.int_array[2] = {0, 1, 2, 3}
	testing.expect(t, all_true(bits))
	bits.int_array[2] = {0, 1, 2}
	testing.expect(t, !all_true(bits))
	bits2 := make(1)
	defer delete(bits2)
	bits2.int_array[0] = {0}
	testing.expect(t, all_true(bits2))
}
// Exercises range_true across its distinct code paths: empty range,
// single-word range, two-word span, three-word span, and word-aligned edges.
@(test)
test_range_true :: proc(t: ^testing.T) {
	uint_max := UINT_MAX
	all_ones := transmute(Int_Bits)uint_max
	bits := make(192)
	defer delete(bits)
	// Empty range is vacuously true
	testing.expect(t, range_true(bits, 0, 0))
	testing.expect(t, range_true(bits, 50, 50))
	// inverted range should panic under bounds checking; keep this test case out of here
	// Single word, partial range
	bits.int_array[0] = {3, 4, 5, 6}
	testing.expect(t, range_true(bits, 3, 7))
	testing.expect(t, !range_true(bits, 2, 7)) // bit 2 not set
	testing.expect(t, !range_true(bits, 3, 8)) // bit 7 not set
	// Single word, full word
	bits.int_array[0] = all_ones
	testing.expect(t, range_true(bits, 0, 64))
	// Range spanning two words
	bits.int_array[0] = all_ones
	bits.int_array[1] = {0, 1, 2, 3}
	testing.expect(t, range_true(bits, 60, 68)) // bits 60-63 in word 0, bits 0-3 in word 1
	testing.expect(t, !range_true(bits, 60, 69)) // bit 68 (4 in word 1) not set
	// Range spanning three words with full middle word
	bits.int_array[0] = all_ones
	bits.int_array[1] = all_ones
	bits.int_array[2] = {0, 1, 2, 3}
	testing.expect(t, range_true(bits, 60, 132)) // partial first, full middle, partial last
	testing.expect(t, !range_true(bits, 60, 133)) // bit 132 (4 in word 2) not set
	// Middle word not all set
	bits.int_array[1] = all_ones - {32}
	testing.expect(t, !range_true(bits, 60, 132))
	// Boundary: range ends exactly at word boundary
	bits.int_array[0] = all_ones
	bits.int_array[1] = all_ones
	testing.expect(t, range_true(bits, 32, 128))
	// Boundary: range starts exactly at word boundary
	bits.int_array[1] = all_ones
	bits.int_array[2] = all_ones
	testing.expect(t, range_true(bits, 64, 192))
}
@(test)
nearest_true_handles_same_word_and_boundaries :: proc(t: ^testing.T) {
 // Seed a few true bits in the first word and check that nearest_true
 // finds exact hits, closer neighbors, and word-edge indices. The query
 // at 15 is equidistant from 10 and 20; the test pins 10 as the winner.
 bits := make(128, context.temp_allocator)
 seeds := [?]int{0, 10, 20, 63}
 for seed in seeds {
  set_true(bits, seed)
 }
 Case :: struct {
  query, want: int,
 }
 cases := [?]Case{{10, 10}, {12, 10}, {17, 20}, {15, 10}, {0, 0}, {63, 63}}
 for c in cases {
  nearest, ok := nearest_true(bits, c.query)
  testing.expect(t, ok)
  testing.expect_value(t, nearest, c.want)
 }
}
@(test)
nearest_false_handles_same_word_and_boundaries :: proc(t: ^testing.T) {
 // Mirror of the nearest_true test: start all-true, punch a few false
 // holes, and confirm nearest_false finds them within the first word.
 bits := make(128, context.temp_allocator)
 for i in 0 ..< bits.length {
  set_true(bits, i)
 }
 holes := [?]int{0, 10, 20, 63}
 for hole in holes {
  set_false(bits, hole)
 }
 Case :: struct {
  query, want: int,
 }
 cases := [?]Case{{10, 10}, {12, 10}, {17, 20}, {15, 10}, {0, 0}, {63, 63}}
 for c in cases {
  nearest, ok := nearest_false(bits, c.query)
  testing.expect(t, ok)
  testing.expect_value(t, nearest, c.want)
 }
}
@(test)
nearest_false_scans_across_words_and_returns_false_when_all_true :: proc(t: ^testing.T) {
 // All-true field with two distant false bits; a query between them must
 // land on the closer one even though the scan crosses word boundaries.
 bits := make(192, context.temp_allocator)
 for i in 0 ..< bits.length {
  set_true(bits, i)
 }
 set_false(bits, 5)
 set_false(bits, 130)
 pos, found := nearest_false(bits, 96)
 testing.expect(t, found)
 testing.expect_value(t, pos, 130)
 // With every bit true again there is no false bit left to find.
 set_true(bits, 5)
 set_true(bits, 130)
 pos, found = nearest_false(bits, 96)
 testing.expect(t, !found)
}
@(test)
nearest_true_scans_across_words_and_returns_false_when_empty :: proc(t: ^testing.T) {
 // Two distant true bits; the query at 96 is closer to 130 than to 5,
 // so the scan must cross a word boundary to find it.
 bits := make(192, context.temp_allocator)
 set_true(bits, 5)
 set_true(bits, 130)
 pos, found := nearest_true(bits, 96)
 testing.expect(t, found)
 testing.expect_value(t, pos, 130)
 // After clearing everything there is nothing left to find.
 zero(bits)
 pos, found = nearest_true(bits, 96)
 testing.expect(t, !found)
}
@(test)
nearest_false_handles_last_word_partial_length :: proc(t: ^testing.T) {
 // Length 130 leaves a partial final word; the false bit at 129 (the
 // last valid index) must still be found from queries near the end.
 bits := make(130, context.temp_allocator)
 for i in 0 ..< bits.length {
  set_true(bits, i)
 }
 set_false(bits, 0)
 set_false(bits, 129)
 queries := [?]int{128, 127}
 for query in queries {
  pos, found := nearest_false(bits, query)
  testing.expect(t, found)
  testing.expect_value(t, pos, 129)
 }
}
@(test)
nearest_true_handles_last_word_partial_length :: proc(t: ^testing.T) {
 // Partial final word: the set bit at 129 is the nearest hit for
 // queries at 128 and 127.
 bits := make(130, context.temp_allocator)
 set_true(bits, 0)
 set_true(bits, 129)
 queries := [?]int{128, 127}
 for query in queries {
  pos, found := nearest_true(bits, query)
  testing.expect(t, found)
  testing.expect_value(t, pos, 129)
 }
}
@(test)
iterator_basic_mixed_bits :: proc(t: ^testing.T) {
 // Exercises all three iterator flavors over a bitset with scattered true
 // bits: the first index, a word-boundary index (64), and the final valid
 // index (99).
 // Use non-word-aligned length to test partial last word handling
 bits := make(100, context.temp_allocator)
 // Set specific bits: 0, 3, 64, 99 (last valid index)
 set_true(bits, 0)
 set_true(bits, 3)
 set_true(bits, 64)
 set_true(bits, 99)
 expected_true_indices := [?]int{0, 3, 64, 99}
 // Test iterate - should return all 100 bits with correct set state
 {
  it := iterator(&bits)
  count := 0
  for is_set, idx in iterate(&it) {
   expected_set := slice.contains(expected_true_indices[:], idx)
   testing.expectf(
    t,
    is_set == expected_set,
    "iterate: bit %d expected is_set=%v, got %v",
    idx,
    expected_set,
    is_set,
   )
   testing.expectf(t, idx == count, "iterate: expected sequential idx=%d, got %d", count, idx)
   count += 1
  }
  testing.expectf(t, count == 100, "iterate: expected 100 iterations, got %d", count)
 }
 // Test iterate_true - should only return the 4 set bits
 {
  it := iterator(&bits)
  result_indices := builtin.make([dynamic]int, allocator = context.temp_allocator)
  for idx in iterate_true(&it) {
   append(&result_indices, idx)
  }
  testing.expectf(
   t,
   len(result_indices) == 4,
   "iterate_true: expected 4 set bits, got %d",
   len(result_indices),
  )
  // Set bits must come back in ascending index order.
  for expected_idx, i in expected_true_indices {
   testing.expectf(
    t,
    result_indices[i] == expected_idx,
    "iterate_true: at position %d expected idx=%d, got %d",
    i,
    expected_idx,
    result_indices[i],
   )
  }
 }
 // Test iterate_false - should return all 96 unset bits
 {
  it := iterator(&bits)
  count := 0
  for idx in iterate_false(&it) {
   testing.expectf(
    t,
    !slice.contains(expected_true_indices[:], idx),
    "iterate_false: returned set bit index %d",
    idx,
   )
   count += 1
  }
  testing.expectf(t, count == 96, "iterate_false: expected 96 unset bits, got %d", count)
 }
}
@(test)
iterator_all_false_bits :: proc(t: ^testing.T) {
 // Non-word-aligned length; nothing is ever set, so every bit reads false.
 bits := make(100, context.temp_allocator)
 // iterate: visits each of the 100 bits in index order, all unset.
 {
  it := iterator(&bits)
  visited := 0
  for is_set, idx in iterate(&it) {
   testing.expectf(t, !is_set, "iterate: bit %d expected is_set=false, got true", idx)
   testing.expectf(t, idx == visited, "iterate: expected sequential idx=%d, got %d", visited, idx)
   visited += 1
  }
  testing.expectf(t, visited == 100, "iterate: expected 100 iterations, got %d", visited)
 }
 // iterate_true: must yield nothing at all.
 {
  it := iterator(&bits)
  visited := 0
  for idx in iterate_true(&it) {
   testing.expectf(t, false, "iterate_true: unexpectedly returned idx=%d when all bits are false", idx)
   visited += 1
  }
  testing.expectf(t, visited == 0, "iterate_true: expected 0 iterations, got %d", visited)
 }
 // iterate_false: yields every index 0..99 in order.
 {
  it := iterator(&bits)
  visited := 0
  for idx in iterate_false(&it) {
   testing.expectf(t, idx == visited, "iterate_false: expected sequential idx=%d, got %d", visited, idx)
   visited += 1
  }
  testing.expectf(t, visited == 100, "iterate_false: expected 100 iterations, got %d", visited)
 }
}
@(test)
iterator_all_true_bits :: proc(t: ^testing.T) {
 // Non-word-aligned length with every bit set.
 bits := make(100, context.temp_allocator)
 for i in 0 ..< bits.length {
  set_true(bits, i)
 }
 // iterate: all 100 bits report true, in index order.
 {
  it := iterator(&bits)
  visited := 0
  for is_set, idx in iterate(&it) {
   testing.expectf(t, is_set, "iterate: bit %d expected is_set=true, got false", idx)
   testing.expectf(t, idx == visited, "iterate: expected sequential idx=%d, got %d", visited, idx)
   visited += 1
  }
  testing.expectf(t, visited == 100, "iterate: expected 100 iterations, got %d", visited)
 }
 // iterate_true: yields every index 0..99 in order.
 {
  it := iterator(&bits)
  visited := 0
  for idx in iterate_true(&it) {
   testing.expectf(t, idx == visited, "iterate_true: expected sequential idx=%d, got %d", visited, idx)
   visited += 1
  }
  testing.expectf(t, visited == 100, "iterate_true: expected 100 iterations, got %d", visited)
 }
 // iterate_false: nothing is unset, so nothing is yielded.
 {
  it := iterator(&bits)
  visited := 0
  for idx in iterate_false(&it) {
   testing.expectf(t, false, "iterate_false: unexpectedly returned idx=%d when all bits are true", idx)
   visited += 1
  }
  testing.expectf(t, visited == 0, "iterate_false: expected 0 iterations, got %d", visited)
 }
}

5
odinfmt.json Normal file
View File

@@ -0,0 +1,5 @@
{
"$schema": "https://raw.githubusercontent.com/DanielGavin/ols/master/misc/odinfmt.schema.json",
"character_width": 110,
"tabs_width": 1
}

7
quantity/irradiance.odin Normal file
View File

@@ -0,0 +1,7 @@
package quantity
import "base:intrinsics"
// Irradiance in watts per square meter (W/m^2).
Watts_Per_Square_Meter :: struct($V: typeid) where intrinsics.type_is_numeric(V) {
 v: V, // magnitude
}

134
quantity/pressure.odin Normal file
View File

@@ -0,0 +1,134 @@
package quantity
import "base:intrinsics"
// One standard atmosphere is 101325 Pa and, by definition, 760 Torr.
PASCALS_PER_TORR :: 101325.0 / 760.0
// Conversion factor: 1 psi = 6.894757293168364 kPa.
KILO_PASCALS_PER_PSI :: 6.894757293168364
//----- Pascals ----------------------------------
// Pressure in pascals (Pa).
Pascals :: struct($V: typeid) where intrinsics.type_is_numeric(V) {
 v: V, // magnitude
}
// Pa -> kPa. Integer value types truncate toward zero.
@(private = "file")
pascals_to_kilo_pascals :: #force_inline proc "contextless" (
 pascals: Pascals($V),
) -> Kilo_Pascals(V) where intrinsics.type_is_numeric(V) {
 kilo_pascals := pascals.v / KILO
 return Kilo_Pascals(V){kilo_pascals}
}
// Pa -> Torr; float value types only.
@(private = "file")
pascals_to_torr :: #force_inline proc "contextless" (
 pascals: Pascals($V),
) -> Torr(V) where intrinsics.type_is_float(V) {
 torr := pascals.v / PASCALS_PER_TORR
 return Torr(V){torr}
}
//----- Kilopascals ----------------------------------
// Pressure in kilopascals (kPa).
Kilo_Pascals :: struct($V: typeid) where intrinsics.type_is_numeric(V) {
 v: V, // magnitude
}
// kPa -> Pa.
@(private = "file")
kilo_pascals_to_pascals :: #force_inline proc "contextless" (
 kilo_pascals: Kilo_Pascals($V),
) -> Pascals(V) where intrinsics.type_is_numeric(V) {
 pascals := kilo_pascals.v * KILO
 return Pascals(V){pascals}
}
// kPa -> psi; float value types only.
// NOTE(review): every sibling conversion here is @(private = "file") but
// this one is public — confirm whether that is intentional.
kilo_pascals_to_psi :: #force_inline proc "contextless" (
 kilo_pascals: Kilo_Pascals($V),
) -> Psi(V) where intrinsics.type_is_float(V) {
 psi := kilo_pascals.v / KILO_PASCALS_PER_PSI
 return Psi(V){psi}
}
//----- Torr ----------------------------------
// Pressure in torr (1/760 of a standard atmosphere).
Torr :: struct($V: typeid) where intrinsics.type_is_numeric(V) {
 v: V, // magnitude
}
// Torr -> Pa; float value types only.
@(private = "file")
torr_to_pascals :: #force_inline proc "contextless" (
 torr: Torr($V),
) -> Pascals(V) where intrinsics.type_is_float(V) {
 pascals := torr.v * PASCALS_PER_TORR
 return Pascals(V){pascals}
}
//----- PSI ----------------------------------
// Pressure in pounds per square inch (psi).
Psi :: struct($V: typeid) where intrinsics.type_is_numeric(V) {
 v: V, // magnitude
}
// psi -> kPa; float value types only.
// NOTE(review): every sibling conversion here is @(private = "file") but
// this one is public — confirm whether that is intentional.
psi_to_kilo_pascals :: #force_inline proc "contextless" (
 psi: Psi($V),
) -> Kilo_Pascals(V) where intrinsics.type_is_float(V) {
 kilo_pascals := psi.v * KILO_PASCALS_PER_PSI
 return Kilo_Pascals(V){kilo_pascals}
}
// ---------------------------------------------------------------------------------------------------------------------
// ----- Conversion Overloads ------------------------
// ---------------------------------------------------------------------------------------------------------------------
// Converts any supported pressure type to Pascals.
to_pascals :: proc {
 kilo_pascals_to_pascals,
 torr_to_pascals,
}
// Converts any supported pressure type to Kilo_Pascals.
to_kilo_pascals :: proc {
 pascals_to_kilo_pascals,
 psi_to_kilo_pascals,
}
// Converts Pascals to Torr (float value types only).
to_torr :: proc {
 pascals_to_torr,
}
// Converts Kilo_Pascals to Psi (float value types only).
to_psi :: proc {
 kilo_pascals_to_psi,
}
// ---------------------------------------------------------------------------------------------------------------------
// ----- Tests ------------------------
// ---------------------------------------------------------------------------------------------------------------------
import "core:testing"
@(test)
test_pascals_to_kilo_pascals :: proc(t: ^testing.T) {
 testing.expect_value(t, to_kilo_pascals(Pascals(int){1000}), Kilo_Pascals(int){1})
}
@(test)
test_kilo_pascals_to_pascals :: proc(t: ^testing.T) {
 testing.expect_value(t, to_pascals(Kilo_Pascals(int){1}), Pascals(int){1000})
}
@(test)
test_pascals_to_torr :: proc(t: ^testing.T) {
 // 1000 Pa is roughly 7.5006 Torr.
 torr := to_torr(Pascals(f32){1000})
 testing.expect(t, torr.v > 7.49 && torr.v < 7.51)
}
@(test)
test_torr_to_pascals :: proc(t: ^testing.T) {
 // 7.5 Torr is roughly 999.92 Pa.
 pascals := to_pascals(Torr(f32){7.5})
 testing.expect(t, pascals.v > 999.91 && pascals.v < 999.92)
}
@(test)
test_psi_kilo_pascals :: proc(t: ^testing.T) {
 // Spot-check both directions of the psi <-> kPa conversion.
 psi := Psi(f32){2.5}
 kilo_pascals := Kilo_Pascals(f32){17.23689323292091}
 testing.expect(t, to_kilo_pascals(psi).v > 17.22 && to_kilo_pascals(psi).v < 17.24)
 testing.expect(t, to_psi(kilo_pascals).v > 2.49 && to_psi(kilo_pascals).v < 2.51)
}

12
quantity/quantity.odin Normal file
View File

@@ -0,0 +1,12 @@
package quantity
// Sub-unit counts per whole unit (and whole units per larger prefix).
// NOTE: these are plain integer scale factors, not SI multipliers — e.g.
// MILLI is 1_000 because there are 1000 milli-units in one unit; the same
// constant is used both to multiply (unit -> milli) and divide (milli ->
// unit), which keeps integer value types exact apart from truncation.
DECA :: 10
DECI :: 10
HECTO :: 100
CENTI :: 100
KILO :: 1_000
MILLI :: 1_000
MEGA :: 1_000_000
MICRO :: 1_000_000
GIGA :: 1_000_000_000
NANO :: 1_000_000_000

7
quantity/resistance.odin Normal file
View File

@@ -0,0 +1,7 @@
package quantity
import "base:intrinsics"
// Electrical resistance in ohms.
Ohms :: struct($V: typeid) where intrinsics.type_is_numeric(V) {
 v: V, // magnitude
}

157
quantity/temperature.odin Normal file
View File

@@ -0,0 +1,157 @@
package quantity
import "base:intrinsics"
// ---------------------------------------------------------------------------------------------------------------------
// ----- Constants ------------------------
// ---------------------------------------------------------------------------------------------------------------------
// Offset between the Kelvin and Celsius scales for value type V: exactly
// 273.15 for float types, truncated to 273 for integer types — so integer
// K <-> °C conversions are off by 0.15 by design.
@(private = "file")
kelvins_celsius_offset :: #force_inline proc "contextless" (
 $V: typeid,
) -> V where intrinsics.type_is_numeric(V) {
 when intrinsics.type_is_float(V) {
  OFFSET :: 273.15
 } else {
  OFFSET :: 273
 }
 return OFFSET
}
// ---------------------------------------------------------------------------------------------------------------------
// ----- Types ------------------------
// ---------------------------------------------------------------------------------------------------------------------
//----- Kelvins ----------------------------------
// Thermodynamic temperature in kelvins (K).
Kelvins :: struct($V: typeid) where intrinsics.type_is_numeric(V) {
 v: V, // magnitude
}
// K -> °C: subtract the scale offset (273.15, or 273 for integer V).
@(private = "file")
kelvins_to_celsius :: #force_inline proc "contextless" (
 kelvins: Kelvins($V),
) -> Celsius(V) where intrinsics.type_is_numeric(V) {
 degrees := kelvins.v - kelvins_celsius_offset(V)
 return Celsius(V){degrees}
}
// K -> dK: 1 K = 10 dK.
@(private = "file")
kelvins_to_deci_kelvins :: #force_inline proc "contextless" (
 kelvins: Kelvins($V),
) -> Deci_Kelvins(V) where intrinsics.type_is_numeric(V) {
 deci := kelvins.v * DECI
 return Deci_Kelvins(V){deci}
}
//----- Decikelvins ----------------------------------
// Temperature in tenths of a kelvin (dK).
Deci_Kelvins :: struct($V: typeid) where intrinsics.type_is_numeric(V) {
 v: V, // magnitude
}
// dK -> K. Integer value types truncate toward zero.
@(private = "file")
deci_kelvins_to_kelvins :: #force_inline proc "contextless" (
 deci_kelvins: Deci_Kelvins($V),
) -> Kelvins(V) where intrinsics.type_is_numeric(V) {
 kelvins := deci_kelvins.v / DECI
 return Kelvins(V){kelvins}
}
//----- Degrees Celsius ----------------------------------
// Temperature in degrees Celsius (°C).
Celsius :: struct($V: typeid) where intrinsics.type_is_numeric(V) {
 v: V, // magnitude
}
// °C -> K: add the scale offset (273.15, or 273 for integer V).
@(private = "file")
celsius_to_kelvins :: #force_inline proc "contextless" (
 degrees_celsius: Celsius($V),
) -> Kelvins(V) where intrinsics.type_is_numeric(V) {
 kelvins := degrees_celsius.v + kelvins_celsius_offset(V)
 return Kelvins(V){kelvins}
}
// °C -> deci-°C: one degree is ten deci-degrees.
@(private = "file")
celsius_to_deci_celsius :: #force_inline proc "contextless" (
 degrees_celsius: Celsius($V),
) -> Deci_Celsius(V) where intrinsics.type_is_numeric(V) {
 deci := degrees_celsius.v * DECI
 return Deci_Celsius(V){deci}
}
//----- Deci Degrees Celsius ----------------------------------
// Temperature in tenths of a degree Celsius.
Deci_Celsius :: struct($V: typeid) where intrinsics.type_is_numeric(V) {
 v: V, // magnitude
}
// deci-°C -> °C. Integer value types truncate toward zero.
@(private = "file")
deci_celsius_to_celsius :: #force_inline proc "contextless" (
 deci_degrees_celsius: Deci_Celsius($V),
) -> Celsius(V) where intrinsics.type_is_numeric(V) {
 degrees := deci_degrees_celsius.v / DECI
 return Celsius(V){degrees}
}
// ---------------------------------------------------------------------------------------------------------------------
// ----- Conversion Overloads ------------------------
// ---------------------------------------------------------------------------------------------------------------------
// Converts Deci_Kelvins or Celsius to Kelvins.
to_kelvins :: proc {
 deci_kelvins_to_kelvins,
 celsius_to_kelvins,
}
// Converts Kelvins to Deci_Kelvins.
to_deci_kelvins :: proc {
 kelvins_to_deci_kelvins,
}
// Converts Kelvins or Deci_Celsius to Celsius.
to_celsius :: proc {
 kelvins_to_celsius,
 deci_celsius_to_celsius,
}
// Converts Celsius to Deci_Celsius.
to_deci_celsius :: proc {
 celsius_to_deci_celsius,
}
// ---------------------------------------------------------------------------------------------------------------------
// ----- Tests ------------------------
// ---------------------------------------------------------------------------------------------------------------------
import "core:testing"
@(test)
test_kelvins_to_celsius :: proc(t: ^testing.T) {
 testing.expect_value(t, to_celsius(Kelvins(f32){273.15}), Celsius(f32){0})
}
@(test)
test_kelvins_to_deci_kelvins :: proc(t: ^testing.T) {
 testing.expect_value(t, to_deci_kelvins(Kelvins(int){100}), Deci_Kelvins(int){1000})
}
@(test)
test_deci_kelvins_to_kelvins :: proc(t: ^testing.T) {
 testing.expect_value(t, to_kelvins(Deci_Kelvins(int){1000}), Kelvins(int){100})
}
@(test)
test_celsius_to_kelvins :: proc(t: ^testing.T) {
 testing.expect_value(t, to_kelvins(Celsius(f32){0}), Kelvins(f32){273.15})
}
@(test)
test_celsius_to_deci_celsius :: proc(t: ^testing.T) {
 testing.expect_value(t, to_deci_celsius(Celsius(int){100}), Deci_Celsius(int){1000})
}
@(test)
test_deci_celsius_to_celsius :: proc(t: ^testing.T) {
 testing.expect_value(t, to_celsius(Deci_Celsius(int){1000}), Celsius(int){100})
}

59
quantity/voltage.odin Normal file
View File

@@ -0,0 +1,59 @@
package quantity
import "base:intrinsics"
//----- Volts ----------------------------------
// Electric potential in volts (V).
Volts :: struct($V: typeid) where intrinsics.type_is_numeric(V) {
 v: V, // magnitude
}
// V -> mV: 1 V = MILLI (1000) mV.
@(private = "file")
volts_to_milli_volts :: #force_inline proc "contextless" (
 volts: Volts($V),
) -> Milli_Volts(V) where intrinsics.type_is_numeric(V) {
 milli := volts.v * MILLI
 return Milli_Volts(V){milli}
}
//----- Millivolts ----------------------------------
// Electric potential in millivolts (mV).
Milli_Volts :: struct($V: typeid) where intrinsics.type_is_numeric(V) {
 v: V, // magnitude
}
// mV -> V. Integer value types truncate toward zero.
@(private = "file")
milli_volts_to_volts :: #force_inline proc "contextless" (
 milli_volts: Milli_Volts($V),
) -> Volts(V) where intrinsics.type_is_numeric(V) {
 volts := milli_volts.v / MILLI
 return Volts(V){volts}
}
// ---------------------------------------------------------------------------------------------------------------------
// ----- Conversion Overloads ------------------------
// ---------------------------------------------------------------------------------------------------------------------
// Converts Milli_Volts to Volts.
to_volts :: proc {
 milli_volts_to_volts,
}
// Converts Volts to Milli_Volts.
to_milli_volts :: proc {
 volts_to_milli_volts,
}
// ---------------------------------------------------------------------------------------------------------------------
// ----- Tests ------------------------
// ---------------------------------------------------------------------------------------------------------------------
import "core:testing"
@(test)
test_volts_to_milli_volts :: proc(t: ^testing.T) {
 testing.expect_value(t, to_milli_volts(Volts(int){1}), Milli_Volts(int){1000})
}
@(test)
test_milli_volts_to_volts :: proc(t: ^testing.T) {
 testing.expect_value(t, to_volts(Milli_Volts(int){1000}), Volts(int){1})
}

59
quantity/volume.odin Normal file
View File

@@ -0,0 +1,59 @@
package quantity
import "base:intrinsics"
//----- Liters ----------------------------------
// Volume in liters (L).
Liters :: struct($V: typeid) where intrinsics.type_is_numeric(V) {
 v: V, // magnitude
}
// L -> mL: 1 L = MILLI (1000) mL.
@(private = "file")
liters_to_milli_liters :: #force_inline proc "contextless" (
 liters: Liters($V),
) -> Milli_Liters(V) where intrinsics.type_is_numeric(V) {
 milli := liters.v * MILLI
 return Milli_Liters(V){milli}
}
//----- Milliliters ----------------------------------
// Volume in milliliters (mL).
Milli_Liters :: struct($V: typeid) where intrinsics.type_is_numeric(V) {
 v: V, // magnitude
}
// mL -> L. Integer value types truncate toward zero.
@(private = "file")
milli_liters_to_liters :: #force_inline proc "contextless" (
 milli_liters: Milli_Liters($V),
) -> Liters(V) where intrinsics.type_is_numeric(V) {
 liters := milli_liters.v / MILLI
 return Liters(V){liters}
}
// ---------------------------------------------------------------------------------------------------------------------
// ----- Conversion Overloads ------------------------
// ---------------------------------------------------------------------------------------------------------------------
// Converts Milli_Liters to Liters.
to_liters :: proc {
 milli_liters_to_liters,
}
// Converts Liters to Milli_Liters.
to_milli_liters :: proc {
 liters_to_milli_liters,
}
// ---------------------------------------------------------------------------------------------------------------------
// ----- Tests ------------------------
// ---------------------------------------------------------------------------------------------------------------------
import "core:testing"
@(test)
test_liters_to_milli_liters :: proc(t: ^testing.T) {
 testing.expect_value(t, to_milli_liters(Liters(int){12}), Milli_Liters(int){12_000})
}
@(test)
test_milli_liters_to_liters :: proc(t: ^testing.T) {
 testing.expect_value(t, to_liters(Milli_Liters(int){12_000}), Liters(int){12})
}

View File

@@ -0,0 +1,7 @@
package quantity
import "base:intrinsics"
// Volumetric flow rate in liters per minute (L/min).
Liters_Per_Minute :: struct($V: typeid) where intrinsics.type_is_numeric(V) {
 v: V, // magnitude
}

269
ring/ring.odin Normal file
View File

@@ -0,0 +1,269 @@
package ring
import "core:fmt"
// Mirrors the compiler's bounds-check setting so the manual ring index
// checks below are compiled only when bounds checking is enabled.
@(private)
ODIN_BOUNDS_CHECK :: !ODIN_NO_BOUNDS_CHECK
// Fixed-capacity ring buffer over a caller-owned slice. `len` grows until
// it reaches len(data); `_end_index` is the backing-array slot of the most
// recent append (-1 before the first append).
Ring :: struct($T: typeid) {
 data: []T,
 _end_index, len: int,
}
// SOA variant of Ring: identical bookkeeping over an #soa slice.
Ring_Soa :: struct($T: typeid) {
 data: #soa[]T,
 _end_index, len: int,
}
// Wraps an AOS slice in an empty ring; the ring does not own the memory.
from_slice_raos :: #force_inline proc(data: $T/[]$E) -> Ring(E) {
 return {data = data, _end_index = -1}
}
// Wraps an SOA slice in an empty ring; the ring does not own the memory.
from_slice_rsoa :: #force_inline proc(data: $T/#soa[]$E) -> Ring_Soa(E) {
 return {data = data, _end_index = -1}
}
from_slice :: proc {
 from_slice_raos,
 from_slice_rsoa,
}
// Index in the backing array where the ring starts
_start_index_raos :: proc(ring: Ring($T)) -> int {
 // Until the ring has wrapped, the oldest element is still in slot 0.
 if ring.len < len(ring.data) do return 0
 // Once full, the oldest element sits just past the newest write.
 start_index := ring._end_index + 1
 return 0 if start_index == len(ring.data) else start_index
}
// Index in the backing array where the ring starts
_start_index_rsoa :: proc(ring: Ring_Soa($T)) -> int {
 // Until the ring has wrapped, the oldest element is still in slot 0.
 if ring.len < len(ring.data) do return 0
 // Once full, the oldest element sits just past the newest write.
 start_index := ring._end_index + 1
 return 0 if start_index == len(ring.data) else start_index
}
// Moves the write cursor forward one slot, growing `len` until the backing
// array is saturated and wrapping the cursor at the end of the array.
advance_raos :: proc(ring: ^Ring($T)) {
 ring.len = min(ring.len + 1, len(ring.data))
 next := ring._end_index + 1
 ring._end_index = 0 if next == len(ring.data) else next
}
// Moves the write cursor forward one slot, growing `len` until the backing
// array is saturated and wrapping the cursor at the end of the array.
advance_rsoa :: proc(ring: ^Ring_Soa($T)) {
 ring.len = min(ring.len + 1, len(ring.data))
 next := ring._end_index + 1
 ring._end_index = 0 if next == len(ring.data) else next
}
advance :: proc {
 advance_raos,
 advance_rsoa,
}
// Pushes element, overwriting the oldest entry once the ring is full.
append_raos :: proc(ring: ^Ring($T), element: T) {
 advance_raos(ring)
 ring.data[ring._end_index] = element
}
// Pushes element, overwriting the oldest entry once the ring is full.
append_rsoa :: proc(ring: ^Ring_Soa($T), element: T) {
 advance_rsoa(ring)
 ring.data[ring._end_index] = element
}
append :: proc {
 append_raos,
 append_rsoa,
}
// Pointer to the element at logical position `index` (0 = oldest). Panics
// under bounds checking when index >= ring.len.
get_raos :: proc(ring: Ring($T), index: int) -> ^T {
 when ODIN_BOUNDS_CHECK {
  if index >= ring.len {
   panic(fmt.tprintf("Ring index %i out of bounds for length %i", index, ring.len))
  }
 }
 // Translate the logical index into a backing-array slot, wrapping once.
 array_index := _start_index_raos(ring) + index
 if array_index >= len(ring.data) {
  array_index -= len(ring.data)
 }
 return &ring.data[array_index]
}
// SOA can't return soa pointer to parapoly T.
// Copy of the element at logical position `index` (0 = oldest). Panics
// under bounds checking when index >= ring.len.
get_rsoa :: proc(ring: Ring_Soa($T), index: int) -> T {
 when ODIN_BOUNDS_CHECK {
  if index >= ring.len {
   panic(fmt.tprintf("Ring index %i out of bounds for length %i", index, ring.len))
  }
 }
 // Translate the logical index into a backing-array slot, wrapping once.
 array_index := _start_index_rsoa(ring) + index
 if array_index >= len(ring.data) {
  array_index -= len(ring.data)
 }
 return ring.data[array_index]
}
get :: proc {
 get_raos,
 get_rsoa,
}
// Newest element; under bounds checking this panics when the ring is empty.
get_last_raos :: #force_inline proc(ring: Ring($T)) -> ^T {
 return get_raos(ring, ring.len - 1)
}
// Newest element; under bounds checking this panics when the ring is empty.
get_last_rsoa :: #force_inline proc(ring: Ring_Soa($T)) -> T {
 return get_rsoa(ring, ring.len - 1)
}
get_last :: proc {
 get_last_raos,
 get_last_rsoa,
}
// Resets the ring to empty; the backing data is left untouched.
clear_raos :: #force_inline proc "contextless" (ring: ^Ring($T)) {
 ring._end_index = -1
 ring.len = 0
}
// Resets the ring to empty; the backing data is left untouched.
clear_rsoa :: #force_inline proc "contextless" (ring: ^Ring_Soa($T)) {
 ring._end_index = -1
 ring.len = 0
}
clear :: proc {
 clear_raos,
 clear_rsoa,
}
// ---------------------------------------------------------------------------------------------------------------------
// ----- Tests ------------------------
// ---------------------------------------------------------------------------------------------------------------------
import "core:log"
import "core:testing"
@(test)
test_ring_aos :: proc(t: ^testing.T) {
 // Walks a 10-slot AOS ring through partial fill, wrap-around, and clear.
 data := make_slice([]int, 10)
 ring := from_slice(data)
 defer delete(ring.data)
 // Partially fill: 5 of 10 slots used, no wrap yet.
 for i in 1 ..= 5 {
  append(&ring, i)
  log.debug("Length:", ring.len)
  log.debug("Start index:", _start_index_raos(ring))
  log.debug("End index:", ring._end_index)
  log.debug(ring.data)
 }
 testing.expect_value(t, get(ring, 0)^, 1)
 testing.expect_value(t, get(ring, 4)^, 5)
 testing.expect_value(t, ring.len, 5)
 testing.expect_value(t, ring._end_index, 4)
 testing.expect_value(t, _start_index_raos(ring), 0)
 // Append 10 more (6..15): the ring is now full and has wrapped, so it
 // holds 6..15 and the start has moved into the middle of the array.
 for i in 6 ..= 15 {
  append(&ring, i)
  log.debug("Length:", ring.len)
  log.debug("Start index:", _start_index_raos(ring))
  log.debug("End index:", ring._end_index)
  log.debug(ring.data)
 }
 testing.expect_value(t, get(ring, 0)^, 6)
 testing.expect_value(t, get(ring, 4)^, 10)
 testing.expect_value(t, get(ring, 9)^, 15)
 testing.expect_value(t, get_last(ring)^, 15)
 testing.expect_value(t, ring.len, 10)
 testing.expect_value(t, ring._end_index, 4)
 testing.expect_value(t, _start_index_raos(ring), 5)
 // NOTE(review): this range starts at 15, so 15 is appended a second time
 // (the previous loop ended at 15) — confirm that is intentional. After
 // these 11 appends the ring holds 16..25.
 for i in 15 ..= 25 {
  append(&ring, i)
  log.debug("Length:", ring.len)
  log.debug("Start index:", _start_index_raos(ring))
  log.debug("End index:", ring._end_index)
  log.debug(ring.data)
 }
 testing.expect_value(t, get(ring, 0)^, 16)
 testing.expect_value(t, ring._end_index, 5)
 testing.expect_value(t, get_last(ring)^, 25)
 // clear empties the ring; a fresh append lands at logical index 0.
 clear(&ring)
 append(&ring, 1)
 testing.expect_value(t, ring.len, 1)
 testing.expect_value(t, get(ring, 0)^, 1)
}
@(test)
test_ring_soa :: proc(t: ^testing.T) {
 // Same scenario as test_ring_aos, but over an #soa backing slice.
 Ints :: struct {
  x, y: int,
 }
 data := make_soa_slice(#soa[]Ints, 10)
 ring := from_slice(data)
 defer delete(ring.data)
 // Partially fill: 5 of 10 slots used, no wrap yet.
 for i in 1 ..= 5 {
  append(&ring, Ints{i, i})
  log.debug("Length:", ring.len)
  log.debug("Start index:", _start_index_rsoa(ring))
  log.debug("End index:", ring._end_index)
  log.debug(ring.data)
 }
 testing.expect_value(t, get(ring, 0), Ints{1, 1})
 testing.expect_value(t, get(ring, 4), Ints{5, 5})
 testing.expect_value(t, ring.len, 5)
 testing.expect_value(t, ring._end_index, 4)
 testing.expect_value(t, _start_index_rsoa(ring), 0)
 // Append 10 more (6..15): full and wrapped; contents are now 6..15.
 for i in 6 ..= 15 {
  append(&ring, Ints{i, i})
  log.debug("Length:", ring.len)
  log.debug("Start index:", _start_index_rsoa(ring))
  log.debug("End index:", ring._end_index)
  log.debug(ring.data)
 }
 testing.expect_value(t, get(ring, 0), Ints{6, 6})
 testing.expect_value(t, get(ring, 4), Ints{10, 10})
 testing.expect_value(t, get(ring, 9), Ints{15, 15})
 testing.expect_value(t, get_last(ring), Ints{15, 15})
 testing.expect_value(t, ring.len, 10)
 testing.expect_value(t, ring._end_index, 4)
 testing.expect_value(t, _start_index_rsoa(ring), 5)
 // NOTE(review): this range starts at 15, so 15 is appended a second time
 // (the previous loop ended at 15) — confirm that is intentional. After
 // these 11 appends the ring holds 16..25.
 for i in 15 ..= 25 {
  append(&ring, Ints{i, i})
  log.debug("Length:", ring.len)
  log.debug("Start index:", _start_index_rsoa(ring))
  log.debug("End index:", ring._end_index)
  log.debug(ring.data)
 }
 testing.expect_value(t, get(ring, 0), Ints{16, 16})
 testing.expect_value(t, ring._end_index, 5)
 testing.expect_value(t, get_last(ring), Ints{25, 25})
 // clear empties the ring; a fresh append lands at logical index 0.
 clear(&ring)
 append(&ring, Ints{1, 1})
 testing.expect_value(t, ring.len, 1)
 testing.expect_value(t, get(ring, 0), Ints{1, 1})
}

1233
vendor/libusb/libusb.odin vendored Normal file

File diff suppressed because it is too large Load Diff

43
vendor/lmdb/examples/examples.odin vendored Normal file
View File

@@ -0,0 +1,43 @@
package examples
import "core:fmt"
import "core:os"
import "core:sys/posix"
import mdb "../../lmdb"
// File mode for the database files: 0o660 (read/write for owner and group,
// nothing for others).
DB_MODE :: posix.mode_t{.IWGRP, .IRGRP, .IWUSR, .IRUSR}
// Directory (relative to the working directory) holding the example database.
DB_PATH :: "out/debug/lmdb_example_db"
// Minimal LMDB round trip: create/open an environment, write one key/value
// pair in a committed transaction, then read it back in a second
// transaction. Every LMDB call panics on error via panic_on_err.
main :: proc() {
 environment: ^mdb.Env
 // Create environment for lmdb
 mdb.panic_on_err(mdb.env_create(&environment))
 // Create directory for databases. Won't do anything if it already exists.
 // 0o774 gives all permissions for owner and group, read for everyone else.
 os.make_directory(DB_PATH, 0o774)
 // Open the database files (creates them if they don't already exist)
 mdb.panic_on_err(mdb.env_open(environment, DB_PATH, 0, DB_MODE))
 // Transactions
 txn_handle: ^mdb.Txn
 db_handle: mdb.Dbi
 // Put transaction: store key 7 -> value 12 in the unnamed database.
 key := 7
 key_val := mdb.autoval(&key)
 put_data := 12
 put_data_val := mdb.autoval(&put_data)
 mdb.panic_on_err(mdb.txn_begin(environment, nil, 0, &txn_handle))
 mdb.panic_on_err(mdb.dbi_open(txn_handle, nil, 0, &db_handle))
 mdb.panic_on_err(mdb.put(txn_handle, db_handle, &key_val.raw, &put_data_val.raw, 0))
 mdb.panic_on_err(mdb.txn_commit(txn_handle))
 // Get transaction: read the value back under the same key.
 get_data_val := mdb.nil_autoval(int)
 mdb.panic_on_err(mdb.txn_begin(environment, nil, 0, &txn_handle))
 mdb.panic_on_err(mdb.get(txn_handle, db_handle, &key_val.raw, &get_data_val.raw))
 mdb.panic_on_err(mdb.txn_commit(txn_handle))
 data_cpy := mdb.autoval_get_data(&get_data_val)^
 fmt.println("Get result:", data_cpy)
 // NOTE(review): the environment is never closed before exit; acceptable
 // for a short example, but a real program should close it.
}

1602
vendor/lmdb/lmdb.odin vendored Normal file

File diff suppressed because it is too large Load Diff