commit f10bf7e3c30382fcb0a2fcc33031bc6065443753 Author: Zachary Levy Date: Sun Mar 8 19:00:41 2026 -0700 In the beginning... diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..80ed13b --- /dev/null +++ b/.gitignore @@ -0,0 +1,7 @@ +# Executables +*.bin +*.exe +out + +# Debug +/debug diff --git a/.zed/debug.json b/.zed/debug.json new file mode 100644 index 0000000..f2a07b1 --- /dev/null +++ b/.zed/debug.json @@ -0,0 +1,12 @@ +[ + { + "label": "Debug", + "program": "out/debug/debug", + "adapter": "CodeLLDB", + "request": "launch", + "build": { + "command": "odin build debug -debug -out=out/debug/debug" + }, + "cwd": "$ZED_WORKTREE_ROOT" + } +] diff --git a/.zed/tasks.json b/.zed/tasks.json new file mode 100644 index 0000000..63f392a --- /dev/null +++ b/.zed/tasks.json @@ -0,0 +1,41 @@ +[ + // --------------------------------------------------------------------------------------------------------------------- + // ----- Odin Tests ------------------------ + // --------------------------------------------------------------------------------------------------------------------- + { + "label": "Test many_bits", + "command": "odin test many_bits -out=out/debug/test_many_bits", + "cwd": "$ZED_WORKTREE_ROOT" + }, + { + "label": "Test ring", + "command": "odin test ring -out=out/debug/test_ring", + "cwd": "$ZED_WORKTREE_ROOT" + }, + { + "label": "Test levsort", + "command": "odin test levsort -out=out/debug/test_levsort", + "cwd": "$ZED_WORKTREE_ROOT" + }, + { + "label": "Test levsync", + "command": "odin test levsync -out=out/debug/test_levsync", + "cwd": "$ZED_WORKTREE_ROOT" + }, + // --------------------------------------------------------------------------------------------------------------------- + // ----- LMDB Examples ------------------------ + // --------------------------------------------------------------------------------------------------------------------- + { + "label": "Run lmdb example", + "command": "odin run vendor/lmdb/examples 
-debug -out=out/debug/lmdb-examples", + "cwd": "$ZED_WORKTREE_ROOT" + }, + // --------------------------------------------------------------------------------------------------------------------- + // ----- Other ------------------------ + // --------------------------------------------------------------------------------------------------------------------- + { + "label": "Run debug", + "command": "odin run debug -debug -out=out/debug/debug", + "cwd": "$ZED_WORKTREE_ROOT" + } +] diff --git a/README.md b/README.md new file mode 100644 index 0000000..7fe8894 --- /dev/null +++ b/README.md @@ -0,0 +1,3 @@ +# LevLib + +Narya + BFPOWER unified Odin library collection. diff --git a/basic/basic.odin b/basic/basic.odin new file mode 100644 index 0000000..6e3315b --- /dev/null +++ b/basic/basic.odin @@ -0,0 +1,63 @@ +package basic + +import "base:runtime" +import "core:prof/spall" + +//----- Spall ---------------------------------- + +SPALL_TRACE :: #config(SPALL_TRACE, false) + +spall_ctx: spall.Context +@(thread_local) +spall_buffer: spall.Buffer + +//----- Compile globals ---------------------------------- + +ODIN_BOUNDS_CHECK :: !ODIN_NO_BOUNDS_CHECK +INT_NUM_BITS :: size_of(int) * 8 + +//----- Array ---------------------------------- + +// append unless array doesn't room, then panic +append_elem_capped :: #force_inline proc( + array: ^$T/[dynamic]$E, + arg: E, + loc := #caller_location, +) -> ( + n: int, + err: runtime.Allocator_Error, +) #optional_allocator_error { + when ODIN_BOUNDS_CHECK { + if len(array) == cap(array) { + panic("Array would have to expand to accomodate append.") + } else { + return append_elem(array, arg, loc) + } + } else { + return append_elem(array, arg, loc) + } +} + +append_soa_elem_capped :: proc( + array: ^$T/#soa[dynamic]$E, + arg: E, + loc := #caller_location, +) -> ( + n: int, + err: runtime.Allocator_Error, +) #optional_allocator_error { + when ODIN_BOUNDS_CHECK { + if len(array) == cap(array) { + panic("Array would have to expand to 
accomodate append.") + } else { + return append_soa_elem(array, arg, loc) + } + } else { + return append_soa_elem(array, arg, loc) + } +} + +append_capped :: proc { + append_elem_capped, + append_soa_elem_capped, +} diff --git a/levsort/levsort.odin b/levsort/levsort.odin new file mode 100644 index 0000000..e42e69f --- /dev/null +++ b/levsort/levsort.odin @@ -0,0 +1,2439 @@ +package levsort + +import "base:intrinsics" +import "core:math/bits" + +// Threshold for switching to heap-based selection when k is small. +@(private = "file") +HEAP_SELECT_K_THRESHOLD :: 32 + +// Threshold for switching to insertion sort for small arrays. +@(private = "file") +INSERTION_THRESHOLD :: 32 + +// MSD select threshold - below this size, finish with insertion sort. +@(private = "file") +MSD_SMALL_THRESHOLD :: 64 + +// Threshold for using 11-bit radix in LSD sort (needs enough elements to amortize larger counts array) +@(private = "file") +LSD_11BIT_THRESHOLD :: 8192 + +// Radix-based partial sort for arrays of floats. +// Sorts the smallest k elements to the front of the slice. +// Elements after position k are in unspecified order. +// +// The `allocator` parameter specifies where to allocate the temporary buffer +// used during radix sorting. +partial_sort_float :: #force_inline proc( + data: []$FLOAT, + k: int, + allocator := context.temp_allocator, +) where intrinsics.type_is_float(FLOAT) { + partial_sort_by_fkey(data, k, proc(x: FLOAT) -> FLOAT {return x}, allocator) +} + +// Radix-based partial sort using a float key extraction function. +// This is faster than comparison-based sorting for large arrays. +// The `key` procedure extracts a float value from each element for comparison. +// Sorts the smallest k elements to the front of the slice. +// Elements after position k are in unspecified order. +// +// The `allocator` parameter specifies where to allocate the temporary buffer +// used during radix sorting. 
+partial_sort_by_fkey :: proc( + data: []$T, + k: int, + key: proc(val: T) -> $FLOAT, + allocator := context.temp_allocator, +) where intrinsics.type_is_float(FLOAT) { + n := len(data) + if k <= 0 || n <= 1 do return + + k := min(k, n) + + when FLOAT == f16 { + NUM_BITS :: 16 + Key_Type :: u16 + } else when FLOAT == f32 { + NUM_BITS :: 32 + Key_Type :: u32 + } else when FLOAT == f64 { + NUM_BITS :: 64 + Key_Type :: u64 + } else { + #panic("partial_sort_by_fkey only supports f16, f32, and f64") + } + + // Algorithm selection based on k and n (integer math avoids float division) + // Using 2*k >= n for ratio >= 0.5 + if k <= HEAP_SELECT_K_THRESHOLD { + // Heap-select for small k: O(n + k log k) + // Don't allocate keys[n] - compute keys on-the-fly during heap selection + // This avoids an extra full pass over memory for the "tiny k, huge n" case + heap_keys := make([]Key_Type, k, allocator) + defer delete(heap_keys, allocator) + tmp_items := make([]T, k, allocator) + defer delete(tmp_items, allocator) + tmp_keys := make([]Key_Type, k, allocator) + defer delete(tmp_keys, allocator) + + heap_select_streaming(data, heap_keys, k, key) + radix_sort_lsd_pingpong(data[:k], heap_keys, tmp_items, tmp_keys, NUM_BITS) + } else if 2 * k >= n { + // Full radix sort when k is a large fraction of n + // Precompute sortable keys once + keys := make([]Key_Type, n, allocator) + defer delete(keys, allocator) + for i := 0; i < n; i += 1 { + keys[i] = float_to_sortable_typed(key(data[i])) + } + tmp_items := make([]T, n, allocator) + defer delete(tmp_items, allocator) + tmp_keys := make([]Key_Type, n, allocator) + defer delete(tmp_keys, allocator) + + radix_sort_lsd_pingpong(data, keys, tmp_items, tmp_keys, NUM_BITS) + } else { + // MSD select path - precompute all keys + keys := make([]Key_Type, n, allocator) + defer delete(keys, allocator) + for i := 0; i < n; i += 1 { + keys[i] = float_to_sortable_typed(key(data[i])) + } + tmp_items := make([]T, n, allocator) + defer delete(tmp_items, 
allocator) + tmp_keys := make([]Key_Type, n, allocator) + defer delete(tmp_keys, allocator) + + // MSD radix select to partition k smallest elements to front + // Pass k-1 as the 0-based rank of the kth smallest element + radix_select_msd_digit(data, keys, k - 1, NUM_BITS, tmp_items, tmp_keys) + // Sort the k smallest elements using LSD radix sort + radix_sort_lsd_pingpong(data[:k], keys[:k], tmp_items[:k], tmp_keys[:k], NUM_BITS) + } +} + +// Convert float bits to a sortable unsigned integer representation. +// For positive floats: flip the sign bit (makes them larger than negatives) +// For negative floats: flip all bits (reverses their order correctly) +// NaNs are mapped to max value so they sort to the end. +// +// Uses bit-pattern NaN detection for better performance in hot loops. +// Returns properly-sized key type to minimize memory bandwidth. +@(private = "file") +float_to_sortable_typed :: proc { + float_to_sortable_f16, + float_to_sortable_f32, + float_to_sortable_f64, +} + +@(private = "file") +float_to_sortable_f16 :: #force_inline proc "contextless" (f: f16) -> u16 { + bits := transmute(u16)f + // NaN detection: exponent all 1s (0x7C00) and mantissa nonzero (0x03FF) + exp_mask :: u16(0x7C00) + mant_mask :: u16(0x03FF) + if (bits & exp_mask) == exp_mask && (bits & mant_mask) != 0 { + return max(u16) + } + mask := u16(i16(bits) >> 15) + return bits ~ (mask | (1 << 15)) +} + +@(private = "file") +float_to_sortable_f32 :: #force_inline proc "contextless" (f: f32) -> u32 { + bits := transmute(u32)f + // NaN detection: exponent all 1s (0x7F800000) and mantissa nonzero (0x007FFFFF) + exp_mask :: u32(0x7F800000) + mant_mask :: u32(0x007FFFFF) + if (bits & exp_mask) == exp_mask && (bits & mant_mask) != 0 { + return max(u32) + } + mask := u32(i32(bits) >> 31) + return bits ~ (mask | (1 << 31)) +} + +@(private = "file") +float_to_sortable_f64 :: #force_inline proc "contextless" (f: f64) -> u64 { + bits := transmute(u64)f + // NaN detection: exponent all 1s 
(0x7FF0000000000000) and mantissa nonzero + exp_mask :: u64(0x7FF0000000000000) + mant_mask :: u64(0x000FFFFFFFFFFFFF) + if (bits & exp_mask) == exp_mask && (bits & mant_mask) != 0 { + return max(u64) + } + mask := u64(i64(bits) >> 63) + return bits ~ (mask | (1 << 63)) +} + +// Legacy version returning uint (for backward compatibility with heap_select) +@(private = "file") +float_to_sortable :: #force_inline proc "contextless" ( + f: $FLOAT, +) -> ( + result: uint, +) where intrinsics.type_is_float(FLOAT) { + when FLOAT == f16 { + return uint(float_to_sortable_typed(f)) + } else when FLOAT == f32 { + return uint(float_to_sortable_typed(f)) + } else when FLOAT == f64 { + return uint(float_to_sortable_typed(f)) + } else { + #panic("float_to_sortable only supports f16, f32, and f64") + } +} + +// Wide-digit MSD radix select - partitions data so smallest k elements are in data[:k]. +// Uses 8-11 bit digits instead of 1-bit-at-a-time for fewer passes and less memory traffic. +// k_rank is the 0-based rank of the target element (for "smallest k", pass k-1). +// Uses real ping-pong buffers - scatter src→dst, swap pointers, copy back only once at end. 
+@(private = "file") +radix_select_msd_digit :: proc( + data: []$T, + keys: []$Key, + k_rank: int, + total_bits: int, + tmp_items: []T, + tmp_keys: []Key, +) where Key == u16 || + Key == u32 || + Key == u64 { + n := len(data) + if n <= 1 || k_rank < 0 do return + + // Working range [lo, hi) and relative rank within that range + lo := 0 + hi := n + k_rel := k_rank + + // Ping-pong buffer state - swap whole arrays, copy back once at end + src_items := data + src_keys := keys + dst_items := tmp_items + dst_keys := tmp_keys + in_temp := false + + // Choose radix bits based on float type + // For f64: use 11 bits for large ranges, 8 bits otherwise + // For f32/f16: use 8 bits + radix_bits_large :: 11 + radix_bits_small :: 8 + + // Start from most significant bit + bit_pos := total_bits + + for bit_pos > 0 && hi - lo > MSD_SMALL_THRESHOLD { + m := hi - lo + + // Choose digit width based on range size and type + radix_bits: int + if total_bits == 64 && m >= 32768 { + radix_bits = radix_bits_large + } else { + radix_bits = radix_bits_small + } + + // Clamp to remaining bits + if bit_pos < radix_bits { + radix_bits = bit_pos + } + + radix_size := 1 << uint(radix_bits) + radix_mask := Key(radix_size - 1) + shift := uint(bit_pos - radix_bits) + + base_lo := lo + + // Early check: sample first few elements to detect likely single-bucket case + first_digit: Key = (src_keys[lo] >> shift) & radix_mask + all_same_digit := true + sample_end := min(lo + 16, hi) + for i := lo + 1; i < sample_end; i += 1 { + if ((src_keys[i] >> shift) & radix_mask) != first_digit { + all_same_digit = false + break + } + } + + // Full histogram with single-bucket tracking + bucket_start := 0 + bucket_end := 0 + + if radix_bits <= 8 { + counts: [256]u32 + for i := lo; i < hi; i += 1 { + digit := (src_keys[i] >> shift) & radix_mask + counts[digit] += 1 + if digit != first_digit do all_same_digit = false + } + + // Skip this digit level if all in one bucket + if all_same_digit { + bit_pos -= radix_bits + 
continue + } + + // Find the bucket containing k_rel (using > for 0-based rank) + cumsum := 0 + for d := 0; d < radix_size; d += 1 { + cd := int(counts[d]) + if cumsum + cd > k_rel { + bucket_start = cumsum + bucket_end = cumsum + cd + break + } + cumsum += cd + } + + // Convert counts to offsets for scatter + offset: u32 = 0 + for i := 0; i < radix_size; i += 1 { + c := counts[i] + counts[i] = offset + offset += c + } + + // Scatter [lo, hi) from src to dst by digit + for i := lo; i < hi; i += 1 { + digit := (src_keys[i] >> shift) & radix_mask + dst_idx := base_lo + int(counts[digit]) + dst_items[dst_idx] = src_items[i] + dst_keys[dst_idx] = src_keys[i] + counts[digit] += 1 + } + } else { + // 11-bit radix path + counts: [2048]u32 + for i := lo; i < hi; i += 1 { + digit := (src_keys[i] >> shift) & radix_mask + counts[digit] += 1 + if digit != first_digit do all_same_digit = false + } + + // Skip this digit level if all in one bucket + if all_same_digit { + bit_pos -= radix_bits + continue + } + + // Find the bucket containing k_rel + cumsum := 0 + for d := 0; d < radix_size; d += 1 { + cd := int(counts[d]) + if cumsum + cd > k_rel { + bucket_start = cumsum + bucket_end = cumsum + cd + break + } + cumsum += cd + } + + // Convert counts to offsets for scatter + offset: u32 = 0 + for i := 0; i < radix_size; i += 1 { + c := counts[i] + counts[i] = offset + offset += c + } + + // Scatter [lo, hi) from src to dst by digit + for i := lo; i < hi; i += 1 { + digit := (src_keys[i] >> shift) & radix_mask + dst_idx := base_lo + int(counts[digit]) + dst_items[dst_idx] = src_items[i] + dst_keys[dst_idx] = src_keys[i] + counts[digit] += 1 + } + } + + // Preserve untouched segments so dst becomes a full valid array + for i := 0; i < lo; i += 1 { + dst_items[i] = src_items[i] + dst_keys[i] = src_keys[i] + } + for i := hi; i < n; i += 1 { + dst_items[i] = src_items[i] + dst_keys[i] = src_keys[i] + } + + // Swap whole buffers (ping-pong) + src_items, dst_items = dst_items, src_items 
+ src_keys, dst_keys = dst_keys, src_keys + in_temp = !in_temp + + // Narrow to bucket containing k_rel + lo = base_lo + bucket_start + hi = base_lo + bucket_end + k_rel = k_rel - bucket_start + + bit_pos -= radix_bits + } + + // Finish with insertion sort on small range if needed + if hi - lo > 1 && hi - lo <= MSD_SMALL_THRESHOLD { + insertion_sort_with_keys(src_items[lo:hi], src_keys[lo:hi]) + } + + // Copy back once if we ended in temp buffers + if in_temp { + for i := 0; i < n; i += 1 { + data[i] = src_items[i] + keys[i] = src_keys[i] + } + } +} + +// LSD radix sort with ping-pong buffers - no per-pass copy overhead. +// Operates on parallel (items, keys) arrays. +// Uses ctz to skip identical low digits. +@(private = "file") +radix_sort_lsd_pingpong :: proc( + data: []$T, + keys: []$Key, + tmp_items: []T, + tmp_keys: []Key, + total_bits: int, +) where Key == u16 || + Key == u32 || + Key == u64 { + n := len(data) + if n <= 1 do return + + // For small arrays, use insertion sort + if n <= INSERTION_THRESHOLD { + insertion_sort_with_keys(data, keys) + return + } + + // Choose radix bits: 11-bit for f64 with large n, 8-bit otherwise + radix_bits: int + if total_bits == 64 && n >= LSD_11BIT_THRESHOLD { + radix_bits = 11 + } else { + radix_bits = 8 + } + + radix_mask := Key((1 << uint(radix_bits)) - 1) + + // Compute true variability mask: var_bits has a 1 wherever ANY element differs from base + // This is safe for both low and high pass skipping (unlike min^max which misses intermediate values) + base := keys[0] + var_bits: Key = 0 + for i := 1; i < n; i += 1 { + var_bits |= (keys[i] ~ base) + } + if var_bits == 0 do return // All keys identical + + // Safe skip of identical low digits + safe limit of high digits + start_pass: int + end_pass: int + when Key == u16 { + low_bit := int(bits.count_trailing_zeros(u16(var_bits))) + high_bit := 15 - int(bits.count_leading_zeros(u16(var_bits))) + start_pass = low_bit / radix_bits + end_pass = (high_bit + radix_bits) / 
radix_bits + } else when Key == u32 { + low_bit := int(bits.count_trailing_zeros(u32(var_bits))) + high_bit := 31 - int(bits.count_leading_zeros(u32(var_bits))) + start_pass = low_bit / radix_bits + end_pass = (high_bit + radix_bits) / radix_bits + } else { + low_bit := int(bits.count_trailing_zeros(u64(var_bits))) + high_bit := 63 - int(bits.count_leading_zeros(u64(var_bits))) + start_pass = low_bit / radix_bits + end_pass = (high_bit + radix_bits) / radix_bits + } + + // Ping-pong buffer state + src_items := data + src_keys := keys + dst_items := tmp_items + dst_keys := tmp_keys + in_temp := false + + // Process passes from start_pass to end_pass (skipping identical low digits) + if radix_bits == 8 { + for pass := start_pass; pass < end_pass; pass += 1 { + shift := uint(pass * radix_bits) + + // Count occurrences, tracking if all same digit + counts: [256]u32 + first_digit := (src_keys[0] >> shift) & radix_mask + all_same := true + + for i := 0; i < n; i += 1 { + digit := (src_keys[i] >> shift) & radix_mask + counts[digit] += 1 + if digit != first_digit do all_same = false + } + + // Skip pass if all elements in one bucket + if all_same do continue + + // Convert counts to offsets (prefix sum) + offset: u32 = 0 + for i := 0; i < 256; i += 1 { + c := counts[i] + counts[i] = offset + offset += c + } + + // Scatter from src to dst + for i := 0; i < n; i += 1 { + digit := (src_keys[i] >> shift) & radix_mask + idx := int(counts[digit]) + dst_items[idx] = src_items[i] + dst_keys[idx] = src_keys[i] + counts[digit] += 1 + } + + // Swap src and dst (ping-pong) + src_items, dst_items = dst_items, src_items + src_keys, dst_keys = dst_keys, src_keys + in_temp = !in_temp + } + } else { + // 11-bit radix path + for pass := start_pass; pass < end_pass; pass += 1 { + shift := uint(pass * radix_bits) + + // Count occurrences, tracking if all same digit + counts: [2048]u32 + first_digit := (src_keys[0] >> shift) & radix_mask + all_same := true + + for i := 0; i < n; i += 1 { + 
digit := (src_keys[i] >> shift) & radix_mask + counts[digit] += 1 + if digit != first_digit do all_same = false + } + + // Skip pass if all elements in one bucket + if all_same do continue + + // Convert counts to offsets (prefix sum) + offset: u32 = 0 + for i := 0; i < 2048; i += 1 { + c := counts[i] + counts[i] = offset + offset += c + } + + // Scatter from src to dst + for i := 0; i < n; i += 1 { + digit := (src_keys[i] >> shift) & radix_mask + idx := int(counts[digit]) + dst_items[idx] = src_items[i] + dst_keys[idx] = src_keys[i] + counts[digit] += 1 + } + + // Swap src and dst (ping-pong) + src_items, dst_items = dst_items, src_items + src_keys, dst_keys = dst_keys, src_keys + in_temp = !in_temp + } + } + + // If result ended up in temp buffer, copy back to original + if in_temp { + for i := 0; i < n; i += 1 { + data[i] = src_items[i] + keys[i] = src_keys[i] + } + } +} + +// Streaming heap-based selection - computes keys on-the-fly. +// Avoids allocating keys[n] for the "tiny k, huge n" case. +// heap_keys is pre-allocated to size k. 
+@(private = "file") +heap_select_streaming :: proc( + data: []$T, + heap_keys: []$Key, + k: int, + key_fn: proc(_: T) -> $FLOAT, +) where Key == u16 || + Key == u32 || + Key == u64, + intrinsics.type_is_float(FLOAT) { + n := len(data) + if k <= 0 || n <= 1 do return + + k := min(k, n) + + // Initialize heap with first k elements, computing keys on the fly + for i := 0; i < k; i += 1 { + heap_keys[i] = float_to_sortable_typed(key_fn(data[i])) + } + + // Build max-heap from first k elements + for i := k / 2 - 1; i >= 0; i -= 1 { + sift_down_max_with_keys(data[:k], heap_keys[:k], i, k) + } + + // Scan remaining elements, computing key for each candidate + for i := k; i < n; i += 1 { + candidate_key := float_to_sortable_typed(key_fn(data[i])) + if candidate_key < heap_keys[0] { + // Swap item into heap root + data[0], data[i] = data[i], data[0] + heap_keys[0] = candidate_key + sift_down_max_with_keys(data[:k], heap_keys[:k], 0, k) + } + } + + // The k smallest are now in data[:k] with their keys in heap_keys[:k] + // Sorting will be done by the caller with radix sort +} + +// Heap-based selection using precomputed keys (for non-heap paths that already have keys). 
+@(private = "file") +heap_select_with_keys :: proc( + data: []$T, + keys: []$Key, + k: int, +) where Key == u16 || + Key == u32 || + Key == u64 { + n := len(data) + if k <= 0 || n <= 1 do return + + k := min(k, n) + + // Build max-heap from first k elements + for i := k / 2 - 1; i >= 0; i -= 1 { + sift_down_max_with_keys(data[:k], keys[:k], i, k) + } + + // Scan remaining elements, replace max if smaller found + for i := k; i < n; i += 1 { + if keys[i] < keys[0] { + data[0], data[i] = data[i], data[0] + keys[0], keys[i] = keys[i], keys[0] + sift_down_max_with_keys(data[:k], keys[:k], 0, k) + } + } + + // The k smallest are now in data[:k] but not sorted +} + +@(private = "file") +sift_down_max_with_keys :: proc( + data: []$T, + keys: []$Key, + root: int, + n: int, +) where Key == u16 || + Key == u32 || + Key == u64 { + i := root + for { + largest := i + left := 2 * i + 1 + right := 2 * i + 2 + + if left < n && keys[left] > keys[largest] { + largest = left + } + if right < n && keys[right] > keys[largest] { + largest = right + } + + if largest == i do break + + data[i], data[largest] = data[largest], data[i] + keys[i], keys[largest] = keys[largest], keys[i] + i = largest + } +} + +// Insertion sort with parallel keys array. 
+@(private = "file") +insertion_sort_with_keys :: proc(data: []$T, keys: []$Key) where Key == u16 || Key == u32 || Key == u64 { + n := len(data) + for i := 1; i < n; i += 1 { + temp_item := data[i] + temp_key := keys[i] + j := i - 1 + for j >= 0 && keys[j] > temp_key { + data[j + 1] = data[j] + keys[j + 1] = keys[j] + j -= 1 + } + data[j + 1] = temp_item + keys[j + 1] = temp_key + } +} + +// Legacy heap select (kept for potential direct usage) +@(private = "file") +heap_select :: proc(data: []$T, k: int, key: proc(_: T) -> $FLOAT) where intrinsics.type_is_float(FLOAT) { + n := len(data) + if k <= 0 || n <= 1 do return + + k := min(k, n) + + // Build max-heap from first k elements + heap := data[:k] + for i := k / 2 - 1; i >= 0; i -= 1 { + sift_down_max_by_key(heap, i, k, key) + } + + // Scan remaining elements, replace max if smaller found + for i := k; i < n; i += 1 { + if float_to_sortable(key(data[i])) < float_to_sortable(key(heap[0])) { + heap[0], data[i] = data[i], heap[0] + sift_down_max_by_key(heap, 0, k, key) + } + } + + // Extract elements from heap in sorted order (heapsort the prefix) + for i := k - 1; i > 0; i -= 1 { + heap[0], heap[i] = heap[i], heap[0] + sift_down_max_by_key(heap[:i], 0, i, key) + } +} + +@(private = "file") +sift_down_max_by_key :: proc( + heap: []$T, + root: int, + n: int, + key: proc(_: T) -> $FLOAT, +) where intrinsics.type_is_float(FLOAT) { + i := root + for { + largest := i + left := 2 * i + 1 + right := 2 * i + 2 + + if left < n && float_to_sortable(key(heap[left])) > float_to_sortable(key(heap[largest])) { + largest = left + } + if right < n && float_to_sortable(key(heap[right])) > float_to_sortable(key(heap[largest])) { + largest = right + } + + if largest == i do break + heap[i], heap[largest] = heap[largest], heap[i] + i = largest + } +} + +// --------------------------------------------------------------------------------------------------------------------- +// ----- Testing 
------------------------------------------------------------------------------------------------------- +// --------------------------------------------------------------------------------------------------------------------- +import "core:math" +import "core:math/rand" +import "core:slice" +import "core:sort" +import "core:testing" + +//----- Helper Procedures ---------------------------------- + +@(private = "file") +f64_key :: proc(x: f64) -> f64 {return x} + +@(private = "file") +f32_key :: proc(x: f32) -> f32 {return x} + +@(private = "file") +f16_key :: proc(x: f16) -> f16 {return x} + +@(private = "file") +is_prefix_sorted :: proc(data: []$T, k: int) -> bool { + if k <= 1 do return true + for i := 0; i < k - 1; i += 1 { + if data[i] > data[i + 1] do return false + } + return true +} + +@(private = "file") +partition_property_holds :: proc(data: []$T, k: int) -> bool { + if k <= 0 || k >= len(data) do return true + + max_in_prefix := data[0] + for i := 1; i < k; i += 1 { + if data[i] > max_in_prefix do max_in_prefix = data[i] + } + + for i := k; i < len(data); i += 1 { + if data[i] < max_in_prefix do return false + } + return true +} + +@(private = "file") +has_correct_elements :: proc(original: []$T, result: []T, k: int) -> bool { + if k <= 0 do return true + + truth := make([]T, len(original)) + defer delete(truth) + copy(truth, original) + sort.quick_sort(truth) + + result_prefix := make([]T, k) + defer delete(result_prefix) + copy(result_prefix, result[:k]) + sort.quick_sort(result_prefix) + + for i := 0; i < k; i += 1 { + if result_prefix[i] != truth[i] do return false + } + return true +} + +@(private = "file") +elements_preserved :: proc(original: []$T, result: []T) -> bool { + if len(original) != len(result) do return false + + orig_sorted := make([]T, len(original)) + defer delete(orig_sorted) + copy(orig_sorted, original) + sort.quick_sort(orig_sorted) + + res_sorted := make([]T, len(result)) + defer delete(res_sorted) + copy(res_sorted, result) + 
sort.quick_sort(res_sorted) + + for i := 0; i < len(orig_sorted); i += 1 { + if orig_sorted[i] != res_sorted[i] do return false + } + return true +} + +@(private = "file") +validate_partial_sort :: proc(original: []$T, result: []T, k: int) -> (ok: bool, reason: string) { + k := min(k, len(result)) + + if !elements_preserved(original, result) { + return false, "Elements were lost or corrupted" + } + if !is_prefix_sorted(result, k) { + return false, "Prefix is not sorted" + } + if !partition_property_holds(result, k) { + return false, "Partition property violated" + } + if !has_correct_elements(original, result, k) { + return false, "Wrong elements in prefix" + } + return true, "" +} + +//----- partial_sort_float wrapper test ---------------------------------- + +@(test) +test_partial_sort_float_wrapper :: proc(t: ^testing.T) { + size := 100 + original := make([]f64, size) + defer delete(original) + for i := 0; i < size; i += 1 { + original[i] = f64(rand.int31() % 1000) + } + + data1 := make([]f64, size) + defer delete(data1) + copy(data1, original) + + data2 := make([]f64, size) + defer delete(data2) + copy(data2, original) + + k := 25 + + partial_sort_float(data1, k) + partial_sort_by_fkey(data2, k, f64_key) + + for i := 0; i < k; i += 1 { + testing.expectf( + t, + data1[i] == data2[i], + "Mismatch at position %d: partial_sort_float=%v, partial_sort_by_fkey=%v", + i, + data1[i], + data2[i], + ) + } +} + +//----- Edge case tests ---------------------------------- + +@(test) +test_empty_array :: proc(t: ^testing.T) { + data: []f64 + partial_sort_float(data, 5) + testing.expect(t, len(data) == 0, "Empty array should remain empty") +} + +@(test) +test_k_zero :: proc(t: ^testing.T) { + original := []f64{5, 3, 8, 1, 9} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + + partial_sort_float(data, 0) + + for i := 0; i < len(data); i += 1 { + testing.expectf(t, data[i] == original[i], "k=0 should not modify array, index %d changed", i) + } +} 
+ +@(test) +test_k_negative :: proc(t: ^testing.T) { + original := []f64{5, 3, 8, 1, 9} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + + partial_sort_float(data, -5) + + for i := 0; i < len(data); i += 1 { + testing.expectf(t, data[i] == original[i], "Negative k should not modify array, index %d changed", i) + } +} + +@(test) +test_single_element :: proc(t: ^testing.T) { + data := []f64{42} + partial_sort_float(data, 1) + testing.expect(t, data[0] == 42, "Single element should be unchanged") +} + +@(test) +test_k_one :: proc(t: ^testing.T) { + original := []f64{9, 5, 2, 8, 1, 7, 3} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + + partial_sort_float(data, 1) + + testing.expect(t, data[0] == 1, "First element should be minimum when k=1") + testing.expect(t, elements_preserved(original, data), "Elements should be preserved") +} + +@(test) +test_k_equals_length :: proc(t: ^testing.T) { + original := []f64{5, 2, 8, 1, 9, 3, 7, 4, 6} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + + partial_sort_float(data, len(data)) + + testing.expect(t, slice.is_sorted(data), "k=n should fully sort the array") + testing.expect(t, elements_preserved(original, data), "Elements should be preserved") +} + +@(test) +test_k_exceeds_length :: proc(t: ^testing.T) { + original := []f64{5, 2, 8, 1, 9} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + + partial_sort_float(data, 100) + + testing.expect(t, slice.is_sorted(data), "k>n should fully sort the array") + testing.expect(t, elements_preserved(original, data), "Elements should be preserved") +} + +@(test) +test_two_elements_sorted :: proc(t: ^testing.T) { + data := []f64{1, 2} + partial_sort_float(data, 1) + testing.expect(t, data[0] == 1, "Minimum should be first") +} + +@(test) +test_two_elements_reversed :: proc(t: ^testing.T) { + data := []f64{2, 1} + partial_sort_float(data, 1) + testing.expect(t, 
data[0] == 1, "Minimum should be first after sort") +} + +@(test) +test_two_elements_k_two :: proc(t: ^testing.T) { + data := []f64{2, 1} + partial_sort_float(data, 2) + testing.expect(t, data[0] == 1 && data[1] == 2, "Two elements should be fully sorted") +} + +//----- Input pattern tests ---------------------------------- + +@(test) +test_already_sorted :: proc(t: ^testing.T) { + original := []f64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 5 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Already sorted input failed: %s", reason) + + for i := 0; i < k; i += 1 { + testing.expectf(t, data[i] == f64(i + 1), "Expected %v at position %d, got %v", f64(i + 1), i, data[i]) + } +} + +@(test) +test_reverse_sorted :: proc(t: ^testing.T) { + original := []f64{10, 9, 8, 7, 6, 5, 4, 3, 2, 1} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 5 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Reverse sorted input failed: %s", reason) +} + +@(test) +test_all_equal_elements :: proc(t: ^testing.T) { + original := []f64{7, 7, 7, 7, 7, 7, 7, 7} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 4 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "All equal elements failed: %s", reason) + + for i := 0; i < len(data); i += 1 { + testing.expect(t, data[i] == 7, "Element value changed unexpectedly") + } +} + +@(test) +test_two_distinct_values :: proc(t: ^testing.T) { + original := []f64{1, 0, 1, 0, 1, 0, 1, 0} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 4 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Two distinct values failed: 
%s", reason) + + for i := 0; i < k; i += 1 { + testing.expectf(t, data[i] == 0, "Expected 0 at position %d, got %v", i, data[i]) + } +} + +@(test) +test_many_duplicates :: proc(t: ^testing.T) { + original := []f64{3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8, 9, 7, 9} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 7 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Many duplicates failed: %s", reason) +} + +@(test) +test_negative_numbers :: proc(t: ^testing.T) { + original := []f64{-5, 3, -8, 0, 2, -1, 7, -3} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 4 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Negative numbers failed: %s", reason) + + expected := []f64{-8, -5, -3, -1} + for i := 0; i < k; i += 1 { + testing.expectf(t, data[i] == expected[i], "Expected %v at position %d, got %v", expected[i], i, data[i]) + } +} + +@(test) +test_mixed_positive_negative_zero :: proc(t: ^testing.T) { + original := []f64{0, -1, 1, 0, -2, 2, 0, -3, 3} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 5 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Mixed positive/negative/zero failed: %s", reason) +} + +//----- Worst-case pattern tests ---------------------------------- + +@(test) +test_pipe_organ_pattern :: proc(t: ^testing.T) { + original := []f64{1, 2, 3, 4, 5, 5, 4, 3, 2, 1} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 5 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Pipe organ pattern failed: %s", reason) +} + +@(test) +test_sawtooth_pattern :: proc(t: ^testing.T) { + original := []f64{10, 1, 9, 2, 8, 3, 7, 4, 6, 5} + data := make([]f64, 
len(original)) + defer delete(data) + copy(data, original) + k := 5 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Sawtooth pattern failed: %s", reason) +} + +@(test) +test_median_of_three_killer :: proc(t: ^testing.T) { + original := make([]f64, 16) + defer delete(original) + + for i := 0; i < 8; i += 1 { + original[i] = f64(i + 1) + original[15 - i] = f64(i + 1) + } + + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 8 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Median-of-three killer failed: %s", reason) +} + +@(test) +test_all_same_except_one_small :: proc(t: ^testing.T) { + original := []f64{9, 9, 9, 9, 1, 9, 9, 9, 9, 9} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 3 + + partial_sort_float(data, k) + + testing.expect(t, data[0] == 1, "Single minimum should be first") + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "All same except one failed: %s", reason) +} + +@(test) +test_all_same_except_one_large :: proc(t: ^testing.T) { + original := []f64{1, 1, 1, 1, 9, 1, 1, 1, 1, 1} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 3 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "All same except one large failed: %s", reason) + + for i := 0; i < k; i += 1 { + testing.expect(t, data[i] == 1, "Prefix should contain only 1s") + } +} + +//----- Floating point specific tests ---------------------------------- + +@(test) +test_float_basic :: proc(t: ^testing.T) { + original := []f64{3.14, 2.71, 1.41, 1.73, 2.23, 0.57} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 3 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, 
ok, "Float basic failed: %s", reason) +} + +@(test) +test_float_with_negative :: proc(t: ^testing.T) { + original := []f64{-1.5, 2.5, -3.5, 0.0, 1.0, -0.5} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 3 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Float with negative failed: %s", reason) +} + +@(test) +test_float_very_close_values :: proc(t: ^testing.T) { + original := []f64{1.0000001, 1.0000002, 1.0000000, 1.0000003, 1.0000004} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 3 + + partial_sort_float(data, k) + + testing.expect(t, is_prefix_sorted(data, k), "Prefix should be sorted") + testing.expect(t, elements_preserved(original, data), "Elements should be preserved") +} + +@(test) +test_float_subnormal :: proc(t: ^testing.T) { + // Test with subnormal (denormalized) floats + original := []f64{1e-310, 1e-320, 1e-300, 1e-315, 0.0} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 3 + + partial_sort_float(data, k) + + testing.expect(t, is_prefix_sorted(data, k), "Prefix should be sorted with subnormals") + testing.expect(t, elements_preserved(original, data), "Elements should be preserved") +} + +@(test) +test_float_infinity :: proc(t: ^testing.T) { + inf := math.inf_f64(1) + neg_inf := math.inf_f64(-1) + original := []f64{inf, 0, neg_inf, 1, -1, inf} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 3 + + partial_sort_float(data, k) + + testing.expect(t, data[0] == neg_inf, "Negative infinity should be first") + testing.expect(t, is_prefix_sorted(data, k), "Prefix should be sorted") +} + +@(test) +test_float_negative_zero :: proc(t: ^testing.T) { + neg_zero := transmute(f64)u64(1 << 63) + pos_zero := f64(0.0) + original := []f64{1.0, neg_zero, -1.0, pos_zero, 0.5} + data := make([]f64, len(original)) + defer delete(data) + copy(data, 
original) + k := 3 + + partial_sort_float(data, k) + + testing.expect(t, is_prefix_sorted(data, k), "Prefix should be sorted") + testing.expect(t, elements_preserved(original, data), "Elements should be preserved") +} + +@(test) +test_float_nan_positive :: proc(t: ^testing.T) { + nan := math.nan_f64() + original := []f64{3.0, nan, 1.0, 5.0, 2.0, nan} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 3 + + partial_sort_float(data, k) + + // NaNs should be at the end, not in the prefix + for i := 0; i < k; i += 1 { + testing.expectf(t, !math.is_nan(data[i]), "NaN should not be in prefix at position %d", i) + } + // The k smallest non-NaN values should be in the prefix + testing.expect(t, data[0] == 1.0, "First should be 1.0") + testing.expect(t, data[1] == 2.0, "Second should be 2.0") + testing.expect(t, data[2] == 3.0, "Third should be 3.0") +} + +@(test) +test_float_nan_negative :: proc(t: ^testing.T) { + // Create a negative NaN (sign bit set) + neg_nan := transmute(f64)(u64(0xFFF8_0000_0000_0000)) + pos_nan := math.nan_f64() + original := []f64{3.0, neg_nan, 1.0, pos_nan, -5.0, 2.0} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 3 + + partial_sort_float(data, k) + + // Neither positive nor negative NaNs should be in the prefix + for i := 0; i < k; i += 1 { + testing.expectf(t, !math.is_nan(data[i]), "NaN should not be in prefix at position %d", i) + } + // The k smallest non-NaN values should be in the prefix: -5, 1, 2 + testing.expect(t, data[0] == -5.0, "First should be -5.0") + testing.expect(t, data[1] == 1.0, "Second should be 1.0") + testing.expect(t, data[2] == 2.0, "Third should be 2.0") +} + +@(test) +test_float_all_nan :: proc(t: ^testing.T) { + nan := math.nan_f64() + neg_nan := transmute(f64)(u64(0xFFF8_0000_0000_0000)) + original := []f64{nan, neg_nan, nan, neg_nan} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 2 + + 
partial_sort_float(data, k) + + // All elements are NaN, so prefix will contain NaNs + // Just verify no crash and elements preserved + for i := 0; i < len(data); i += 1 { + testing.expect(t, math.is_nan(data[i]), "All elements should still be NaN") + } +} + +@(test) +test_float_nan_with_infinity :: proc(t: ^testing.T) { + nan := math.nan_f64() + inf := math.inf_f64(1) + neg_inf := math.inf_f64(-1) + original := []f64{inf, nan, neg_inf, 0.0, nan, 1.0} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 3 + + partial_sort_float(data, k) + + // NaNs should sort after +infinity, so prefix should have: neg_inf, 0, 1 + for i := 0; i < k; i += 1 { + testing.expectf(t, !math.is_nan(data[i]), "NaN should not be in prefix at position %d", i) + } + testing.expect(t, data[0] == neg_inf, "First should be -inf") + testing.expect(t, data[1] == 0.0, "Second should be 0.0") + testing.expect(t, data[2] == 1.0, "Third should be 1.0") +} + +//----- f32 tests ---------------------------------- + +@(test) +test_f32_basic :: proc(t: ^testing.T) { + original := []f32{3.14, 2.71, 1.41, 1.73, 2.23, 0.57} + data := make([]f32, len(original)) + defer delete(data) + copy(data, original) + k := 3 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "f32 basic failed: %s", reason) +} + +@(test) +test_f32_with_negative :: proc(t: ^testing.T) { + original := []f32{-1.5, 2.5, -3.5, 0.0, 1.0, -0.5} + data := make([]f32, len(original)) + defer delete(data) + copy(data, original) + k := 3 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "f32 with negative failed: %s", reason) +} + +@(test) +test_f32_large_array :: proc(t: ^testing.T) { + size := 1000 + original := make([]f32, size) + defer delete(original) + for i := 0; i < size; i += 1 { + original[i] = f32(rand.int31() % 10000) - 5000 + } + + data := make([]f32, size) + defer 
delete(data) + copy(data, original) + k := 50 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "f32 large array failed: %s", reason) +} + +//----- f16 tests ---------------------------------- + +@(test) +test_f16_basic :: proc(t: ^testing.T) { + original := []f16{3.14, 2.71, 1.41, 1.73, 2.23, 0.57} + data := make([]f16, len(original)) + defer delete(data) + copy(data, original) + k := 3 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "f16 basic failed: %s", reason) +} + +@(test) +test_f16_with_negative :: proc(t: ^testing.T) { + original := []f16{-1.5, 2.5, -3.5, 0.0, 1.0, -0.5} + data := make([]f16, len(original)) + defer delete(data) + copy(data, original) + k := 3 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "f16 with negative failed: %s", reason) +} + +@(test) +test_f32_nan :: proc(t: ^testing.T) { + nan := math.nan_f32() + neg_nan := transmute(f32)(u32(0xFFC0_0000)) + original := []f32{3.0, nan, 1.0, neg_nan, -5.0, 2.0} + data := make([]f32, len(original)) + defer delete(data) + copy(data, original) + k := 3 + + partial_sort_float(data, k) + + // Neither positive nor negative NaNs should be in the prefix + for i := 0; i < k; i += 1 { + testing.expectf(t, !math.is_nan(data[i]), "NaN should not be in prefix at position %d", i) + } + // The k smallest non-NaN values should be in the prefix: -5, 1, 2 + testing.expect(t, data[0] == -5.0, "First should be -5.0") + testing.expect(t, data[1] == 1.0, "Second should be 1.0") + testing.expect(t, data[2] == 2.0, "Third should be 2.0") +} + +//----- Early termination / all-equal tests ---------------------------------- + +@(test) +test_all_equal_early_termination :: proc(t: ^testing.T) { + // This tests the early termination optimization when all elements are equal + size := 1000 + original := make([]f64, size) + defer 
delete(original) + for i := 0; i < size; i += 1 { + original[i] = 42.0 + } + + data := make([]f64, size) + defer delete(data) + copy(data, original) + k := 100 + + partial_sort_float(data, k) + + // All elements should still be 42.0 + for i := 0; i < size; i += 1 { + testing.expectf(t, data[i] == 42.0, "Element at %d changed from 42.0 to %v", i, data[i]) + } +} + +@(test) +test_mostly_equal_with_outliers :: proc(t: ^testing.T) { + // Tests partition behavior when most elements are equal + size := 100 + original := make([]f64, size) + defer delete(original) + for i := 0; i < size; i += 1 { + original[i] = 5.0 + } + // Add a few outliers + original[10] = 1.0 + original[50] = 2.0 + original[90] = 3.0 + + data := make([]f64, size) + defer delete(data) + copy(data, original) + k := 5 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Mostly equal with outliers failed: %s", reason) + + // First 3 should be the outliers + testing.expect(t, data[0] == 1.0, "First should be 1.0") + testing.expect(t, data[1] == 2.0, "Second should be 2.0") + testing.expect(t, data[2] == 3.0, "Third should be 3.0") +} + +//----- Boundary K value tests ---------------------------------- + +@(test) +test_k_equals_length_minus_one :: proc(t: ^testing.T) { + original := []f64{5, 1, 9, 3, 7, 2, 8, 4, 6} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := len(data) - 1 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "k=n-1 failed: %s", reason) +} + +@(test) +test_k_half_length :: proc(t: ^testing.T) { + original := []f64{10, 2, 8, 4, 6, 5, 7, 3, 9, 1} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := len(data) / 2 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "k=n/2 failed: %s", reason) +} + +//----- Randomized stress tests 
---------------------------------- + +@(test) +test_random_small_arrays :: proc(t: ^testing.T) { + for size := 2; size <= 20; size += 1 { + for k := 1; k <= size; k += 1 { + original := make([]f64, size) + defer delete(original) + for i := 0; i < size; i += 1 { + original[i] = f64(rand.int31() % 100) + } + + data := make([]f64, size) + defer delete(data) + copy(data, original) + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Random small (size=%d, k=%d) failed: %s", size, k, reason) + } + } +} + +@(test) +test_random_medium_array :: proc(t: ^testing.T) { + size := 1000 + + for trial := 0; trial < 10; trial += 1 { + original := make([]f64, size) + defer delete(original) + for i := 0; i < size; i += 1 { + original[i] = f64(rand.int31()) + } + + for k in ([]int{1, 10, 50, 100, 500, 999, 1000}) { + data := make([]f64, size) + defer delete(data) + copy(data, original) + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Random medium (trial=%d, k=%d) failed: %s", trial, k, reason) + } + } +} + +@(test) +test_random_large_array :: proc(t: ^testing.T) { + size := 10000 + + original := make([]f64, size) + defer delete(original) + for i := 0; i < size; i += 1 { + original[i] = f64(rand.int31()) + } + + for k in ([]int{1, 10, 100, 1000, 5000, 9999, 10000}) { + data := make([]f64, size) + defer delete(data) + copy(data, original) + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Random large (k=%d) failed: %s", k, reason) + } +} + +//----- Regression / specific bug pattern tests ---------------------------------- + +@(test) +test_three_elements_all_permutations :: proc(t: ^testing.T) { + perms := [][3]f64{{1, 2, 3}, {1, 3, 2}, {2, 1, 3}, {2, 3, 1}, {3, 1, 2}, {3, 2, 1}} + + for perm in perms { + for k := 1; k <= 3; k += 1 { + data := []f64{perm[0], perm[1], perm[2]} + 
original := []f64{perm[0], perm[1], perm[2]} + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Permutation %v with k=%d failed: %s", perm, k, reason) + } + } +} + +@(test) +test_duplicate_at_kth_position :: proc(t: ^testing.T) { + original := []f64{1, 3, 3, 3, 5, 6, 7} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 4 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Duplicate at kth position failed: %s", reason) + + testing.expect(t, data[0] == 1, "First element should be 1") + count_threes := 0 + for i := 1; i < k; i += 1 { + if data[i] == 3 do count_threes += 1 + } + testing.expect(t, count_threes == 3, "Should have exactly 3 threes in prefix") +} + +//----- Special sequence tests ---------------------------------- + +@(test) +test_fibonacci_sequence :: proc(t: ^testing.T) { + original := []f64{1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 6 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Fibonacci sequence failed: %s", reason) +} + +@(test) +test_powers_of_two_reversed :: proc(t: ^testing.T) { + original := []f64{256, 128, 64, 32, 16, 8, 4, 2, 1} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 5 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Powers of two reversed failed: %s", reason) + + expected := []f64{1, 2, 4, 8, 16} + for i := 0; i < k; i += 1 { + testing.expectf(t, data[i] == expected[i], "Expected %v at position %d, got %v", expected[i], i, data[i]) + } +} + +@(test) +test_arithmetic_sequence :: proc(t: ^testing.T) { + original := make([]f64, 20) + defer delete(original) + for i := 0; i < 20; i += 1 { + original[i] = 
f64(100 - i * 5) + } + + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 10 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Arithmetic sequence failed: %s", reason) +} + +//----- Float boundary tests ---------------------------------- + +@(test) +test_max_min_float :: proc(t: ^testing.T) { + original := []f64{max(f64), -max(f64), 0, max(f64), -max(f64), 1, -1} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 4 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Max/min float failed: %s", reason) +} + +//----- Repeated partial sort tests ---------------------------------- + +@(test) +test_idempotent :: proc(t: ^testing.T) { + original := []f64{9, 1, 8, 2, 7, 3, 6, 4, 5} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 5 + + partial_sort_float(data, k) + first_result := make([]f64, len(data)) + defer delete(first_result) + copy(first_result, data) + + partial_sort_float(data, k) + + for i := 0; i < k; i += 1 { + testing.expectf(t, data[i] == first_result[i], "Partial sort not idempotent at position %d", i) + } +} + +@(test) +test_increasing_k :: proc(t: ^testing.T) { + original := []f64{10, 2, 8, 4, 6, 1, 9, 3, 7, 5} + + for k := 1; k <= len(original); k += 1 { + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Increasing k=%d failed: %s", k, reason) + } +} + +//----- Key extraction tests (partial_sort_by_fkey) ---------------------------------- + +@(test) +test_struct_field_f64_key :: proc(t: ^testing.T) { + Item :: struct { + priority: f64, + name: string, + } + + original := []Item{{5.0, "e"}, {2.0, "b"}, {8.0, "h"}, {1.0, "a"}, {9.0, "i"}, {3.0, "c"}} + data := make([]Item, 
len(original)) + defer delete(data) + copy(data, original) + k := 3 + + partial_sort_by_fkey(data, k, proc(x: Item) -> f64 { + return x.priority + }) + + testing.expect(t, data[0].priority == 1, "First should have priority 1") + testing.expect(t, data[1].priority == 2, "Second should have priority 2") + testing.expect(t, data[2].priority == 3, "Third should have priority 3") + + for i := 0; i < k - 1; i += 1 { + testing.expect(t, data[i].priority <= data[i + 1].priority, "Prefix should be sorted by priority") + } +} + +@(test) +test_struct_field_f32_key :: proc(t: ^testing.T) { + Item :: struct { + score: f32, + id: int, + } + + original := []Item{{5.5, 0}, {2.2, 1}, {8.8, 2}, {1.1, 3}, {9.9, 4}, {3.3, 5}} + data := make([]Item, len(original)) + defer delete(data) + copy(data, original) + k := 3 + + partial_sort_by_fkey(data, k, proc(x: Item) -> f32 { + return x.score + }) + + testing.expect(t, data[0].score == 1.1, "First should have score 1.1") + testing.expect(t, data[1].score == 2.2, "Second should have score 2.2") + testing.expect(t, data[2].score == 3.3, "Third should have score 3.3") +} + +@(test) +test_negative_key :: proc(t: ^testing.T) { + // Sort by negative value (effectively descending order) + original := []f64{1, 5, 2, 8, 3, 9, 4} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 3 + + partial_sort_by_fkey(data, k, proc(x: f64) -> f64 { + return -x + }) + + // Should get largest 3 values in descending order + testing.expect(t, data[0] == 9, "First should be 9") + testing.expect(t, data[1] == 8, "Second should be 8") + testing.expect(t, data[2] == 5, "Third should be 5") +} + +@(test) +test_absolute_value_key :: proc(t: ^testing.T) { + original := []f64{-5, 3, -1, 4, -2, 0, 2, -3} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 4 + + partial_sort_by_fkey(data, k, proc(x: f64) -> f64 { + return x if x >= 0 else -x + }) + + abs_values := make([]f64, k) + defer 
delete(abs_values) + for i := 0; i < k; i += 1 { + abs_values[i] = data[i] if data[i] >= 0 else -data[i] + } + + testing.expect(t, abs_values[0] == 0, "First should be 0") + testing.expect(t, abs_values[1] == 1, "Second should have abs value 1") + testing.expect(t, abs_values[2] == 2, "Third should have abs value 2") + testing.expect(t, abs_values[3] == 2, "Fourth should have abs value 2") + + for i := 0; i < k - 1; i += 1 { + testing.expect(t, abs_values[i] <= abs_values[i + 1], "Prefix should be sorted by absolute value") + } +} + +@(test) +test_squared_key :: proc(t: ^testing.T) { + // Sort by squared value + original := []f64{-3, 1, -2, 0, 2, -1, 3} + data := make([]f64, len(original)) + defer delete(data) + copy(data, original) + k := 4 + + partial_sort_by_fkey(data, k, proc(x: f64) -> f64 { + return x * x + }) + + // The 4 smallest squares are 0, 1, 1, 4 (from 0, 1, -1, 2 or -2) + squared := make([]f64, k) + defer delete(squared) + for i := 0; i < k; i += 1 { + squared[i] = data[i] * data[i] + } + + testing.expect(t, squared[0] == 0, "First squared should be 0") + testing.expect(t, squared[1] == 1, "Second squared should be 1") + testing.expect(t, squared[2] == 1, "Third squared should be 1") + testing.expect(t, squared[3] == 4, "Fourth squared should be 4") +} + +//----- Pre-computed sort path tests (large k) ---------------------------------- + +@(test) +test_precompute_sort_path :: proc(t: ^testing.T) { + // This test specifically exercises the pre-computed keys sort path + // k >= PRECOMPUTE_SORT_THRESHOLD (256) + size := 1000 + original := make([]f64, size) + defer delete(original) + for i := 0; i < size; i += 1 { + original[i] = f64(rand.int31() % 10000) - 5000 + } + + data := make([]f64, size) + defer delete(data) + copy(data, original) + k := 300 // >= 256, triggers SIMD path + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Precompute sort path failed: %s", reason) +} + +@(test) 
+test_precompute_sort_f32 :: proc(t: ^testing.T) { + size := 500 + original := make([]f32, size) + defer delete(original) + for i := 0; i < size; i += 1 { + original[i] = f32(rand.int31() % 10000) - 5000 + } + + data := make([]f32, size) + defer delete(data) + copy(data, original) + k := 300 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Precompute sort f32 failed: %s", reason) +} + +@(test) +test_precompute_sort_with_nan :: proc(t: ^testing.T) { + size := 500 + nan := math.nan_f64() + neg_nan := transmute(f64)(u64(0xFFF8_0000_0000_0000)) + + original := make([]f64, size) + defer delete(original) + for i := 0; i < size; i += 1 { + original[i] = f64(rand.int31() % 10000) - 5000 + } + // Sprinkle in some NaNs + original[10] = nan + original[100] = neg_nan + original[200] = nan + original[300] = neg_nan + + data := make([]f64, size) + defer delete(data) + copy(data, original) + k := 300 + + partial_sort_float(data, k) + + // Verify no NaNs in prefix (they should sort to end) + nan_in_prefix := false + for i := 0; i < k; i += 1 { + if math.is_nan(data[i]) do nan_in_prefix = true + } + testing.expect(t, !nan_in_prefix, "NaNs should not be in prefix with precompute sort") +} + +@(test) +test_precompute_sort_all_negative :: proc(t: ^testing.T) { + size := 500 + original := make([]f64, size) + defer delete(original) + for i := 0; i < size; i += 1 { + original[i] = -f64(rand.int31() % 10000) - 1 + } + + data := make([]f64, size) + defer delete(data) + copy(data, original) + k := 300 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Precompute sort all negative failed: %s", reason) +} + +//----- Heap selection path tests (small k, large n) ---------------------------------- + +@(test) +test_heap_select_path :: proc(t: ^testing.T) { + // This test specifically exercises the heap selection path + // k < HEAP_SELECT_K_THRESHOLD (64) and n > 
HEAP_SELECT_N_THRESHOLD (131072) + size := 150000 // > 131072 + original := make([]f64, size) + defer delete(original) + for i := 0; i < size; i += 1 { + original[i] = f64(rand.int31()) + } + + data := make([]f64, size) + defer delete(data) + copy(data, original) + k := 10 // < 64, triggers heap path + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Heap select path failed: %s", reason) +} + +@(test) +test_heap_select_with_key :: proc(t: ^testing.T) { + Item :: struct { + value: f64, + id: int, + } + + size := 150000 + original := make([]Item, size) + defer delete(original) + for i := 0; i < size; i += 1 { + original[i] = Item{f64(rand.int31()), i} + } + + data := make([]Item, size) + defer delete(data) + copy(data, original) + k := 20 + + partial_sort_by_fkey(data, k, proc(x: Item) -> f64 { + return x.value + }) + + // Verify prefix is sorted by value + for i := 0; i < k - 1; i += 1 { + testing.expectf(t, data[i].value <= data[i + 1].value, "Heap select prefix not sorted at %d", i) + } +} + +@(test) +test_heap_select_negative_values :: proc(t: ^testing.T) { + size := 150000 + original := make([]f64, size) + defer delete(original) + for i := 0; i < size; i += 1 { + original[i] = f64(rand.int31()) - f64(max(i32) / 2) + } + + data := make([]f64, size) + defer delete(data) + copy(data, original) + k := 32 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Heap select with negatives failed: %s", reason) +} + +//----- Skip empty passes tests ---------------------------------- + +@(test) +test_skip_empty_passes_uniform :: proc(t: ^testing.T) { + // All elements have same lower bits, should skip some radix passes + size := 100 + original := make([]f64, size) + defer delete(original) + for i := 0; i < size; i += 1 { + // Values that differ only in higher bits + original[i] = f64(i) * 1000.0 + } + + data := make([]f64, size) + defer 
delete(data) + copy(data, original) + k := 20 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Skip empty passes uniform failed: %s", reason) +} + +@(test) +test_skip_empty_passes_integers :: proc(t: ^testing.T) { + // Integer values as floats - lower mantissa bits are zero + size := 100 + original := make([]f64, size) + defer delete(original) + for i := 0; i < size; i += 1 { + original[i] = f64(size - i) + } + + data := make([]f64, size) + defer delete(data) + copy(data, original) + k := 30 + + partial_sort_float(data, k) + + ok, reason := validate_partial_sort(original, data, k) + testing.expectf(t, ok, "Skip empty passes integers failed: %s", reason) + + // Verify exact order + for i := 0; i < k; i += 1 { + testing.expectf(t, data[i] == f64(i + 1), "Expected %v at %d, got %v", f64(i + 1), i, data[i]) + } +} + +//----- Key extraction tests (partial_sort_by_fkey) ---------------------------------- + +@(test) +test_large_struct_array_by_key :: proc(t: ^testing.T) { + Record :: struct { + data: [10]int, + score: f64, + } + + size := 500 + original := make([]Record, size) + defer delete(original) + for i := 0; i < size; i += 1 { + original[i].score = f64(rand.int31() % 10000) + } + + data := make([]Record, size) + defer delete(data) + copy(data, original) + k := 20 + + partial_sort_by_fkey(data, k, proc(x: Record) -> f64 { + return x.score + }) + + // Verify prefix is sorted by score + for i := 0; i < k - 1; i += 1 { + testing.expectf( + t, + data[i].score <= data[i + 1].score, + "Prefix not sorted at position %d: %v > %v", + i, + data[i].score, + data[i + 1].score, + ) + } + + // Verify partition property + if k < size { + max_in_prefix := data[0].score + for i := 1; i < k; i += 1 { + if data[i].score > max_in_prefix do max_in_prefix = data[i].score + } + for i := k; i < size; i += 1 { + testing.expectf(t, data[i].score >= max_in_prefix, "Partition property violated at %d", i) + } + } +} + +//----- 
//----- var_bits ctz skip tests ----------------------------------

// Shared driver for the partial-sort tests below: copies `original` into a
// fresh buffer, runs partial_sort_float on the copy, validates the result,
// and hands the sorted buffer back so callers can assert on exact contents.
// `msg` is the printf-style failure format; it receives the validation reason.
// The caller owns (and must delete) the returned slice.
@(private = "file")
_check_partial_sort :: proc(t: ^testing.T, original: []f64, k: int, msg: string) -> []f64 {
	data := make([]f64, len(original))
	copy(data, original)

	partial_sort_float(data, k)

	ok, reason := validate_partial_sort(original, data, k)
	testing.expectf(t, ok, msg, reason)
	return data
}

@(test)
test_var_bits_low_bits_identical :: proc(t: ^testing.T) {
	// Keys that differ only in high bits - should skip low passes via ctz(var_bits)
	size := 200
	original := make([]f64, size)
	defer delete(original)
	// Values 256, 512, 768, ... differ only in bits 8+ (low byte is 0)
	for i in 0 ..< size {
		original[i] = f64((i + 1) * 256)
	}

	data := _check_partial_sort(t, original, 50, "var_bits low bits identical failed: %s")
	delete(data)
}

@(test)
test_var_bits_high_bits_identical :: proc(t: ^testing.T) {
	// Keys that differ only in low bits - should limit high passes via clz(var_bits)
	size := 200
	original := make([]f64, size)
	defer delete(original)
	// Small positive values that only differ in low bits
	for i in 0 ..< size {
		original[i] = f64(i % 256) + 1000.0
	}

	data := _check_partial_sort(t, original, 50, "var_bits high bits identical failed: %s")
	delete(data)
}

@(test)
test_var_bits_single_differing_bit :: proc(t: ^testing.T) {
	// Only one bit differs across all elements
	size := 100
	original := make([]f64, size)
	defer delete(original)
	for i in 0 ..< size {
		original[i] = 1000.0 if i % 2 == 0 else 1001.0
	}

	k := 50
	data := _check_partial_sort(t, original, k, "var_bits single differing bit failed: %s")
	defer delete(data)

	// First 50 should all be 1000.0
	for i in 0 ..< k {
		testing.expectf(t, data[i] == 1000.0, "Expected 1000.0 at %d, got %v", i, data[i])
	}
}

//----- MSD ping-pong tests ----------------------------------

@(test)
test_msd_multiple_levels :: proc(t: ^testing.T) {
	// Force multiple MSD levels by having widely distributed values
	size := 1000
	original := make([]f64, size)
	defer delete(original)
	for i in 0 ..< size {
		original[i] = f64(rand.int31())
	}

	// k = 100 > 32 (heap threshold), 2*100 < 1000 (triggers MSD path)
	data := _check_partial_sort(t, original, 100, "MSD multiple levels failed: %s")
	delete(data)
}

@(test)
test_msd_narrow_range :: proc(t: ^testing.T) {
	// Values in narrow range - MSD should terminate quickly
	size := 500
	original := make([]f64, size)
	defer delete(original)
	for i in 0 ..< size {
		original[i] = f64(rand.int31() % 1000) + 50000.0
	}

	data := _check_partial_sort(t, original, 100, "MSD narrow range failed: %s")
	delete(data)
}

@(test)
test_msd_to_insertion_sort :: proc(t: ^testing.T) {
	// Force MSD to fall through to insertion sort on small range
	size := 200
	original := make([]f64, size)
	defer delete(original)
	for i in 0 ..< size {
		original[i] = f64(rand.int31() % 100)
	}

	data := _check_partial_sort(t, original, 50, "MSD to insertion sort failed: %s")
	delete(data)
}

//----- 11-bit radix threshold tests ----------------------------------

@(test)
test_11bit_threshold_below :: proc(t: ^testing.T) {
	// n = 8000 < 8192, should use 8-bit radix
	size := 8000
	original := make([]f64, size)
	defer delete(original)
	for i in 0 ..< size {
		original[i] = f64(rand.int31())
	}

	// Full sort (k == n) to exercise the LSD path
	data := _check_partial_sort(t, original, size, "11-bit threshold below failed: %s")
	delete(data)
}

@(test)
test_11bit_threshold_at :: proc(t: ^testing.T) {
	// n = 8192, should use 11-bit radix for f64
	size := 8192
	original := make([]f64, size)
	defer delete(original)
	for i in 0 ..< size {
		original[i] = f64(rand.int31())
	}

	data := _check_partial_sort(t, original, size, "11-bit threshold at failed: %s")
	delete(data)
}

@(test)
test_11bit_threshold_above :: proc(t: ^testing.T) {
	// n = 10000 > 8192, should use 11-bit radix for f64
	size := 10000
	original := make([]f64, size)
	defer delete(original)
	for i in 0 ..< size {
		original[i] = f64(rand.int31())
	}

	data := _check_partial_sort(t, original, size, "11-bit threshold above failed: %s")
	delete(data)
}

//----- Streaming heap edge cases ----------------------------------

@(test)
test_streaming_heap_k_at_threshold :: proc(t: ^testing.T) {
	// k = 32 exactly at HEAP_SELECT_K_THRESHOLD
	size := 10000
	original := make([]f64, size)
	defer delete(original)
	for i in 0 ..< size {
		original[i] = f64(rand.int31())
	}

	data := _check_partial_sort(t, original, 32, "Streaming heap k at threshold failed: %s")
	delete(data)
}

@(test)
test_streaming_heap_k_above_threshold :: proc(t: ^testing.T) {
	// k = 33 just above HEAP_SELECT_K_THRESHOLD, should use MSD path
	size := 10000
	original := make([]f64, size)
	defer delete(original)
	for i in 0 ..< size {
		original[i] = f64(rand.int31())
	}

	data := _check_partial_sort(t, original, 33, "Streaming heap k above threshold failed: %s")
	delete(data)
}

@(test)
test_streaming_heap_all_same_then_one_smaller :: proc(t: ^testing.T) {
	// Test heap replacement logic: all same values then one smaller
	size := 10000
	original := make([]f64, size)
	defer delete(original)
	for i in 0 ..< size {
		original[i] = 100.0
	}
	original[size - 1] = 1.0 // One smaller at the end

	data := _check_partial_sort(t, original, 10, "Streaming heap one smaller failed: %s")
	defer delete(data)

	testing.expect(t, data[0] == 1.0, "Smallest element should be first")
}

@(test)
test_streaming_heap_descending :: proc(t: ^testing.T) {
	// Worst case for heap: descending order means every element replaces heap root
	size := 10000
	original := make([]f64, size)
	defer delete(original)
	for i in 0 ..< size {
		original[i] = f64(size - i)
	}

	k := 20
	data := _check_partial_sort(t, original, k, "Streaming heap descending failed: %s")
	defer delete(data)

	// Verify exact values
	for i in 0 ..< k {
		testing.expectf(t, data[i] == f64(i + 1), "Expected %d at position %d, got %v", i + 1, i, data[i])
	}
}
package levsync

import "base:intrinsics"

// Arithmetic operation selector for atomic_float_op_cas.
@(private)
Flop :: enum {
	Add,
	Subtract,
	Multiply,
	Divide,
}

// Atomic read-modify-write for float types, implemented as an integer CAS loop
// over the value's bit pattern (hardware provides no native float atomics for
// these operations).
// Returns the value at `dst` that was atomically replaced by the result of the operation.
@(private)
atomic_float_op_cas :: #force_inline proc "contextless" (
	dst: ^$FLOAT,
	val: FLOAT,
	$OP: Flop,
	$ORDER: intrinsics.Atomic_Memory_Order,
) -> FLOAT where intrinsics.type_is_float(FLOAT) {
	// Select the same-width unsigned carrier type once; all bit-level work
	// below goes through it, avoiding a per-type transmute ladder.
	when FLOAT == f16 {
		INT :: u16
	} else when FLOAT == f32 {
		INT :: u32
	} else when FLOAT == f64 {
		INT :: u64
	} else {
		#panic("atomic_float_op only supports f16, f32, and f64")
	}
	dst_i := cast(^INT)(dst)

	// Seed the expected bits with a single atomic load. On failure the CAS
	// intrinsic returns the current value, so retries need no extra load.
	old_i := intrinsics.atomic_load_explicit(dst_i, .Relaxed)
	for {
		old_f := transmute(FLOAT)old_i
		when OP == .Add {
			new_f := old_f + val
		} else when OP == .Subtract {
			new_f := old_f - val
		} else when OP == .Multiply {
			new_f := old_f * val
		} else when OP == .Divide {
			new_f := old_f / val
		} else {
			#panic("Flop support not yet added for operation. This should never happen.")
		}

		// Setting order of compare exchange success alone guarantees overall order of the flop.
		prev, ok := intrinsics.atomic_compare_exchange_weak_explicit(
			dst_i,
			old_i,
			transmute(INT)new_f,
			ORDER,
			.Relaxed,
		)
		if ok do return old_f
		old_i = prev
	}
}

// Returns the value at `dst` that was atomically replaced by the result of the operation.
atomic_add_float :: #force_inline proc "contextless" (
	dst: ^$FLOAT,
	val: FLOAT,
	$ORDER: intrinsics.Atomic_Memory_Order,
) -> FLOAT where intrinsics.type_is_float(FLOAT) {
	return atomic_float_op_cas(dst, val, .Add, ORDER)
}

// Returns the value at `dst` that was atomically replaced by the result of the operation.
atomic_sub_float :: #force_inline proc "contextless" (
	dst: ^$FLOAT,
	val: FLOAT,
	$ORDER: intrinsics.Atomic_Memory_Order,
) -> FLOAT where intrinsics.type_is_float(FLOAT) {
	return atomic_float_op_cas(dst, val, .Subtract, ORDER)
}

// Returns the value at `dst` that was atomically replaced by the result of the operation.
atomic_mul_float :: #force_inline proc "contextless" (
	dst: ^$FLOAT,
	val: FLOAT,
	$ORDER: intrinsics.Atomic_Memory_Order,
) -> FLOAT where intrinsics.type_is_float(FLOAT) {
	return atomic_float_op_cas(dst, val, .Multiply, ORDER)
}

// Returns the value at `dst` that was atomically replaced by the result of the operation.
atomic_div_float :: #force_inline proc "contextless" (
	dst: ^$FLOAT,
	val: FLOAT,
	$ORDER: intrinsics.Atomic_Memory_Order,
) -> FLOAT where intrinsics.type_is_float(FLOAT) {
	return atomic_float_op_cas(dst, val, .Divide, ORDER)
}

// ---------------------------------------------------------------------------------------------------------------------
// ----- Tests ------------------------
// ---------------------------------------------------------------------------------------------------------------------
import "core:sync"
import "core:testing"
import "core:thread"

@(test)
test_concurrent_atomic_add_no_lost_updates :: proc(t: ^testing.T) {
	// Multiple threads will each add 1.0 this many times.
	// If any updates are lost due to race conditions, the final sum will be wrong.
	NUM_THREADS :: 8
	ITERATIONS_PER_THREAD :: 10_000

	shared_value: f64 = 0.0
	barrier: sync.Barrier
	sync.barrier_init(&barrier, NUM_THREADS)

	Thread_Data :: struct {
		value:   ^f64,
		barrier: ^sync.Barrier,
	}

	thread_proc :: proc(th: ^thread.Thread) {
		ctx := cast(^Thread_Data)th.data
		// Wait for all threads to be ready before starting
		sync.barrier_wait(ctx.barrier)

		for _ in 0 ..< ITERATIONS_PER_THREAD {
			atomic_add_float(ctx.value, 1.0, .Relaxed)
		}
	}

	thread_data := Thread_Data{&shared_value, &barrier}

	threads: [NUM_THREADS]^thread.Thread
	for &th in threads {
		th = thread.create(thread_proc)
		th.data = &thread_data
	}
	for th in threads {
		thread.start(th)
	}
	for th in threads {
		thread.join(th)
		thread.destroy(th)
	}

	expected := f64(NUM_THREADS * ITERATIONS_PER_THREAD)
	testing.expect_value(t, shared_value, expected)
}

@(test)
test_concurrent_atomic_sub_no_lost_updates :: proc(t: ^testing.T) {
	// Start with a known value, multiple threads subtract.
	// If any updates are lost due to race conditions, the final result will be wrong.
	NUM_THREADS :: 8
	ITERATIONS_PER_THREAD :: 10_000

	shared_value: f64 = f64(NUM_THREADS * ITERATIONS_PER_THREAD)
	barrier: sync.Barrier
	sync.barrier_init(&barrier, NUM_THREADS)

	Thread_Data :: struct {
		value:   ^f64,
		barrier: ^sync.Barrier,
	}

	thread_proc :: proc(th: ^thread.Thread) {
		ctx := cast(^Thread_Data)th.data
		// Wait for all threads to be ready before starting
		sync.barrier_wait(ctx.barrier)

		for _ in 0 ..< ITERATIONS_PER_THREAD {
			atomic_sub_float(ctx.value, 1.0, .Relaxed)
		}
	}

	thread_data := Thread_Data{&shared_value, &barrier}

	threads: [NUM_THREADS]^thread.Thread
	for &th in threads {
		th = thread.create(thread_proc)
		th.data = &thread_data
	}
	for th in threads {
		thread.start(th)
	}
	for th in threads {
		thread.join(th)
		thread.destroy(th)
	}

	testing.expect_value(t, shared_value, 0.0)
}

@(test)
test_concurrent_atomic_mul_div_round_trip :: proc(t: ^testing.T) {
	// Each thread multiplies by 2.0 then divides by 2.0.
	// Since these are inverses, the final value should equal the starting value
	// regardless of how operations interleave.
	NUM_THREADS :: 8
	ITERATIONS_PER_THREAD :: 10_000

	shared_value: f64 = 1000.0 // Start with a value that won't underflow/overflow
	barrier: sync.Barrier
	sync.barrier_init(&barrier, NUM_THREADS)

	Thread_Data :: struct {
		value:   ^f64,
		barrier: ^sync.Barrier,
	}

	thread_proc :: proc(th: ^thread.Thread) {
		ctx := cast(^Thread_Data)th.data
		// Wait for all threads to be ready before starting
		sync.barrier_wait(ctx.barrier)

		for _ in 0 ..< ITERATIONS_PER_THREAD {
			atomic_mul_float(ctx.value, 2.0, .Relaxed)
			atomic_div_float(ctx.value, 2.0, .Relaxed)
		}
	}

	thread_data := Thread_Data{&shared_value, &barrier}

	threads: [NUM_THREADS]^thread.Thread
	for &th in threads {
		th = thread.create(thread_proc)
		th.data = &thread_data
	}
	for th in threads {
		thread.start(th)
	}
	for th in threads {
		thread.join(th)
		thread.destroy(th)
	}

	testing.expect_value(t, shared_value, 1000.0)
}

@(test)
test_atomic_add_with_f32 :: proc(t: ^testing.T) {
	// Verify the f32 type dispatch works correctly under contention.
	// Same approach as the f64 add test but with f32.
	NUM_THREADS :: 8
	ITERATIONS_PER_THREAD :: 10_000

	shared_value: f32 = 0.0
	barrier: sync.Barrier
	sync.barrier_init(&barrier, NUM_THREADS)

	Thread_Data :: struct {
		value:   ^f32,
		barrier: ^sync.Barrier,
	}

	thread_proc :: proc(th: ^thread.Thread) {
		ctx := cast(^Thread_Data)th.data
		// Wait for all threads to be ready before starting
		sync.barrier_wait(ctx.barrier)

		for _ in 0 ..< ITERATIONS_PER_THREAD {
			atomic_add_float(ctx.value, 1.0, .Relaxed)
		}
	}

	thread_data := Thread_Data{&shared_value, &barrier}

	threads: [NUM_THREADS]^thread.Thread
	for &th in threads {
		th = thread.create(thread_proc)
		th.data = &thread_data
	}
	for th in threads {
		thread.start(th)
	}
	for th in threads {
		thread.join(th)
		thread.destroy(th)
	}

	expected := f32(NUM_THREADS * ITERATIONS_PER_THREAD)
	testing.expect_value(t, shared_value, expected)
}

@(test)
test_atomic_release_acquire_publish_visibility :: proc(t: ^testing.T) {
	// Tests that the memory order passed to atomic_float_op's CAS success condition
	// provides full ordering guarantees for the entire float operation.
	//
	// Both sides use atomic_add_float (not raw intrinsics) to verify:
	// - Release on CAS success publishes prior non-atomic writes
	// - Acquire on CAS success makes those writes visible to the reader
	//
	// NOTE: This test may pass even with Relaxed ordering on x86 due to its strong memory model.
	// On ARM or other weak-memory architectures, using Relaxed here would likely cause failures.
	NUM_READERS :: 4

	Shared_State :: struct {
		flag:           f64,
		// Padding to avoid false sharing between flag and data
		_padding:       [64]u8,
		published_data: [4]int,
	}

	shared: Shared_State
	barrier: sync.Barrier
	sync.barrier_init(&barrier, NUM_READERS + 1) // +1 for writer

	Reader_Data :: struct {
		shared:     ^Shared_State,
		barrier:    ^sync.Barrier,
		saw_data:   bool,
		data_valid: bool,
	}

	Writer_Data :: struct {
		shared:  ^Shared_State,
		barrier: ^sync.Barrier,
	}

	writer_proc :: proc(th: ^thread.Thread) {
		ctx := cast(^Writer_Data)th.data
		sync.barrier_wait(ctx.barrier)

		// Write data that readers will verify
		ctx.shared.published_data[0] = 42
		ctx.shared.published_data[1] = 43
		ctx.shared.published_data[2] = 44
		ctx.shared.published_data[3] = 45

		// Release via the float op: CAS success ordering must publish all writes above
		atomic_add_float(&ctx.shared.flag, 1.0, .Release)
	}

	reader_proc :: proc(th: ^thread.Thread) {
		ctx := cast(^Reader_Data)th.data
		sync.barrier_wait(ctx.barrier)

		// Spin using the float op with Acquire ordering.
		// Adding 0.0 is a no-op on the value but exercises the full CAS loop.
		// When the CAS succeeds with Acquire, all writes before the writer's Release must be visible.
		for {
			old := atomic_add_float(&ctx.shared.flag, 0.0, .Acquire)
			if old > 0.0 do break
			intrinsics.cpu_relax()
		}

		// If the CAS success ordering provides full guarantees, we MUST see all published data
		ctx.saw_data = true
		d0 := ctx.shared.published_data[0]
		d1 := ctx.shared.published_data[1]
		d2 := ctx.shared.published_data[2]
		d3 := ctx.shared.published_data[3]

		ctx.data_valid = (d0 == 42 && d1 == 43 && d2 == 44 && d3 == 45)
	}

	writer_data := Writer_Data{&shared, &barrier}
	reader_data: [NUM_READERS]Reader_Data
	for &rd in reader_data {
		rd = Reader_Data{&shared, &barrier, false, false}
	}

	writer_thread := thread.create(writer_proc)
	writer_thread.data = &writer_data

	reader_threads: [NUM_READERS]^thread.Thread
	for &th, i in reader_threads {
		th = thread.create(reader_proc)
		th.data = &reader_data[i]
	}

	thread.start(writer_thread)
	for th in reader_threads {
		thread.start(th)
	}

	thread.join(writer_thread)
	thread.destroy(writer_thread)
	for th in reader_threads {
		thread.join(th)
		thread.destroy(th)
	}

	// Verify all readers saw the data correctly
	for rd, i in reader_data {
		testing.expectf(t, rd.saw_data, "Reader %d didn't observe the flag", i)
		testing.expectf(t, rd.data_valid, "Reader %d saw flag but data was not visible (memory ordering bug)", i)
	}
}

package many_bits

import "base:builtin"
import "base:intrinsics"
import "core:fmt"
import "core:slice"

@(private)
ODIN_BOUNDS_CHECK :: !ODIN_NO_BOUNDS_CHECK
// Number of bits in system uint
UINT_NUM_BITS :: size_of(uint) * 8
UINT_MAX: uint : 1 << UINT_NUM_BITS - 1
// Power to which 2 is raised to get the size of uint in bits
// For bitshift division which gives index of integer in int_bits_array
INDEX_SHIFT :: uint(intrinsics.count_trailing_zeros(UINT_NUM_BITS))
// Value to & overall index by to get bit position
BIT_POS_MASK :: UINT_NUM_BITS - 1

Int_Bits :: bit_set[0 ..< UINT_NUM_BITS;uint]

// Use `core:container.Bit_Array` if dynamic length is needed.
// This has a more specific purpose.
Bits :: struct {
	int_array: []Int_Bits,
	length:    int, // Total number of bits being stored
}

// Frees the backing word slice. Must use the same allocator passed to `make`.
delete :: proc(using bits: Bits, allocator := context.allocator) {
	delete_slice(int_array, allocator)
}

// Allocates a fixed-length bit array of `length` bits, all initially 0.
make :: proc(#any_int length: int, allocator := context.allocator) -> Bits {
	return Bits {
		int_array = make_slice([]Int_Bits, ((length - 1) >> INDEX_SHIFT) + 1, allocator),
		length = length,
	}
}

// Sets all bits to 0 (false)
zero :: #force_inline proc(bits: Bits) {
	slice.zero(bits.int_array)
}

// Sets the bit at `index` to `set_to`.
set :: #force_inline proc(bits: Bits, #any_int index: int, set_to: bool) {
	when ODIN_BOUNDS_CHECK {
		// Guard both ends: a negative index would silently address the wrong word.
		if index < 0 || index >= bits.length {
			panic(fmt.tprintf("Bit position %i out of bounds for length %i.", index, bits.length))
		}
	}
	if set_to == true {
		bits.int_array[index >> INDEX_SHIFT] += {index & BIT_POS_MASK}
	} else {
		bits.int_array[index >> INDEX_SHIFT] -= {index & BIT_POS_MASK}
	}
}

// Sets the bit at `index` to 1 (true).
set_true :: #force_inline proc(bits: Bits, #any_int index: int) {
	when ODIN_BOUNDS_CHECK {
		if index < 0 || index >= bits.length {
			panic(fmt.tprintf("Bit position %i out of bounds for length %i.", index, bits.length))
		}
	}
	bits.int_array[index >> INDEX_SHIFT] += {index & BIT_POS_MASK}
}

set_one :: set_true

// Sets the bit at `index` to 0 (false).
set_false :: #force_inline proc(bits: Bits, #any_int index: int) {
	when ODIN_BOUNDS_CHECK {
		if index < 0 || index >= bits.length {
			panic(fmt.tprintf("Bit position %i out of bounds for length %i.", index, bits.length))
		}
	}
	bits.int_array[index >> INDEX_SHIFT] -= {index & BIT_POS_MASK}
}

set_zero :: set_false

// Returns the bit at `index`.
get :: #force_inline proc(bits: Bits, #any_int index: int) -> bool {
	when ODIN_BOUNDS_CHECK {
		if index < 0 || index >= bits.length {
			panic(fmt.tprintf("Bit position %i out of bounds for length %i.", index, bits.length))
		}
	}
	return (index & BIT_POS_MASK) in bits.int_array[index >> INDEX_SHIFT]
}

// Returns true if all bits in range [start, end) are set [start is inclusive, end is exclusive)
range_true :: proc(bits: Bits, #any_int start, end: int) -> bool {
	when ODIN_BOUNDS_CHECK {
		if start < 0 {
			panic(fmt.tprintf("Start %i is negative.", start))
		}
		if start > end {
			panic(fmt.tprintf("Start %i is greater than end %i.", start, end))
		}
		if end > bits.length {
			panic(fmt.tprintf("End %i out of bounds for length %i.", end, bits.length))
		}
	}

	// Empty range is vacuously true
	if start == end do return true

	start_u := uint(start)
	end_u := uint(end)

	start_word := start_u >> INDEX_SHIFT
	end_word := (end_u - 1) >> INDEX_SHIFT

	start_bit := start_u & BIT_POS_MASK
	end_bit := end_u & BIT_POS_MASK // end is exclusive; 0 means "to end of word"

	// Range is within a single word
	if start_word == end_word {
		word := transmute(uint)bits.int_array[start_word]

		low_mask: uint = (uint(1) << start_bit) - 1
		// Branchless: when end_bit == 0 the high mask must cover the whole word.
		high_mask: uint = ((uint(1) << end_bit) - 1) | (UINT_MAX * uint(end_bit == 0))

		mask := high_mask & ~low_mask
		return word & mask == mask
	}

	// Range spans multiple words

	// First word: [start_bit, UINT_NUM_BITS)
	if start_bit != 0 {
		first_word := transmute(uint)bits.int_array[start_word]
		start_mask: uint = ~((uint(1) << start_bit) - 1)
		if first_word & start_mask != start_mask {
			return false
		}
		start_word += 1
	}

	// Last word: [0, end_bit)
	// If end_bit == 0, we need the whole last word, so include it in the middle scan.
	if end_bit != 0 {
		last_word := transmute(uint)bits.int_array[end_word]
		end_mask: uint = (uint(1) << end_bit) - 1
		if last_word & end_mask != end_mask {
			return false
		}
	} else {
		end_word += 1
	}

	// Middle words: all bits must be set
	for i := start_word; i < end_word; i += 1 {
		if transmute(uint)bits.int_array[i] != UINT_MAX {
			return false
		}
	}

	return true
}

range_ones :: range_true

// Returns true if every bit in the array is set.
all_true :: proc(bits: Bits) -> bool {
	// Empty bit array is vacuously true
	if bits.length == 0 do return true

	bit_index := uint(bits.length - 1)
	int_index := bit_index >> INDEX_SHIFT
	// The last int needs special treatment because we only want to check part of it
	last_bit_pos := bit_index & BIT_POS_MASK
	last_bit_mask: uint = (1 << (last_bit_pos + 1)) - 1
	int_val := transmute(uint)bits.int_array[int_index]
	if int_val & last_bit_mask != last_bit_mask {
		return false
	}
	if int_index == 0 { 	// If there was only 1 int in the array
		return true
	}
	int_index -= 1

	// All other ints should be all 1s
	for {
		int_val := transmute(uint)bits.int_array[int_index]
		if int_val != UINT_MAX {
			return false
		}

		if int_index == 0 {
			return true
		}
		int_index -= 1
	}
}

all_ones :: all_true

// Returns ok = false if there are no 1 bits in the entire array.
nearest_true :: proc(bits: Bits, index: int) -> (nearest: int, ok: bool) {
	when ODIN_BOUNDS_CHECK {
		if index < 0 || index >= bits.length {
			panic(fmt.tprintf("Bit position %i out of bounds for length %i.", index, bits.length))
		}
	}

	bit_index := uint(index)
	word_index := bit_index >> INDEX_SHIFT
	bit_pos := bit_index & BIT_POS_MASK

	word_index_int := int(word_index)
	total_words := len(bits.int_array)
	max_left := word_index_int
	max_right := total_words - 1 - word_index_int
	max_offset := max(max_left, max_right)

	// Fast path: a set bit exists in the word containing `index`.
	word_val := transmute(uint)bits.int_array[word_index_int]
	if word_val != 0 {
		if (word_val & (uint(1) << bit_pos)) != 0 do return index, true

		left_mask := (uint(1) << bit_pos) | ((uint(1) << bit_pos) - 1)
		left_bits_value := word_val & left_mask

		right_mask := ~((uint(1) << bit_pos) - 1)
		right_bits_value := word_val & right_mask

		nearest_left := 0
		left_found := false
		if left_bits_value != 0 {
			left_offset_from_top := intrinsics.count_leading_zeros(left_bits_value)
			left_bit := (UINT_NUM_BITS - 1) - left_offset_from_top
			nearest_left = (word_index_int << INDEX_SHIFT) + int(left_bit)
			left_found = true
		}

		nearest_right := 0
		right_found := false
		if right_bits_value != 0 {
			right_offset := intrinsics.count_trailing_zeros(right_bits_value)
			nearest_right = (word_index_int << INDEX_SHIFT) + int(right_offset)
			right_found = true
		}

		if left_found && right_found {
			left_dist := index - nearest_left
			right_dist := nearest_right - index
			if left_dist <= right_dist {
				return nearest_left, true
			} else {
				return nearest_right, true
			}
		} else if left_found {
			return nearest_left, true
		} else if right_found {
			return nearest_right, true
		}
	}

	// Expanding word-by-word search outward from the center word.
	for offset := 1; offset <= max_offset; offset += 1 {
		right_found := false
		left_found := false
		nearest_right := 0
		nearest_left := 0
		right_dist := 0
		left_dist := 0

		right_index := word_index_int + offset
		if right_index < total_words {
			word_val := transmute(uint)bits.int_array[right_index]
			if word_val != 0 {
				right_offset := intrinsics.count_trailing_zeros(word_val)
				nearest_right = (right_index << INDEX_SHIFT) + int(right_offset)
				right_found = true
				right_dist = nearest_right - index
			}
		}

		left_index := word_index_int - offset
		if left_index >= 0 {
			word_val := transmute(uint)bits.int_array[left_index]
			if word_val != 0 {
				left_offset_from_top := intrinsics.count_leading_zeros(word_val)
				left_bit := (UINT_NUM_BITS - 1) - left_offset_from_top
				nearest_left = (left_index << INDEX_SHIFT) + int(left_bit)
				left_found = true
				left_dist = index - nearest_left
			}
		}

		if left_found && right_found {
			if left_dist <= right_dist {
				return nearest_left, true
			} else {
				return nearest_right, true
			}
		} else if left_found {
			return nearest_left, true
		} else if right_found {
			return nearest_right, true
		}
	}

	return
}

nearest_one :: nearest_true

// Returns ok = false if there are no 0 bits in the entire array.
nearest_false :: proc(bits: Bits, index: int) -> (nearest: int, ok: bool) {
	when ODIN_BOUNDS_CHECK {
		if index < 0 || index >= bits.length {
			panic(fmt.tprintf("Bit position %i out of bounds for length %i.", index, bits.length))
		}
	}

	bit_index := uint(index)
	word_index := bit_index >> INDEX_SHIFT
	bit_pos := bit_index & BIT_POS_MASK

	word_index_int := int(word_index)
	total_words := len(bits.int_array)
	max_left := word_index_int
	max_right := total_words - 1 - word_index_int
	max_offset := max(max_left, max_right)

	// The padding bits past `length` in the final word are always 0, so when
	// searching for 0 bits the final word must be masked to its valid range.
	last_bit_index := uint(bits.length - 1)
	last_word_index := int(last_bit_index >> INDEX_SHIFT)
	last_bit_pos := last_bit_index & BIT_POS_MASK
	valid_bits_mask: uint
	if last_bit_pos == UINT_NUM_BITS - 1 {
		valid_bits_mask = UINT_MAX
	} else {
		valid_bits_mask = (uint(1) << (last_bit_pos + 1)) - 1
	}

	// Fast path: a clear bit exists in the word containing `index`.
	word_val := transmute(uint)bits.int_array[word_index_int]
	word_val_search := ~word_val
	if word_index_int == last_word_index {
		word_val_search &= valid_bits_mask
	}
	if word_val_search != 0 {
		if (word_val & (uint(1) << bit_pos)) == 0 do return index, true

		left_mask := (uint(1) << bit_pos) | ((uint(1) << bit_pos) - 1)
		left_bits_value := word_val_search & left_mask

		right_mask := ~((uint(1) << bit_pos) - 1)
		right_bits_value := word_val_search & right_mask

		nearest_left := 0
		left_found := false
		if left_bits_value != 0 {
			left_offset_from_top := intrinsics.count_leading_zeros(left_bits_value)
			left_bit := (UINT_NUM_BITS - 1) - left_offset_from_top
			nearest_left = (word_index_int << INDEX_SHIFT) + int(left_bit)
			left_found = true
		}

		nearest_right := 0
		right_found := false
		if right_bits_value != 0 {
			right_offset := intrinsics.count_trailing_zeros(right_bits_value)
			nearest_right = (word_index_int << INDEX_SHIFT) + int(right_offset)
			right_found = true
		}

		if left_found && right_found {
			left_dist := index - nearest_left
			right_dist := nearest_right - index
			if left_dist <= right_dist {
				return nearest_left, true
			} else {
				return nearest_right, true
			}
		} else if left_found {
			return nearest_left, true
		} else if right_found {
			return nearest_right, true
		}
	}

	// Expanding word-by-word search outward from the center word.
	for offset := 1; offset <= max_offset; offset += 1 {
		right_found := false
		left_found := false
		nearest_right := 0
		nearest_left := 0
		right_dist := 0
		left_dist := 0

		right_index := word_index_int + offset
		if right_index < total_words {
			word_val := transmute(uint)bits.int_array[right_index]
			word_val_search := ~word_val
			if right_index == last_word_index {
				word_val_search &= valid_bits_mask
			}
			if word_val_search != 0 {
				right_offset := intrinsics.count_trailing_zeros(word_val_search)
				nearest_right = (right_index << INDEX_SHIFT) + int(right_offset)
				right_found = true
				right_dist = nearest_right - index
			}
		}

		left_index := word_index_int - offset
		if left_index >= 0 {
			word_val := transmute(uint)bits.int_array[left_index]
			word_val_search := ~word_val
			if word_val_search != 0 {
				left_offset_from_top := intrinsics.count_leading_zeros(word_val_search)
				left_bit := (UINT_NUM_BITS - 1) - left_offset_from_top
				nearest_left = (left_index << INDEX_SHIFT) + int(left_bit)
				left_found = true
				left_dist = index - nearest_left
			}
		}

		if left_found && right_found {
			if left_dist <= right_dist {
				return nearest_left, true
			} else {
				return nearest_right, true
			}
		} else if left_found {
			return nearest_left, true
		} else if right_found {
			return nearest_right, true
		}
	}

	return
}

nearest_zero :: nearest_false

Iterator :: struct {
	bits:     ^Bits,
	word_idx: int,
	bit_idx:  uint,
}

iterator :: #force_inline proc(bits: ^Bits) -> Iterator {
	return {bits = bits}
}

// Yields every bit in order; `cond` is false once the array is exhausted.
iterate :: proc(iterator: ^Iterator) -> (is_true: bool, idx: int, cond: bool) {
	idx = iterator.word_idx * UINT_NUM_BITS + int(iterator.bit_idx)
	if idx >= iterator.bits.length {
		return false, 0, false
	}

	word := transmute(uint)iterator.bits.int_array[iterator.word_idx]
	is_true = (word >> iterator.bit_idx & 1) == 1

	iterator.bit_idx += 1
	if iterator.bit_idx >= UINT_NUM_BITS {
		iterator.bit_idx = 0
		iterator.word_idx += 1
	}

	return is_true, idx, true
}

@(private = "file")
_iterate_kind :: #force_inline proc(iterator: ^Iterator, $ITERATE_ZEROS: bool) -> (idx: int, cond: bool) {
	for iterator.word_idx < len(iterator.bits.int_array) {
		word := transmute(uint)iterator.bits.int_array[iterator.word_idx]
		when ITERATE_ZEROS do word = ~word

		word >>= iterator.bit_idx // Mask out already-processed bits

		if word != 0 {
			// Found a bit - count_trailing_zeros gives position in shifted word
			iterator.bit_idx += uint(intrinsics.count_trailing_zeros(word))
			idx = iterator.word_idx * UINT_NUM_BITS + int(iterator.bit_idx)

			// Advance for next call
			iterator.bit_idx += 1
			if iterator.bit_idx >= UINT_NUM_BITS {
				iterator.bit_idx = 0
				iterator.word_idx += 1
			}

			return idx, idx < iterator.bits.length
		}

		// Word exhausted, move to next
		iterator.word_idx += 1
		iterator.bit_idx = 0
	}

	return 0, false
}

iterate_true :: proc(iterator: ^Iterator) -> (idx: int, cond: bool) {
	return _iterate_kind(iterator, ITERATE_ZEROS = false)
}

iterate_ones :: iterate_true

iterate_false :: proc(iterator: ^Iterator) -> (idx: int, cond: bool) {
	return _iterate_kind(iterator, ITERATE_ZEROS = true)
}

iterate_zeros :: iterate_false

// ---------------------------------------------------------------------------------------------------------------------
// ----- Tests ------------------------
// ---------------------------------------------------------------------------------------------------------------------
import "core:testing"

@(test)
test_set :: proc(t: ^testing.T) {
	bits := make(128)
	defer delete(bits)

	set(bits, 0, true)
	testing.expect_value(t, bits.int_array[0], Int_Bits{0})
	set(bits, 3, true)
	testing.expect_value(t, bits.int_array[0], Int_Bits{0, 3})
	set(bits, 64, true)
	testing.expect_value(t, bits.int_array[1], Int_Bits{0})
	set(bits, 127, true)
	testing.expect_value(t, bits.int_array[1], Int_Bits{0, 63})
	set(bits, 127, false)
	testing.expect_value(t, bits.int_array[1], Int_Bits{0})
}

@(test)
test_get :: proc(t: ^testing.T) {
	bits := make(128)
	defer delete(bits)

	// Default is false
	testing.expect(t, !get(bits, 0))
	testing.expect(t, !get(bits, 63))
	testing.expect(t, !get(bits, 64))
	testing.expect(t, !get(bits, 127))

	// Set and verify within first uint
	set(bits, 0, true)
	testing.expect(t, get(bits, 0))
	testing.expect(t, !get(bits, 1))

	set(bits, 3, true)
	testing.expect(t, get(bits, 3))
	testing.expect(t, !get(bits, 2))
	testing.expect(t, !get(bits, 4))

	// Cross uint boundary
	set(bits, 64, true)
	testing.expect(t, get(bits, 64))
	testing.expect(t, !get(bits, 63))
	testing.expect(t, !get(bits, 65))

	// Last bit
	set(bits, 127, true)
	testing.expect(t, get(bits, 127))

	// Unset and verify
	set(bits, 127, false)
	testing.expect(t, !get(bits, 127))
}

@(test)
test_set_true_set_false :: proc(t: ^testing.T) {
	bits := make(128)
	defer delete(bits)

	// set_true within first uint
	set_true(bits, 0)
	testing.expect_value(t, bits.int_array[0], Int_Bits{0})
	testing.expect(t, get(bits, 0))

	set_true(bits, 3)
	testing.expect_value(t, bits.int_array[0], Int_Bits{0, 3})
	testing.expect(t, get(bits, 3))

	// set_true across uint boundary
	set_true(bits, 64)
	testing.expect_value(t, bits.int_array[1], Int_Bits{0})
	testing.expect(t, get(bits, 64))
	testing.expect(t, !get(bits, 63))
	testing.expect(t, !get(bits, 65))

	// set_true on last bit
	set_true(bits, 127)
	testing.expect_value(t, bits.int_array[1], Int_Bits{0, 63})
	testing.expect(t, get(bits, 127))

	// set_false to clear bits
	set_false(bits, 127)
	testing.expect_value(t, bits.int_array[1], Int_Bits{0})
	testing.expect(t, !get(bits, 127))

	set_false(bits, 0)
	testing.expect_value(t, bits.int_array[0], Int_Bits{3})
	testing.expect(t, !get(bits, 0))
	testing.expect(t, get(bits, 3)) // bit 3 still set

	// set_false on already-false bit (should be no-op)
	set_false(bits, 1)
	testing.expect_value(t, bits.int_array[0], Int_Bits{3})
	testing.expect(t, !get(bits, 1))
}

@(test)
all_true_test :: proc(t: ^testing.T) {
	uint_max := UINT_MAX
	all_ones := transmute(Int_Bits)uint_max

	bits := make(132)
	defer delete(bits)

	bits.int_array[0] = all_ones
	bits.int_array[1] = all_ones
	bits.int_array[2] = {0, 1, 2, 3}
	testing.expect(t, all_true(bits))

	bits.int_array[2] = {0, 1, 2}
	testing.expect(t, !all_true(bits))

	bits2 := make(1)
	defer delete(bits2)

	bits2.int_array[0] = {0}
	testing.expect(t, all_true(bits2))
}

@(test)
test_range_true :: proc(t: ^testing.T) {
	uint_max := UINT_MAX
	all_ones := transmute(Int_Bits)uint_max

	bits := make(192)
	defer delete(bits)

	// Empty range is vacuously true
	testing.expect(t, range_true(bits, 0, 0))
	testing.expect(t, range_true(bits, 50, 50))
	// inverted range should panic under bounds checking; keep this test case out of here

	// Single word, partial range
	bits.int_array[0] = {3, 4, 5, 6}
	testing.expect(t, range_true(bits, 3, 7))
	testing.expect(t, !range_true(bits, 2, 7)) // bit 2 not set
	testing.expect(t, !range_true(bits, 3, 8)) // bit 7 not set

	// Single word, full word
	bits.int_array[0] = all_ones
	testing.expect(t, range_true(bits, 0, 64))

	// Range spanning two words
	bits.int_array[0] = all_ones
	bits.int_array[1] = {0, 1, 2, 3}
	testing.expect(t, range_true(bits, 60, 68)) // bits 60-63 in word 0, bits 0-3 in word 1
	testing.expect(t, !range_true(bits, 60, 69)) // bit 68 (4 in word 1) not set

	// Range spanning three words with full middle word
	bits.int_array[0] = all_ones
	bits.int_array[1] = all_ones
	bits.int_array[2] = {0, 1, 2, 3}
	testing.expect(t, range_true(bits, 60, 132)) // partial first, full middle, partial last
	testing.expect(t, !range_true(bits, 60, 133)) // bit 132 (4 in word 2) not set

	// Middle word not all set
	bits.int_array[1] = all_ones - {32}
	testing.expect(t, !range_true(bits, 60, 132))

	// Boundary: range ends exactly at word boundary
	bits.int_array[0] = all_ones
	bits.int_array[1] = all_ones
	testing.expect(t, range_true(bits, 32, 128))

	// Boundary: range starts exactly at word boundary
	bits.int_array[1] = all_ones
	bits.int_array[2] = all_ones
	testing.expect(t, range_true(bits, 64, 192))
}

@(test)
nearest_true_handles_same_word_and_boundaries :: proc(t: ^testing.T) {
	bits := make(128, context.temp_allocator)

	set_true(bits, 0)
	set_true(bits, 10)
	set_true(bits, 20)
	set_true(bits, 63)

	nearest, ok := nearest_true(bits, 10)
	testing.expect(t, ok)
	testing.expect_value(t, nearest, 10)

	nearest, ok = nearest_true(bits, 12)
	testing.expect(t, ok)
	testing.expect_value(t, nearest, 10)

	nearest, ok = nearest_true(bits, 17)
	testing.expect(t, ok)
	testing.expect_value(t, nearest, 20)

	nearest, ok = nearest_true(bits, 15)
	testing.expect(t, ok)
	testing.expect_value(t, nearest, 10)

	nearest, ok = nearest_true(bits, 0)
	testing.expect(t, ok)
	testing.expect_value(t, nearest, 0)

	nearest, ok = nearest_true(bits, 63)
	testing.expect(t, ok)
	testing.expect_value(t, nearest, 63)
}

@(test)
nearest_false_handles_same_word_and_boundaries :: proc(t: ^testing.T) {
	bits := make(128, context.temp_allocator)

	// Start with all bits true, then clear a few to false.
	for i := 0; i < bits.length; i += 1 {
		set_true(bits, i)
	}

	set_false(bits, 0)
	set_false(bits, 10)
	set_false(bits, 20)
	set_false(bits, 63)

	nearest, ok := nearest_false(bits, 10)
	testing.expect(t, ok)
	testing.expect_value(t, nearest, 10)

	nearest, ok = nearest_false(bits, 12)
	testing.expect(t, ok)
	testing.expect_value(t, nearest, 10)

	nearest, ok = nearest_false(bits, 17)
	testing.expect(t, ok)
	testing.expect_value(t, nearest, 20)

	nearest, ok = nearest_false(bits, 15)
	testing.expect(t, ok)
	testing.expect_value(t, nearest, 10)

	nearest, ok = nearest_false(bits, 0)
	testing.expect(t, ok)
	testing.expect_value(t, nearest, 0)

	nearest, ok = nearest_false(bits, 63)
	testing.expect(t, ok)
	testing.expect_value(t, nearest, 63)
}

@(test)
nearest_false_scans_across_words_and_returns_false_when_all_true :: proc(t: ^testing.T) {
	bits := make(192, context.temp_allocator)

	// Start with all bits true, then clear a couple far apart.
	for i := 0; i < bits.length; i += 1 {
		set_true(bits, i)
	}

	set_false(bits, 5)
	set_false(bits, 130)

	nearest, ok := nearest_false(bits, 96)
	testing.expect(t, ok)
	testing.expect_value(t, nearest, 130)

	// Restore the only zero bits so there are no zeros left.
	set_true(bits, 5)
	set_true(bits, 130)

	nearest, ok = nearest_false(bits, 96)
	testing.expect(t, !ok)
}

@(test)
nearest_true_scans_across_words_and_returns_false_when_empty :: proc(t: ^testing.T) {
	bits := make(192, context.temp_allocator)

	set_true(bits, 5)
	set_true(bits, 130)

	nearest, ok := nearest_true(bits, 96)
	testing.expect(t, ok)
	testing.expect_value(t, nearest, 130)

	zero(bits)

	nearest, ok = nearest_true(bits, 96)
	testing.expect(t, !ok)
}

@(test)
nearest_false_handles_last_word_partial_length :: proc(t: ^testing.T) {
	bits := make(130, context.temp_allocator)

	// Start with all bits true, then clear the first and last valid bits.
+ for i := 0; i < bits.length; i += 1 { + set_true(bits, i) + } + + set_false(bits, 0) + set_false(bits, 129) + + nearest, ok := nearest_false(bits, 128) + testing.expect(t, ok) + testing.expect_value(t, nearest, 129) + + nearest, ok = nearest_false(bits, 127) + testing.expect(t, ok) + testing.expect_value(t, nearest, 129) +} + +@(test) +nearest_true_handles_last_word_partial_length :: proc(t: ^testing.T) { + bits := make(130, context.temp_allocator) + + set_true(bits, 0) + set_true(bits, 129) + + nearest, ok := nearest_true(bits, 128) + testing.expect(t, ok) + testing.expect_value(t, nearest, 129) + + nearest, ok = nearest_true(bits, 127) + testing.expect(t, ok) + testing.expect_value(t, nearest, 129) +} + +@(test) +iterator_basic_mixed_bits :: proc(t: ^testing.T) { + // Use non-word-aligned length to test partial last word handling + bits := make(100, context.temp_allocator) + + // Set specific bits: 0, 3, 64, 99 (last valid index) + set_true(bits, 0) + set_true(bits, 3) + set_true(bits, 64) + set_true(bits, 99) + + expected_true_indices := [?]int{0, 3, 64, 99} + + // Test iterate - should return all 100 bits with correct set state + { + it := iterator(&bits) + count := 0 + for is_set, idx in iterate(&it) { + expected_set := slice.contains(expected_true_indices[:], idx) + testing.expectf( + t, + is_set == expected_set, + "iterate: bit %d expected is_set=%v, got %v", + idx, + expected_set, + is_set, + ) + testing.expectf(t, idx == count, "iterate: expected sequential idx=%d, got %d", count, idx) + count += 1 + } + testing.expectf(t, count == 100, "iterate: expected 100 iterations, got %d", count) + } + + // Test iterate_true - should only return the 4 set bits + { + it := iterator(&bits) + result_indices := builtin.make([dynamic]int, allocator = context.temp_allocator) + for idx in iterate_true(&it) { + append(&result_indices, idx) + } + testing.expectf( + t, + len(result_indices) == 4, + "iterate_true: expected 4 set bits, got %d", + len(result_indices), + ) + 
for expected_idx, i in expected_true_indices { + testing.expectf( + t, + result_indices[i] == expected_idx, + "iterate_true: at position %d expected idx=%d, got %d", + i, + expected_idx, + result_indices[i], + ) + } + } + + // Test iterate_false - should return all 96 unset bits + { + it := iterator(&bits) + count := 0 + for idx in iterate_false(&it) { + testing.expectf( + t, + !slice.contains(expected_true_indices[:], idx), + "iterate_false: returned set bit index %d", + idx, + ) + count += 1 + } + testing.expectf(t, count == 96, "iterate_false: expected 96 unset bits, got %d", count) + } +} + +@(test) +iterator_all_false_bits :: proc(t: ^testing.T) { + // Use non-word-aligned length + bits := make(100, context.temp_allocator) + // All bits default to false, no need to set anything + + // Test iterate - should return all 100 bits as false + { + it := iterator(&bits) + count := 0 + for is_set, idx in iterate(&it) { + testing.expectf(t, !is_set, "iterate: bit %d expected is_set=false, got true", idx) + testing.expectf(t, idx == count, "iterate: expected sequential idx=%d, got %d", count, idx) + count += 1 + } + testing.expectf(t, count == 100, "iterate: expected 100 iterations, got %d", count) + } + + // Test iterate_true - should return nothing + { + it := iterator(&bits) + count := 0 + for idx in iterate_true(&it) { + testing.expectf(t, false, "iterate_true: unexpectedly returned idx=%d when all bits are false", idx) + count += 1 + } + testing.expectf(t, count == 0, "iterate_true: expected 0 iterations, got %d", count) + } + + // Test iterate_false - should return all 100 indices + { + it := iterator(&bits) + count := 0 + for idx in iterate_false(&it) { + testing.expectf(t, idx == count, "iterate_false: expected sequential idx=%d, got %d", count, idx) + count += 1 + } + testing.expectf(t, count == 100, "iterate_false: expected 100 iterations, got %d", count) + } +} + +@(test) +iterator_all_true_bits :: proc(t: ^testing.T) { + // Use non-word-aligned length + bits 
:= make(100, context.temp_allocator) + // Set all bits to true + for i := 0; i < bits.length; i += 1 { + set_true(bits, i) + } + + // Test iterate - should return all 100 bits as true + { + it := iterator(&bits) + count := 0 + for is_set, idx in iterate(&it) { + testing.expectf(t, is_set, "iterate: bit %d expected is_set=true, got false", idx) + testing.expectf(t, idx == count, "iterate: expected sequential idx=%d, got %d", count, idx) + count += 1 + } + testing.expectf(t, count == 100, "iterate: expected 100 iterations, got %d", count) + } + + // Test iterate_true - should return all 100 indices + { + it := iterator(&bits) + count := 0 + for idx in iterate_true(&it) { + testing.expectf(t, idx == count, "iterate_true: expected sequential idx=%d, got %d", count, idx) + count += 1 + } + testing.expectf(t, count == 100, "iterate_true: expected 100 iterations, got %d", count) + } + + // Test iterate_false - should return nothing + { + it := iterator(&bits) + count := 0 + for idx in iterate_false(&it) { + testing.expectf(t, false, "iterate_false: unexpectedly returned idx=%d when all bits are true", idx) + count += 1 + } + testing.expectf(t, count == 0, "iterate_false: expected 0 iterations, got %d", count) + } +} diff --git a/odinfmt.json b/odinfmt.json new file mode 100644 index 0000000..601712b --- /dev/null +++ b/odinfmt.json @@ -0,0 +1,5 @@ +{ + "$schema": "https://raw.githubusercontent.com/DanielGavin/ols/master/misc/odinfmt.schema.json", + "character_width": 110, + "tabs_width": 1 +} diff --git a/quantity/irradiance.odin b/quantity/irradiance.odin new file mode 100644 index 0000000..06efff3 --- /dev/null +++ b/quantity/irradiance.odin @@ -0,0 +1,7 @@ +package quantity + +import "base:intrinsics" + +Watts_Per_Square_Meter :: struct($V: typeid) where intrinsics.type_is_numeric(V) { + v: V, +} diff --git a/quantity/pressure.odin b/quantity/pressure.odin new file mode 100644 index 0000000..41bc9a8 --- /dev/null +++ b/quantity/pressure.odin @@ -0,0 +1,134 @@ +package 
quantity + +import "base:intrinsics" + +PASCALS_PER_TORR :: 101325.0 / 760.0 +KILO_PASCALS_PER_PSI :: 6.894757293168364 + +//----- Pascals ---------------------------------- +Pascals :: struct($V: typeid) where intrinsics.type_is_numeric(V) { + v: V, +} + +@(private = "file") +pascals_to_kilo_pascals :: #force_inline proc "contextless" ( + pascals: Pascals($V), +) -> Kilo_Pascals(V) where intrinsics.type_is_numeric(V) { + return Kilo_Pascals(V){pascals.v / KILO} +} + +@(private = "file") +pascals_to_torr :: #force_inline proc "contextless" ( + pascals: Pascals($V), +) -> Torr(V) where intrinsics.type_is_float(V) { + return Torr(V){pascals.v / PASCALS_PER_TORR} +} + +//----- Kilopascals ---------------------------------- +Kilo_Pascals :: struct($V: typeid) where intrinsics.type_is_numeric(V) { + v: V, +} + +@(private = "file") +kilo_pascals_to_pascals :: #force_inline proc "contextless" ( + kilo_pascals: Kilo_Pascals($V), +) -> Pascals(V) where intrinsics.type_is_numeric(V) { + return Pascals(V){kilo_pascals.v * KILO} +} + +kilo_pascals_to_psi :: #force_inline proc "contextless" ( + kilo_pascals: Kilo_Pascals($V), +) -> Psi(V) where intrinsics.type_is_float(V) { + return Psi(V){kilo_pascals.v / KILO_PASCALS_PER_PSI} +} + +//----- Torr ---------------------------------- +Torr :: struct($V: typeid) where intrinsics.type_is_numeric(V) { + v: V, +} + +@(private = "file") +torr_to_pascals :: #force_inline proc "contextless" ( + torr: Torr($V), +) -> Pascals(V) where intrinsics.type_is_float(V) { + return Pascals(V){torr.v * PASCALS_PER_TORR} +} + +//----- PSI ---------------------------------- +Psi :: struct($V: typeid) where intrinsics.type_is_numeric(V) { + v: V, +} + +psi_to_kilo_pascals :: #force_inline proc "contextless" ( + psi: Psi($V), +) -> Kilo_Pascals(V) where intrinsics.type_is_float(V) { + return Kilo_Pascals(V){psi.v * KILO_PASCALS_PER_PSI} +} + +// 
--------------------------------------------------------------------------------------------------------------------- +// ----- Conversion Overloads ------------------------ +// --------------------------------------------------------------------------------------------------------------------- +to_pascals :: proc { + kilo_pascals_to_pascals, + torr_to_pascals, +} + +to_kilo_pascals :: proc { + pascals_to_kilo_pascals, + psi_to_kilo_pascals, +} + +to_torr :: proc { + pascals_to_torr, +} + +to_psi :: proc { + kilo_pascals_to_psi, +} + + +// --------------------------------------------------------------------------------------------------------------------- +// ----- Tests ------------------------ +// --------------------------------------------------------------------------------------------------------------------- +import "core:testing" + +@(test) +test_pascals_to_kilo_pascals :: proc(t: ^testing.T) { + pascals := Pascals(int){1000} + kilo_pascals := to_kilo_pascals(pascals) + + testing.expect_value(t, kilo_pascals, Kilo_Pascals(int){1}) +} + +@(test) +test_kilo_pascals_to_pascals :: proc(t: ^testing.T) { + kilo_pascals := Kilo_Pascals(int){1} + pascals := to_pascals(kilo_pascals) + + testing.expect_value(t, pascals, Pascals(int){1000}) +} + +@(test) +test_pascals_to_torr :: proc(t: ^testing.T) { + pascals := Pascals(f32){1000} + torr := to_torr(pascals) + + testing.expect(t, torr.v > 7.49 && torr.v < 7.51) +} + +@(test) +test_torr_to_pascals :: proc(t: ^testing.T) { + torr := Torr(f32){7.5} + pascals := to_pascals(torr) + + testing.expect(t, pascals.v > 999.91 && pascals.v < 999.92) +} + +@(test) +test_psi_kilo_pascals :: proc(t: ^testing.T) { + psi := Psi(f32){2.5} + kilo_pascals := Kilo_Pascals(f32){17.23689323292091} + + testing.expect(t, to_kilo_pascals(psi).v > 17.22 && to_kilo_pascals(psi).v < 17.24) + testing.expect(t, to_psi(kilo_pascals).v > 2.49 && to_psi(kilo_pascals).v < 2.51) +} diff --git a/quantity/quantity.odin b/quantity/quantity.odin new file 
mode 100644 index 0000000..daadded --- /dev/null +++ b/quantity/quantity.odin @@ -0,0 +1,12 @@ +package quantity + +DECA :: 10; +DECI :: 10; +HECTO :: 100; +CENTI :: 100; +KILO :: 1_000; +MILLI :: 1_000; +MEGA :: 1_000_000; +MICRO :: 1_000_000; +GIGA :: 1_000_000_000; +NANO :: 1_000_000_000; diff --git a/quantity/resistance.odin b/quantity/resistance.odin new file mode 100644 index 0000000..86291b9 --- /dev/null +++ b/quantity/resistance.odin @@ -0,0 +1,7 @@ +package quantity + +import "base:intrinsics" + +Ohms :: struct($V: typeid) where intrinsics.type_is_numeric(V) { + v: V, +} diff --git a/quantity/temperature.odin b/quantity/temperature.odin new file mode 100644 index 0000000..28d9992 --- /dev/null +++ b/quantity/temperature.odin @@ -0,0 +1,157 @@ +package quantity + +import "base:intrinsics" + +// --------------------------------------------------------------------------------------------------------------------- +// ----- Constants ------------------------ +// --------------------------------------------------------------------------------------------------------------------- +@(private = "file") +kelvins_celsius_offset :: #force_inline proc "contextless" ( + $V: typeid, +) -> V where intrinsics.type_is_numeric(V) { + when intrinsics.type_is_float(V) { + OFFSET :: 273.15 + } else { + OFFSET :: 273 + } + return OFFSET +} + +// --------------------------------------------------------------------------------------------------------------------- +// ----- Types ------------------------ +// --------------------------------------------------------------------------------------------------------------------- +//----- Kelvins ---------------------------------- +Kelvins :: struct($V: typeid) where intrinsics.type_is_numeric(V) { + v: V, +} + +@(private = "file") +kelvins_to_celsius :: #force_inline proc "contextless" ( + kelvins: Kelvins($V), +) -> Celsius(V) where intrinsics.type_is_numeric(V) { + return Celsius(V){kelvins.v - kelvins_celsius_offset(V)} +} + 
+@(private = "file") +kelvins_to_deci_kelvins :: #force_inline proc "contextless" ( + kelvins: Kelvins($V), +) -> Deci_Kelvins(V) where intrinsics.type_is_numeric(V) { + return Deci_Kelvins(V){kelvins.v * DECI} +} + +//----- Decikelvins ---------------------------------- +Deci_Kelvins :: struct($V: typeid) where intrinsics.type_is_numeric(V) { + v: V, +} + +@(private = "file") +deci_kelvins_to_kelvins :: #force_inline proc "contextless" ( + deci_kelvins: Deci_Kelvins($V), +) -> Kelvins(V) where intrinsics.type_is_numeric(V) { + return Kelvins(V){deci_kelvins.v / DECI} +} + +//----- Degrees Celsius ---------------------------------- +Celsius :: struct($V: typeid) where intrinsics.type_is_numeric(V) { + v: V, +} + +@(private = "file") +celsius_to_kelvins :: #force_inline proc "contextless" ( + degrees_celsius: Celsius($V), +) -> Kelvins(V) where intrinsics.type_is_numeric(V) { + return Kelvins(V){degrees_celsius.v + kelvins_celsius_offset(V)} +} + +@(private = "file") +celsius_to_deci_celsius :: #force_inline proc "contextless" ( + degrees_celsius: Celsius($V), +) -> Deci_Celsius(V) where intrinsics.type_is_numeric(V) { + return Deci_Celsius(V){degrees_celsius.v * DECI} +} + +//----- Deci Degrees Celsius ---------------------------------- +Deci_Celsius :: struct($V: typeid) where intrinsics.type_is_numeric(V) { + v: V, +} + +@(private = "file") +deci_celsius_to_celsius :: #force_inline proc "contextless" ( + deci_degrees_celsius: Deci_Celsius($V), +) -> Celsius(V) where intrinsics.type_is_numeric(V) { + return Celsius(V){deci_degrees_celsius.v / DECI} +} + +// --------------------------------------------------------------------------------------------------------------------- +// ----- Conversion Overloads ------------------------ +// --------------------------------------------------------------------------------------------------------------------- +to_kelvins :: proc { + deci_kelvins_to_kelvins, + celsius_to_kelvins, +} + +to_deci_kelvins :: proc { + 
kelvins_to_deci_kelvins, +} + +to_celsius :: proc { + kelvins_to_celsius, + deci_celsius_to_celsius, +} + +to_deci_celsius :: proc { + celsius_to_deci_celsius, +} + +// --------------------------------------------------------------------------------------------------------------------- +// ----- Tests ------------------------ +// --------------------------------------------------------------------------------------------------------------------- +import "core:testing" + +@(test) +test_kelvins_to_celsius :: proc(t: ^testing.T) { + kelvins := Kelvins(f32){273.15} + celsius := to_celsius(kelvins) + + testing.expect_value(t, celsius, Celsius(f32){0}) +} + +@(test) +test_kelvins_to_deci_kelvins :: proc(t: ^testing.T) { + kelvins := Kelvins(int){100} + deci_kelvins := to_deci_kelvins(kelvins) + + testing.expect_value(t, deci_kelvins, Deci_Kelvins(int){1000}) +} + +@(test) +test_deci_kelvins_to_kelvins :: proc(t: ^testing.T) { + deci_kelvins := Deci_Kelvins(int){1000} + kelvins := to_kelvins(deci_kelvins) + + testing.expect_value(t, kelvins, Kelvins(int){100}) +} + +@(test) +test_celsius_to_kelvins :: proc(t: ^testing.T) { + degrees_celsius := Celsius(f32){0} + kelvins := to_kelvins(degrees_celsius) + + testing.expect_value(t, kelvins, Kelvins(f32){273.15}) +} + +@(test) +test_celsius_to_deci_celsius :: proc(t: ^testing.T) { + degrees_celsius := Celsius(int){100} + deci_degrees_celsius := to_deci_celsius(degrees_celsius) + + testing.expect_value(t, deci_degrees_celsius, Deci_Celsius(int){1000}) +} + +@(test) +test_deci_celsius_to_celsius :: proc(t: ^testing.T) { + deci_degrees_celsius := Deci_Celsius(int){1000} + degrees_celsius := to_celsius(deci_degrees_celsius) + + testing.expect_value(t, degrees_celsius, Celsius(int){100}) +} diff --git a/quantity/voltage.odin b/quantity/voltage.odin new file mode 100644 index 0000000..3421914 --- /dev/null +++ b/quantity/voltage.odin @@ -0,0 +1,59 @@ +package quantity + +import "base:intrinsics" + +//----- Volts 
---------------------------------- +Volts :: struct($V: typeid) where intrinsics.type_is_numeric(V) { + v: V, +} + +@(private = "file") +volts_to_milli_volts :: #force_inline proc "contextless" ( + volts: Volts($V), +) -> Milli_Volts(V) where intrinsics.type_is_numeric(V) { + return Milli_Volts(V){volts.v * MILLI} +} + +//----- Millivolts ---------------------------------- +Milli_Volts :: struct($V: typeid) where intrinsics.type_is_numeric(V) { + v: V, +} + +@(private = "file") +milli_volts_to_volts :: #force_inline proc "contextless" ( + milli_volts: Milli_Volts($V), +) -> Volts(V) where intrinsics.type_is_numeric(V) { + return Volts(V){milli_volts.v / MILLI} +} + +// --------------------------------------------------------------------------------------------------------------------- +// ----- Conversion Overloads ------------------------ +// --------------------------------------------------------------------------------------------------------------------- +to_volts :: proc { + milli_volts_to_volts, +} + +to_milli_volts :: proc { + volts_to_milli_volts, +} + +// --------------------------------------------------------------------------------------------------------------------- +// ----- Tests ------------------------ +// --------------------------------------------------------------------------------------------------------------------- +import "core:testing" + +@(test) +test_volts_to_milli_volts :: proc(t: ^testing.T) { + volts := Volts(int){1} + milli_volts := to_milli_volts(volts) + + testing.expect_value(t, milli_volts, Milli_Volts(int){1000}) +} + +@(test) +test_milli_volts_to_volts :: proc(t: ^testing.T) { + milli_volts := Milli_Volts(int){1000} + volts := to_volts(milli_volts) + + testing.expect_value(t, volts, Volts(int){1}) +} diff --git a/quantity/volume.odin b/quantity/volume.odin new file mode 100644 index 0000000..de01d2a --- /dev/null +++ b/quantity/volume.odin @@ -0,0 +1,59 @@ +package quantity + +import "base:intrinsics" + +//----- Liters 
---------------------------------- +Liters :: struct($V: typeid) where intrinsics.type_is_numeric(V) { + v: V, +} + +@(private = "file") +liters_to_milli_liters :: #force_inline proc "contextless" ( + liters: Liters($V), +) -> Milli_Liters(V) where intrinsics.type_is_numeric(V) { + return Milli_Liters(V){liters.v * MILLI} +} + +//----- Milliliters ---------------------------------- +Milli_Liters :: struct($V: typeid) where intrinsics.type_is_numeric(V) { + v: V, +} + +@(private = "file") +milli_liters_to_liters :: #force_inline proc "contextless" ( + milli_liters: Milli_Liters($V), +) -> Liters(V) where intrinsics.type_is_numeric(V) { + return Liters(V){milli_liters.v / MILLI} +} + +// --------------------------------------------------------------------------------------------------------------------- +// ----- Conversion Overloads ------------------------ +// --------------------------------------------------------------------------------------------------------------------- +to_liters :: proc { + milli_liters_to_liters, +} + +to_milli_liters :: proc { + liters_to_milli_liters, +} + +// --------------------------------------------------------------------------------------------------------------------- +// ----- Tests ------------------------ +// --------------------------------------------------------------------------------------------------------------------- +import "core:testing" + +@(test) +test_liters_to_milli_liters :: proc(t: ^testing.T) { + liters := Liters(int){12} + milli_liters := to_milli_liters(liters) + + testing.expect_value(t, milli_liters, Milli_Liters(int){12_000}) +} + +@(test) +test_milli_liters_to_liters :: proc(t: ^testing.T) { + milli_liters := Milli_Liters(int){12_000} + liters := to_liters(milli_liters) + + testing.expect_value(t, liters, Liters(int){12}) +} diff --git a/quantity/volume_rate.odin b/quantity/volume_rate.odin new file mode 100644 index 0000000..89de0fb --- /dev/null +++ b/quantity/volume_rate.odin @@ -0,0 +1,7 @@ +package 
quantity + +import "base:intrinsics" + +Liters_Per_Minute :: struct($V: typeid) where intrinsics.type_is_numeric(V) { + v: V, +} diff --git a/ring/ring.odin b/ring/ring.odin new file mode 100644 index 0000000..ec6065f --- /dev/null +++ b/ring/ring.odin @@ -0,0 +1,269 @@ +package ring + +import "core:fmt" + +@(private) +ODIN_BOUNDS_CHECK :: !ODIN_NO_BOUNDS_CHECK + +Ring :: struct($T: typeid) { + data: []T, + _end_index, len: int, +} + +Ring_Soa :: struct($T: typeid) { + data: #soa[]T, + _end_index, len: int, +} + +from_slice_raos :: #force_inline proc(data: $T/[]$E) -> Ring(E) { + return {data = data, _end_index = -1} +} + +from_slice_rsoa :: #force_inline proc(data: $T/#soa[]$E) -> Ring_Soa(E) { + return {data = data, _end_index = -1} +} + +from_slice :: proc { + from_slice_raos, + from_slice_rsoa, +} + +// Index in the backing array where the ring starts +_start_index_raos :: proc(ring: Ring($T)) -> int { + if ring.len < len(ring.data) { + return 0 + } else { + start_index := ring._end_index + 1 + return 0 if start_index == len(ring.data) else start_index + } +} + +// Index in the backing array where the ring starts +_start_index_rsoa :: proc(ring: Ring_Soa($T)) -> int { + if ring.len < len(ring.data) { + return 0 + } else { + start_index := ring._end_index + 1 + return 0 if start_index == len(ring.data) else start_index + } +} + +advance_raos :: proc(ring: ^Ring($T)) { + // Length + if ring.len != len(ring.data) do ring.len += 1 + // End index + if ring._end_index == len(ring.data) - 1 { // If we are at the end of the backing array + ring._end_index = 0 // Overflow end to 0 + } else { + ring._end_index += 1 + } +} + +advance_rsoa :: proc(ring: ^Ring_Soa($T)) { + // Length + if ring.len != len(ring.data) do ring.len += 1 + // End index + if ring._end_index == len(ring.data) - 1 { // If we are at the end of the backing array + ring._end_index = 0 // Overflow end to 0 + } else { + ring._end_index += 1 + } +} + +advance :: proc { + advance_raos, + advance_rsoa, +} + 
+append_raos :: proc(ring: ^Ring($T), element: T) { + advance(ring) + ring.data[ring._end_index] = element +} + +append_rsoa :: proc(ring: ^Ring_Soa($T), element: T) { + advance(ring) + ring.data[ring._end_index] = element +} + +append :: proc { + append_raos, + append_rsoa, +} + +get_raos :: proc(ring: Ring($T), index: int) -> ^T { + when ODIN_BOUNDS_CHECK { + if index >= ring.len { + panic(fmt.tprintf("Ring index %i out of bounds for length %i", index, ring.len)) + } + } + + array_index := _start_index_raos(ring) + index + if array_index < len(ring.data) { + return &ring.data[array_index] + } else { + array_index = array_index - len(ring.data) + return &ring.data[array_index] + } +} + +// SOA can't return soa pointer to parapoly T. +get_rsoa :: proc(ring: Ring_Soa($T), index: int) -> T { + when ODIN_BOUNDS_CHECK { + if index >= ring.len { + panic(fmt.tprintf("Ring index %i out of bounds for length %i", index, ring.len)) + } + } + + array_index := _start_index_rsoa(ring) + index + if array_index < len(ring.data) { + return ring.data[array_index] + } else { + array_index = array_index - len(ring.data) + return ring.data[array_index] + } +} + +get :: proc { + get_raos, + get_rsoa, +} + +get_last_raos :: #force_inline proc(ring: Ring($T)) -> ^T { + return get(ring, ring.len - 1) +} + +get_last_rsoa :: #force_inline proc(ring: Ring_Soa($T)) -> T { + return get(ring, ring.len - 1) +} + +get_last :: proc { + get_last_raos, + get_last_rsoa, +} + +clear_raos :: #force_inline proc "contextless" (ring: ^Ring($T)) { + ring.len = 0 + ring._end_index = -1 +} + +clear_rsoa :: #force_inline proc "contextless" (ring: ^Ring_Soa($T)) { + ring.len = 0 + ring._end_index = -1 +} + +clear :: proc { + clear_raos, + clear_rsoa, +} + +// --------------------------------------------------------------------------------------------------------------------- +// ----- Tests ------------------------ +// 
--------------------------------------------------------------------------------------------------------------------- +import "core:log" +import "core:testing" + +@(test) +test_ring_aos :: proc(t: ^testing.T) { + data := make_slice([]int, 10) + ring := from_slice(data) + defer delete(ring.data) + + for i in 1 ..= 5 { + append(&ring, i) + log.debug("Length:", ring.len) + log.debug("Start index:", _start_index_raos(ring)) + log.debug("End index:", ring._end_index) + log.debug(ring.data) + } + testing.expect_value(t, get(ring, 0)^, 1) + testing.expect_value(t, get(ring, 4)^, 5) + testing.expect_value(t, ring.len, 5) + testing.expect_value(t, ring._end_index, 4) + testing.expect_value(t, _start_index_raos(ring), 0) + + for i in 6 ..= 15 { + append(&ring, i) + log.debug("Length:", ring.len) + log.debug("Start index:", _start_index_raos(ring)) + log.debug("End index:", ring._end_index) + log.debug(ring.data) + } + testing.expect_value(t, get(ring, 0)^, 6) + testing.expect_value(t, get(ring, 4)^, 10) + testing.expect_value(t, get(ring, 9)^, 15) + testing.expect_value(t, get_last(ring)^, 15) + testing.expect_value(t, ring.len, 10) + testing.expect_value(t, ring._end_index, 4) + testing.expect_value(t, _start_index_raos(ring), 5) + + for i in 15 ..= 25 { + append(&ring, i) + log.debug("Length:", ring.len) + log.debug("Start index:", _start_index_raos(ring)) + log.debug("End index:", ring._end_index) + log.debug(ring.data) + } + testing.expect_value(t, get(ring, 0)^, 16) + testing.expect_value(t, ring._end_index, 5) + testing.expect_value(t, get_last(ring)^, 25) + + clear(&ring) + append(&ring, 1) + testing.expect_value(t, ring.len, 1) + testing.expect_value(t, get(ring, 0)^, 1) +} + +@(test) +test_ring_soa :: proc(t: ^testing.T) { + Ints :: struct { + x, y: int, + } + + data := make_soa_slice(#soa[]Ints, 10) + ring := from_slice(data) + defer delete(ring.data) + + for i in 1 ..= 5 { + append(&ring, Ints{i, i}) + log.debug("Length:", ring.len) + log.debug("Start index:", 
_start_index_rsoa(ring)) + log.debug("End index:", ring._end_index) + log.debug(ring.data) + } + testing.expect_value(t, get(ring, 0), Ints{1, 1}) + testing.expect_value(t, get(ring, 4), Ints{5, 5}) + testing.expect_value(t, ring.len, 5) + testing.expect_value(t, ring._end_index, 4) + testing.expect_value(t, _start_index_rsoa(ring), 0) + + for i in 6 ..= 15 { + append(&ring, Ints{i, i}) + log.debug("Length:", ring.len) + log.debug("Start index:", _start_index_rsoa(ring)) + log.debug("End index:", ring._end_index) + log.debug(ring.data) + } + testing.expect_value(t, get(ring, 0), Ints{6, 6}) + testing.expect_value(t, get(ring, 4), Ints{10, 10}) + testing.expect_value(t, get(ring, 9), Ints{15, 15}) + testing.expect_value(t, get_last(ring), Ints{15, 15}) + testing.expect_value(t, ring.len, 10) + testing.expect_value(t, ring._end_index, 4) + testing.expect_value(t, _start_index_rsoa(ring), 5) + + for i in 15 ..= 25 { + append(&ring, Ints{i, i}) + log.debug("Length:", ring.len) + log.debug("Start index:", _start_index_rsoa(ring)) + log.debug("End index:", ring._end_index) + log.debug(ring.data) + } + testing.expect_value(t, get(ring, 0), Ints{16, 16}) + testing.expect_value(t, ring._end_index, 5) + testing.expect_value(t, get_last(ring), Ints{25, 25}) + + clear(&ring) + append(&ring, Ints{1, 1}) + testing.expect_value(t, ring.len, 1) + testing.expect_value(t, get(ring, 0), Ints{1, 1}) +} diff --git a/vendor/libusb/libusb.odin b/vendor/libusb/libusb.odin new file mode 100644 index 0000000..c517094 --- /dev/null +++ b/vendor/libusb/libusb.odin @@ -0,0 +1,1233 @@ +package libusb + +import "core:c" +import "core:fmt" +//TODO: Make multiplatform +import "core:sys/posix" + +//TODO: Probably want to switch this to being statically linked +foreign import lib "system:usb-1.0" + +/** \ingroup libusb_desc + * Device and/or Interface Class codes */ +Class_Code :: enum c.int { + /** In the context of a \ref libusb_device_descriptor "device descriptor", + * this bDeviceClass value 
indicates that each interface specifies its + * own class information and all interfaces operate independently. + */ + CLASS_PER_INTERFACE = 0x00, + /** Audio class */ + CLASS_AUDIO = 0x01, + /** Communications class */ + CLASS_COMM = 0x02, + /** Human Interface Device class */ + CLASS_HID = 0x03, + /** Physical */ + CLASS_PHYSICAL = 0x05, + /** Image class */ + CLASS_IMAGE = 0x06, + CLASS_PTP = 0x06, /* legacy name from libusb-0.1 usb.h */ + /** Printer class */ + CLASS_PRINTER = 0x07, + /** Mass storage class */ + CLASS_MASS_STORAGE = 0x08, + /** Hub class */ + CLASS_HUB = 0x09, + /** Data class */ + CLASS_DATA = 0x0a, + /** Smart Card */ + CLASS_SMART_CARD = 0x0b, + /** Content Security */ + CLASS_CONTENT_SECURITY = 0x0d, + /** Video */ + CLASS_VIDEO = 0x0e, + /** Personal Healthcare */ + CLASS_PERSONAL_HEALTHCARE = 0x0f, + /** Diagnostic Device */ + CLASS_DIAGNOSTIC_DEVICE = 0xdc, + /** Wireless class */ + CLASS_WIRELESS = 0xe0, + /** Miscellaneous class */ + CLASS_MISCELLANEOUS = 0xef, + /** Application class */ + CLASS_APPLICATION = 0xfe, + /** Class is vendor-specific */ + CLASS_VENDOR_SPEC = 0xff, +} + +/** \ingroup libusb_desc + * Descriptor types as defined by the USB specification. */ +Descriptor_Type :: enum c.int { + /** Device descriptor. See libusb_device_descriptor. */ + DEVICE = 0x01, + /** Configuration descriptor. See libusb_config_descriptor. */ + CONFIG = 0x02, + /** String descriptor */ + STRING = 0x03, + /** Interface descriptor. See libusb_interface_descriptor. */ + INTERFACE = 0x04, + /** Endpoint descriptor. See libusb_endpoint_descriptor. */ + ENDPOINT = 0x05, + /** Interface Association Descriptor. 
+ * See libusb_interface_association_descriptor */ + INTERFACE_ASSOCIATION = 0x0b, + /** BOS descriptor */ + BOS = 0x0f, + /** Device Capability descriptor */ + DEVICE_CAPABILITY = 0x10, + /** HID descriptor */ + HID = 0x21, + /** HID report descriptor */ + REPORT = 0x22, + /** Physical descriptor */ + PHYSICAL = 0x23, + /** Hub descriptor */ + HUB = 0x29, + /** SuperSpeed Hub descriptor */ + SUPERSPEED_HUB = 0x2a, + /** SuperSpeed Endpoint Companion descriptor */ + SS_ENDPOINT_COMPANION = 0x30, +} + +/* Descriptor sizes per descriptor type */ +DT_DEVICE_SIZE :: 18 +DT_CONFIG_SIZE :: 9 +DT_INTERFACE_SIZE :: 9 +DT_ENDPOINT_SIZE :: 7 +DT_ENDPOINT_AUDIO_SIZE :: 9 /* Audio extension */ +DT_HUB_NONVAR_SIZE :: 7 +DT_SS_ENDPOINT_COMPANION_SIZE :: 6 +DT_BOS_SIZE :: 5 +DT_DEVICE_CAPABILITY_SIZE :: 3 + +/* BOS descriptor sizes */ +BT_USB_2_0_EXTENSION_SIZE :: 7 +BT_SS_USB_DEVICE_CAPABILITY_SIZE :: 10 +BT_CONTAINER_ID_SIZE :: 20 +BT_PLATFORM_DESCRIPTOR_MIN_SIZE :: 20 + +/* We unwrap the BOS => define its max size */ +DT_BOS_MAX_SIZE :: 42 + +ENDPOINT_ADDRESS_MASK :: 0x0f /* in bEndpointAddress */ +ENDPOINT_DIR_MASK :: 0x80 + +/** \ingroup libusb_desc + * Endpoint direction. Values for bit 7 of the + * \ref libusb_endpoint_descriptor::bEndpointAddress "endpoint address" scheme. + */ +Endpoint_Direction :: enum c.int { + /** Out: host-to-device */ + ENDPOINT_OUT = 0x00, + /** In: device-to-host */ + ENDPOINT_IN = 0x80, +} + +TRANSFER_TYPE_MASK :: 0x03 /* in bmAttributes */ + +/** \ingroup libusb_desc + * Endpoint transfer type. Values for bits 0:1 of the + * \ref libusb_endpoint_descriptor::bmAttributes "endpoint attributes" field. 
+ */ +Endpoint_Transfer_Type :: enum c.int { + /** Control endpoint */ + CONTROL = 0x0, + /** Isochronous endpoint */ + ISOCHRONOUS = 0x1, + /** Bulk endpoint */ + BULK = 0x2, + /** Interrupt endpoint */ + INTERRUPT = 0x3, +} + +/** \ingroup libusb_misc + * Standard requests, as defined in table 9-5 of the USB 3.0 specifications */ +Standard_Request :: enum c.int { + /** Request status of the specific recipient */ + GET_STATUS = 0x00, + /** Clear or disable a specific feature */ + CLEAR_FEATURE = 0x01, + + /* 0x02 is reserved */ + + /** Set or enable a specific feature */ + SET_FEATURE = 0x03, + + /* 0x04 is reserved */ + + /** Set device address for all future accesses */ + SET_ADDRESS = 0x05, + /** Get the specified descriptor */ + GET_DESCRIPTOR = 0x06, + /** Used to update existing descriptors or add new descriptors */ + SET_DESCRIPTOR = 0x07, + /** Get the current device configuration value */ + GET_CONFIGURATION = 0x08, + /** Set device configuration */ + SET_CONFIGURATION = 0x09, + /** Return the selected alternate setting for the specified interface */ + GET_INTERFACE = 0x0a, + /** Select an alternate interface for the specified interface */ + SET_INTERFACE = 0x0b, + /** Set then report an endpoint's synchronization frame */ + SYNCH_FRAME = 0x0c, + /** Sets both the U1 and U2 Exit Latency */ + SET_SEL = 0x30, + /** Delay from the time a host transmits a packet to the time it is + * received by the device. */ + SET_ISOCH_DELAY = 0x31, +} + +/** \ingroup libusb_misc + * Request type bits of the + * \ref libusb_control_setup::bmRequestType "bmRequestType" field in control + * transfers. */ +Request_Type :: enum c.int { + /** Standard */ + STANDARD = (0x00 << 5), + /** Class */ + CLASS = (0x01 << 5), + /** Vendor */ + VENDOR = (0x02 << 5), + /** Reserved */ + RESERVED = (0x03 << 5), +} + +/** \ingroup libusb_misc + * Recipient bits of the + * \ref libusb_control_setup::bmRequestType "bmRequestType" field in control + * transfers. 
Values 4 through 31 are reserved. */ +Request_Recipient :: enum c.int { + /** Device */ + DEVICE = 0x00, + /** Interface */ + INTERFACE = 0x01, + /** Endpoint */ + ENDPOINT = 0x02, + /** Other */ + OTHER = 0x03, +} + +ISO_SYNC_TYPE_MASK :: 0x0c + +/** \ingroup libusb_desc + * Synchronization type for isochronous endpoints. Values for bits 2:3 of the + * \ref libusb_endpoint_descriptor::bmAttributes "bmAttributes" field in + * libusb_endpoint_descriptor. + */ +Iso_Sync_Type :: enum c.int { + /** No synchronization */ + NONE = 0x0, + /** Asynchronous */ + ASYNC = 0x1, + /** Adaptive */ + ADAPTIVE = 0x2, + /** Synchronous */ + SYNC = 0x3, +} + +ISO_USAGE_TYPE_MASK :: 0x30 + +/** \ingroup libusb_desc + * Usage type for isochronous endpoints. Values for bits 4:5 of the + * \ref libusb_endpoint_descriptor::bmAttributes "bmAttributes" field in + * libusb_endpoint_descriptor. + */ +Iso_Usage_Type :: enum c.int { + /** Data endpoint */ + DATA = 0x0, + /** Feedback endpoint */ + FEEDBACK = 0x1, + /** Implicit feedback Data endpoint */ + IMPLICIT = 0x2, +} + +/** \ingroup libusb_desc + * Supported speeds (wSpeedSupported) bitfield. Indicates what + * speeds the device supports. + */ +Supported_Speed :: enum c.int { + /** Low speed operation supported (1.5MBit/s). */ + LOW_SPEED_OPERATION = (1 << 0), + /** Full speed operation supported (12MBit/s). */ + FULL_SPEED_OPERATION = (1 << 1), + /** High speed operation supported (480MBit/s). */ + HIGH_SPEED_OPERATION = (1 << 2), + /** Superspeed operation supported (5000MBit/s). */ + SUPER_SPEED_OPERATION = (1 << 3), +} + +/** \ingroup libusb_desc + * Masks for the bits of the + * \ref libusb_usb_2_0_extension_descriptor::bmAttributes "bmAttributes" field + * of the USB 2.0 Extension descriptor. 
+ */ +Usb2_Extension_Attributes :: enum c.int { + /** Supports Link Power Management (LPM) */ + BM_LPM_SUPPORT = (1 << 1), +} + +/** \ingroup libusb_desc + * Masks for the bits of the + * \ref libusb_ss_usb_device_capability_descriptor::bmAttributes "bmAttributes" field + * field of the SuperSpeed USB Device Capability descriptor. + */ +Ss_Usb_Device_Capability_Attributes :: enum c.int { + /** Supports Latency Tolerance Messages (LTM) */ + BM_LTM_SUPPORT = (1 << 1), +} + +/** \ingroup libusb_desc + * USB capability types + */ +Bos_Type :: enum c.int { + /** Wireless USB device capability */ + WIRELESS_USB_DEVICE_CAPABILITY = 0x01, + /** USB 2.0 extensions */ + USB_2_0_EXTENSION = 0x02, + /** SuperSpeed USB device capability */ + SS_USB_DEVICE_CAPABILITY = 0x03, + /** Container ID type */ + CONTAINER_ID = 0x04, + /** Platform descriptor */ + PLATFORM_DESCRIPTOR = 0x05, +} + +/** \ingroup libusb_desc + * A structure representing the standard USB device descriptor. This + * descriptor is documented in section 9.6.1 of the USB 3.0 specification. + * All multiple-byte fields are represented in host-endian format. + */ +Device_Descriptor :: struct { + /** Size of this descriptor (in bytes) */ + bLength: u8, + /** Descriptor type. Will have value + * \ref libusb_descriptor_type::LIBUSB_DT_DEVICE LIBUSB_DT_DEVICE in this + * context. */ + bDescriptorType: u8, + /** USB specification release number in binary-coded decimal. A value of + * 0x0200 indicates USB 2.0, 0x0110 indicates USB 1.1, etc. */ + bcdUSB: u16, + /** USB-IF class code for the device. See \ref libusb_class_code. 
*/ + bDeviceClass: u8, + /** USB-IF subclass code for the device, qualified by the bDeviceClass + * value */ + bDeviceSubClass: u8, + /** USB-IF protocol code for the device, qualified by the bDeviceClass and + * bDeviceSubClass values */ + bDeviceProtocol: u8, + /** Maximum packet size for endpoint 0 */ + bMaxPacketSize0: u8, + /** USB-IF vendor ID */ + idVendor: u16, + /** USB-IF product ID */ + idProduct: u16, + /** Device release number in binary-coded decimal */ + bcdDevice: u16, + /** Index of string descriptor describing manufacturer */ + iManufacturer: u8, + /** Index of string descriptor describing product */ + iProduct: u8, + /** Index of string descriptor containing device serial number */ + iSerialNumber: u8, + /** Number of possible configurations */ + bNumConfigurations: u8, +} + +/** \ingroup libusb_desc + * A structure representing the standard USB endpoint descriptor. This + * descriptor is documented in section 9.6.6 of the USB 3.0 specification. + * All multiple-byte fields are represented in host-endian format. + */ +Endpoint_Descriptor :: struct { + /** Size of this descriptor (in bytes) */ + bLength: u8, + /** Descriptor type. Will have value + * \ref libusb_descriptor_type::LIBUSB_DT_ENDPOINT LIBUSB_DT_ENDPOINT in + * this context. */ + bDescriptorType: u8, + /** The address of the endpoint described by this descriptor. Bits 0:3 are + * the endpoint number. Bits 4:6 are reserved. Bit 7 indicates direction, + * see \ref libusb_endpoint_direction. */ + bEndpointAddress: u8, + /** Attributes which apply to the endpoint when it is configured using + * the bConfigurationValue. Bits 0:1 determine the transfer type and + * correspond to \ref libusb_endpoint_transfer_type. Bits 2:3 are only used + * for isochronous endpoints and correspond to \ref libusb_iso_sync_type. + * Bits 4:5 are also only used for isochronous endpoints and correspond to + * \ref libusb_iso_usage_type. Bits 6:7 are reserved. 
*/ + bmAttributes: u8, + /** Maximum packet size this endpoint is capable of sending/receiving. */ + wMaxPacketSize: u16, + /** Interval for polling endpoint for data transfers. */ + bInterval: u8, + /** For audio devices only: the rate at which synchronization feedback + * is provided. */ + bRefresh: u8, + /** For audio devices only: the address if the synch endpoint */ + bSynchAddress: u8, + /** Extra descriptors. If libusb encounters unknown endpoint descriptors, + * it will store them here, should you wish to parse them. */ + extra: [^]u8, + /** Length of the extra descriptors, in bytes. Must be non-negative. */ + extra_length: c.int, +} + +/** \ingroup libusb_desc + * A structure representing the standard USB interface association descriptor. + * This descriptor is documented in section 9.6.4 of the USB 3.0 specification. + * All multiple-byte fields are represented in host-endian format. + */ +Interface_Association_Descriptor :: struct { + /** Size of this descriptor (in bytes) */ + bLength: u8, + /** Descriptor type. Will have value + * \ref libusb_descriptor_type::LIBUSB_DT_INTERFACE_ASSOCIATION + * LIBUSB_DT_INTERFACE_ASSOCIATION in this context. */ + bDescriptorType: u8, + /** Interface number of the first interface that is associated + * with this function */ + bFirstInterface: u8, + /** Number of contiguous interfaces that are associated with + * this function */ + bInterfaceCount: u8, + /** USB-IF class code for this function. + * A value of zero is not allowed in this descriptor. + * If this field is 0xff, the function class is vendor-specific. + * All other values are reserved for assignment by the USB-IF. + */ + bFunctionClass: u8, + /** USB-IF subclass code for this function. + * If this field is not set to 0xff, all values are reserved + * for assignment by the USB-IF + */ + bFunctionSubClass: u8, + /** USB-IF protocol code for this function. + * These codes are qualified by the values of the bFunctionClass + * and bFunctionSubClass fields. 
+ */ + bFunctionProtocol: u8, + /** Index of string descriptor describing this function */ + iFunction: u8, +} + +/** \ingroup libusb_desc + * Structure containing an array of 0 or more interface association + * descriptors + */ +Interface_Association_Descriptor_Array :: struct { + /** Array of interface association descriptors. The size of this array + * is determined by the length field. + */ + iad: [^]Interface_Association_Descriptor, + /** Number of interface association descriptors contained. Read-only. */ + length: c.int, +} + +/** \ingroup libusb_desc + * A structure representing the standard USB interface descriptor. This + * descriptor is documented in section 9.6.5 of the USB 3.0 specification. + * All multiple-byte fields are represented in host-endian format. + */ +Interface_Descriptor :: struct { + /** Size of this descriptor (in bytes) */ + bLength: u8, + /** Descriptor type. Will have value + * \ref libusb_descriptor_type::LIBUSB_DT_INTERFACE LIBUSB_DT_INTERFACE + * in this context. */ + bDescriptorType: u8, + /** Number of this interface */ + bInterfaceNumber: u8, + /** Value used to select this alternate setting for this interface */ + bAlternateSetting: u8, + /** Number of endpoints used by this interface (excluding the control + * endpoint). */ + bNumEndpoints: u8, + /** USB-IF class code for this interface. See \ref libusb_class_code. */ + bInterfaceClass: u8, + /** USB-IF subclass code for this interface, qualified by the + * bInterfaceClass value */ + bInterfaceSubClass: u8, + /** USB-IF protocol code for this interface, qualified by the + * bInterfaceClass and bInterfaceSubClass values */ + bInterfaceProtocol: u8, + /** Index of string descriptor describing this interface */ + iInterface: u8, + /** Array of endpoint descriptors. This length of this array is determined + * by the bNumEndpoints field. */ + endpoint: [^]Endpoint_Descriptor, + /** Extra descriptors. 
If libusb encounters unknown interface descriptors, + * it will store them here, should you wish to parse them. */ + extra: [^]u8, + /** Length of the extra descriptors, in bytes. Must be non-negative. */ + extra_length: c.int, +} + +/** \ingroup libusb_desc + * A collection of alternate settings for a particular USB interface. + */ +Interface :: struct { + /** Array of interface descriptors. The length of this array is determined + * by the num_altsetting field. */ + altsetting: [^]Interface_Descriptor, + /** The number of alternate settings that belong to this interface. + * Must be non-negative. */ + num_altsetting: c.int, +} + +/** \ingroup libusb_desc + * A structure representing the standard USB configuration descriptor. This + * descriptor is documented in section 9.6.3 of the USB 3.0 specification. + * All multiple-byte fields are represented in host-endian format. + */ +Config_Descriptor :: struct { + /** Size of this descriptor (in bytes) */ + bLength: u8, + /** Descriptor type. Will have value + * \ref libusb_descriptor_type::LIBUSB_DT_CONFIG LIBUSB_DT_CONFIG + * in this context. */ + bDescriptorType: u8, + /** Total length of data returned for this configuration */ + wTotalLength: u16, + /** Number of interfaces supported by this configuration */ + bNumInterfaces: u8, + /** Identifier value for this configuration */ + bConfigurationValue: u8, + /** Index of string descriptor describing this configuration */ + iConfiguration: u8, + /** Configuration characteristics */ + bmAttributes: u8, + /** Maximum power consumption of the USB device from this bus in this + * configuration when the device is fully operation. Expressed in units + * of 2 mA when the device is operating in high-speed mode and in units + * of 8 mA when the device is operating in super-speed mode. */ + MaxPower: u8, + /** Array of interfaces supported by this configuration. The length of + * this array is determined by the bNumInterfaces field. 
*/ + interface: [^]Interface, + /** Extra descriptors. If libusb encounters unknown configuration + * descriptors, it will store them here, should you wish to parse them. */ + extra: [^]u8, + /** Length of the extra descriptors, in bytes. Must be non-negative. */ + extra_length: c.int, +} + +/** \ingroup libusb_desc + * A structure representing the superspeed endpoint companion + * descriptor. This descriptor is documented in section 9.6.7 of + * the USB 3.0 specification. All multiple-byte fields are represented in + * host-endian format. + */ +Ss_Endpoint_Companion_Descriptor :: struct { + /** Size of this descriptor (in bytes) */ + bLength: u8, + /** Descriptor type. Will have value + * \ref libusb_descriptor_type::LIBUSB_DT_SS_ENDPOINT_COMPANION in + * this context. */ + bDescriptorType: u8, + /** The maximum number of packets the endpoint can send or + * receive as part of a burst. */ + bMaxBurst: u8, + /** In bulk EP: bits 4:0 represents the maximum number of + * streams the EP supports. In isochronous EP: bits 1:0 + * represents the Mult - a zero based value that determines + * the maximum number of packets within a service interval */ + bmAttributes: u8, + /** The total number of bytes this EP will transfer every + * service interval. Valid only for periodic EPs. */ + wBytesPerInterval: u16, +} + +/** \ingroup libusb_desc + * A generic representation of a BOS Device Capability descriptor. It is + * advised to check bDevCapabilityType and call the matching + * libusb_get_*_descriptor function to get a structure fully matching the type. + */ +Bos_Dev_Capability_Descriptor :: struct { + /** Size of this descriptor (in bytes) */ + bLength: u8, + /** Descriptor type. Will have value + * \ref libusb_descriptor_type::LIBUSB_DT_DEVICE_CAPABILITY + * LIBUSB_DT_DEVICE_CAPABILITY in this context. 
*/ + bDescriptorType: u8, + /** Device Capability type */ + bDevCapabilityType: u8, + /** Device Capability data (bLength - 3 bytes) */ + dev_capability_data: [0]u8, +} + +/** \ingroup libusb_desc + * A structure representing the Binary Device Object Store (BOS) descriptor. + * This descriptor is documented in section 9.6.2 of the USB 3.0 specification. + * All multiple-byte fields are represented in host-endian format. + */ +Bos_Descriptor :: struct { + /** Size of this descriptor (in bytes) */ + bLength: u8, + /** Descriptor type. Will have value + * \ref libusb_descriptor_type::LIBUSB_DT_BOS LIBUSB_DT_BOS + * in this context. */ + bDescriptorType: u8, + /** Length of this descriptor and all of its sub descriptors */ + wTotalLength: u16, + /** The number of separate device capability descriptors in + * the BOS */ + bNumDeviceCaps: u8, + /** bNumDeviceCap Device Capability Descriptors + * Isochronous packet descriptors, for isochronous transfers only. + * This is a C flexible array member and memory must be handled completely manually if it's used.*/ + dev_capability: [0]^Bos_Dev_Capability_Descriptor, +} + +/** \ingroup libusb_desc + * A structure representing the USB 2.0 Extension descriptor + * This descriptor is documented in section 9.6.2.1 of the USB 3.0 specification. + * All multiple-byte fields are represented in host-endian format. + */ +Usb2_Extension_Descriptor :: struct { + /** Size of this descriptor (in bytes) */ + bLength: u8, + /** Descriptor type. Will have value + * \ref libusb_descriptor_type::LIBUSB_DT_DEVICE_CAPABILITY + * LIBUSB_DT_DEVICE_CAPABILITY in this context. */ + bDescriptorType: u8, + /** Capability type. Will have value + * \ref libusb_capability_type::LIBUSB_BT_USB_2_0_EXTENSION + * LIBUSB_BT_USB_2_0_EXTENSION in this context. */ + bDevCapabilityType: u8, + /** Bitmap encoding of supported device level features. 
+ * A value of one in a bit location indicates a feature is + * supported; a value of zero indicates it is not supported. + * See \ref libusb_usb_2_0_extension_attributes. */ + bmAttributes: u32, +} + +/** \ingroup libusb_desc + * A structure representing the SuperSpeed USB Device Capability descriptor + * This descriptor is documented in section 9.6.2.2 of the USB 3.0 specification. + * All multiple-byte fields are represented in host-endian format. + */ +Ss_Usb_Device_Capability_Descriptor :: struct { + /** Size of this descriptor (in bytes) */ + bLength: u8, + /** Descriptor type. Will have value + * \ref libusb_descriptor_type::LIBUSB_DT_DEVICE_CAPABILITY + * LIBUSB_DT_DEVICE_CAPABILITY in this context. */ + bDescriptorType: u8, + /** Capability type. Will have value + * \ref libusb_capability_type::LIBUSB_BT_SS_USB_DEVICE_CAPABILITY + * LIBUSB_BT_SS_USB_DEVICE_CAPABILITY in this context. */ + bDevCapabilityType: u8, + /** Bitmap encoding of supported device level features. + * A value of one in a bit location indicates a feature is + * supported; a value of zero indicates it is not supported. + * See \ref libusb_ss_usb_device_capability_attributes. */ + bmAttributes: u8, + /** Bitmap encoding of the speed supported by this device when + * operating in SuperSpeed mode. See \ref libusb_supported_speed. */ + wSpeedSupported: u16, + /** The lowest speed at which all the functionality supported + * by the device is available to the user. For example if the + * device supports all its functionality when connected at + * full speed and above then it sets this value to 1. */ + bFunctionalitySupport: u8, + /** U1 Device Exit Latency. */ + bU1DevExitLat: u8, + /** U2 Device Exit Latency. */ + bU2DevExitLat: u16, +} + +/** \ingroup libusb_desc + * A structure representing the Container ID descriptor. + * This descriptor is documented in section 9.6.2.3 of the USB 3.0 specification. + * All multiple-byte fields, except UUIDs, are represented in host-endian format. 
+ */ +Container_Id_Descriptor :: struct { + /** Size of this descriptor (in bytes) */ + bLength: u8, + /** Descriptor type. Will have value + * \ref libusb_descriptor_type::LIBUSB_DT_DEVICE_CAPABILITY + * LIBUSB_DT_DEVICE_CAPABILITY in this context. */ + bDescriptorType: u8, + /** Capability type. Will have value + * \ref libusb_capability_type::LIBUSB_BT_CONTAINER_ID + * LIBUSB_BT_CONTAINER_ID in this context. */ + bDevCapabilityType: u8, + /** Reserved field */ + bReserved: u8, + /** 128 bit UUID */ + ContainerID: u128, +} + +/** \ingroup libusb_desc + * A structure representing a Platform descriptor. + * This descriptor is documented in section 9.6.2.4 of the USB 3.2 specification. + * This struct contains a mandatory C flexible array member all of it's memory must be handled completely manually. + */ +Platform_Descriptor :: struct { + /** Size of this descriptor (in bytes) */ + bLength: u8, + /** Descriptor type. Will have value + * \ref libusb_descriptor_type::LIBUSB_DT_DEVICE_CAPABILITY + * LIBUSB_DT_DEVICE_CAPABILITY in this context. */ + bDescriptorType: u8, + /** Capability type. Will have value + * \ref libusb_capability_type::LIBUSB_BT_PLATFORM_DESCRIPTOR + * LIBUSB_BT_CONTAINER_ID in this context. */ + bDevCapabilityType: u8, + /** Reserved field */ + bReserved: u8, + /** 128 bit UUID */ + PlatformCapabilityUUID: u128, + /** Capability data (bLength - 20) + * This is a C flexible array member and memory must be handled completely manually.*/ + CapabilityData: [0]u8, +} + +//TODO: Add `libusb_control_setup` + +Context :: distinct rawptr +Device :: distinct rawptr +Device_Handle :: distinct rawptr + +/** \ingroup libusb_lib + * Structure providing the version of the libusb runtime + */ +Version :: struct { + /** Library major version. */ + major: u16, + /** Library minor version. */ + minor: u16, + /** Library micro version. */ + micro: u16, + /** Library nano version. */ + nano: u16, + /** Library release candidate suffix string, e.g. "-rc4". 
*/ + rc: cstring, + /** For ABI compatibility only. */ + describe: cstring, +} + +/** \ingroup libusb_dev + * Speed codes. Indicates the speed at which the device is operating. + */ +Speed :: enum c.int { + /** The OS doesn't report or know the device speed. */ + UNKNOWN = 0, + /** The device is operating at low speed (1.5MBit/s). */ + LOW = 1, + /** The device is operating at full speed (12MBit/s). */ + FULL = 2, + /** The device is operating at high speed (480MBit/s). */ + HIGH = 3, + /** The device is operating at super speed (5000MBit/s). */ + SUPER = 4, + /** The device is operating at super speed plus (10000MBit/s). */ + SUPER_PLUS = 5, +} + +/** \ingroup libusb_misc + * Error codes. Most libusb functions return 0 on success or one of these + * codes on failure. + * You can call libusb_error_name() to retrieve a string representation of an + * error code or libusb_strerror() to get an end-user suitable description of + * an error code. + */ +Error :: enum c.int { + /** Success (no error) */ + SUCCESS = 0, + /** Input/output error */ + IO = -1, + /** Invalid parameter */ + INVALID_PARAM = -2, + /** Access denied (insufficient permissions) */ + ACCESS = -3, + /** No such device (it may have been disconnected) */ + NO_DEVICE = -4, + /** Entity not found */ + NOT_FOUND = -5, + /** Resource busy */ + BUSY = -6, + /** Operation timed out */ + TIMEOUT = -7, + /** Overflow */ + OVERFLOW = -8, + /** Pipe error */ + PIPE = -9, + /** System call interrupted (perhaps due to signal) */ + INTERRUPTED = -10, + /** Insufficient memory */ + NO_MEM = -11, + /** Operation not supported or unimplemented on this platform */ + NOT_SUPPORTED = -12, + /* NB: Remember to update LIBUSB_ERROR_COUNT below as well as the + message strings in strerror.c when adding new error codes here. 
*/ + + /** Other error */ + OTHER = -99, +} + +/* Total number of error codes in enum libusb_error */ +ERROR_COUNT :: 14 + +/** \ingroup libusb_asyncio + * Transfer type */ +TransferType :: enum c.int { + /** Control transfer */ + CONTROL = 0, + /** Isochronous transfer */ + ISOCHRONOUS = 1, + /** Bulk transfer */ + BULK = 2, + /** Interrupt transfer */ + INTERRUPT = 3, + /** Bulk stream transfer */ + BULK_STREAM = 4, +} + +/** \ingroup libusb_asyncio + * Transfer status codes */ +Transfer_Status :: enum c.int { + /** Transfer completed without error. Note that this does not indicate + * that the entire amount of requested data was transferred. */ + COMPLETED, + /** Transfer failed */ + ERROR, + /** Transfer timed out */ + TIMED_OUT, + /** Transfer was cancelled */ + CANCELLED, + /** For bulk/interrupt endpoints: halt condition detected (endpoint + * stalled). For control endpoints: control request not supported. */ + STALL, + /** Device was disconnected */ + NO_DEVICE, + /** Device sent more data than requested */ + OVERFLOW, +} + +Transfer_Flag_Bits :: enum u8 { + /** Report short frames as errors */ + SHORT_NOT_OK, + /** Automatically free() transfer buffer during libusb_free_transfer(). + * Note that buffers allocated with libusb_dev_mem_alloc() should not + * be attempted freed in this way, since free() is not an appropriate + * way to release such memory. */ + FREE_BUFFER, + /** Automatically call libusb_free_transfer() after callback returns. + * If this flag is set, it is illegal to call libusb_free_transfer() + * from your transfer callback, as this will result in a double-free + * when this flag is acted upon. */ + FREE_TRANSFER, + /** Terminate transfers that are a multiple of the endpoint's + * wMaxPacketSize with an extra zero length packet. This is useful + * when a device protocol mandates that each logical request is + * terminated by an incomplete packet (i.e. the logical requests are + * not separated by other means). 
+ * + * This flag only affects host-to-device transfers to bulk and interrupt + * endpoints. In other situations, it is ignored. + * + * This flag only affects transfers with a length that is a multiple of + * the endpoint's wMaxPacketSize. On transfers of other lengths, this + * flag has no effect. Therefore, if you are working with a device that + * needs a ZLP whenever the end of the logical request falls on a packet + * boundary, then it is sensible to set this flag on every + * transfer (you do not have to worry about only setting it on transfers + * that end on the boundary). + * + * This flag is currently only supported on Linux. + * On other systems, libusb_submit_transfer() will return + * \ref LIBUSB_ERROR_NOT_SUPPORTED for every transfer where this + * flag is set. + * + * Available since libusb-1.0.9. + */ + ADD_ZERO_PACKET, +} + +Transfer_Flag :: bit_set[Transfer_Flag_Bits;u8] + +/** \ingroup libusb_asyncio + * Isochronous packet descriptor. */ +Iso_Packet_Descriptor :: struct { + /** Length of data to request in this packet */ + length: c.uint, + /** Amount of data that was actually transferred */ + actual_length: c.uint, + /** Status code for this packet */ + status: Transfer_Status, +} + +Transfer_Cb :: #type proc "c" (transfer: ^Transfer) + +/** \ingroup libusb_asyncio + * The generic USB transfer structure. The user populates this structure and + * then submits it in order to request a transfer. After the transfer has + * completed, the library populates the transfer with the results and passes + * it back to the user. + */ +Transfer :: struct { + /** Handle of the device that this transfer will be submitted to */ + dev_handle: Device_Handle, + /** A bitwise OR combination of \ref libusb_transfer_flags. */ + flags: Transfer_Flag, + /** Address of the endpoint where this transfer will be sent. */ + endpoint: u8, + /** Type of the transfer from \ref libusb_transfer_type */ + type: u8, + /** Timeout for this transfer in milliseconds. 
A value of 0 indicates no + * timeout. */ + timeout: c.uint, + /** The status of the transfer. Read-only, and only for use within + * transfer callback function. + * + * If this is an isochronous transfer, this field may read COMPLETED even + * if there were errors in the frames. Use the + * \ref libusb_iso_packet_descriptor::status "status" field in each packet + * to determine if errors occurred. */ + status: Transfer_Status, + /** Length of the data buffer. Must be non-negative. */ + length: c.int, + /** Actual length of data that was transferred. Read-only, and only for + * use within transfer callback function. Not valid for isochronous + * endpoint transfers. */ + actual_length: c.int, + /** Callback function. This will be invoked when the transfer completes, + * fails, or is cancelled. */ + callback: Transfer_Cb, + /** User context data. Useful for associating specific data to a transfer + * that can be accessed from within the callback function. + * + * This field may be set manually or is taken as the `user_data` parameter + * of the following functions: + * - libusb_fill_bulk_transfer() + * - libusb_fill_bulk_stream_transfer() + * - libusb_fill_control_transfer() + * - libusb_fill_interrupt_transfer() + * - libusb_fill_iso_transfer() */ + user_data: rawptr, + /** Data buffer */ + buffer: [^]u8, + /** Number of isochronous packets. Only used for I/O with isochronous + * endpoints. Must be non-negative. */ + num_iso_packets: c.int, + /** Isochronous packet descriptors, for isochronous transfers only. + * This is a C flexible array member and memory must be handled completely manually if it's used.*/ + iso_packet_desc: [0]Iso_Packet_Descriptor, +} + +/** \ingroup libusb_misc + * Capabilities supported by an instance of libusb on the current running + * platform. Test if the loaded library supports a given capability by calling + * \ref libusb_has_capability(). + */ +Capability :: enum c.uint { + /** The libusb_has_capability() API is available. 
*/ + HAS_CAPABILITY = 0x0000, + /** Hotplug support is available on this platform. */ + HAS_HOTPLUG = 0x0001, + /** The library can access HID devices without requiring user intervention. + * Note that before being able to actually access an HID device, you may + * still have to call additional libusb functions such as + * \ref libusb_detach_kernel_driver(). */ + HAS_HID_ACCESS = 0x0100, + /** The library supports detaching of the default USB driver, using + * \ref libusb_detach_kernel_driver(), if one is set by the OS kernel */ + SUPPORTS_DETACH_KERNEL_DRIVER = 0x0101, +} + +/** \ingroup libusb_lib + * Log message levels. + */ +Log_Level :: enum c.int { + /** (0) : No messages ever emitted by the library (default) */ + NONE = 0, + /** (1) : Error messages are emitted */ + ERROR = 1, + /** (2) : Warning and error messages are emitted */ + WARNING = 2, + /** (3) : Informational, warning and error messages are emitted */ + INFO = 3, + /** (4) : All messages are emitted */ + DEBUG = 4, +} + +/** \ingroup libusb_lib + * Log callback mode. + * + * Since version 1.0.23, \ref LIBUSB_API_VERSION >= 0x01000107 + * + * \see libusb_set_log_cb() + */ +Log_Cb_Mode :: enum c.int { + /** Callback function handling all log messages. */ + GLOBAL = (1 << 0), + /** Callback function handling context related log messages. */ + CONTEXT = (1 << 1), +} + +/** \ingroup libusb_lib + * Available option values for libusb_set_option() and libusb_init_context(). + */ +Option :: enum c.int { + /** Set the log message verbosity. + * + * This option must be provided an argument of type \ref libusb_log_level. + * The default level is LIBUSB_LOG_LEVEL_NONE, which means no messages are ever + * printed. If you choose to increase the message verbosity level, ensure + * that your application does not close the stderr file descriptor. + * + * You are advised to use level LIBUSB_LOG_LEVEL_WARNING. 
libusb is conservative + * with its message logging and most of the time, will only log messages that + * explain error conditions and other oddities. This will help you debug + * your software. + * + * If the LIBUSB_DEBUG environment variable was set when libusb was + * initialized, this option does nothing: the message verbosity is fixed + * to the value in the environment variable. + * + * If libusb was compiled without any message logging, this option does + * nothing: you'll never get any messages. + * + * If libusb was compiled with verbose debug message logging, this option + * does nothing: you'll always get messages from all levels. + */ + LOG_LEVEL = 0, + /** Use the UsbDk backend for a specific context, if available. + * + * This option should be set at initialization with libusb_init_context() + * otherwise unspecified behavior may occur. + * + * Only valid on Windows. Ignored on all other platforms. + */ + USE_USBDK = 1, + /** Do not scan for devices + * + * With this option set, libusb will skip scanning devices in + * libusb_init_context(). + * + * Hotplug functionality will also be deactivated. + * + * The option is useful in combination with libusb_wrap_sys_device(), + * which can access a device directly without prior device scanning. + * + * This is typically needed on Android, where access to USB devices + * is limited. + * + * This option should only be used with libusb_init_context() + * otherwise unspecified behavior may occur. + * + * Only valid on Linux. Ignored on all other platforms. + */ + NO_DEVICE_DISCOVERY = 2, + /** Set the context log callback function. + * + * Set the log callback function either on a context or globally. This + * option must be provided an argument of type \ref libusb_log_cb. + * Using this option with a NULL context is equivalent to calling + * libusb_set_log_cb() with mode \ref LIBUSB_LOG_CB_GLOBAL. 
+ * Using it with a non-NULL context is equivalent to calling + * libusb_set_log_cb() with mode \ref LIBUSB_LOG_CB_CONTEXT. + */ + LOG_CB = 3, + MAX = 4, +} + +Log_Cb :: #type proc "c" (ctx: ^Context, level: Log_Level, str: cstring) + +Init_Option_Value :: struct #raw_union { + ival: c.int, + libusb_log_cb: Log_Cb, +} + +Init_Option :: struct { + option: Option, + value: Init_Option_Value, +} + +/** \ingroup libusb_hotplug + * + * Since version 1.0.16, \ref LIBUSB_API_VERSION >= 0x01000102 + * + * Hotplug events */ +Hotplug_Event :: enum c.int { + /** A device has been plugged in and is ready to use */ + DEVICE_ARRIVED = (1 << 0), + /** A device has left and is no longer available. + * It is the user's responsibility to call libusb_close on any handle associated with a disconnected device. + * It is safe to call libusb_get_device_descriptor on a device that has left */ + DEVICE_LEFT = (1 << 1), +} + +/** \ingroup libusb_hotplug + * + * Since version 1.0.16, \ref LIBUSB_API_VERSION >= 0x01000102 + * + * Hotplug flags */ +Hotplug_Flag_Bits :: enum c.int { + /** Arm the callback and fire it for all matching currently attached devices. */ + ENUMERATE = 0, +} + +Hotplug_Flag :: bit_set[Hotplug_Flag_Bits;c.int] + +/** \ingroup libusb_hotplug + * Convenience macro when not using any flags */ +HOTPLUG_NO_FLAGS :: 0 +/** \ingroup libusb_hotplug + * Wildcard matching for hotplug events */ +HOTPLUG_MATCH_ANY :: -1 + +Hotplug_Callback_Fn :: #type proc "c" ( + ctx: Context, + device: Device, + event: Hotplug_Event, + user_data: rawptr, +) -> c.int +Callback_Handle :: distinct c.int + +Transfer_Type :: enum c.int { + CONTROL = 0, + ISOCHRONOUS = 1, + BULK = 2, + INTERRUPT = 3, + STREAM = 4, +} + +Poll_Fd :: struct { + /** Numeric file descriptor */ + fd: c.int, + /** Event flags to poll for from . 
POLLIN indicates that you + * should monitor this file descriptor for becoming ready to read from, + * and POLLOUT indicates that you should monitor this file descriptor for + * nonblocking write readiness. */ + events: posix.Poll_Event, +} + +Poll_FD_Added_CB :: #type proc "c" (fd: posix.FD, events: posix.Poll_Event, user_data: rawptr) + +Poll_FD_Removed_CB :: #type proc "c" (fd: posix.FD, user_data: rawptr) + +@(default_calling_convention = "c", link_prefix = "libusb_") +foreign lib { + //----- Library initialization/deinitialization ---------------------------------- + set_log_cb :: proc(ctx: Context, cb: Log_Cb, mode: Log_Cb_Mode) --- + set_option :: proc(ctx: Context, option: Option, args: c.va_list) -> Error --- + init :: proc(ctx: ^Context) -> Error --- + init_context :: proc(ctx: ^Context, options: [^]Init_Option, num_options: c.int) -> Error --- + exit :: proc(ctx: Context) --- + + //----- Device handling and enumeration ---------------------------------- + get_device_list :: proc(ctx: Context, list: ^[^]Device) -> int --- + free_device_list :: proc(device: [^]Device, unref_devices: c.int) --- + get_bus_number :: proc(dev: Device) -> u8 --- + get_port_number :: proc(dev: Device) -> u8 --- + get_port_numbers :: proc(dev: Device, port_numbers: [^]u8, port_numbers_len: c.int) -> Error --- + get_parent :: proc(dev: Device) -> Device --- + get_device_address :: proc(dev: Device) -> u8 --- + get_device_speed :: proc(dev: Device) -> Speed --- + get_max_iso_packet_size :: proc(dev: Device, endpoint: c.char) -> c.int --- + get_max_alt_packet_size :: proc(dev: Device, interface_number: c.int, alternate_setting: c.int, endpoint: u8) -> c.int --- + ref_device :: proc(dev: Device) -> Device --- + unref_device :: proc(dev: Device) --- + wrap_sys_device :: proc(ctx: Context, sys_dev: rawptr, dev_handle: ^Device_Handle) -> Error --- + open :: proc(dev: Device, dev_handle: ^Device_Handle) -> Error --- + open_device_with_vid_pid :: proc(ctx: Context, vendor_id: u16, 
product_id: u16) -> Device_Handle ---
+	close :: proc(dev_handle: Device_Handle) ---
+	get_device :: proc(dev_handle: Device_Handle) -> Device ---
+	get_configuration :: proc(dev: Device_Handle, config: ^c.int) -> Error ---
+	set_configuration :: proc(dev_handle: Device_Handle, configuration: c.int) -> Error ---
+	claim_interface :: proc(dev_handle: Device_Handle, interface_number: c.int) -> Error ---
+	release_interface :: proc(dev_handle: Device_Handle, interface_number: c.int) -> Error ---
+	// BUGFIX: the C symbol is `libusb_set_interface_alt_setting`; with the block's
+	// `link_prefix = "libusb_"` the bare name `interface_alt_setting` would have
+	// linked against the non-existent `libusb_interface_alt_setting`. The explicit
+	// link_name keeps the existing Odin-side name so callers are unaffected.
+	@(link_name = "libusb_set_interface_alt_setting")
+	interface_alt_setting :: proc(dev_handle: Device_Handle, interface_number: c.int, alternate_setting: c.int) -> Error ---
+	clear_halt :: proc(dev_handle: Device_Handle, endpoint: u8) -> Error ---
+	reset_device :: proc(dev_handle: Device_Handle) -> Error ---
+	kernel_driver_active :: proc(dev_handle: Device_Handle, interface_number: c.int) -> Error ---
+	detach_kernel_driver :: proc(dev_handle: Device_Handle, interface_number: c.int) -> Error ---
+	attach_kernel_driver :: proc(dev_handle: Device_Handle, interface_number: c.int) -> Error ---
+	set_auto_detach_kernel_driver :: proc(dev_handle: Device_Handle, enable: c.int) -> Error ---
+
+	//----- Miscellaneous ----------------------------------
+	has_capability :: proc(capability: Capability) -> c.int ---
+	error_name :: proc(errcode: Error) -> cstring ---
+	get_version :: proc() -> Version ---
+	setlocale :: proc(locale: cstring) -> Error ---
+	strerror :: proc(errcode: Error) -> cstring ---
+
+	//----- USB descriptors ----------------------------------
+	get_device_descriptor :: proc(dev: Device, desc: ^Device_Descriptor) -> Error ---
+	get_active_config_descriptor :: proc(dev: Device, config: ^^Config_Descriptor) -> Error ---
+	get_config_descriptor :: proc(dev: Device, config_index: u8, config_descriptor: ^^Config_Descriptor) -> Error ---
+	get_config_descriptor_by_value :: proc(dev: Device, bConfigurationValue: u8, config: ^^Config_Descriptor) -> Error ---
+	free_config_descriptor :: proc(config: ^Config_Descriptor) ---
+ get_ss_endpoint_companion_descriptor :: proc(ctx: Context, endpoint: ^Endpoint_Direction, ep_comp: ^^Ss_Endpoint_Companion_Descriptor) -> Error --- + free_ss_endpoint_companion_descriptor :: proc(ep_comp: ^Ss_Endpoint_Companion_Descriptor) --- + get_bos_descriptor :: proc(dev_handle: Device_Handle, bos: ^^Bos_Descriptor) -> Error --- + free_bos_descriptor :: proc(bos: ^Bos_Descriptor) --- + get_usb2_extension_descriptor :: proc(ctx: Context, dev_cap: ^Bos_Dev_Capability_Descriptor, usb2_extension: ^^Usb2_Extension_Descriptor) -> Error --- + free_usb2_extension_descriptor :: proc(usb2_extension: ^Usb2_Extension_Descriptor) --- + get_ss_usb_device_capability_descriptor :: proc(ctx: Context, dev_cap: ^Bos_Dev_Capability_Descriptor, ss_usb_device_cap: ^^Ss_Usb_Device_Capability_Descriptor) -> Error --- + free_ss_usb_device_capability_descriptor :: proc(ss_usb_device_cap: ^Ss_Usb_Device_Capability_Descriptor) --- + get_container_id_descriptor :: proc(ctx: Context, dev_cap: ^Bos_Dev_Capability_Descriptor, container_id: ^^Container_Id_Descriptor) -> Error --- + free_container_id_descriptor :: proc(container_id: ^Container_Id_Descriptor) --- + get_platform_descriptor :: proc(ctx: Context, dev_cap: ^Bos_Dev_Capability_Descriptor, platform_descriptor: ^^Platform_Descriptor) -> Error --- + free_platform_descriptor :: proc(platform_descriptor: ^Platform_Descriptor) --- + get_string_descriptor_ascii :: proc(dev_handle: Device_Handle, desc_index: u8, data: cstring, length: c.int) -> c.int --- + get_interface_association_descriptors :: proc(dev: Device, config_index: u8, iad_array: [^][^]Interface_Association_Descriptor_Array) -> Error --- + get_active_interface_association_descriptors :: proc(dev: Device, iad_array: [^][^]Interface_Association_Descriptor_Array) -> Error --- + free_interface_association_descriptors :: proc(iad_array: ^Interface_Association_Descriptor_Array) -> Error --- + + //----- Device hotplug event notification ---------------------------------- + 
hotplug_register_callback :: proc(ctx: Context, events: c.int, flags: Hotplug_Flag, vendor_id: c.int, product_id: c.int, dev_class: c.int, cb_fn: Hotplug_Callback_Fn, user_data: rawptr, callback_handle: ^Callback_Handle) -> Error --- + hotplug_deregister_callback :: proc(ctx: Context, hotplug_callback_handle: Callback_Handle) --- + hotplug_get_user_data :: proc(ctx: Context, hotplug_callback_handle: Callback_Handle) -> rawptr --- + + //----- Asynchronous device I/O ---------------------------------- + alloc_streams :: proc(dev_handle: Device_Handle, num_streams: u32, endpoints: [^]u8, num_endpoints: c.int) -> c.int --- + free_streams :: proc(dev_handle: Device_Handle, endpoints: [^]u8, num_endpoints: c.int) -> Error --- + dev_mem_alloc :: proc(dev_handle: Device_Handle, length: c.size_t) -> [^]u8 --- + dev_mem_free :: proc(dev_handle: Device_Handle, buffer: [^]u8, length: c.size_t) -> Error --- + alloc_transfer :: proc(iso_packets: c.int = 0) -> ^Transfer --- + free_transfer :: proc(transfer: ^Transfer) --- + submit_transfer :: proc(transfer: ^Transfer) -> Error --- + cancel_transfer :: proc(transfer: ^Transfer) -> Error --- + transfer_set_stream_id :: proc(transfer: ^Transfer, stream_id: u32) --- + transfer_get_stream_id :: proc(transfer: ^Transfer) -> u32 --- + + //----- Polling and timing ---------------------------------- + try_lock_events :: proc(ctx: Context) -> c.int --- + lock_events :: proc(ctx: Context) --- + unlock_events :: proc(ctx: Context) --- + event_handling_ok :: proc(ctx: Context) -> c.int --- + event_handler_active :: proc(ctx: Context) -> c.int --- + interrupt_event_handler :: proc(ctx: Context) --- + lock_event_waiters :: proc(ctx: Context) --- + unlock_event_waiters :: proc(ctx: Context) --- + wait_for_event :: proc(ctx: Context, tv: ^posix.timeval) -> c.int --- + handle_events_timeout_completed :: proc(ctx: Context, tv: ^posix.timeval, completed: ^c.int) -> Error --- + handle_events_completed :: proc(ctx: Context, completed: ^c.int) -> Error 
--- + handle_events_locked :: proc(ctx: Context, tv: ^posix.timeval) -> Error --- + pollfds_handle_timeouts :: proc(ctx: Context) -> c.int --- + get_next_timeout :: proc(ctx: Context, tv: ^posix.timeval) --- + set_pollfd_notifiers :: proc(ctx: Context, added_cb: Poll_FD_Added_CB, removed_cb: Poll_FD_Removed_CB, user_data: rawptr) --- + get_pollfds :: proc(ctx: Context) -> [^][^]Poll_Fd --- + free_fds :: proc(pollfds: [^][^]Poll_Fd) --- + + //----- Synchronous device I/O ---------------------------------- + control_transfer :: proc(dev_handle: Device_Handle, bmRequestType: u8, bRequest: u8, wValue: u16, wIndex: u16, data: [^]u8, wLength: u16, timeout: c.uint) -> Error --- + bulk_transfer :: proc(dev_handle: Device_Handle, endpoint: u8, data: [^]u8, length: c.int, transferred: ^c.int, timeout: c.uint) -> Error --- + interrupt_transfer :: proc(dev_handle: Device_Handle, endpoint: u8, data: [^]u8, length: c.int, transferred: ^c.int, timeout: c.uint) -> Error --- +} + +// --------------------------------------------------------------------------------------------------------------------- +// ----- Tests ------------------------ +// --------------------------------------------------------------------------------------------------------------------- +import "core:testing" + +@(test) +init_test :: proc(t: ^testing.T) { + result := init(nil) + + testing.expect_value(t, result, Error.SUCCESS) +} diff --git a/vendor/lmdb/examples/examples.odin b/vendor/lmdb/examples/examples.odin new file mode 100644 index 0000000..846df61 --- /dev/null +++ b/vendor/lmdb/examples/examples.odin @@ -0,0 +1,43 @@ +package examples + +import "core:fmt" +import "core:os" +import "core:sys/posix" +import mdb "../../lmdb" + +// 0o660 +DB_MODE :: posix.mode_t{.IWGRP, .IRGRP, .IWUSR, .IRUSR} +DB_PATH :: "out/debug/lmdb_example_db" + +main :: proc() { + environment: ^mdb.Env + + // Create environment for lmdb + mdb.panic_on_err(mdb.env_create(&environment)) + // Create directory for databases. 
Won't do anything if it already exists. + // 0o774 gives all permissions for owner and group, read for everyone else. + os.make_directory(DB_PATH, 0o774) + // Open the database files (creates them if they don't already exist) + mdb.panic_on_err(mdb.env_open(environment, DB_PATH, 0, DB_MODE)) + + // Transactions + txn_handle: ^mdb.Txn + db_handle: mdb.Dbi + // Put transaction + key := 7 + key_val := mdb.autoval(&key) + put_data := 12 + put_data_val := mdb.autoval(&put_data) + mdb.panic_on_err(mdb.txn_begin(environment, nil, 0, &txn_handle)) + mdb.panic_on_err(mdb.dbi_open(txn_handle, nil, 0, &db_handle)) + mdb.panic_on_err(mdb.put(txn_handle, db_handle, &key_val.raw, &put_data_val.raw, 0)) + mdb.panic_on_err(mdb.txn_commit(txn_handle)) + + // Get transaction + get_data_val := mdb.nil_autoval(int) + mdb.panic_on_err(mdb.txn_begin(environment, nil, 0, &txn_handle)) + mdb.panic_on_err(mdb.get(txn_handle, db_handle, &key_val.raw, &get_data_val.raw)) + mdb.panic_on_err(mdb.txn_commit(txn_handle)) + data_cpy := mdb.autoval_get_data(&get_data_val)^ + fmt.println("Get result:", data_cpy) +} diff --git a/vendor/lmdb/lmdb.odin b/vendor/lmdb/lmdb.odin new file mode 100644 index 0000000..7292c5c --- /dev/null +++ b/vendor/lmdb/lmdb.odin @@ -0,0 +1,1602 @@ +/** @file lmdb.h +* @brief Lightning memory-mapped database library +* +* @mainpage Lightning Memory-Mapped Database Manager (LMDB) +* +* @section intro_sec Introduction +* LMDB is a Btree-based database management library modeled loosely on the +* BerkeleyDB API, but much simplified. The entire database is exposed +* in a memory map, and all data fetches return data directly +* from the mapped memory, so no malloc's or memcpy's occur during +* data fetches. As such, the library is extremely simple because it +* requires no page caching layer of its own, and it is extremely high +* performance and memory-efficient. 
It is also fully transactional with +* full ACID semantics, and when the memory map is read-only, the +* database integrity cannot be corrupted by stray pointer writes from +* application code. +* +* The library is fully thread-aware and supports concurrent read/write +* access from multiple processes and threads. Data pages use a copy-on- +* write strategy so no active data pages are ever overwritten, which +* also provides resistance to corruption and eliminates the need of any +* special recovery procedures after a system crash. Writes are fully +* serialized; only one write transaction may be active at a time, which +* guarantees that writers can never deadlock. The database structure is +* multi-versioned so readers run with no locks; writers cannot block +* readers, and readers don't block writers. +* +* Unlike other well-known database mechanisms which use either write-ahead +* transaction logs or append-only data writes, LMDB requires no maintenance +* during operation. Both write-ahead loggers and append-only databases +* require periodic checkpointing and/or compaction of their log or database +* files otherwise they grow without bound. LMDB tracks free pages within +* the database and re-uses them for new write operations, so the database +* size does not grow without bound in normal use. +* +* The memory map can be used as a read-only or read-write map. It is +* read-only by default as this provides total immunity to corruption. +* Using read-write mode offers much higher write performance, but adds +* the possibility for stray application writes thru pointers to silently +* corrupt the database. Of course if your application code is known to +* be bug-free (...) then this is not an issue. +* +* If this is your first time using a transactional embedded key/value +* store, you may find the \ref starting page to be helpful. 
+* +* @section caveats_sec Caveats +* Troubleshooting the lock file, plus semaphores on BSD systems: +* +* - A broken lockfile can cause sync issues. +* Stale reader transactions left behind by an aborted program +* cause further writes to grow the database quickly, and +* stale locks can block further operation. +* +* Fix: Check for stale readers periodically, using the +* #mdb_reader_check function or the \ref mdb_stat_1 "mdb_stat" tool. +* Stale writers will be cleared automatically on some systems: +* - Windows - automatic +* - Linux, systems using POSIX mutexes with Robust option - automatic +* - not on BSD, systems using POSIX semaphores. +* Otherwise just make all programs using the database close it; +* the lockfile is always reset on first open of the environment. +* +* - On BSD systems or others configured with MDB_USE_POSIX_SEM, +* startup can fail due to semaphores owned by another userid. +* +* Fix: Open and close the database as the user which owns the +* semaphores (likely last user) or as root, while no other +* process is using the database. +* +* Restrictions/caveats (in addition to those listed for some functions): +* +* - Only the database owner should normally use the database on +* BSD systems or when otherwise configured with MDB_USE_POSIX_SEM. +* Multiple users can cause startup to fail later, as noted above. +* +* - There is normally no pure read-only mode, since readers need write +* access to locks and lock file. Exceptions: On read-only filesystems +* or with the #MDB_NOLOCK flag described under #mdb_env_open(). +* +* - An LMDB configuration will often reserve considerable \b unused +* memory address space and maybe file size for future growth. +* This does not use actual memory or disk space, but users may need +* to understand the difference so they won't be scared off. +* +* - By default, in versions before 0.9.10, unused portions of the data +* file might receive garbage data from memory freed by other code. 
+* (This does not happen when using the #MDB_WRITEMAP flag.) As of +* 0.9.10 the default behavior is to initialize such memory before +* writing to the data file. Since there may be a slight performance +* cost due to this initialization, applications may disable it using +* the #MDB_NOMEMINIT flag. Applications handling sensitive data +* which must not be written should not use this flag. This flag is +* irrelevant when using #MDB_WRITEMAP. +* +* - A thread can only use one transaction at a time, plus any child +* transactions. Each transaction belongs to one thread. See below. +* The #MDB_NOTLS flag changes this for read-only transactions. +* +* - Use an MDB_env* in the process which opened it, not after fork(). +* +* - Do not have open an LMDB database twice in the same process at +* the same time. Not even from a plain open() call - close()ing it +* breaks fcntl() advisory locking. (It is OK to reopen it after +* fork() - exec*(), since the lockfile has FD_CLOEXEC set.) +* +* - Avoid long-lived transactions. Read transactions prevent +* reuse of pages freed by newer write transactions, thus the +* database can grow quickly. Write transactions prevent +* other write transactions, since writes are serialized. +* +* - Avoid suspending a process with active transactions. These +* would then be "long-lived" as above. Also read transactions +* suspended when writers commit could sometimes see wrong data. +* +* ...when several processes can use a database concurrently: +* +* - Avoid aborting a process with an active transaction. +* The transaction becomes "long-lived" as above until a check +* for stale readers is performed or the lockfile is reset, +* since the process may not remove it from the lockfile. +* +* This does not apply to write transactions if the system clears +* stale writers, see above. +* +* - If you do that anyway, do a periodic check for stale readers. Or +* close the environment once in a while, so the lockfile can get reset. 
+* +* - Do not use LMDB databases on remote filesystems, even between +* processes on the same host. This breaks flock() on some OSes, +* possibly memory map sync, and certainly sync between programs +* on different hosts. +* +* - Opening a database can fail if another process is opening or +* closing it at exactly the same time. +* +* @author Howard Chu, Symas Corporation. +* +* @copyright Copyright 2011-2021 Howard Chu, Symas Corp. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted only as authorized by the OpenLDAP +* Public License. +* +* A copy of this license is available in the file LICENSE in the +* top-level directory of the distribution or, alternatively, at +* . +* +* @par Derived From: +* This code is derived from btree.c written by Martin Hedenfalk. +* +* Copyright (c) 2009, 2010 Martin Hedenfalk +* +* Permission to use, copy, modify, and distribute this software for any +* purpose with or without fee is hereby granted, provided that the above +* copyright notice and this permission notice appear in all copies. +* +* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +package lmdb + +foreign import lib "system:lmdb" + +import "core:c" +import "core:fmt" +import "core:sys/posix" + +_ :: c + +when ODIN_OS == .Windows { + mode_t :: c.int +} else { + mode_t :: posix.mode_t +} + +when ODIN_OS == .Windows { + filehandle_t :: rawptr +} else { + filehandle_t :: c.int +} + +Env :: struct {} + +Txn :: struct {} + +/** @brief A handle for an individual database in the DB environment. */ +Dbi :: u32 + +Cursor :: struct {} + +/** @brief Generic structure used for passing keys and data in and out +* of the database. +* +* Values returned from the database are valid only until a subsequent +* update operation, or the end of the transaction. Do not modify or +* free them, they commonly point into the database itself. +* +* Key sizes must be between 1 and #mdb_env_get_maxkeysize() inclusive. +* The same applies to data sizes in databases with the #MDB_DUPSORT flag. +* Other data items can in theory be from 0 to 0xffffffff bytes long. +*/ +Val :: struct { + mv_size: uint, /**< size of the data item */ + mv_data: rawptr, /**< address of the data item */ +} + +// Automatic `Val` handling for a given type 'T'. +// Will not traverse pointers. If `T` stores pointers, you probably don't want to use this. 
+Auto_Val :: struct($T: typeid) {
+	raw: Val,
+}
+
+// Wrap a pointer to a value of type T in a `Val` sized for T.
+autoval :: #force_inline proc "contextless" (val_ptr: ^$T) -> Auto_Val(T) {
+	return Auto_Val(T){Val{size_of(T), val_ptr}}
+}
+
+// A `Val` sized for T with a nil data pointer, for use as a `get` output slot.
+nil_autoval :: #force_inline proc "contextless" ($T: typeid) -> Auto_Val(T) {
+	return Auto_Val(T){Val{size_of(T), nil}}
+}
+
+// View the `Val`'s data pointer as ^T. The pointee commonly points into the
+// database map and is only valid until the next update or end of transaction.
+autoval_get_data :: #force_inline proc "contextless" (val: ^Auto_Val($T)) -> ^T {
+	return cast(^T)val.raw.mv_data
+}
+
+// Panic if there is an error, including LMDB's description of it.
+// BUGFIX: the format string previously had no verb, so the `strerror` text
+// was never included in the panic message.
+panic_on_err :: #force_inline proc(error: Error) {
+	if error != .NONE {
+		fmt.panicf("Irrecoverable LMDB error: %v", strerror(i32(error)))
+	}
+}
+
+/** @brief A callback function used to compare two keys in a database */
+Cmp_Func :: #type proc "c" (_: ^Val, _: ^Val) -> i32
+
+/** @brief A callback function used to relocate a position-dependent data item
+* in a fixed-address database.
+*
+* The \b newptr gives the item's desired address in
+* the memory map, and \b oldptr gives its previous address. The item's actual
+* data resides at the address in \b item. This callback is expected to walk
+* through the fields of the record in \b item and modify any
+* values based at the \b oldptr address to be relative to the \b newptr address.
+* @param[in,out] item The item that is to be relocated.
+* @param[in] oldptr The previous address.
+* @param[in] newptr The new address to relocate to.
+* @param[in] relctx An application-provided context, set by #mdb_set_relctx().
+* @todo This feature is currently unimplemented.
+*/ +Rel_Func :: #type proc "c" (item: ^Val, oldptr, newptr, relctx: rawptr) + +/** @defgroup mdb_env Environment Flags + * @{ + */ +/** mmap at a fixed address (experimental) */ +ENV_FIXEDMAP :: 0x01 +/** no environment directory */ +ENV_NOSUBDIR :: 0x4000 +/** don't fsync after commit */ +ENV_NOSYNC :: 0x10000 +/** read only */ +ENV_RDONLY :: 0x20000 +/** don't fsync metapage after commit */ +ENV_NOMETASYNC :: 0x40000 +/** use writable mmap */ +ENV_WRITEMAP :: 0x80000 +/** use asynchronous msync when #MDB_WRITEMAP is used */ +ENV_MAPASYNC :: 0x100000 +/** tie reader locktable slots to #MDB_txn objects instead of to threads */ +ENV_NOTLS :: 0x200000 +/** don't do any locking, caller must manage their own locks */ +ENV_NOLOCK :: 0x400000 +/** don't do readahead (no effect on Windows) */ +ENV_NORDAHEAD :: 0x800000 +/** don't initialize malloc'd memory before writing to datafile */ +ENV_NOMEMINIT :: 0x1000000 +/** @} */ + +/** @defgroup mdb_dbi_open Database Flags + * @{ + */ +/** use reverse string keys */ +DB_REVERSEKEY :: 0x02 +/** use sorted duplicates */ +DB_DUPSORT :: 0x04 +/** numeric keys in native byte order: either unsigned int or size_t. + * The keys must all be of the same size. */ +DB_INTEGERKEY :: 0x08 +/** with #MDB_DUPSORT, sorted dup items have fixed size */ +DB_DUPFIXED :: 0x10 +/** with #MDB_DUPSORT, dups are #MDB_INTEGERKEY-style integers */ +DB_INTEGERDUP :: 0x20 +/** with #MDB_DUPSORT, use reverse string dups */ +DB_REVERSEDUP :: 0x40 +/** create DB if not already existing */ +DB_CREATE :: 0x40000 +/** @} */ + +/** @defgroup mdb_put Write Flags + * @{ + */ +/** For put: Don't write if the key already exists. */ +WRITE_NOOVERWRITE :: 0x10 +/** Only for #MDB_DUPSORT
+ * For put: don't write if the key and data pair already exist.
+ * For mdb_cursor_del: remove all duplicate data items. + */ +WRITE_NODUPDATA :: 0x20 +/** For mdb_cursor_put: overwrite the current key/data pair */ +WRITE_CURRENT :: 0x40 +/** For put: Just reserve space for data, don't copy it. Return a + * pointer to the reserved space. + */ +WRITE_RESERVE :: 0x10000 +/** Data is being appended, don't split full pages. */ +WRITE_APPEND :: 0x20000 +/** Duplicate data is being appended, don't split full pages. */ +WRITE_APPENDDUP :: 0x40000 +/** Store multiple data items in one call. Only for #MDB_DUPFIXED. */ +WRITE_MULTIPLE :: 0x80000 +/* @} */ + +/** @defgroup mdb_copy Copy Flags + * @{ + */ +/** Compacting copy: Omit free space from copy, and renumber all + * pages sequentially. + */ +CP_COMPACT :: 0x01 +/* @} */ + +/** @brief Cursor Get operations. +* +* This is the set of all operations for retrieving data +* using a cursor. +*/ +Cursor_Op :: enum c.int { + FIRST, /**< Position at first key/data item */ + FIRST_DUP, /**< Position at first data item of current key. + Only for #MDB_DUPSORT */ + GET_BOTH, /**< Position at key/data pair. Only for #MDB_DUPSORT */ + GET_BOTH_RANGE, /**< position at key, nearest data. Only for #MDB_DUPSORT */ + GET_CURRENT, /**< Return key/data at current cursor position */ + GET_MULTIPLE, /**< Return up to a page of duplicate data items + from current cursor position. Move cursor to prepare + for #MDB_NEXT_MULTIPLE. Only for #MDB_DUPFIXED */ + LAST, /**< Position at last key/data item */ + LAST_DUP, /**< Position at last data item of current key. + Only for #MDB_DUPSORT */ + NEXT, /**< Position at next data item */ + NEXT_DUP, /**< Position at next data item of current key. + Only for #MDB_DUPSORT */ + NEXT_MULTIPLE, /**< Return up to a page of duplicate data items + from next cursor position. Move cursor to prepare + for #MDB_NEXT_MULTIPLE. 
Only for #MDB_DUPFIXED */ + NEXT_NODUP, /**< Position at first data item of next key */ + PREV, /**< Position at previous data item */ + PREV_DUP, /**< Position at previous data item of current key. + Only for #MDB_DUPSORT */ + PREV_NODUP, /**< Position at last data item of previous key */ + SET, /**< Position at specified key */ + SET_KEY, /**< Position at specified key, return key + data */ + SET_RANGE, /**< Position at first key greater than or equal to specified key. */ + PREV_MULTIPLE, /**< Position at previous page and return up to + a page of duplicate data items. Only for #MDB_DUPFIXED */ +} + +Error :: enum c.int { + /** Successful result */ + NONE = 0, + /** key/data pair already exists */ + KEYEXIST = -30799, + /** key/data pair not found (EOF) */ + NOTFOUND = -30798, + /** Requested page not found - this usually indicates corruption */ + PAGE_NOTFOUND = -30797, + /** Located page was wrong type */ + CORRUPTED = -30796, + /** Update of meta page failed or environment had fatal error */ + PANIC = -30795, + /** Environment version mismatch */ + VERSION_MISMATCH = -30794, + /** File is not a valid LMDB file */ + INVALID = -30793, + /** Environment mapsize reached */ + MAP_FULL = -30792, + /** Environment maxdbs reached */ + DBS_FULL = -30791, + /** Environment maxreaders reached */ + READERS_FULL = -30790, + /** Too many TLS keys in use - Windows only */ + TLS_FULL = -30789, + /** Txn has too many dirty pages */ + TXN_FULL = -30788, + /** Cursor stack too deep - internal error */ + CURSOR_FULL = -30787, + /** Page has not enough space - internal error */ + PAGE_FULL = -30786, + /** Database contents grew beyond environment mapsize */ + MAP_RESIZED = -30785, + /** Operation and DB incompatible, or DB type changed. This can mean: + *
    + *
  • The operation expects an #MDB_DUPSORT / #MDB_DUPFIXED database. + *
  • Opening a named DB when the unnamed DB has #MDB_DUPSORT / #MDB_INTEGERKEY. + *
  • Accessing a data record as a database, or vice versa. + *
  • The database was dropped and recreated with different flags. + *
+ */ + INCOMPATIBLE = -30784, + /** Invalid reuse of reader locktable slot */ + BAD_RSLOT = -30783, + /** Transaction must abort, has a child, or is invalid */ + BAD_TXN = -30782, + /** Unsupported size of key/DB name/data, or wrong DUPFIXED size */ + BAD_VALSIZE = -30781, + /** The specified DBI was changed unexpectedly */ + BAD_DBI = -30780, +} + +/** @brief Statistics for a database in the environment */ +Stat :: struct { + ms_psize: u32, + /**< Size of a database page. + This is currently the same for all databases. */ + ms_depth: u32, + /**< Depth (height) of the B-tree */ + ms_branch_pages: uint, + /**< Number of internal (non-leaf) pages */ + ms_leaf_pages: uint, + /**< Number of leaf pages */ + ms_overflow_pages: uint, + /**< Number of overflow pages */ + ms_entries: uint, + /**< Number of data items */ +} + +/** @brief Information about the environment */ +Env_Info :: struct { + me_mapaddr: rawptr, /**< Address of map, if fixed */ + me_mapsize: uint, /**< Size of the data memory map */ + me_last_pgno: uint, /**< ID of the last used page */ + me_last_txnid: uint, /**< ID of the last committed transaction */ + me_maxreaders: u32, /**< max reader slots in the environment */ + me_numreaders: u32, /**< max reader slots used in the environment */ +} + +/** @brief A callback function for most LMDB assert() failures, +* called before printing the message and aborting. +* +* @param[in] env An environment handle returned by #mdb_env_create(). +* @param[in] msg The assertion message, not including newline. +*/ +Assert_Func :: proc "c" (_: ^Env, _: cstring) + +/** @brief A callback function used to print a message from the library. +* +* @param[in] msg The string to be printed. +* @param[in] ctx An arbitrary context pointer for the callback. +* @return < 0 on failure, >= 0 on success. 
+*/ +Msg_Func :: proc "c" (_: cstring, _: rawptr) -> i32 + +@(default_calling_convention = "c", link_prefix = "mdb_") +foreign lib { + /** @brief Return the LMDB library version information. + * + * @param[out] major if non-NULL, the library major version number is copied here + * @param[out] minor if non-NULL, the library minor version number is copied here + * @param[out] patch if non-NULL, the library patch version number is copied here + * @retval "version string" The library version as a string + */ + version :: proc(major: ^i32, minor: ^i32, patch: ^i32) -> cstring --- + + /** @brief Return a string describing a given error code. + * + * This function is a superset of the ANSI C X3.159-1989 (ANSI C) strerror(3) + * function. If the error code is greater than or equal to 0, then the string + * returned by the system function strerror(3) is returned. If the error code + * is less than 0, an error string corresponding to the LMDB library error is + * returned. See @ref errors for a list of LMDB-specific error codes. + * @param[in] err The error code + * @retval "error message" The description of the error + */ + strerror :: proc(err: i32) -> cstring --- + + /** @brief Create an LMDB environment handle. + * + * This function allocates memory for a #MDB_env structure. To release + * the allocated memory and discard the handle, call #mdb_env_close(). + * Before the handle may be used, it must be opened using #mdb_env_open(). + * Various other options may also need to be set before opening the handle, + * e.g. #mdb_env_set_mapsize(), #mdb_env_set_maxreaders(), #mdb_env_set_maxdbs(), + * depending on usage requirements. + * @param[out] env The address where the new handle will be stored + * @return A non-zero error value on failure and 0 on success. + */ + @(require_results) + env_create :: proc(env: ^^Env) -> Error --- + + /** @brief Open an environment handle. + * + * If this function fails, #mdb_env_close() must be called to discard the #MDB_env handle. 
+ * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] path The directory in which the database files reside. This + * directory must already exist and be writable. + * @param[in] flags Special options for this environment. This parameter + * must be set to 0 or by bitwise OR'ing together one or more of the + * values described here. + * Flags set by mdb_env_set_flags() are also used. + *
    + *
  • #MDB_FIXEDMAP + * use a fixed address for the mmap region. This flag must be specified + * when creating the environment, and is stored persistently in the environment. + * If successful, the memory map will always reside at the same virtual address + * and pointers used to reference data items in the database will be constant + * across multiple invocations. This option may not always work, depending on + * how the operating system has allocated memory to shared libraries and other uses. + * The feature is highly experimental. + *
  • #MDB_NOSUBDIR + * By default, LMDB creates its environment in a directory whose + * pathname is given in \b path, and creates its data and lock files + * under that directory. With this option, \b path is used as-is for + * the database main data file. The database lock file is the \b path + * with "-lock" appended. + *
  • #MDB_RDONLY + * Open the environment in read-only mode. No write operations will be + * allowed. LMDB will still modify the lock file - except on read-only + * filesystems, where LMDB does not use locks. + *
  • #MDB_WRITEMAP + * Use a writeable memory map unless MDB_RDONLY is set. This uses + * fewer mallocs but loses protection from application bugs + * like wild pointer writes and other bad updates into the database. + * This may be slightly faster for DBs that fit entirely in RAM, but + * is slower for DBs larger than RAM. + * Incompatible with nested transactions. + * Do not mix processes with and without MDB_WRITEMAP on the same + * environment. This can defeat durability (#mdb_env_sync etc). + *
  • #MDB_NOMETASYNC + * Flush system buffers to disk only once per transaction, omit the + * metadata flush. Defer that until the system flushes files to disk, + * or next non-MDB_RDONLY commit or #mdb_env_sync(). This optimization + * maintains database integrity, but a system crash may undo the last + * committed transaction. I.e. it preserves the ACI (atomicity, + * consistency, isolation) but not D (durability) database property. + * This flag may be changed at any time using #mdb_env_set_flags(). + *
  • #MDB_NOSYNC + * Don't flush system buffers to disk when committing a transaction. + * This optimization means a system crash can corrupt the database or + * lose the last transactions if buffers are not yet flushed to disk. + * The risk is governed by how often the system flushes dirty buffers + * to disk and how often #mdb_env_sync() is called. However, if the + * filesystem preserves write order and the #MDB_WRITEMAP flag is not + * used, transactions exhibit ACI (atomicity, consistency, isolation) + * properties and only lose D (durability). I.e. database integrity + * is maintained, but a system crash may undo the final transactions. + * Note that (#MDB_NOSYNC | #MDB_WRITEMAP) leaves the system with no + * hint for when to write transactions to disk, unless #mdb_env_sync() + * is called. (#MDB_MAPASYNC | #MDB_WRITEMAP) may be preferable. + * This flag may be changed at any time using #mdb_env_set_flags(). + *
  • #MDB_MAPASYNC + * When using #MDB_WRITEMAP, use asynchronous flushes to disk. + * As with #MDB_NOSYNC, a system crash can then corrupt the + * database or lose the last transactions. Calling #mdb_env_sync() + * ensures on-disk database integrity until next commit. + * This flag may be changed at any time using #mdb_env_set_flags(). + *
  • #MDB_NOTLS + * Don't use Thread-Local Storage. Tie reader locktable slots to + * #MDB_txn objects instead of to threads. I.e. #mdb_txn_reset() keeps + * the slot reserved for the #MDB_txn object. A thread may use parallel + * read-only transactions. A read-only transaction may span threads if + * the user synchronizes its use. Applications that multiplex many + * user threads over individual OS threads need this option. Such an + * application must also serialize the write transactions in an OS + * thread, since LMDB's write locking is unaware of the user threads. + *
  • #MDB_NOLOCK + * Don't do any locking. If concurrent access is anticipated, the + * caller must manage all concurrency itself. For proper operation + * the caller must enforce single-writer semantics, and must ensure + * that no readers are using old transactions while a writer is + * active. The simplest approach is to use an exclusive lock so that + * no readers may be active at all when a writer begins. + *
  • #MDB_NORDAHEAD + * Turn off readahead. Most operating systems perform readahead on + * read requests by default. This option turns it off if the OS + * supports it. Turning it off may help random read performance + * when the DB is larger than RAM and system RAM is full. + * The option is not implemented on Windows. + *
  • #MDB_NOMEMINIT + * Don't initialize malloc'd memory before writing to unused spaces + * in the data file. By default, memory for pages written to the data + * file is obtained using malloc. While these pages may be reused in + * subsequent transactions, freshly malloc'd pages will be initialized + * to zeroes before use. This avoids persisting leftover data from other + * code (that used the heap and subsequently freed the memory) into the + * data file. Note that many other system libraries may allocate + * and free memory from the heap for arbitrary uses. E.g., stdio may + * use the heap for file I/O buffers. This initialization step has a + * modest performance cost so some applications may want to disable + * it using this flag. This option can be a problem for applications + * which handle sensitive data like passwords, and it makes memory + * checkers like Valgrind noisy. This flag is not needed with #MDB_WRITEMAP, + * which writes directly to the mmap instead of using malloc for pages. The + * initialization is also skipped if #MDB_RESERVE is used; the + * caller is expected to overwrite all of the memory that was + * reserved in that case. + * This flag may be changed at any time using #mdb_env_set_flags(). + *
+ * @param[in] mode The UNIX permissions to set on created files and semaphores. + * This parameter is ignored on Windows. + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • #MDB_VERSION_MISMATCH - the version of the LMDB library doesn't match the + * version that created the database environment. + *
  • #MDB_INVALID - the environment file headers are corrupted. + *
  • ENOENT - the directory specified by the path parameter doesn't exist. + *
  • EACCES - the user didn't have permission to access the environment files. + *
  • EAGAIN - the environment was locked by another process. + *
+ */ + @(require_results) + env_open :: proc(env: ^Env, path: cstring, flags: u32, mode: mode_t) -> Error --- + + /** @brief Copy an LMDB environment to the specified path. + * + * This function may be used to make a backup of an existing environment. + * No lockfile is created, since it gets recreated at need. + * @note This call can trigger significant file size growth if run in + * parallel with write transactions, because it employs a read-only + * transaction. See long-lived transactions under @ref caveats_sec. + * @param[in] env An environment handle returned by #mdb_env_create(). It + * must have already been opened successfully. + * @param[in] path The directory in which the copy will reside. This + * directory must already exist and be writable but must otherwise be + * empty. + * @return A non-zero error value on failure and 0 on success. + */ + @(require_results) + env_copy :: proc(env: ^Env, path: cstring) -> Error --- + + /** @brief Copy an LMDB environment to the specified file descriptor. + * + * This function may be used to make a backup of an existing environment. + * No lockfile is created, since it gets recreated at need. + * @note This call can trigger significant file size growth if run in + * parallel with write transactions, because it employs a read-only + * transaction. See long-lived transactions under @ref caveats_sec. + * @param[in] env An environment handle returned by #mdb_env_create(). It + * must have already been opened successfully. + * @param[in] fd The filedescriptor to write the copy to. It must + * have already been opened for Write access. + * @return A non-zero error value on failure and 0 on success. + */ + @(require_results) + env_copyfd :: proc(env: ^Env, fd: filehandle_t) -> Error --- + + /** @brief Copy an LMDB environment to the specified path, with options. + * + * This function may be used to make a backup of an existing environment. + * No lockfile is created, since it gets recreated at need. 
+ * @note This call can trigger significant file size growth if run in + * parallel with write transactions, because it employs a read-only + * transaction. See long-lived transactions under @ref caveats_sec. + * @param[in] env An environment handle returned by #mdb_env_create(). It + * must have already been opened successfully. + * @param[in] path The directory in which the copy will reside. This + * directory must already exist and be writable but must otherwise be + * empty. + * @param[in] flags Special options for this operation. This parameter + * must be set to 0 or by bitwise OR'ing together one or more of the + * values described here. + *
    + *
  • #MDB_CP_COMPACT - Perform compaction while copying: omit free + * pages and sequentially renumber all pages in output. This option + * consumes more CPU and runs more slowly than the default. + * Currently it fails if the environment has suffered a page leak. + *
+ * @return A non-zero error value on failure and 0 on success. + */ + @(require_results) + env_copy2 :: proc(env: ^Env, path: cstring, flags: u32) -> Error --- + + /** @brief Copy an LMDB environment to the specified file descriptor, + * with options. + * + * This function may be used to make a backup of an existing environment. + * No lockfile is created, since it gets recreated at need. See + * #mdb_env_copy2() for further details. + * @note This call can trigger significant file size growth if run in + * parallel with write transactions, because it employs a read-only + * transaction. See long-lived transactions under @ref caveats_sec. + * @param[in] env An environment handle returned by #mdb_env_create(). It + * must have already been opened successfully. + * @param[in] fd The filedescriptor to write the copy to. It must + * have already been opened for Write access. + * @param[in] flags Special options for this operation. + * See #mdb_env_copy2() for options. + * @return A non-zero error value on failure and 0 on success. + */ + @(require_results) + env_copyfd2 :: proc(env: ^Env, fd: filehandle_t, flags: u32) -> Error --- + + /** @brief Return statistics about the LMDB environment. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[out] stat The address of an #MDB_stat structure + * where the statistics will be copied + */ + env_stat :: proc(env: ^Env, stat: ^Stat) -> i32 --- + + /** @brief Return information about the LMDB environment. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[out] stat The address of an #MDB_envinfo structure + * where the information will be copied + */ + env_info :: proc(env: ^Env, stat: ^Env_Info) -> i32 --- + + /** @brief Flush the data buffers to disk. + * + * Data is always written to disk when #mdb_txn_commit() is called, + * but the operating system may keep it buffered. 
LMDB always flushes + * the OS buffers upon commit as well, unless the environment was + * opened with #MDB_NOSYNC or in part #MDB_NOMETASYNC. This call is + * not valid if the environment was opened with #MDB_RDONLY. + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] force If non-zero, force a synchronous flush. Otherwise + * if the environment has the #MDB_NOSYNC flag set the flushes + * will be omitted, and with #MDB_MAPASYNC they will be asynchronous. + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EACCES - the environment is read-only. + *
  • EINVAL - an invalid parameter was specified. + *
  • EIO - an error occurred during synchronization. + *
+ */ + @(require_results) + env_sync :: proc(env: ^Env, force: i32) -> Error --- + + /** @brief Close the environment and release the memory map. + * + * Only a single thread may call this function. All transactions, databases, + * and cursors must already be closed before calling this function. Attempts to + * use any such handles after calling this function will cause a SIGSEGV. + * The environment handle will be freed and must not be used again after this call. + * @param[in] env An environment handle returned by #mdb_env_create() + */ + env_close :: proc(env: ^Env) --- + + /** @brief Set environment flags. + * + * This may be used to set some flags in addition to those from + * #mdb_env_open(), or to unset these flags. If several threads + * change the flags at the same time, the result is undefined. + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] flags The flags to change, bitwise OR'ed together + * @param[in] onoff A non-zero value sets the flags, zero clears them. + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ + @(require_results) + env_set_flags :: proc(env: ^Env, flags: u32, onoff: i32) -> Error --- + + /** @brief Get environment flags. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[out] flags The address of an integer to store the flags + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ + @(require_results) + env_get_flags :: proc(env: ^Env, flags: ^u32) -> Error --- + + /** @brief Return the path that was used in #mdb_env_open(). + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[out] path Address of a string pointer to contain the path. This + * is the actual string in the environment, not a copy. It should not be + * altered in any way. + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ + @(require_results) + env_get_path :: proc(env: ^Env, path: ^^u8) -> Error --- + + /** @brief Return the filedescriptor for the given environment. + * + * This function may be called after fork(), so the descriptor can be + * closed before exec*(). Other LMDB file descriptors have FD_CLOEXEC. + * (Until LMDB 0.9.18, only the lockfile had that.) + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[out] fd Address of a mdb_filehandle_t to contain the descriptor. + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ + @(require_results) + env_get_fd :: proc(env: ^Env, fd: ^filehandle_t) -> Error --- + + /** @brief Set the size of the memory map to use for this environment. + * + * The size should be a multiple of the OS page size. The default is + * 10485760 bytes. The size of the memory map is also the maximum size + * of the database. The value should be chosen as large as possible, + * to accommodate future growth of the database. + * This function should be called after #mdb_env_create() and before #mdb_env_open(). + * It may be called at later times if no transactions are active in + * this process. Note that the library does not check for this condition, + * the caller must ensure it explicitly. + * + * The new size takes effect immediately for the current process but + * will not be persisted to any others until a write transaction has been + * committed by the current process. Also, only mapsize increases are + * persisted into the environment. + * + * If the mapsize is increased by another process, and data has grown + * beyond the range of the current mapsize, #mdb_txn_begin() will + * return #MDB_MAP_RESIZED. This function may be called with a size + * of zero to adopt the new size. + * + * Any attempt to set a size smaller than the space already consumed + * by the environment will be silently changed to the current size of the used space. + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] size The size in bytes + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified, or the environment has + * an active write transaction. + *
+ */ + @(require_results) + env_set_mapsize :: proc(env: ^Env, size: uint) -> Error --- + + /** @brief Set the maximum number of threads/reader slots for the environment. + * + * This defines the number of slots in the lock table that is used to track readers in the + * the environment. The default is 126. + * Starting a read-only transaction normally ties a lock table slot to the + * current thread until the environment closes or the thread exits. If + * MDB_NOTLS is in use, #mdb_txn_begin() instead ties the slot to the + * MDB_txn object until it or the #MDB_env object is destroyed. + * This function may only be called after #mdb_env_create() and before #mdb_env_open(). + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] readers The maximum number of reader lock table slots + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified, or the environment is already open. + *
+ */ + @(require_results) + env_set_maxreaders :: proc(env: ^Env, readers: u32) -> Error --- + + /** @brief Get the maximum number of threads/reader slots for the environment. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[out] readers Address of an integer to store the number of readers + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ + @(require_results) + env_get_maxreaders :: proc(env: ^Env, readers: ^u32) -> Error --- + + /** @brief Set the maximum number of named databases for the environment. + * + * This function is only needed if multiple databases will be used in the + * environment. Simpler applications that use the environment as a single + * unnamed database can ignore this option. + * This function may only be called after #mdb_env_create() and before #mdb_env_open(). + * + * Currently a moderate number of slots are cheap but a huge number gets + * expensive: 7-120 words per transaction, and every #mdb_dbi_open() + * does a linear search of the opened slots. + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] dbs The maximum number of databases + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified, or the environment is already open. + *
+ */ + @(require_results) + env_set_maxdbs :: proc(env: ^Env, dbs: Dbi) -> Error --- + + /** @brief Get the maximum size of keys and #MDB_DUPSORT data we can write. + * + * Depends on the compile-time constant #MDB_MAXKEYSIZE. Default 511. + * See @ref MDB_val. + * @param[in] env An environment handle returned by #mdb_env_create() + * @return The maximum size of a key we can write + */ + env_get_maxkeysize :: proc(env: ^Env) -> i32 --- + + /** @brief Set application information associated with the #MDB_env. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] ctx An arbitrary pointer for whatever the application needs. + * @return A non-zero error value on failure and 0 on success. + */ + @(require_results) + env_set_userctx :: proc(env: ^Env, ctx: rawptr) -> Error --- + + /** @brief Get the application information associated with the #MDB_env. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @return The pointer set by #mdb_env_set_userctx(). + */ + env_get_userctx :: proc(env: ^Env) -> rawptr --- + + /** Set or reset the assert() callback of the environment. + * Disabled if liblmdb is built with NDEBUG. + * @note This hack should become obsolete as lmdb's error handling matures. + * @param[in] env An environment handle returned by #mdb_env_create(). + * @param[in] func An #MDB_assert_func function, or 0. + * @return A non-zero error value on failure and 0 on success. + */ + @(require_results) + env_set_assert :: proc(env: ^Env, func: Assert_Func) -> Error --- + + /** @brief Create a transaction for use with the environment. + * + * The transaction handle may be discarded using #mdb_txn_abort() or #mdb_txn_commit(). + * @note A transaction and its cursors must only be used by a single + * thread, and a thread may only have a single transaction at a time. + * If #MDB_NOTLS is in use, this does not apply to read-only transactions. + * @note Cursors may not span transactions. 
+ * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] parent If this parameter is non-NULL, the new transaction + * will be a nested transaction, with the transaction indicated by \b parent + * as its parent. Transactions may be nested to any level. A parent + * transaction and its cursors may not issue any other operations than + * mdb_txn_commit and mdb_txn_abort while it has active child transactions. + * @param[in] flags Special options for this transaction. This parameter + * must be set to 0 or by bitwise OR'ing together one or more of the + * values described here. + *
    + *
  • #MDB_RDONLY + * This transaction will not perform any write operations. + *
+ * @param[out] txn Address where the new #MDB_txn handle will be stored + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • #MDB_PANIC - a fatal error occurred earlier and the environment + * must be shut down. + *
  • #MDB_MAP_RESIZED - another process wrote data beyond this MDB_env's + * mapsize and this environment's map must be resized as well. + * See #mdb_env_set_mapsize(). + *
  • #MDB_READERS_FULL - a read-only transaction was requested and + * the reader lock table is full. See #mdb_env_set_maxreaders(). + *
  • ENOMEM - out of memory. + *
+ */ + @(require_results) + txn_begin :: proc(env: ^Env, parent: ^Txn, flags: u32, txn: ^^Txn) -> Error --- + + /** @brief Returns the transaction's #MDB_env + * + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + */ + txn_env :: proc(txn: ^Txn) -> ^Env --- + + /** @brief Return the transaction's ID. + * + * This returns the identifier associated with this transaction. For a + * read-only transaction, this corresponds to the snapshot being read; + * concurrent readers will frequently have the same transaction ID. + * + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @return A transaction ID, valid if input is an active transaction. + */ + txn_id :: proc(txn: ^Txn) -> uint --- + + /** @brief Commit all the operations of a transaction into the database. + * + * The transaction handle is freed. It and its cursors must not be used + * again after this call, except with #mdb_cursor_renew(). + * @note Earlier documentation incorrectly said all cursors would be freed. + * Only write-transactions free cursors. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
  • ENOSPC - no more disk space. + *
  • EIO - a low-level I/O error occurred while writing. + *
  • ENOMEM - out of memory. + *
+ */ + @(require_results) + txn_commit :: proc(txn: ^Txn) -> Error --- + + /** @brief Abandon all the operations of the transaction instead of saving them. + * + * The transaction handle is freed. It and its cursors must not be used + * again after this call, except with #mdb_cursor_renew(). + * @note Earlier documentation incorrectly said all cursors would be freed. + * Only write-transactions free cursors. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + */ + txn_abort :: proc(txn: ^Txn) --- + + /** @brief Reset a read-only transaction. + * + * Abort the transaction like #mdb_txn_abort(), but keep the transaction + * handle. #mdb_txn_renew() may reuse the handle. This saves allocation + * overhead if the process will start a new read-only transaction soon, + * and also locking overhead if #MDB_NOTLS is in use. The reader table + * lock is released, but the table slot stays tied to its thread or + * #MDB_txn. Use mdb_txn_abort() to discard a reset handle, and to free + * its lock table slot if MDB_NOTLS is in use. + * Cursors opened within the transaction must not be used + * again after this call, except with #mdb_cursor_renew(). + * Reader locks generally don't interfere with writers, but they keep old + * versions of database pages allocated. Thus they prevent the old pages + * from being reused when writers commit new data, and so under heavy load + * the database size may grow much more rapidly than otherwise. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + */ + txn_reset :: proc(txn: ^Txn) --- + + /** @brief Renew a read-only transaction. + * + * This acquires a new reader lock for a transaction handle that had been + * released by #mdb_txn_reset(). It must be called before a reset transaction + * may be used again. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • #MDB_PANIC - a fatal error occurred earlier and the environment + * must be shut down. + *
  • EINVAL - an invalid parameter was specified. + *
+ */ + @(require_results) + txn_renew :: proc(txn: ^Txn) -> Error --- + + /** @brief Open a database in the environment. + * + * A database handle denotes the name and parameters of a database, + * independently of whether such a database exists. + * The database handle may be discarded by calling #mdb_dbi_close(). + * The old database handle is returned if the database was already open. + * The handle may only be closed once. + * + * The database handle will be private to the current transaction until + * the transaction is successfully committed. If the transaction is + * aborted the handle will be closed automatically. + * After a successful commit the handle will reside in the shared + * environment, and may be used by other transactions. + * + * This function must not be called from multiple concurrent + * transactions in the same process. A transaction that uses + * this function must finish (either commit or abort) before + * any other transaction in the process may use this function. + * + * To use named databases (with name != NULL), #mdb_env_set_maxdbs() + * must be called before opening the environment. Database names are + * keys in the unnamed database, and may be read but not written. + * + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] name The name of the database to open. If only a single + * database is needed in the environment, this value may be NULL. + * @param[in] flags Special options for this database. This parameter + * must be set to 0 or by bitwise OR'ing together one or more of the + * values described here. + *
    + *
  • #MDB_REVERSEKEY + * Keys are strings to be compared in reverse order, from the end + * of the strings to the beginning. By default, Keys are treated as strings and + * compared from beginning to end. + *
  • #MDB_DUPSORT + * Duplicate keys may be used in the database. (Or, from another perspective, + * keys may have multiple data items, stored in sorted order.) By default + * keys must be unique and may have only a single data item. + *
  • #MDB_INTEGERKEY + * Keys are binary integers in native byte order, either unsigned int + * or size_t, and will be sorted as such. + * The keys must all be of the same size. + *
  • #MDB_DUPFIXED + * This flag may only be used in combination with #MDB_DUPSORT. This option + * tells the library that the data items for this database are all the same + * size, which allows further optimizations in storage and retrieval. When + * all data items are the same size, the #MDB_GET_MULTIPLE, #MDB_NEXT_MULTIPLE + * and #MDB_PREV_MULTIPLE cursor operations may be used to retrieve multiple + * items at once. + *
  • #MDB_INTEGERDUP + * This option specifies that duplicate data items are binary integers, + * similar to #MDB_INTEGERKEY keys. + *
  • #MDB_REVERSEDUP + * This option specifies that duplicate data items should be compared as + * strings in reverse order. + *
  • #MDB_CREATE + * Create the named database if it doesn't exist. This option is not + * allowed in a read-only transaction or a read-only environment. + *
+ * @param[out] dbi Address where the new #MDB_dbi handle will be stored + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • #MDB_NOTFOUND - the specified database doesn't exist in the environment + * and #MDB_CREATE was not specified. + *
  • #MDB_DBS_FULL - too many databases have been opened. See #mdb_env_set_maxdbs(). + *
+ */ + @(require_results) + dbi_open :: proc(txn: ^Txn, name: cstring, flags: u32, dbi: ^Dbi) -> Error --- + + /** @brief Retrieve statistics for a database. + * + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[out] stat The address of an #MDB_stat structure + * where the statistics will be copied + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ + @(require_results) + stat :: proc(txn: ^Txn, dbi: Dbi, stat: ^Stat) -> Error --- + + /** @brief Retrieve the DB flags for a database handle. + * + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[out] flags Address where the flags will be returned. + * @return A non-zero error value on failure and 0 on success. + */ + @(require_results) + dbi_flags :: proc(txn: ^Txn, dbi: Dbi, flags: ^u32) -> Error --- + + /** @brief Close a database handle. Normally unnecessary. Use with care: + * + * This call is not mutex protected. Handles should only be closed by + * a single thread, and only if no other threads are going to reference + * the database handle or one of its cursors any further. Do not close + * a handle if an existing transaction has modified its database. + * Doing so can cause misbehavior from database corruption to errors + * like MDB_BAD_VALSIZE (since the DB name is gone). + * + * Closing a database handle is not necessary, but lets #mdb_dbi_open() + * reuse the handle value. Usually it's better to set a bigger + * #mdb_env_set_maxdbs(), unless that value would be large. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + */ + dbi_close :: proc(env: ^Env, dbi: Dbi) --- + + /** @brief Empty or delete+close a database. + * + * See #mdb_dbi_close() for restrictions about closing the DB handle. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[in] del 0 to empty the DB, 1 to delete it from the + * environment and close the DB handle. + * @return A non-zero error value on failure and 0 on success. + */ + @(require_results) + drop :: proc(txn: ^Txn, dbi: Dbi, del: i32) -> Error --- + + /** @brief Set a custom key comparison function for a database. 
+ * + * The comparison function is called whenever it is necessary to compare a + * key specified by the application with a key currently stored in the database. + * If no comparison function is specified, and no special key flags were specified + * with #mdb_dbi_open(), the keys are compared lexically, with shorter keys collating + * before longer keys. + * @warning This function must be called before any data access functions are used, + * otherwise data corruption may occur. The same comparison function must be used by every + * program accessing the database, every time the database is used. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[in] cmp A #MDB_cmp_func function + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ + @(require_results) + set_compare :: proc(txn: ^Txn, dbi: Dbi, cmp: Cmp_Func) -> Error --- + + /** @brief Set a custom data comparison function for a #MDB_DUPSORT database. + * + * This comparison function is called whenever it is necessary to compare a data + * item specified by the application with a data item currently stored in the database. + * This function only takes effect if the database was opened with the #MDB_DUPSORT + * flag. + * If no comparison function is specified, and no special key flags were specified + * with #mdb_dbi_open(), the data items are compared lexically, with shorter items collating + * before longer items. + * @warning This function must be called before any data access functions are used, + * otherwise data corruption may occur. The same comparison function must be used by every + * program accessing the database, every time the database is used. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[in] cmp A #MDB_cmp_func function + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ + @(require_results) + set_dupsort :: proc(txn: ^Txn, dbi: Dbi, cmp: Cmp_Func) -> Error --- + + /** @brief Set a relocation function for a #MDB_FIXEDMAP database. + * + * @todo The relocation function is called whenever it is necessary to move the data + * of an item to a different position in the database (e.g. through tree + * balancing operations, shifts as a result of adds or deletes, etc.). It is + * intended to allow address/position-dependent data items to be stored in + * a database in an environment opened with the #MDB_FIXEDMAP option. + * Currently the relocation feature is unimplemented and setting + * this function has no effect. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[in] rel A #MDB_rel_func function + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ + @(require_results) + set_relfunc :: proc(txn: ^Txn, dbi: Dbi, rel: Rel_Func) -> Error --- + + /** @brief Set a context pointer for a #MDB_FIXEDMAP database's relocation function. + * + * See #mdb_set_relfunc and #MDB_rel_func for more details. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[in] ctx An arbitrary pointer for whatever the application needs. + * It will be passed to the callback function set by #mdb_set_relfunc + * as its \b relctx parameter whenever the callback is invoked. + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ + @(require_results) + set_relctx :: proc(txn: ^Txn, dbi: Dbi, ctx: rawptr) -> Error --- + + /** @brief Get items from a database. + * + * This function retrieves key/data pairs from the database. The address + * and length of the data associated with the specified \b key are returned + * in the structure to which \b data refers. + * If the database supports duplicate keys (#MDB_DUPSORT) then the + * first data item for the key will be returned. Retrieval of other + * items requires the use of #mdb_cursor_get(). + * + * @note The memory pointed to by the returned values is owned by the + * database. The caller need not dispose of the memory, and may not + * modify it in any way. For values returned in a read-only transaction + * any modification attempts will cause a SIGSEGV. + * @note Values returned from the database are valid only until a + * subsequent update operation, or the end of the transaction. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[in] key The key to search for in the database + * @param[out] data The data corresponding to the key + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • #MDB_NOTFOUND - the key was not in the database. + *
  • EINVAL - an invalid parameter was specified. + *
+ */ + @(require_results) + get :: proc(txn: ^Txn, dbi: Dbi, key: ^Val, data: ^Val) -> Error --- + + /** @brief Store items into a database. + * + * This function stores key/data pairs in the database. The default behavior + * is to enter the new key/data pair, replacing any previously existing key + * if duplicates are disallowed, or adding a duplicate data item if + * duplicates are allowed (#MDB_DUPSORT). + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[in] key The key to store in the database + * @param[in,out] data The data to store + * @param[in] flags Special options for this operation. This parameter + * must be set to 0 or by bitwise OR'ing together one or more of the + * values described here. + *
    + *
  • #MDB_NODUPDATA - enter the new key/data pair only if it does not + * already appear in the database. This flag may only be specified + * if the database was opened with #MDB_DUPSORT. The function will + * return #MDB_KEYEXIST if the key/data pair already appears in the + * database. + *
  • #MDB_NOOVERWRITE - enter the new key/data pair only if the key + * does not already appear in the database. The function will return + * #MDB_KEYEXIST if the key already appears in the database, even if + * the database supports duplicates (#MDB_DUPSORT). The \b data + * parameter will be set to point to the existing item. + *
  • #MDB_RESERVE - reserve space for data of the given size, but + * don't copy the given data. Instead, return a pointer to the + * reserved space, which the caller can fill in later - before + * the next update operation or the transaction ends. This saves + * an extra memcpy if the data is being generated later. + * LMDB does nothing else with this memory, the caller is expected + * to modify all of the space requested. This flag must not be + * specified if the database was opened with #MDB_DUPSORT. + *
  • #MDB_APPEND - append the given key/data pair to the end of the + * database. This option allows fast bulk loading when keys are + * already known to be in the correct order. Loading unsorted keys + * with this flag will cause a #MDB_KEYEXIST error. + *
  • #MDB_APPENDDUP - as above, but for sorted dup data. + *
+ * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • #MDB_MAP_FULL - the database is full, see #mdb_env_set_mapsize(). + *
  • #MDB_TXN_FULL - the transaction has too many dirty pages. + *
  • EACCES - an attempt was made to write in a read-only transaction. + *
  • EINVAL - an invalid parameter was specified. + *
+ */ + @(require_results) + put :: proc(txn: ^Txn, dbi: Dbi, key: ^Val, data: ^Val, flags: u32) -> Error --- + + /** @brief Delete items from a database. + * + * This function removes key/data pairs from the database. + * If the database does not support sorted duplicate data items + * (#MDB_DUPSORT) the data parameter is ignored. + * If the database supports sorted duplicates and the data parameter + * is NULL, all of the duplicate data items for the key will be + * deleted. Otherwise, if the data parameter is non-NULL + * only the matching data item will be deleted. + * This function will return #MDB_NOTFOUND if the specified key/data + * pair is not in the database. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[in] key The key to delete from the database + * @param[in] data The data to delete + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EACCES - an attempt was made to write in a read-only transaction. + *
  • EINVAL - an invalid parameter was specified. + *
+ */ + @(require_results) + del :: proc(txn: ^Txn, dbi: Dbi, key: ^Val, data: ^Val) -> Error --- + + /** @brief Create a cursor handle. + * + * A cursor is associated with a specific transaction and database. + * A cursor cannot be used when its database handle is closed. Nor + * when its transaction has ended, except with #mdb_cursor_renew(). + * It can be discarded with #mdb_cursor_close(). + * A cursor in a write-transaction can be closed before its transaction + * ends, and will otherwise be closed when its transaction ends. + * A cursor in a read-only transaction must be closed explicitly, before + * or after its transaction ends. It can be reused with + * #mdb_cursor_renew() before finally closing it. + * @note Earlier documentation said that cursors in every transaction + * were closed when the transaction committed or aborted. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[out] cursor Address where the new #MDB_cursor handle will be stored + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ + @(require_results) + cursor_open :: proc(txn: ^Txn, dbi: Dbi, cursor: ^^Cursor) -> Error --- + + /** @brief Close a cursor handle. + * + * The cursor handle will be freed and must not be used again after this call. + * Its transaction must still be live if it is a write-transaction. + * @param[in] cursor A cursor handle returned by #mdb_cursor_open() + */ + cursor_close :: proc(cursor: ^Cursor) --- + + /** @brief Renew a cursor handle. + * + * A cursor is associated with a specific transaction and database. + * Cursors that are only used in read-only + * transactions may be re-used, to avoid unnecessary malloc/free overhead. + * The cursor may be associated with a new read-only transaction, and + * referencing the same database handle as it was created with. + * This may be done whether the previous transaction is live or dead. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] cursor A cursor handle returned by #mdb_cursor_open() + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ + @(require_results) + cursor_renew :: proc(txn: ^Txn, cursor: ^Cursor) -> Error --- + + /** @brief Return the cursor's transaction handle. + * + * @param[in] cursor A cursor handle returned by #mdb_cursor_open() + */ + cursor_txn :: proc(cursor: ^Cursor) -> ^Txn --- + + /** @brief Return the cursor's database handle. + * + * @param[in] cursor A cursor handle returned by #mdb_cursor_open() + */ + cursor_dbi :: proc(cursor: ^Cursor) -> Dbi --- + + /** @brief Retrieve by cursor. + * + * This function retrieves key/data pairs from the database. The address and length + * of the key are returned in the object to which \b key refers (except for the + * case of the #MDB_SET option, in which the \b key object is unchanged), and + * the address and length of the data are returned in the object to which \b data + * refers. + * See #mdb_get() for restrictions on using the output values. + * @param[in] cursor A cursor handle returned by #mdb_cursor_open() + * @param[in,out] key The key for a retrieved item + * @param[in,out] data The data of a retrieved item + * @param[in] op A cursor operation #MDB_cursor_op + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • #MDB_NOTFOUND - no matching key found. + *
  • EINVAL - an invalid parameter was specified. + *
+ */ + @(require_results) + cursor_get :: proc(cursor: ^Cursor, key: ^Val, data: ^Val, op: Cursor_Op) -> Error --- + + /** @brief Store by cursor. + * + * This function stores key/data pairs into the database. + * The cursor is positioned at the new item, or on failure usually near it. + * @note Earlier documentation incorrectly said errors would leave the + * state of the cursor unchanged. + * @param[in] cursor A cursor handle returned by #mdb_cursor_open() + * @param[in] key The key operated on. + * @param[in] data The data operated on. + * @param[in] flags Options for this operation. This parameter + * must be set to 0 or one of the values described here. + *
    + *
  • #MDB_CURRENT - replace the item at the current cursor position. + * The \b key parameter must still be provided, and must match it. + * If using sorted duplicates (#MDB_DUPSORT) the data item must still + * sort into the same place. This is intended to be used when the + * new data is the same size as the old. Otherwise it will simply + * perform a delete of the old record followed by an insert. + *
  • #MDB_NODUPDATA - enter the new key/data pair only if it does not + * already appear in the database. This flag may only be specified + * if the database was opened with #MDB_DUPSORT. The function will + * return #MDB_KEYEXIST if the key/data pair already appears in the + * database. + *
  • #MDB_NOOVERWRITE - enter the new key/data pair only if the key + * does not already appear in the database. The function will return + * #MDB_KEYEXIST if the key already appears in the database, even if + * the database supports duplicates (#MDB_DUPSORT). + *
  • #MDB_RESERVE - reserve space for data of the given size, but + * don't copy the given data. Instead, return a pointer to the + * reserved space, which the caller can fill in later - before + * the next update operation or the transaction ends. This saves + * an extra memcpy if the data is being generated later. This flag + * must not be specified if the database was opened with #MDB_DUPSORT. + *
  • #MDB_APPEND - append the given key/data pair to the end of the + * database. No key comparisons are performed. This option allows + * fast bulk loading when keys are already known to be in the + * correct order. Loading unsorted keys with this flag will cause + * a #MDB_KEYEXIST error. + *
  • #MDB_APPENDDUP - as above, but for sorted dup data. + *
  • #MDB_MULTIPLE - store multiple contiguous data elements in a + * single request. This flag may only be specified if the database + * was opened with #MDB_DUPFIXED. The \b data argument must be an + * array of two MDB_vals. The mv_size of the first MDB_val must be + * the size of a single data element. The mv_data of the first MDB_val + * must point to the beginning of the array of contiguous data elements. + * The mv_size of the second MDB_val must be the count of the number + * of data elements to store. On return this field will be set to + * the count of the number of elements actually written. The mv_data + * of the second MDB_val is unused. + *
+ * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • #MDB_MAP_FULL - the database is full, see #mdb_env_set_mapsize(). + *
  • #MDB_TXN_FULL - the transaction has too many dirty pages. + *
  • EACCES - an attempt was made to write in a read-only transaction. + *
  • EINVAL - an invalid parameter was specified. + *
+ */ + @(require_results) + cursor_put :: proc(cursor: ^Cursor, key: ^Val, data: ^Val, flags: u32) -> Error --- + + /** @brief Delete current key/data pair + * + * This function deletes the key/data pair to which the cursor refers. + * This does not invalidate the cursor, so operations such as MDB_NEXT + * can still be used on it. + * Both MDB_NEXT and MDB_GET_CURRENT will return the same record after + * this operation. + * @param[in] cursor A cursor handle returned by #mdb_cursor_open() + * @param[in] flags Options for this operation. This parameter + * must be set to 0 or one of the values described here. + *
    + *
  • #MDB_NODUPDATA - delete all of the data items for the current key. + * This flag may only be specified if the database was opened with #MDB_DUPSORT. + *
+ * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EACCES - an attempt was made to write in a read-only transaction. + *
  • EINVAL - an invalid parameter was specified. + *
+ */ + @(require_results) + cursor_del :: proc(cursor: ^Cursor, flags: u32) -> Error --- + + /** @brief Return count of duplicates for current key. + * + * This call is only valid on databases that support sorted duplicate + * data items #MDB_DUPSORT. + * @param[in] cursor A cursor handle returned by #mdb_cursor_open() + * @param[out] countp Address where the count will be stored + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - cursor is not initialized, or an invalid parameter was specified. + *
+ */ + @(require_results) + cursor_count :: proc(cursor: ^Cursor, countp: ^uint) -> Error --- + + /** @brief Compare two data items according to a particular database. + * + * This returns a comparison as if the two data items were keys in the + * specified database. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[in] a The first item to compare + * @param[in] b The second item to compare + * @return < 0 if a < b, 0 if a == b, > 0 if a > b + */ + cmp :: proc(txn: ^Txn, dbi: Dbi, a: ^Val, b: ^Val) -> i32 --- + + /** @brief Compare two data items according to a particular database. + * + * This returns a comparison as if the two items were data items of + * the specified database. The database must have the #MDB_DUPSORT flag. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[in] a The first item to compare + * @param[in] b The second item to compare + * @return < 0 if a < b, 0 if a == b, > 0 if a > b + */ + dcmp :: proc(txn: ^Txn, dbi: Dbi, a: ^Val, b: ^Val) -> i32 --- + + /** @brief Dump the entries in the reader lock table. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] func A #MDB_msg_func function + * @param[in] ctx Anything the message function needs + * @return < 0 on failure, >= 0 on success. + */ + reader_list :: proc(env: ^Env, func: Msg_Func, ctx: rawptr) -> i32 --- + + /** @brief Check for stale entries in the reader lock table. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[out] dead Number of stale slots that were cleared + * @return 0 on success, non-zero on failure. + */ + @(require_results) + reader_check :: proc(env: ^Env, dead: ^i32) -> Error --- +}