Compare commits
No commits in common. "master" and "main" have entirely different histories.
31 changed files with 111 additions and 2373 deletions
2
.gitignore
vendored
2
.gitignore
vendored
|
|
@ -1,2 +0,0 @@
|
|||
.DS_Store
|
||||
.idea/
|
||||
3
README
3
README
|
|
@ -1,3 +0,0 @@
|
|||
XX
|
||||
|
||||
Random experiments in Go.
|
||||
|
|
@ -1,87 +0,0 @@
|
|||
package arena
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
|
||||
"git.brut.systems/judah/xx/mem"
|
||||
)
|
||||
|
||||
// New returns a pointer to an Arena allocated value of type T.
|
||||
// If allocation fails, New will panic.
|
||||
//
|
||||
// Note: Accessing the returned value after calling Reset is unsafe and may result in a fault.
|
||||
func New[T any](arena Arena) *T {
|
||||
ptr, err := arena(ACTION_ALLOC, mem.Sizeof[T](), mem.Alignof[T](), nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return (*T)(ptr)
|
||||
}
|
||||
|
||||
// MakeSlice creates an Arena allocated []T with the given capacity and length.
|
||||
// If allocation fails, MakeSlice will panic.
|
||||
//
|
||||
// Note: Accessing the returned slice after calling Reset is unsafe and may result in a fault.
|
||||
func MakeSlice[T any](arena Arena, len, cap int) []T {
|
||||
ptr, err := arena(ACTION_ALLOC, mem.Sizeof[T]()*uintptr(len), mem.Alignof[T](), nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return unsafe.Slice((*T)(ptr), cap)[:len]
|
||||
}
|
||||
|
||||
// Reset restores an Arena to its initial state.
|
||||
//
|
||||
// Note: Accessing memory returned by an Arena after calling Reset is unsafe and may result in a fault.
|
||||
func Reset(arena Arena) {
|
||||
if _, err := arena(ACTION_RESET, 0, 0, nil); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Save returns the restorable state of an Arena.
|
||||
// The returned value is internal to the particular Arena and should not be modified.
|
||||
func Save(arena Arena) (watermark uintptr) {
|
||||
if _, err := arena(ACTION_SAVE, 0, 0, &watermark); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Restore restores an Arena to a previously saved state.
|
||||
func Restore(arena Arena, watermark uintptr) {
|
||||
if _, err := arena(ACTION_RESTORE, 0, 0, &watermark); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Arena represents a memory allocator.
|
||||
type Arena func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error)
|
||||
|
||||
// Action is a list of distinct events an Arena may respond to.
|
||||
type Action int
|
||||
|
||||
const (
|
||||
ACTION_ALLOC Action = iota
|
||||
ACTION_RESET
|
||||
ACTION_SAVE
|
||||
ACTION_RESTORE
|
||||
)
|
||||
|
||||
func (a Action) String() string {
|
||||
switch a {
|
||||
case ACTION_ALLOC:
|
||||
return "ALLOC"
|
||||
case ACTION_RESET:
|
||||
return "RESET"
|
||||
case ACTION_SAVE:
|
||||
return "SAVE"
|
||||
case ACTION_RESTORE:
|
||||
return "RESTORE"
|
||||
default:
|
||||
panic("unreachable")
|
||||
}
|
||||
}
|
||||
|
|
@ -1,30 +0,0 @@
|
|||
package arena_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"git.brut.systems/judah/xx/arena"
|
||||
"git.brut.systems/judah/xx/mem"
|
||||
"git.brut.systems/judah/xx/testx"
|
||||
)
|
||||
|
||||
func TestMakeSlice(t *testing.T) {
|
||||
a := arena.Linear(1024 * mem.Kilobyte)
|
||||
defer arena.Reset(a)
|
||||
|
||||
s := arena.MakeSlice[int](a, 99, 100)
|
||||
testx.Expect(t, len(s) == 99, "len = %d, expected 99", len(s))
|
||||
testx.Expect(t, cap(s) == 100, "cap = %d, expected 100", cap(s))
|
||||
|
||||
p := &s[0]
|
||||
|
||||
s[2] = 0xCAFE_DECAF
|
||||
s = append(s, 2)
|
||||
|
||||
testx.Expect(t, p == &s[0], "p = %p, expected %p", p, &s[0])
|
||||
|
||||
p = &s[0]
|
||||
s = append(s, 3) // cause a reallocation
|
||||
|
||||
testx.Expect(t, p != &s[0], "p = %p, expected %p", p, &s[0])
|
||||
}
|
||||
269
arena/arenas.go
269
arena/arenas.go
|
|
@ -1,269 +0,0 @@
|
|||
package arena
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
"math/bits"
|
||||
"runtime"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"git.brut.systems/judah/xx/mem"
|
||||
)
|
||||
|
||||
// Linear is a simple bump allocator with a fixed amount of backing memory.
|
||||
func Linear(capacity_in_bytes uintptr) Arena {
|
||||
if capacity_in_bytes <= 0 {
|
||||
panic("linear: capacity_in_bytes must be greater than zero")
|
||||
}
|
||||
|
||||
var (
|
||||
data = make([]byte, capacity_in_bytes)
|
||||
offset uintptr
|
||||
)
|
||||
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
|
||||
switch a {
|
||||
case ACTION_ALLOC:
|
||||
aligned := mem.AlignForward(size, align)
|
||||
if offset+aligned > capacity_in_bytes {
|
||||
return nil, errors.New("linear: out of memory")
|
||||
}
|
||||
|
||||
ptr := &data[offset]
|
||||
offset += aligned
|
||||
return unsafe.Pointer(ptr), nil
|
||||
case ACTION_RESET:
|
||||
clear(data)
|
||||
offset = 0
|
||||
case ACTION_SAVE:
|
||||
if watermark == nil {
|
||||
return nil, errors.New("linear: cannot save to nil watermark")
|
||||
}
|
||||
|
||||
*watermark = offset
|
||||
case ACTION_RESTORE:
|
||||
if watermark == nil {
|
||||
return nil, errors.New("linear: cannot restore nil watermark")
|
||||
}
|
||||
|
||||
clear(data[*watermark:offset])
|
||||
offset = *watermark
|
||||
default:
|
||||
panic("linear: unimplemented action - " + a.String())
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Ring is an Arena that only allocates values of the given type.
|
||||
// When capacity is exceeded, previous allocations will be reused to accommodate new ones
|
||||
//
|
||||
// Note: Allocating different types from the same Pool is unsafe and may cause memory corruption.
|
||||
func Ring[T any](capacity uintptr) Arena {
|
||||
if capacity <= 0 {
|
||||
panic("pool: capacity must be greater than zero")
|
||||
}
|
||||
|
||||
pointers := make([]T, 0, capacity)
|
||||
return func(a Action, _, _ uintptr, watermark *uintptr) (unsafe.Pointer, error) {
|
||||
switch a {
|
||||
case ACTION_ALLOC:
|
||||
if len(pointers) == cap(pointers) {
|
||||
pointers = pointers[:0]
|
||||
}
|
||||
|
||||
pointers = append(pointers, mem.ZeroValue[T]())
|
||||
return unsafe.Pointer(&pointers[len(pointers)-1]), nil
|
||||
case ACTION_RESET:
|
||||
clear(pointers)
|
||||
pointers = pointers[:0]
|
||||
case ACTION_SAVE:
|
||||
if watermark == nil {
|
||||
return nil, errors.New("pool: cannot save to nil watermark")
|
||||
}
|
||||
|
||||
*watermark = uintptr(len(pointers))
|
||||
case ACTION_RESTORE:
|
||||
if watermark == nil {
|
||||
return nil, errors.New("pool: cannot restore nil watermark")
|
||||
}
|
||||
|
||||
clear(pointers[*watermark:])
|
||||
pointers = pointers[:*watermark]
|
||||
default:
|
||||
panic("pool: unimplemented action - " + a.String())
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Chunked is an Arena that groups allocations by size.
|
||||
func Chunked(max_allocs_per_chunk uintptr) Arena {
|
||||
type chunk struct {
|
||||
data []byte
|
||||
offset uintptr
|
||||
saved uintptr
|
||||
}
|
||||
|
||||
groups := make([][]chunk, 64)
|
||||
return func(a Action, size, align uintptr, _ *uintptr) (unsafe.Pointer, error) {
|
||||
switch a {
|
||||
case ACTION_ALLOC:
|
||||
aligned := mem.AlignForward(size, align)
|
||||
if aligned == 0 {
|
||||
aligned = 1
|
||||
}
|
||||
aligned = 1 << bits.Len(uint(aligned-1))
|
||||
|
||||
idx := bits.TrailingZeros(uint(aligned))
|
||||
if idx >= len(groups) {
|
||||
groups = append(groups, make([][]chunk, idx-len(groups)+1)...)
|
||||
}
|
||||
|
||||
group := groups[idx]
|
||||
if len(group) == 0 {
|
||||
group = append(group, chunk{
|
||||
data: make([]byte, aligned*max_allocs_per_chunk),
|
||||
})
|
||||
}
|
||||
|
||||
c := &group[len(group)-1]
|
||||
if c.offset+aligned > uintptr(len(c.data)) {
|
||||
group = append(group, chunk{
|
||||
data: make([]byte, aligned*max_allocs_per_chunk),
|
||||
})
|
||||
|
||||
c = &group[len(group)-1]
|
||||
}
|
||||
|
||||
ptr := &c.data[c.offset]
|
||||
c.offset += aligned
|
||||
groups[idx] = group
|
||||
|
||||
return unsafe.Pointer(ptr), nil
|
||||
case ACTION_RESET:
|
||||
for _, g := range groups {
|
||||
for i := range len(g) {
|
||||
g[i].offset = 0
|
||||
g[i].saved = 0
|
||||
clear(g[i].data)
|
||||
}
|
||||
}
|
||||
case ACTION_SAVE:
|
||||
for _, g := range groups {
|
||||
for i := range len(g) {
|
||||
g[i].saved = g[i].offset
|
||||
}
|
||||
}
|
||||
case ACTION_RESTORE:
|
||||
for _, g := range groups {
|
||||
for i := range len(g) {
|
||||
g[i].offset = g[i].saved
|
||||
}
|
||||
}
|
||||
default:
|
||||
panic("chunked: unimplemented action - " + a.String())
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Nil is an Arena that always returns an error.
|
||||
//
|
||||
// Note: This is useful for tracking usage locations
|
||||
func Nil() Arena {
|
||||
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
|
||||
return nil, errors.New("use of nil allocator")
|
||||
}
|
||||
}
|
||||
|
||||
// Region wraps an Arena, restoring it to its previous state when Reset is called.
|
||||
func Region(arena Arena) Arena {
|
||||
watermark := Save(arena)
|
||||
return func(a Action, size, align uintptr, wm *uintptr) (unsafe.Pointer, error) {
|
||||
if a == ACTION_RESET {
|
||||
Restore(arena, watermark)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return arena(a, size, align, wm)
|
||||
}
|
||||
}
|
||||
|
||||
// Split wraps two [[Arena]]s, dispatching allocations to a particular one based on the requested size.
|
||||
func Split(split_size uintptr, smaller, larger Arena) Arena {
|
||||
var watermarks [2]uintptr
|
||||
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
|
||||
switch a {
|
||||
case ACTION_ALLOC:
|
||||
if size <= split_size {
|
||||
return smaller(a, size, align, watermark)
|
||||
}
|
||||
return larger(a, size, align, watermark)
|
||||
case ACTION_RESET:
|
||||
Reset(smaller)
|
||||
Reset(larger)
|
||||
case ACTION_SAVE:
|
||||
watermarks[0] = Save(smaller)
|
||||
watermarks[1] = Save(larger)
|
||||
case ACTION_RESTORE:
|
||||
Restore(smaller, watermarks[0])
|
||||
Restore(larger, watermarks[1])
|
||||
default:
|
||||
panic("split: unimplemented action - " + a.String())
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Logger wraps an Arena, logging its usage locations.
|
||||
func Logger(arena Arena) Arena {
|
||||
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
|
||||
// We expect allocators to be used via the high-level API, so we grab the caller location relative to that.
|
||||
// @todo(judah): can we determine this dynamically?
|
||||
_, file, line, ok := runtime.Caller(2)
|
||||
if !ok {
|
||||
file = "<unknown>"
|
||||
line = 0
|
||||
}
|
||||
|
||||
log.Printf("%s:%d - %s (size: %d, align: %d, watermark: %p)", file, line, a, size, align, watermark)
|
||||
return arena(a, size, align, watermark)
|
||||
}
|
||||
}
|
||||
|
||||
// Concurrent wraps an Arena, ensuring it is safe for concurrent use.
|
||||
func Concurrent(arena Arena) Arena {
|
||||
mtx := new(sync.Mutex)
|
||||
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
|
||||
mtx.Lock()
|
||||
ptr, err := arena(a, size, align, watermark)
|
||||
mtx.Unlock()
|
||||
return ptr, err
|
||||
}
|
||||
}
|
||||
|
||||
// Pinned wraps an Arena, ensuring the memory returned is stable until Reset is called.
|
||||
//
|
||||
// The memory returned by Pinned is safe to pass over cgo boundaries.
|
||||
func Pinned(arena Arena) Arena {
|
||||
var pinner runtime.Pinner
|
||||
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
|
||||
ptr, err := arena(a, size, align, watermark)
|
||||
if err != nil {
|
||||
return ptr, err
|
||||
}
|
||||
|
||||
if a == ACTION_RESET {
|
||||
pinner.Unpin()
|
||||
} else {
|
||||
pinner.Pin(ptr)
|
||||
}
|
||||
|
||||
return ptr, err
|
||||
}
|
||||
}
|
||||
|
|
@ -1,279 +0,0 @@
|
|||
package arena_test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"runtime"
|
||||
"testing"
|
||||
"unsafe"
|
||||
|
||||
"git.brut.systems/judah/xx/arena"
|
||||
"git.brut.systems/judah/xx/mem"
|
||||
)
|
||||
|
||||
func BenchmarkAlloc_New_Small(b *testing.B) {
|
||||
var last *int
|
||||
for i := range b.N {
|
||||
v := new(int)
|
||||
*v = i
|
||||
last = v
|
||||
|
||||
if i%1000 == 0 {
|
||||
last = nil
|
||||
}
|
||||
}
|
||||
|
||||
runtime.KeepAlive(last)
|
||||
}
|
||||
|
||||
func BenchmarkAlloc_Closure_Small(b *testing.B) {
|
||||
alloc := arena.Ring[int](16)
|
||||
|
||||
var last *int
|
||||
for i := range b.N {
|
||||
v := arena.New[int](alloc)
|
||||
*v = i
|
||||
last = v
|
||||
|
||||
if i%1000 == 0 {
|
||||
arena.Reset(alloc)
|
||||
}
|
||||
}
|
||||
|
||||
runtime.KeepAlive(last)
|
||||
}
|
||||
|
||||
func BenchmarkAlloc_Interface_Small(b *testing.B) {
|
||||
alloc := NewLinear(16 * mem.Kilobyte)
|
||||
|
||||
var last *int
|
||||
for i := range b.N {
|
||||
v := New[int](alloc)
|
||||
*v = i
|
||||
last = v
|
||||
|
||||
if i%1000 == 0 {
|
||||
Reset(alloc)
|
||||
}
|
||||
}
|
||||
|
||||
runtime.KeepAlive(last)
|
||||
}
|
||||
|
||||
type large struct{ a, b, c, d, e, f, g, h, i int }
|
||||
|
||||
func BenchmarkAlloc_New_Large(b *testing.B) {
|
||||
var last *large
|
||||
for i := range b.N {
|
||||
v := new(large)
|
||||
v.e = i
|
||||
last = v
|
||||
|
||||
if i%1000 == 0 {
|
||||
last = nil
|
||||
}
|
||||
}
|
||||
|
||||
runtime.KeepAlive(last)
|
||||
}
|
||||
|
||||
func BenchmarkAlloc_Closure_Large(b *testing.B) {
|
||||
alloc := arena.Linear(128 * mem.Kilobyte)
|
||||
|
||||
var last *large
|
||||
for i := range b.N {
|
||||
v := arena.New[large](alloc)
|
||||
v.e = i
|
||||
last = v
|
||||
|
||||
if i%1000 == 0 {
|
||||
arena.Reset(alloc)
|
||||
}
|
||||
}
|
||||
|
||||
runtime.KeepAlive(last)
|
||||
}
|
||||
|
||||
func BenchmarkAlloc_Interface_Large(b *testing.B) {
|
||||
alloc := NewLinear(128 * mem.Kilobyte)
|
||||
|
||||
var last *large
|
||||
for i := range b.N {
|
||||
v := New[large](alloc)
|
||||
v.e = i
|
||||
last = v
|
||||
|
||||
if i%1000 == 0 {
|
||||
Reset(alloc)
|
||||
}
|
||||
}
|
||||
|
||||
runtime.KeepAlive(last)
|
||||
}
|
||||
|
||||
func BenchmarkAlloc_Closure_HotPath(b *testing.B) {
|
||||
alloc := arena.Linear(256)
|
||||
|
||||
var (
|
||||
lastlarge *large
|
||||
lastsmall *int
|
||||
)
|
||||
for i := range b.N {
|
||||
if i%2 == 0 {
|
||||
lastsmall = arena.New[int](alloc)
|
||||
} else {
|
||||
lastlarge = arena.New[large](alloc)
|
||||
}
|
||||
|
||||
arena.Reset(alloc)
|
||||
}
|
||||
|
||||
runtime.KeepAlive(lastlarge)
|
||||
runtime.KeepAlive(lastsmall)
|
||||
}
|
||||
|
||||
func BenchmarkAlloc_Interface_HotPath(b *testing.B) {
|
||||
alloc := NewLinear(256)
|
||||
|
||||
var (
|
||||
lastlarge *large
|
||||
lastsmall *int
|
||||
)
|
||||
for i := range b.N {
|
||||
if i%2 == 0 {
|
||||
lastsmall = New[int](alloc)
|
||||
} else {
|
||||
lastlarge = New[large](alloc)
|
||||
}
|
||||
|
||||
Reset(alloc)
|
||||
}
|
||||
|
||||
runtime.KeepAlive(lastlarge)
|
||||
runtime.KeepAlive(lastsmall)
|
||||
}
|
||||
|
||||
func BenchmarkAlloc_Closure_Wrapped(b *testing.B) {
|
||||
alloc := arena.Pinned(arena.Pinned(arena.Pinned(arena.Linear(256))))
|
||||
|
||||
var (
|
||||
lastlarge *large
|
||||
lastsmall *int
|
||||
)
|
||||
for i := range b.N {
|
||||
if i%2 == 0 {
|
||||
lastsmall = arena.New[int](alloc)
|
||||
} else {
|
||||
lastlarge = arena.New[large](alloc)
|
||||
}
|
||||
|
||||
arena.Reset(alloc)
|
||||
}
|
||||
|
||||
runtime.KeepAlive(lastlarge)
|
||||
runtime.KeepAlive(lastsmall)
|
||||
}
|
||||
|
||||
func BenchmarkAlloc_Interface_Wrapped(b *testing.B) {
|
||||
alloc := NewPinned(NewPinned(NewPinned(NewLinear(256))))
|
||||
|
||||
var (
|
||||
lastlarge *large
|
||||
lastsmall *int
|
||||
)
|
||||
for i := range b.N {
|
||||
if i%2 == 0 {
|
||||
lastsmall = New[int](alloc)
|
||||
} else {
|
||||
lastlarge = New[large](alloc)
|
||||
}
|
||||
|
||||
Reset(alloc)
|
||||
}
|
||||
|
||||
runtime.KeepAlive(lastlarge)
|
||||
runtime.KeepAlive(lastsmall)
|
||||
}
|
||||
|
||||
type Allocator interface {
|
||||
Proc(a arena.Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error)
|
||||
}
|
||||
|
||||
func New[T any](a Allocator) *T {
|
||||
ptr, err := a.Proc(arena.ACTION_ALLOC, mem.Sizeof[T](), mem.Alignof[T](), nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return (*T)(ptr)
|
||||
}
|
||||
|
||||
func Reset(a Allocator) {
|
||||
if _, err := a.Proc(arena.ACTION_RESET, 0, 0, nil); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
type Linear struct {
|
||||
data []byte
|
||||
maxsize uintptr
|
||||
offset uintptr
|
||||
}
|
||||
|
||||
func NewLinear(maxsize uintptr) *Linear {
|
||||
return &Linear{
|
||||
data: make([]byte, maxsize),
|
||||
maxsize: maxsize,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *Linear) Proc(a arena.Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
|
||||
switch a {
|
||||
case arena.ACTION_ALLOC:
|
||||
aligned := mem.AlignForward(size, align)
|
||||
if l.offset+aligned > l.maxsize {
|
||||
return nil, errors.New("linear: out of memory")
|
||||
}
|
||||
|
||||
ptr := &l.data[l.offset]
|
||||
l.offset += aligned
|
||||
return unsafe.Pointer(ptr), nil
|
||||
|
||||
case arena.ACTION_RESET:
|
||||
clear(l.data)
|
||||
l.offset = 0
|
||||
|
||||
case arena.ACTION_SAVE:
|
||||
*watermark = l.offset
|
||||
case arena.ACTION_RESTORE:
|
||||
l.offset = *watermark
|
||||
|
||||
default:
|
||||
panic("unimplemented action: " + a.String())
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type Pinned struct {
|
||||
arena Allocator
|
||||
pinner runtime.Pinner
|
||||
}
|
||||
|
||||
func NewPinned(arena Allocator) *Pinned {
|
||||
return &Pinned{arena: arena}
|
||||
}
|
||||
|
||||
func (p *Pinned) Proc(a arena.Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
|
||||
ptr, err := p.arena.Proc(a, size, align, watermark)
|
||||
if err != nil {
|
||||
return ptr, err
|
||||
}
|
||||
|
||||
if a == arena.ACTION_RESET {
|
||||
p.pinner.Unpin()
|
||||
} else {
|
||||
p.pinner.Pin(ptr)
|
||||
}
|
||||
|
||||
return ptr, err
|
||||
}
|
||||
|
|
@ -1,122 +0,0 @@
|
|||
package arena_test
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
"unsafe"
|
||||
|
||||
"git.brut.systems/judah/xx/arena"
|
||||
"git.brut.systems/judah/xx/mem"
|
||||
"git.brut.systems/judah/xx/testx"
|
||||
)
|
||||
|
||||
func TestArenas_ThatShouldPanicWhenOOM(t *testing.T) {
|
||||
arenas := []arena.Arena{
|
||||
arena.Linear(1),
|
||||
arena.Nil(),
|
||||
}
|
||||
|
||||
for _, a := range arenas {
|
||||
testx.ShouldPanic(t, func() {
|
||||
_ = arena.New[int](a)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestArenas_ThatShouldClearAfterReset(t *testing.T) {
|
||||
arenas := []arena.Arena{
|
||||
arena.Linear(16),
|
||||
arena.Chunked(16),
|
||||
arena.Ring[uint16](2),
|
||||
}
|
||||
|
||||
for _, a := range arenas {
|
||||
x := arena.New[uint16](a)
|
||||
y := arena.New[uint16](a)
|
||||
*x, *y = 100, 200
|
||||
arena.Reset(a)
|
||||
|
||||
testx.Expect(t, *x == 0, "x = %d, expected 0", *x)
|
||||
testx.Expect(t, *y == 0, "y = %d, expected 0", *y)
|
||||
}
|
||||
}
|
||||
|
||||
func TestArenas_ThatShouldReuseMemoryAfterReset(t *testing.T) {
|
||||
arenas := []arena.Arena{
|
||||
arena.Linear(16),
|
||||
arena.Chunked(16),
|
||||
arena.Ring[uint16](2),
|
||||
}
|
||||
|
||||
for _, a := range arenas {
|
||||
x1 := arena.New[uint16](a)
|
||||
y1 := arena.New[uint16](a)
|
||||
|
||||
arena.Reset(a)
|
||||
|
||||
x2 := arena.New[uint16](a)
|
||||
y2 := arena.New[uint16](a)
|
||||
|
||||
testx.Expect(t, x1 == x2, "x1 = %p, x2 = %p", x1, x2)
|
||||
testx.Expect(t, y1 == y2, "y1 = %p, y2 = %p", y1, y2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestArenas_WithRegion(t *testing.T) {
|
||||
arenas := []arena.Arena{
|
||||
arena.Linear(256),
|
||||
arena.Chunked(256),
|
||||
arena.Ring[uint16](16),
|
||||
}
|
||||
|
||||
var baseptrs []*uint16
|
||||
for i, a := range arenas {
|
||||
v := arena.New[uint16](a)
|
||||
*v = uint16(i)
|
||||
baseptrs = append(baseptrs, v)
|
||||
}
|
||||
|
||||
for _, a := range arenas {
|
||||
a := arena.Region(a)
|
||||
for range 10 {
|
||||
_ = arena.New[uint16](a)
|
||||
}
|
||||
arena.Reset(a)
|
||||
}
|
||||
|
||||
for i, a := range arenas {
|
||||
testx.Expect(t, *baseptrs[i] == uint16(i), "baseptrs[%d] = %d, expected %d", i, *baseptrs[i], i)
|
||||
|
||||
base := uintptr(unsafe.Pointer(baseptrs[i]))
|
||||
next := uintptr(unsafe.Pointer(arena.New[uint16](a)))
|
||||
testx.Expect(t, next-base == mem.Sizeof[uint16](), "delta was %d", next-base)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConcurrent(t *testing.T) {
|
||||
a := arena.Concurrent(arena.Linear(16))
|
||||
|
||||
base, err := a(arena.ACTION_ALLOC, 0, 1, nil)
|
||||
testx.Expect(t, err == nil, "ACTION_ALLOC failed: %v", err)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Go(func() {
|
||||
_ = arena.New[uint8](a)
|
||||
_ = arena.New[uint8](a)
|
||||
_ = arena.New[uint8](a)
|
||||
_ = arena.New[uint8](a)
|
||||
})
|
||||
|
||||
wg.Go(func() {
|
||||
_ = arena.New[uint16](a)
|
||||
_ = arena.New[uint16](a)
|
||||
_ = arena.New[uint16](a)
|
||||
_ = arena.New[uint16](a)
|
||||
})
|
||||
|
||||
wg.Wait()
|
||||
|
||||
after, err := a(arena.ACTION_ALLOC, 0, 1, nil)
|
||||
testx.Expect(t, err == nil, "ACTION_ALLOC failed: %v", err)
|
||||
testx.Expect(t, uintptr(after)-uintptr(base) == 12, "diff is: %v", uintptr(after)-uintptr(base))
|
||||
}
|
||||
|
|
@ -1,9 +0,0 @@
|
|||
//go:build !XX_DISABLE_ASSERT
|
||||
|
||||
package xx
|
||||
|
||||
func Assert(cond bool) {
|
||||
if !cond {
|
||||
panic("assertion failed")
|
||||
}
|
||||
}
|
||||
|
|
@ -1,7 +0,0 @@
|
|||
//go:build XX_DISABLE_ASSERT
|
||||
|
||||
package xx
|
||||
|
||||
func Assert(cond bool) {}
|
||||
|
||||
func Unreachable() {}
|
||||
|
|
@ -1,112 +0,0 @@
|
|||
package bucket
|
||||
|
||||
import (
|
||||
"iter"
|
||||
)
|
||||
|
||||
const DefaultElementsPerBucket = 32
|
||||
|
||||
// Array is a resizable array whose values will never move in memory.
|
||||
// This means it is safe to take a pointer to a value within the array
|
||||
// while continuing to append to it.
|
||||
type Array[T any] struct {
|
||||
last int
|
||||
elements_per_bucket int
|
||||
buckets []bucket[T]
|
||||
}
|
||||
|
||||
func (s *Array[T]) Init() {
|
||||
s.InitWithCapacity(DefaultElementsPerBucket)
|
||||
}
|
||||
|
||||
func (s *Array[T]) InitWithCapacity(elements_per_bucket int) {
|
||||
if elements_per_bucket <= 0 {
|
||||
elements_per_bucket = DefaultElementsPerBucket
|
||||
}
|
||||
|
||||
s.elements_per_bucket = elements_per_bucket
|
||||
|
||||
s.buckets = s.buckets[:0]
|
||||
s.buckets = append(s.buckets, make(bucket[T], 0, s.elements_per_bucket))
|
||||
s.last = 0
|
||||
}
|
||||
|
||||
func (s *Array[T]) Reset() {
|
||||
s.buckets = s.buckets[:0]
|
||||
s.last = 0
|
||||
}
|
||||
|
||||
func (s *Array[T]) Append(value T) *T {
|
||||
if len(s.buckets) == 0 {
|
||||
s.Init()
|
||||
}
|
||||
|
||||
if len(s.buckets[s.last]) == cap(s.buckets[s.last]) {
|
||||
s.buckets = append(s.buckets, make(bucket[T], 0, s.elements_per_bucket))
|
||||
s.last += 1
|
||||
}
|
||||
|
||||
s.buckets[s.last] = append(s.buckets[s.last], value)
|
||||
return &s.buckets[s.last][len(s.buckets[s.last])-1]
|
||||
}
|
||||
|
||||
func (s *Array[T]) AppendMany(values ...T) (first *T) {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
first = s.Append(values[0])
|
||||
|
||||
if len(values) > 1 {
|
||||
for _, v := range values[1:] {
|
||||
s.Append(v)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Array[T]) Get(index int) *T {
|
||||
b := s.buckets[index/s.elements_per_bucket]
|
||||
return &b[index%s.elements_per_bucket]
|
||||
}
|
||||
|
||||
func (s *Array[T]) Set(index int, value T) {
|
||||
*s.Get(index) = value
|
||||
}
|
||||
|
||||
func (s *Array[T]) Len() int {
|
||||
return s.Cap() - (cap(s.buckets[s.last]) - len(s.buckets[s.last]))
|
||||
}
|
||||
|
||||
func (s *Array[T]) Cap() int {
|
||||
return len(s.buckets) * s.elements_per_bucket
|
||||
}
|
||||
|
||||
func (s *Array[T]) Pointers() iter.Seq2[int, *T] {
|
||||
return func(yield func(int, *T) bool) {
|
||||
for bi := range s.buckets {
|
||||
startIdx := bi * s.elements_per_bucket
|
||||
for i := range s.buckets[bi] {
|
||||
if !yield(startIdx+i, &s.buckets[bi][i]) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Array[T]) Values() iter.Seq2[int, T] {
|
||||
return func(yield func(int, T) bool) {
|
||||
for bi, b := range s.buckets {
|
||||
startIdx := bi * s.elements_per_bucket
|
||||
for i := range b {
|
||||
if !yield(startIdx+i, b[i]) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type bucket[T any] = []T
|
||||
|
|
@ -1,70 +0,0 @@
|
|||
package bucket_test
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"git.brut.systems/judah/xx/containerx/bucket"
|
||||
"git.brut.systems/judah/xx/testx"
|
||||
)
|
||||
|
||||
func TestArray_StableWithGC(t *testing.T) {
|
||||
type valuewithptr struct {
|
||||
value int
|
||||
ptr *int
|
||||
}
|
||||
|
||||
var arr bucket.Array[valuewithptr]
|
||||
aptr := arr.Append(valuewithptr{value: 10, ptr: nil})
|
||||
bptr := arr.Append(valuewithptr{value: 20, ptr: &aptr.value})
|
||||
|
||||
const N = 1000
|
||||
for i := range N {
|
||||
arr.Append(valuewithptr{value: i})
|
||||
runtime.GC()
|
||||
}
|
||||
|
||||
testx.Expect(t, arr.Get(0) == aptr)
|
||||
testx.Expect(t, arr.Get(1) == bptr)
|
||||
testx.Expect(t, arr.Len() == N+2, "len was %d", arr.Len())
|
||||
testx.Expect(t, bptr.ptr != nil && bptr.value == 20)
|
||||
testx.Expect(t, bptr.ptr == &aptr.value, "%p vs. %p", bptr.ptr, &aptr.value)
|
||||
}
|
||||
|
||||
func BenchmarkArray_RandomAccess(b *testing.B) {
|
||||
var arr bucket.Array[int]
|
||||
for i := range b.N {
|
||||
arr.Append(i * i)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := range b.N {
|
||||
arr.Get(i % b.N)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkArray_Append(b *testing.B) {
|
||||
var arr bucket.Array[int]
|
||||
for i := range b.N {
|
||||
arr.Append(i * i)
|
||||
}
|
||||
|
||||
arr.Reset()
|
||||
for i := range b.N {
|
||||
arr.Append(i * i)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkArray_Iteration(b *testing.B) {
|
||||
var arr bucket.Array[int]
|
||||
for i := range b.N {
|
||||
arr.Append(i * i)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
sum := 0
|
||||
for _, v := range arr.Values() {
|
||||
sum += v
|
||||
}
|
||||
}
|
||||
|
|
@ -1,174 +0,0 @@
|
|||
package xar
|
||||
|
||||
import (
|
||||
"iter"
|
||||
"math/bits"
|
||||
"unsafe"
|
||||
|
||||
"git.brut.systems/judah/xx"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultChunkSizeShift = 4
|
||||
)
|
||||
|
||||
// Xar is an implementation of Andrew Reece's Xar (Exponential Array).
|
||||
//
|
||||
// See: https://www.youtube.com/watch?v=i-h95QIGchY
|
||||
type Xar[T any] struct {
|
||||
shift uint8
|
||||
count uint64
|
||||
chunks [][]T
|
||||
}
|
||||
|
||||
func (x *Xar[T]) Init() {
|
||||
x.InitWithSize(DefaultChunkSizeShift)
|
||||
}
|
||||
|
||||
func (x *Xar[T]) InitWithSize(size_shift uint8) {
|
||||
if len(x.chunks) != 0 {
|
||||
x.Reset()
|
||||
}
|
||||
|
||||
x.shift = size_shift
|
||||
}
|
||||
|
||||
func (x *Xar[T]) Reset() {
|
||||
for _, c := range x.chunks {
|
||||
clear(c)
|
||||
}
|
||||
|
||||
x.count = 0
|
||||
}
|
||||
|
||||
func (x *Xar[T]) Append(value T) *T {
|
||||
if x.shift == 0 {
|
||||
x.Init()
|
||||
}
|
||||
|
||||
chunk_idx, idx_in_chunk, chunk_cap := x.getChunk(x.count)
|
||||
x.count += 1
|
||||
if chunk_idx >= uint64(len(x.chunks)) {
|
||||
x.chunks = append(x.chunks, make([]T, chunk_cap))
|
||||
}
|
||||
|
||||
slot := &x.chunks[chunk_idx][idx_in_chunk]
|
||||
*slot = value
|
||||
return slot
|
||||
}
|
||||
|
||||
func (x *Xar[T]) AppendMany(values ...T) *T {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
first := x.Append(values[0])
|
||||
if len(values) > 1 {
|
||||
for _, v := range values[1:] {
|
||||
x.Append(v)
|
||||
}
|
||||
}
|
||||
|
||||
return first
|
||||
}
|
||||
|
||||
func (x *Xar[T]) Get(index int) *T {
|
||||
chunk_idx, idx_in_chunk, _ := x.getChunk(uint64(index))
|
||||
return &x.chunks[chunk_idx][idx_in_chunk]
|
||||
}
|
||||
|
||||
func (x *Xar[T]) Set(index int, value T) {
|
||||
*x.Get(index) = value
|
||||
}
|
||||
|
||||
func (x *Xar[T]) Remove(index int) {
|
||||
x.Set(index, *x.Get(int(x.count - 1)))
|
||||
x.count -= 1
|
||||
}
|
||||
|
||||
func (x *Xar[T]) Len() int {
|
||||
return int(x.count)
|
||||
}
|
||||
|
||||
func (x *Xar[T]) Cap() (l int) {
|
||||
for _, c := range x.chunks {
|
||||
l += cap(c)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (x *Xar[T]) Pointers() iter.Seq2[int, *T] {
|
||||
return func(yield func(int, *T) bool) {
|
||||
idx := -1
|
||||
for chunk_idx, idx_in_chunk := range x.iter() {
|
||||
idx += 1
|
||||
if !yield(idx, &x.chunks[chunk_idx][idx_in_chunk]) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Xar[T]) Values() iter.Seq2[int, T] {
|
||||
return func(yield func(int, T) bool) {
|
||||
idx := -1
|
||||
for chunk_idx, idx_in_chunk := range x.iter() {
|
||||
idx += 1
|
||||
if !yield(idx, x.chunks[chunk_idx][idx_in_chunk]) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Xar[T]) iter() iter.Seq2[uint64, uint64] {
|
||||
return func(yield func(uint64, uint64) bool) {
|
||||
chunk_size := 1 << x.shift
|
||||
outer:
|
||||
for chunk_idx := range x.chunks {
|
||||
for idx_in_chunk := range chunk_size - 1 {
|
||||
if uint64(chunk_idx+idx_in_chunk) >= uint64(x.count) {
|
||||
break outer
|
||||
}
|
||||
|
||||
if !yield(uint64(chunk_idx), uint64(idx_in_chunk)) {
|
||||
break outer
|
||||
}
|
||||
}
|
||||
|
||||
chunk_size <<= xx.BoolUint(chunk_idx > 0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Xar[T]) getChunk(index uint64) (chunk_idx uint64, idx_in_chunk uint64, chunk_cap uint64) {
|
||||
if true /* branchless */ {
|
||||
var (
|
||||
i_shift = index >> x.shift
|
||||
i_shift2 = i_shift != 0
|
||||
msb = msb64(i_shift | 1)
|
||||
b = uint64(*(*uint8)(unsafe.Pointer(&i_shift2)))
|
||||
)
|
||||
|
||||
chunk_idx = msb + b
|
||||
idx_in_chunk = index - (b << (msb + uint64(x.shift)))
|
||||
chunk_cap = 1 << (msb + uint64(x.shift))
|
||||
} else {
|
||||
idx_in_chunk = index
|
||||
chunk_cap = 1 << x.shift
|
||||
|
||||
i_shift := index >> x.shift
|
||||
if i_shift > 0 {
|
||||
chunk_idx = msb64(i_shift | 1)
|
||||
chunk_cap = 1 << (chunk_idx + uint64(x.shift))
|
||||
idx_in_chunk -= chunk_cap
|
||||
chunk_idx += 1
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func msb64(x uint64) uint64 {
|
||||
return uint64(63 - bits.LeadingZeros64(x))
|
||||
}
|
||||
|
|
@ -1,105 +0,0 @@
|
|||
package xar_test
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"git.brut.systems/judah/xx/containerx/xar"
|
||||
"git.brut.systems/judah/xx/testx"
|
||||
)
|
||||
|
||||
func TestXar_StableWithGC(t *testing.T) {
|
||||
type valuewithptr struct {
|
||||
value int
|
||||
ptr *int
|
||||
}
|
||||
|
||||
var x xar.Xar[valuewithptr]
|
||||
x.InitWithSize(8)
|
||||
|
||||
aptr := x.Append(valuewithptr{value: 10, ptr: nil})
|
||||
bptr := x.Append(valuewithptr{value: 20, ptr: &aptr.value})
|
||||
|
||||
const N = 1000
|
||||
for i := range N {
|
||||
x.Append(valuewithptr{value: i})
|
||||
runtime.GC()
|
||||
}
|
||||
|
||||
testx.Expect(t, x.Get(0) == bptr)
|
||||
testx.Expect(t, x.Get(1) == bptr)
|
||||
testx.Expect(t, x.Len() == N+2, "len was %d", x.Len())
|
||||
testx.Expect(t, bptr.ptr != nil && bptr.value == 20)
|
||||
testx.Expect(t, bptr.ptr == &aptr.value, "%p vs. %p", bptr.ptr, &aptr.value)
|
||||
}
|
||||
|
||||
func TestXar_ResetAndReuse(t *testing.T) {
|
||||
var x xar.Xar[int]
|
||||
start := x.Append(60)
|
||||
x.AppendMany(10, 20, 30, 40, 50)
|
||||
|
||||
x.Reset()
|
||||
runtime.GC()
|
||||
|
||||
testx.Expect(t, x.Cap() != 0)
|
||||
testx.Expect(t, x.Len() == 0)
|
||||
|
||||
x.Append(0xFF)
|
||||
x.Append(0xFC)
|
||||
x.Append(0xFB)
|
||||
|
||||
testx.Expect(t, x.Get(0) == start)
|
||||
testx.Expect(t, x.Len() == 3)
|
||||
}
|
||||
|
||||
func TestXar_Iterators(t *testing.T) {
|
||||
var x xar.Xar[int]
|
||||
x.AppendMany(0, 1, 2, 3, 4, 5)
|
||||
|
||||
iterations := 0
|
||||
for i, v := range x.Values() {
|
||||
iterations += 1
|
||||
testx.Expect(t, v == i, "v: %d, i: %d", v, i)
|
||||
}
|
||||
|
||||
testx.Expect(t, iterations == x.Len())
|
||||
}
|
||||
|
||||
func BenchmarkXar_Append(b *testing.B) {
|
||||
var x xar.Xar[int]
|
||||
for i := range b.N {
|
||||
x.Append(i * i)
|
||||
}
|
||||
|
||||
x.Reset()
|
||||
for i := range b.N {
|
||||
x.Append(i * i)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkXar_RandomAccess(b *testing.B) {
|
||||
var x xar.Xar[int]
|
||||
for i := range b.N {
|
||||
x.Append(i * i)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := range b.N {
|
||||
x.Get(i % b.N)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkXar_Iteration(b *testing.B) {
|
||||
var x xar.Xar[int]
|
||||
for i := range b.N {
|
||||
x.Append(i * i)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
sum := 0
|
||||
for _, v := range x.Values() {
|
||||
sum += v
|
||||
}
|
||||
}
|
||||
2
go.mod
2
go.mod
|
|
@ -1,5 +1,3 @@
|
|||
module git.brut.systems/judah/xx
|
||||
|
||||
go 1.25.0
|
||||
|
||||
require github.com/ebitengine/purego v0.9.1
|
||||
|
|
|
|||
2
go.sum
2
go.sum
|
|
@ -1,2 +0,0 @@
|
|||
github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A=
|
||||
github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
112
mem/mem.go
112
mem/mem.go
|
|
@ -1,112 +0,0 @@
|
|||
package mem
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
Kilobyte uintptr = 1 << (10 * (iota + 1))
|
||||
Megabyte
|
||||
Gigabyte
|
||||
Terabyte
|
||||
)
|
||||
|
||||
// Sizeof returns the size (in bytes) of the given type.
|
||||
//
|
||||
// Not to be confused with [unsafe.Sizeof] which returns the size of a type via an expression.
|
||||
func Sizeof[T any]() uintptr {
|
||||
return unsafe.Sizeof(*(*T)(nil))
|
||||
}
|
||||
|
||||
// Alignof returns the alignment (in bytes) of the given type.
|
||||
//
|
||||
// Not to be confused with [unsafe.AlignOf] which returns the alignment of a type via an expression.
|
||||
func Alignof[T any]() uintptr {
|
||||
return unsafe.Alignof(*(*T)(nil))
|
||||
}
|
||||
|
||||
// ZeroValue returns the zero value of a given type.
|
||||
func ZeroValue[T any]() (_ T) {
|
||||
return
|
||||
}
|
||||
|
||||
// BitCast performs a bit conversion between two types of the same size.
|
||||
//
|
||||
// BitCast panics if the sizes of the types differ.
|
||||
func BitCast[TOut any, TIn any](value *TIn) TOut {
|
||||
if Sizeof[TOut]() != Sizeof[TIn]() {
|
||||
panic("bitcast: sizes of types must match")
|
||||
}
|
||||
return *((*TOut)(unsafe.Pointer(value)))
|
||||
}
|
||||
|
||||
// BitCastValue performs a bit conversion between two types of the same size.
|
||||
//
|
||||
// BitCastValue panics if the sizes of the types differ.
|
||||
func BitCastValue[TOut any, TIn any](value TIn) TOut {
|
||||
if Sizeof[TOut]() != Sizeof[TIn]() {
|
||||
panic("bitcast: sizes of types must match")
|
||||
}
|
||||
return *((*TOut)(unsafe.Pointer(&value)))
|
||||
}
|
||||
|
||||
// UnsafeCast performs a bit conversion between two types without checking if their sizes match.
|
||||
func UnsafeCast[TOut any, TIn any](value *TIn) TOut {
|
||||
return *((*TOut)(unsafe.Pointer(value)))
|
||||
}
|
||||
|
||||
// UnsafeCastValue performs a bit conversion between two types without checking if their sizes match.
|
||||
func UnsafeCastValue[TOut any, TIn any](value TIn) TOut {
|
||||
return *((*TOut)(unsafe.Pointer(&value)))
|
||||
}
|
||||
|
||||
// Copy copies size number of bytes from src into dst.
|
||||
//
|
||||
// Returns dst.
|
||||
func Copy(dst, src unsafe.Pointer, size uintptr) unsafe.Pointer {
|
||||
copy(unsafe.Slice((*byte)(dst), size), unsafe.Slice((*byte)(src), size))
|
||||
return dst
|
||||
}
|
||||
|
||||
// Clear overwrites 'count' number of bytes in 'dst' with a particular value.
|
||||
//
|
||||
// Returns dst.
|
||||
func Clear(dst unsafe.Pointer, value byte, count uintptr) unsafe.Pointer {
|
||||
b := (*byte)(dst)
|
||||
for range count { // @todo: loop unroll/maybe use asm?
|
||||
*b = value
|
||||
b = (*byte)(unsafe.Add(dst, 1))
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Zero overwrites 'count' number of bytes in 'dst' with zeros.
|
||||
//
|
||||
// Returns dst.
|
||||
func Zero(dst unsafe.Pointer, count uintptr) unsafe.Pointer {
|
||||
return Clear(dst, 0, count)
|
||||
}
|
||||
|
||||
// AlignForward returns an address align to the next power-of-two alignment.
|
||||
func AlignForward(address uintptr, alignment uintptr) uintptr {
|
||||
if alignment == 0 || (alignment&(alignment-1)) != 0 {
|
||||
panic("alignforward: alignment must be a power of two")
|
||||
}
|
||||
return (address + alignment - 1) &^ (alignment - 1)
|
||||
}
|
||||
|
||||
// AlignBackward returns an address align to the previous power-of-two alignment.
|
||||
func AlignBackward(address uintptr, alignment uintptr) uintptr {
|
||||
if alignment == 0 || (alignment&(alignment-1)) != 0 {
|
||||
panic("alignbackward: alignment must be a power of two")
|
||||
}
|
||||
return address &^ (alignment - 1)
|
||||
}
|
||||
|
||||
// Aligned returns if the address is aligned to the given power-of-two alignment.
|
||||
func Aligned(address uintptr, alignment uintptr) bool {
|
||||
if alignment == 0 || (alignment&(alignment-1)) != 0 {
|
||||
panic("aligned: alignment must be a power of two")
|
||||
}
|
||||
return address&(alignment-1) == 0
|
||||
}
|
||||
|
|
@ -1,26 +0,0 @@
|
|||
package mem_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"git.brut.systems/judah/xx/mem"
|
||||
)
|
||||
|
||||
func TestBitCast(t *testing.T) {
|
||||
a := uint32(0xFFFF_FFFF)
|
||||
b := mem.BitCast[float32](&a)
|
||||
c := mem.BitCast[uint32](&b)
|
||||
if a != c {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
v := uint8(0xFF)
|
||||
d := mem.BitCast[int8](&v)
|
||||
if d != -1 {
|
||||
t.Fail()
|
||||
}
|
||||
e := mem.BitCast[uint8](&d)
|
||||
if e != 255 {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
|
@ -1,78 +0,0 @@
|
|||
// Package osthread allows functions to be called on the main operating system thread.
|
||||
//
|
||||
// Usage:
|
||||
//
|
||||
// func main() {
|
||||
// osthread.Start(Entrypoint) // Initialize osthread and calls Entrypoint. Blocks until Entrypoint returns.
|
||||
// }
|
||||
//
|
||||
// func Entrypoint() {
|
||||
// osthread.Call(FuncA) // Call FuncA on the main operating system thread, block until it returns
|
||||
// // ...
|
||||
// osthread.Go(FuncB) // Schedule FuncB to be called on the main operating system thread
|
||||
// // ...
|
||||
// }
|
||||
package osthread
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// Start allows arbitrary functions to be run on the main operating system thread.
|
||||
//
|
||||
// Start must be called from the program's main function. Once called, it blocks until entrypoint returns.
|
||||
func Start(entrypoint func()) {
|
||||
defer runtime.UnlockOSThread()
|
||||
|
||||
done := make(chan any)
|
||||
|
||||
// Immediately queue entrypoint
|
||||
go func() {
|
||||
defer func() {
|
||||
done <- nil
|
||||
}()
|
||||
|
||||
entrypoint()
|
||||
}()
|
||||
|
||||
// Call functions in our queue until entrypoint returns.
|
||||
// These functions are called on the main operating system thread.
|
||||
for {
|
||||
select {
|
||||
case fn := <-queue:
|
||||
fn()
|
||||
case <-done:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Go schedules a function to be run on the main operating system thread, returning immediately.
|
||||
func Go(fn func()) {
|
||||
queue <- fn
|
||||
}
|
||||
|
||||
// Call schedules a function to be run on the main operating system thread, blocking until it returns.
|
||||
func Call(fn func()) {
|
||||
if onmainthread() {
|
||||
fn()
|
||||
} else {
|
||||
done := make(chan any)
|
||||
queue <- func() {
|
||||
defer func() {
|
||||
done <- nil
|
||||
}()
|
||||
|
||||
fn()
|
||||
}
|
||||
|
||||
<-done
|
||||
}
|
||||
}
|
||||
|
||||
var queue chan func()
|
||||
|
||||
func init() {
|
||||
runtime.LockOSThread()
|
||||
queue = make(chan func(), runtime.GOMAXPROCS(0))
|
||||
}
|
||||
|
|
@ -1,7 +0,0 @@
|
|||
//go:build !(windows || linux || darwin)
|
||||
|
||||
package osthread
|
||||
|
||||
func onmainthread() bool {
|
||||
return false
|
||||
}
|
||||
|
|
@ -1,33 +0,0 @@
|
|||
//go:build darwin
|
||||
|
||||
package osthread
|
||||
|
||||
import "github.com/ebitengine/purego/objc"
|
||||
|
||||
func onmainthread() bool {
|
||||
return objc.Send[bool](objc.ID(cls_nsthread), sel_isMainThread)
|
||||
}
|
||||
|
||||
var (
|
||||
cls_nsthread objc.Class
|
||||
cls_nsapplication objc.Class
|
||||
cls_nsapp objc.Class
|
||||
|
||||
sel_isMainThread objc.SEL
|
||||
sel_sharedApplication objc.SEL
|
||||
sel_run objc.SEL
|
||||
)
|
||||
|
||||
func init() {
|
||||
cls_nsthread = objc.GetClass("NSThread")
|
||||
cls_nsapplication = objc.GetClass("NSApplication")
|
||||
cls_nsapp = objc.GetClass("NSApp")
|
||||
|
||||
sel_isMainThread = objc.RegisterName("isMainThread")
|
||||
sel_sharedApplication = objc.RegisterName("sharedApplication")
|
||||
sel_run = objc.RegisterName("run")
|
||||
|
||||
// Just a bit of magic
|
||||
objc.ID(cls_nsapplication).Send(sel_sharedApplication)
|
||||
objc.ID(cls_nsapp).Send(sel_run)
|
||||
}
|
||||
|
|
@ -1,26 +0,0 @@
|
|||
//go:build linux
|
||||
|
||||
package osthread
|
||||
|
||||
import (
|
||||
"github.com/ebitengine/purego"
|
||||
)
|
||||
|
||||
func onmainthread() bool {
|
||||
return getpid() == gettid()
|
||||
}
|
||||
|
||||
var (
|
||||
getpid func() int32
|
||||
gettid func() int32
|
||||
)
|
||||
|
||||
func init() {
|
||||
libc, err := purego.Dlopen("libc.so.6", purego.RTLD_GLOBAL|purego.RTLD_NOW)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
purego.RegisterLibFunc(&getpid, libc, "getpid")
|
||||
purego.RegisterLibFunc(&gettid, libc, "gettid")
|
||||
}
|
||||
|
|
@ -1,25 +0,0 @@
|
|||
//go:build windows
|
||||
|
||||
package osthread
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"github.com/ebitengine/purego"
|
||||
)
|
||||
|
||||
func onmainthread() bool {
|
||||
return mainThreadId == getCurrentThreadId()
|
||||
}
|
||||
|
||||
var (
|
||||
mainThreadId int32
|
||||
getCurrentThreadId func() int32
|
||||
)
|
||||
|
||||
func init() {
|
||||
kernel32 := syscall.NewLazyDLL("kernel32.dll").Handle()
|
||||
purego.RegisterLibFunc(&getCurrentThreadId, kernel32, "GetCurrentThreadId")
|
||||
|
||||
mainThreadId = getCurrentThreadId() // init is always called on the main thread
|
||||
}
|
||||
|
|
@ -1,87 +0,0 @@
|
|||
package pointer
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"unsafe"
|
||||
|
||||
"git.brut.systems/judah/xx/mem"
|
||||
)
|
||||
|
||||
type Pinned[T any] struct {
|
||||
base unsafe.Pointer
|
||||
pinner runtime.Pinner
|
||||
}
|
||||
|
||||
func Pin[T any](ptr *T) (r Pinned[T]) {
|
||||
r.pinner.Pin(ptr)
|
||||
r.base = unsafe.Pointer(ptr)
|
||||
return
|
||||
}
|
||||
|
||||
func Cast[TOut, TIn any](p Pinned[TIn]) Pinned[TOut] {
|
||||
return Pinned[TOut]{
|
||||
base: unsafe.Pointer(p.base),
|
||||
pinner: p.pinner,
|
||||
}
|
||||
}
|
||||
|
||||
func (p Pinned[T]) Unpin() {
|
||||
p.pinner.Unpin()
|
||||
p.base = nil
|
||||
}
|
||||
|
||||
func (p Pinned[T]) Pointer() unsafe.Pointer {
|
||||
return p.base
|
||||
}
|
||||
|
||||
func (p Pinned[T]) Address() uintptr {
|
||||
return uintptr(p.base)
|
||||
}
|
||||
|
||||
func (p Pinned[T]) Nil() bool {
|
||||
return p.base == nil
|
||||
}
|
||||
|
||||
func (p Pinned[T]) Add(amount uintptr) Pinned[T] {
|
||||
return Pinned[T]{
|
||||
base: unsafe.Pointer(uintptr(p.base) + amount),
|
||||
pinner: p.pinner,
|
||||
}
|
||||
}
|
||||
|
||||
func (p Pinned[T]) Sub(amount uintptr) Pinned[T] {
|
||||
return Pinned[T]{
|
||||
base: unsafe.Pointer(uintptr(p.base) - amount),
|
||||
pinner: p.pinner,
|
||||
}
|
||||
}
|
||||
|
||||
func (p Pinned[T]) Aligned() bool {
|
||||
return mem.Aligned(uintptr(p.base), mem.Alignof[T]())
|
||||
}
|
||||
|
||||
func (p Pinned[T]) AlignForward() Pinned[T] {
|
||||
return Pinned[T]{
|
||||
base: unsafe.Pointer(mem.AlignForward(uintptr(p.base), mem.Alignof[T]())),
|
||||
pinner: p.pinner,
|
||||
}
|
||||
}
|
||||
|
||||
func (p Pinned[T]) AlignBackward() Pinned[T] {
|
||||
return Pinned[T]{
|
||||
base: unsafe.Pointer(mem.AlignBackward(uintptr(p.base), mem.Alignof[T]())),
|
||||
pinner: p.pinner,
|
||||
}
|
||||
}
|
||||
|
||||
func (p Pinned[T]) Load() T {
|
||||
return *(*T)(p.base)
|
||||
}
|
||||
|
||||
func (p Pinned[T]) Store(value T) {
|
||||
*(*T)(p.base) = value
|
||||
}
|
||||
|
||||
func (p Pinned[T]) Nth(index int) T {
|
||||
return p.Add(uintptr(index) * mem.Sizeof[T]()).Load()
|
||||
}
|
||||
|
|
@ -1,131 +0,0 @@
|
|||
package pointer_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"git.brut.systems/judah/xx/pointer"
|
||||
)
|
||||
|
||||
func TestPointer_AlignForward(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
offset uintptr
|
||||
}{
|
||||
{"align 8 bytes", 1},
|
||||
{"align 16 bytes", 3},
|
||||
{"align 32 bytes", 7},
|
||||
{"align 64 bytes", 15},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
value := int32(789)
|
||||
pinned := pointer.Pin(&value)
|
||||
defer pinned.Unpin()
|
||||
|
||||
// Add offset to misalign
|
||||
misaligned := pinned.Add(tt.offset)
|
||||
aligned := misaligned.AlignForward()
|
||||
|
||||
// Check alignment
|
||||
if !aligned.Aligned() {
|
||||
t.Errorf("Address %d is not aligned", aligned.Address())
|
||||
}
|
||||
|
||||
// Check it's forward aligned (greater or equal)
|
||||
if aligned.Address() < misaligned.Address() {
|
||||
t.Errorf("Forward aligned address %d should be >= original %d", aligned.Address(), misaligned.Address())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPointer_AlignBackward(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
offset uintptr
|
||||
}{
|
||||
{"align 8 bytes", 5},
|
||||
{"align 16 bytes", 10},
|
||||
{"align 32 bytes", 20},
|
||||
{"align 64 bytes", 40},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
value := int32(321)
|
||||
pinned := pointer.Pin(&value)
|
||||
defer pinned.Unpin()
|
||||
|
||||
// Add offset to misalign
|
||||
misaligned := pinned.Add(tt.offset)
|
||||
aligned := misaligned.AlignBackward()
|
||||
|
||||
// Check alignment
|
||||
if !aligned.Aligned() {
|
||||
t.Errorf("Address %d is not aligned", aligned.Address())
|
||||
}
|
||||
|
||||
// Check it's backward aligned (less or equal)
|
||||
if aligned.Address() > misaligned.Address() {
|
||||
t.Errorf("Backward aligned address %d should be <= original %d", aligned.Address(), misaligned.Address())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPointer_Nth(t *testing.T) {
|
||||
// Test with int32 array
|
||||
arr := []int32{10, 20, 30, 40, 50}
|
||||
pinned := pointer.Pin(&arr[0])
|
||||
defer pinned.Unpin()
|
||||
|
||||
for i := 0; i < len(arr); i++ {
|
||||
value := pinned.Nth(i)
|
||||
if value != arr[i] {
|
||||
t.Errorf("Index %d: expected %d, got %d", i, arr[i], value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPointer_NthFloat64(t *testing.T) {
|
||||
// Test with a float64 array
|
||||
arr := []float64{1.1, 2.2, 3.3, 4.4, 5.5}
|
||||
pinned := pointer.Pin(&arr[0])
|
||||
defer pinned.Unpin()
|
||||
|
||||
for i := 0; i < len(arr); i++ {
|
||||
value := pinned.Nth(i)
|
||||
if value != arr[i] {
|
||||
t.Errorf("Index %d: expected %f, got %f", i, arr[i], value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPointerArithmeticChain(t *testing.T) {
|
||||
value := int32(888)
|
||||
pinned := pointer.Pin(&value)
|
||||
defer pinned.Unpin()
|
||||
|
||||
// Test chaining operations
|
||||
result := pinned.Add(16).Add(8).Sub(4)
|
||||
expected := pinned.Address() + 16 + 8 - 4
|
||||
|
||||
if result.Address() != expected {
|
||||
t.Errorf("Expected address %d, got %d", expected, result.Address())
|
||||
}
|
||||
}
|
||||
|
||||
func TestPointer_Cast(t *testing.T) {
|
||||
value := int32(123)
|
||||
|
||||
i32 := pointer.Pin(&value)
|
||||
defer i32.Unpin()
|
||||
|
||||
f32 := pointer.Cast[float32](i32)
|
||||
f32.Store(3.14)
|
||||
|
||||
if value == 123 {
|
||||
t.Errorf("Value should have been changed")
|
||||
}
|
||||
}
|
||||
149
spmd/spmd.go
149
spmd/spmd.go
|
|
@ -1,149 +0,0 @@
|
|||
// Package spmd contains useful primitives for SPMD programs.
|
||||
//
|
||||
// Usage:
|
||||
//
|
||||
// func main() {
|
||||
// // Create N execution lanes (<= 0 for GOMAXPROCS lanes),
|
||||
// // and run 'Compute' across N lanes.
|
||||
// spmd.Run(-1, Compute)
|
||||
// }
|
||||
//
|
||||
// func Compute(lane spmd.Lane) {
|
||||
// log.Printf("Lane %d/%d is executing", lane.Index, lane.Count)
|
||||
//
|
||||
// // Execute this code on a lane locked to the main thread (aka lane.Index == 0)
|
||||
// // One lane will always be locked to the main thread
|
||||
// if lane.Main() {
|
||||
// data, err := os.ReadFile(...)
|
||||
// if err != nil {
|
||||
// panic(err)
|
||||
// }
|
||||
//
|
||||
// // Send data to all lanes ("DATA" can be any value)
|
||||
// lane.Store("DATA", string(data))
|
||||
// }
|
||||
//
|
||||
// // Wait until all lanes are at this point
|
||||
// lane.Sync()
|
||||
//
|
||||
// // Load stored data
|
||||
// data := lane.Load("DATA").(string)
|
||||
//
|
||||
// // Get lane-specific access range for data
|
||||
// lo, hi := lane.Range(len(data))
|
||||
// for i := lo; i < hi; i++ {
|
||||
// // ...
|
||||
// }
|
||||
// }
|
||||
package spmd
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"git.brut.systems/judah/xx/osthread"
|
||||
)
|
||||
|
||||
// Run will start executing the given function across N execution lanes,
|
||||
// blocking until they have all finished executing.
|
||||
//
|
||||
// If nLanes is <= 0, GOMAXPROCS will be used.
|
||||
//
|
||||
// Run must be called from the program's main function.
|
||||
func Run(nLanes int, fn func(lane Lane)) {
|
||||
if nLanes <= 0 {
|
||||
nLanes = runtime.GOMAXPROCS(0)
|
||||
}
|
||||
|
||||
osthread.Start(func() {
|
||||
s := new(state)
|
||||
s.cond.L = &s.mtx
|
||||
s.total = uint64(nLanes)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for i := range s.total {
|
||||
if i == 0 { // Lane 0 is always on the main thread
|
||||
wg.Add(1)
|
||||
osthread.Go(func() {
|
||||
fn(Lane{state: s, Index: uint32(i), Count: uint32(s.total)})
|
||||
wg.Done()
|
||||
})
|
||||
} else { // Everyone else gets scheduled like usual
|
||||
wg.Go(func() {
|
||||
fn(Lane{state: s, Index: uint32(i), Count: uint32(s.total)})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
})
|
||||
}
|
||||
|
||||
type state struct {
|
||||
mtx sync.Mutex
|
||||
cond sync.Cond
|
||||
waiting atomic.Uint64
|
||||
total uint64
|
||||
userdata sync.Map
|
||||
}
|
||||
|
||||
type Lane struct {
|
||||
state *state
|
||||
Index uint32
|
||||
Count uint32
|
||||
}
|
||||
|
||||
// Main returns if the lane is locked to the main thread.
|
||||
func (l Lane) Main() bool {
|
||||
return l.Index == 0
|
||||
}
|
||||
|
||||
// Sync pauses the current lane until all lanes are at the same sync point.
|
||||
func (l Lane) Sync() {
|
||||
l.state.mtx.Lock()
|
||||
defer l.state.mtx.Unlock()
|
||||
|
||||
if l.state.waiting.Add(1) >= l.state.total {
|
||||
l.state.waiting.Store(0)
|
||||
l.state.cond.Broadcast()
|
||||
return
|
||||
}
|
||||
|
||||
l.state.cond.Wait()
|
||||
}
|
||||
|
||||
// Store sends 'value' to all lanes.
|
||||
//
|
||||
// Store can be called concurrently.
|
||||
func (l Lane) Store(key, value any) {
|
||||
l.state.userdata.Store(key, value)
|
||||
}
|
||||
|
||||
// Load fetches a named value, returning nil if it does not exist.
|
||||
//
|
||||
// Load can be called concurrently.
|
||||
func (l Lane) Load(key any) any {
|
||||
v, ok := l.state.userdata.Load(key)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
// Range returns a lane's data range for the given length.
|
||||
func (l Lane) Range(length int) (lo, hi uint) {
|
||||
size := uint(length) / uint(l.state.total)
|
||||
rem := uint(length) % uint(l.state.total)
|
||||
|
||||
if uint(l.Index) < rem {
|
||||
lo = uint(l.Index) * (size + 1)
|
||||
hi = lo + size + 1
|
||||
} else {
|
||||
lo = uint(l.Index)*size + rem
|
||||
hi = lo + size
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
|
@ -1,24 +0,0 @@
|
|||
package testx
|
||||
|
||||
import "testing"
|
||||
|
||||
func Expect(t *testing.T, cond bool, message ...any) {
|
||||
t.Helper()
|
||||
|
||||
if !cond {
|
||||
if len(message) == 0 {
|
||||
message = append(message, "expectation failed")
|
||||
}
|
||||
|
||||
str := message[0].(string)
|
||||
t.Fatalf(str, message[1:]...)
|
||||
}
|
||||
}
|
||||
|
||||
func ShouldPanic(t *testing.T, f func()) {
|
||||
t.Helper()
|
||||
|
||||
defer func() { recover() }()
|
||||
f()
|
||||
t.Fatal("expected panic")
|
||||
}
|
||||
150
union/union.go
150
union/union.go
|
|
@ -1,150 +0,0 @@
|
|||
package union
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unsafe"
|
||||
|
||||
"git.brut.systems/judah/xx/mem"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrUninitializedAccess = errors.New("access of uninitialized union")
|
||||
ErrInvalidType = errors.New("type does not exist within union")
|
||||
)
|
||||
|
||||
// anystruct represents a struct type with any members.
|
||||
//
|
||||
// Note: because Go's type constraint system can't enforce
|
||||
// this, anystruct is here for documentation purposes.
|
||||
type anystruct any
|
||||
|
||||
// @note(judah): is there a way to declare the type parameters
|
||||
// to allow 'type Value union.Of[...]' so users can define their
|
||||
// own methods?
|
||||
|
||||
// Of represents a union of different types.
|
||||
//
|
||||
// Since members are accessed by type instead of name,
|
||||
// T is expected to be a struct of types like so:
|
||||
//
|
||||
// type Value = union.Of[struct {
|
||||
// int32
|
||||
// uint32
|
||||
// float32
|
||||
// })
|
||||
type Of[T anystruct] struct {
|
||||
typ reflect.Type
|
||||
mem []byte
|
||||
}
|
||||
|
||||
func (u Of[T]) Size() uintptr {
|
||||
return mem.Sizeof[T]()
|
||||
}
|
||||
|
||||
// String returns the string representation of a union.
|
||||
func (u Of[T]) String() string {
|
||||
var b strings.Builder
|
||||
|
||||
fmt.Fprintf(&b, "union[%s] = ", reflect.TypeFor[T]().String())
|
||||
if u.typ == nil {
|
||||
b.WriteString("none")
|
||||
} else {
|
||||
b.WriteString(u.typ.String())
|
||||
}
|
||||
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// Is returns true if the given type is currently stored in the union.
|
||||
func Is[E any, T anystruct](u Of[T]) bool {
|
||||
// Explicit invalid check to make sure invalid types don't result in false-positives.
|
||||
if u.typ == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return u.typ == reflect.TypeFor[E]()
|
||||
}
|
||||
|
||||
// Set overwrites the backing memory of a union with the given value; initializing the union if uninitialized.
|
||||
//
|
||||
// Set is unsafe and will not verify if the backing memory has enough capacity to store the value.
|
||||
// Use [SetSafe] for more safety checks.
|
||||
func Set[V any, T anystruct](u *Of[T], value V) {
|
||||
if u.mem == nil {
|
||||
u.mem = make([]byte, mem.Sizeof[T]())
|
||||
}
|
||||
|
||||
unsafe.Slice((*V)(unsafe.Pointer(&u.mem[0])), 1)[0] = value
|
||||
u.typ = reflect.TypeFor[V]()
|
||||
}
|
||||
|
||||
// SetSafe overwrites the backing memory of a union with the given value,
|
||||
// returning an error if the value cannot be stored in the union.
|
||||
//
|
||||
// Use [Set] for fewer safety checks.
|
||||
func SetSafe[V any, T anystruct](u *Of[T], value V) error {
|
||||
if u.mem == nil {
|
||||
u.mem = make([]byte, mem.Sizeof[T]())
|
||||
}
|
||||
|
||||
vt := reflect.TypeFor[V]()
|
||||
for _, field := range getInternalFields(*u) {
|
||||
if field.Type == vt {
|
||||
unsafe.Slice((*V)(unsafe.Pointer(&u.mem[0])), 1)[0] = value
|
||||
u.typ = reflect.TypeFor[V]()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("%s - %w", vt, ErrInvalidType)
|
||||
}
|
||||
|
||||
// Get returns the union's backing memory interpreted as a value of type V, panicking if the union is uninitialized.
|
||||
//
|
||||
// Get is unsafe and will not verify if the type exists within the union.
|
||||
// Use [GetSafe] for more safety checks.
|
||||
func Get[V any, T anystruct](u Of[T]) V {
|
||||
if u.mem == nil {
|
||||
panic(ErrUninitializedAccess)
|
||||
}
|
||||
|
||||
return unsafe.Slice((*V)(unsafe.Pointer(&u.mem[0])), 1)[0]
|
||||
}
|
||||
|
||||
// GetSafe returns the union's backing memory interpreted as a value of type V, returning an error if the type
|
||||
// does not exist within the union or the union is uninitialized.
|
||||
//
|
||||
// Use [Get] for fewer safety checks.
|
||||
func GetSafe[V any, T anystruct](u Of[T]) (V, error) {
|
||||
if u.mem == nil {
|
||||
return mem.ZeroValue[V](), ErrUninitializedAccess
|
||||
}
|
||||
|
||||
vt := reflect.TypeFor[V]()
|
||||
for _, field := range getInternalFields(u) {
|
||||
if field.Type == vt {
|
||||
return unsafe.Slice((*V)(unsafe.Pointer(&u.mem[0])), 1)[0], nil
|
||||
}
|
||||
}
|
||||
|
||||
return mem.ZeroValue[V](), ErrInvalidType
|
||||
}
|
||||
|
||||
// getInternalFields returns an array of reflect.StructField belonging
|
||||
// to the internal type of a union.
|
||||
func getInternalFields[U Of[T], T anystruct](_ U) []reflect.StructField {
|
||||
backing := reflect.TypeFor[T]()
|
||||
if backing.Kind() != reflect.Struct {
|
||||
return nil
|
||||
}
|
||||
|
||||
var fields []reflect.StructField
|
||||
for i := range backing.NumField() {
|
||||
fields = append(fields, backing.Field(i))
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
|
|
@ -1,192 +0,0 @@
|
|||
package union_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"git.brut.systems/judah/xx/union"
|
||||
)
|
||||
|
||||
func TestUnion_BasicGetSet(t *testing.T) {
|
||||
type Numbers = union.Of[struct {
|
||||
uint8
|
||||
bool
|
||||
}]
|
||||
|
||||
var num Numbers
|
||||
union.Set[uint8](&num, 1)
|
||||
|
||||
b := union.Get[bool](num)
|
||||
if !b {
|
||||
t.Errorf("expected bool value to be true, was %v", b)
|
||||
}
|
||||
|
||||
union.Set(&num, false)
|
||||
|
||||
i := union.Get[uint8](num)
|
||||
if i != 0 {
|
||||
t.Errorf("expected uint8 value to be 0, was %v", i)
|
||||
}
|
||||
}
|
||||
|
||||
type (
|
||||
expr = union.Of[struct {
|
||||
binaryExpr
|
||||
intExpr
|
||||
floatExpr
|
||||
}]
|
||||
binaryExpr struct {
|
||||
Op string
|
||||
Lhs expr
|
||||
Rhs expr
|
||||
}
|
||||
intExpr int64
|
||||
floatExpr float64
|
||||
)
|
||||
|
||||
func TestUnion_OfStructs(t *testing.T) {
|
||||
makeInt := func(value int64) (e expr) {
|
||||
union.Set(&e, intExpr(value))
|
||||
return
|
||||
}
|
||||
makeFloat := func(value float64) (e expr) {
|
||||
union.Set(&e, floatExpr(value))
|
||||
return
|
||||
}
|
||||
makeBinop := func(op string, lhs, rhs expr) (e expr) {
|
||||
union.Set(&e, binaryExpr{
|
||||
Op: op,
|
||||
Lhs: lhs,
|
||||
Rhs: rhs,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
expr1 := makeBinop("+", makeInt(10), makeInt(20))
|
||||
bin1 := union.Get[binaryExpr](expr1)
|
||||
if bin1.Op != "+" {
|
||||
t.Errorf("incorrect op returned from union: %s", bin1.Op)
|
||||
}
|
||||
if lhs := union.Get[intExpr](bin1.Lhs); lhs != 10 {
|
||||
t.Errorf("incorrect lhs returned from union: %v", lhs)
|
||||
}
|
||||
if rhs := union.Get[intExpr](bin1.Rhs); rhs != 20 {
|
||||
t.Errorf("incorrect rhs returned from union: %v", rhs)
|
||||
}
|
||||
|
||||
expr2 := makeBinop("-", expr1, makeFloat(3.14))
|
||||
bin2 := union.Get[binaryExpr](expr2)
|
||||
if bin2.Op != "-" {
|
||||
t.Errorf("incorrect op returned from union of union: %s", bin2.Op)
|
||||
}
|
||||
if lhs := union.Get[binaryExpr](bin2.Lhs); lhs.Op != "+" {
|
||||
t.Errorf("incorrect lhs returned from union of union: %v", lhs)
|
||||
}
|
||||
if rhs := union.Get[floatExpr](bin2.Rhs); rhs != 3.14 {
|
||||
t.Errorf("incorrect rhs returned from union of union: %v", rhs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnion_OfPointers(t *testing.T) {
|
||||
type Value = union.Of[struct {
|
||||
*float64
|
||||
*uint64
|
||||
}]
|
||||
|
||||
var (
|
||||
original uint64 = 100
|
||||
value Value
|
||||
)
|
||||
|
||||
if union.Is[*uint64](value) || union.Is[*float64](value) {
|
||||
t.Error("union internal type was incorrect before usage")
|
||||
}
|
||||
|
||||
union.Set(&value, &original)
|
||||
|
||||
if !union.Is[*uint64](value) {
|
||||
t.Error("union internal type was incorrect after Set")
|
||||
}
|
||||
|
||||
fptr := union.Get[*float64](value)
|
||||
*fptr = 3.14
|
||||
|
||||
if original == 100 {
|
||||
t.Error("original value did not change")
|
||||
}
|
||||
|
||||
uptr := union.Get[*uint64](value)
|
||||
*uptr = 200
|
||||
|
||||
if *fptr == 3.14 {
|
||||
t.Error("float pointer value did not change after modification")
|
||||
}
|
||||
|
||||
if original != 200 {
|
||||
t.Errorf("original value was incorrect: %v", original)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnion_ToString(t *testing.T) {
|
||||
type (
|
||||
Struct = union.Of[struct {
|
||||
int32
|
||||
uint32
|
||||
}]
|
||||
Interface = union.Of[interface {
|
||||
Int()
|
||||
Bool()
|
||||
}]
|
||||
Bool = union.Of[bool]
|
||||
)
|
||||
|
||||
var (
|
||||
s Struct
|
||||
i Interface
|
||||
b Bool
|
||||
)
|
||||
|
||||
if s.String() != "union[none] { int32; uint32 }" {
|
||||
t.Errorf("valid union had invalid stringification: %s", s.String())
|
||||
}
|
||||
|
||||
if i.String() != b.String() {
|
||||
t.Errorf("invalid union had invalid stringification: %s, %s", i.String(), b.String())
|
||||
}
|
||||
|
||||
union.Set[int32](&s, 10)
|
||||
|
||||
if s.String() != "union[int32] { int32; uint32 }" {
|
||||
t.Errorf("valid union had invalid stringification after Set: %s", s.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnion_SafeUsage(t *testing.T) {
|
||||
type Value = union.Of[struct {
|
||||
int32
|
||||
uint32
|
||||
float32
|
||||
}]
|
||||
|
||||
var v Value
|
||||
if _, err := union.GetSafe[int32](v); err == nil {
|
||||
t.Errorf("GetSafe did not error for an uninitialized union")
|
||||
}
|
||||
|
||||
if err := union.SetSafe(&v, false); err == nil {
|
||||
t.Error("SetSafe allowed invalid type")
|
||||
}
|
||||
|
||||
if err := union.SetSafe[int32](&v, 10); err != nil {
|
||||
t.Errorf("SetSafe failed with valid type: %s", err)
|
||||
}
|
||||
|
||||
if _, err := union.GetSafe[bool](v); err == nil {
|
||||
t.Errorf("GetSafe allowed invalid type")
|
||||
}
|
||||
|
||||
if v, err := union.GetSafe[int32](v); err != nil {
|
||||
t.Errorf("GetSafe failed with valid type: %s", err)
|
||||
} else if v != 10 {
|
||||
t.Errorf("GetSafe returned invalid value: %v", v)
|
||||
}
|
||||
}
|
||||
49
utils.go
Normal file
49
utils.go
Normal file
|
|
@ -0,0 +1,49 @@
|
|||
package xx
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// New returns a newly allocated value with an initial value.
|
||||
func New[T any](expr T) *T {
|
||||
p := new(T)
|
||||
*p = expr
|
||||
return p
|
||||
}
|
||||
|
||||
// Bitcast reinterprets the bits of value as a value of type TOut.
//
// Bitcast panics if the sizes of the two types differ.
func Bitcast[TOut any, TIn any](value TIn) TOut {
	var out TOut
	// The reinterpretation is only meaningful when both types occupy the
	// same number of bytes.
	if unsafe.Sizeof(out) != unsafe.Sizeof(value) {
		panic("bitcast: sizes of types must match")
	}
	out = *(*TOut)(unsafe.Pointer(&value))
	return out
}
|
||||
|
||||
// Copy copies SizeOf[TDst] bytes from src into dst, reinterpreting the
// leading bytes of *src as a TDst value.
// Returns dst.
//
// Copy panics if the size of TSrc is smaller than the size of TDst (the
// copy would otherwise read past the end of *src).
func Copy[TDst any, TSrc any](dst *TDst, src *TSrc) *TDst {
	if SizeOf[TSrc]() < SizeOf[TDst]() {
		panic("copy: size of src must be >= dst")
	}
	// Exactly SizeOf[TDst] bytes are transferred, regardless of src's size.
	MemCopy(unsafe.Pointer(dst), unsafe.Pointer(src), SizeOf[TDst]())
	return dst
}
|
||||
|
||||
// MemCopy copies size bytes from src into dst.
// Returns dst.
func MemCopy(dst, src unsafe.Pointer, size uintptr) unsafe.Pointer {
	// View both raw pointers as byte slices of the requested length so the
	// built-in copy can do the transfer.
	dstBytes := unsafe.Slice((*byte)(dst), size)
	srcBytes := unsafe.Slice((*byte)(src), size)
	copy(dstBytes, srcBytes)
	return dst
}
|
||||
|
||||
// SizeOf reports the size in bytes of the type T.
//
// Unlike [unsafe.Sizeof], which operates on an expression, SizeOf is
// parameterized directly by the type.
func SizeOf[T any]() uintptr {
	// *new(T) yields a zero value of T without naming a variable.
	return unsafe.Sizeof(*new(T))
}
|
||||
62
utils_test.go
Normal file
62
utils_test.go
Normal file
|
|
@ -0,0 +1,62 @@
|
|||
package xx_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"unsafe"
|
||||
|
||||
"git.brut.systems/judah/xx"
|
||||
)
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
a := xx.New(uint32(1024))
|
||||
if *a != 1024 {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if unsafe.Sizeof(*a) != xx.SizeOf[uint32]() {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
b := xx.New(struct{ x, y, z float32 }{10, 20, 30})
|
||||
if b.x != 10 {
|
||||
t.Fail()
|
||||
}
|
||||
if b.y != 20 {
|
||||
t.Fail()
|
||||
}
|
||||
if b.z != 30 {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
c := xx.New(b)
|
||||
if c == &b {
|
||||
t.Fail()
|
||||
}
|
||||
if (*c).x != 10 {
|
||||
t.Fail()
|
||||
}
|
||||
if (*c).y != 20 {
|
||||
t.Fail()
|
||||
}
|
||||
if (*c).z != 30 {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func TestBitcast(t *testing.T) {
|
||||
a := uint32(0xFFFF_FFFF)
|
||||
b := xx.Bitcast[float32](a)
|
||||
c := xx.Bitcast[uint32](b)
|
||||
if a != c {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
d := xx.Bitcast[int8](uint8(0xFF))
|
||||
if d != -1 {
|
||||
t.Fail()
|
||||
}
|
||||
e := xx.Bitcast[uint8](d)
|
||||
if e != 255 {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
60
xx.go
60
xx.go
|
|
@ -1,60 +0,0 @@
|
|||
package xx
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"strings"
|
||||
"unsafe"
|
||||
|
||||
"git.brut.systems/judah/xx/mem"
|
||||
)
|
||||
|
||||
// Copy copies mem.Sizeof[TDst] bytes from src into dst, reinterpreting the
// leading bytes of *src as a TDst value.
// Returns dst.
//
// Copy panics if the size of TSrc is smaller than the size of TDst (the
// copy would otherwise read past the end of *src).
func Copy[TDst any, TSrc any](dst *TDst, src *TSrc) *TDst {
	if mem.Sizeof[TSrc]() < mem.Sizeof[TDst]() {
		panic("copy: size of src must be >= dst")
	}
	// Exactly mem.Sizeof[TDst] bytes are transferred, regardless of src's size.
	mem.Copy(unsafe.Pointer(dst), unsafe.Pointer(src), mem.Sizeof[TDst]())
	return dst
}
|
||||
|
||||
// Clone returns a newly allocated shallow copy of the given value.
|
||||
func Clone[T any](value *T) *T {
|
||||
return Copy(new(T), value)
|
||||
}
|
||||
|
||||
// BoolUint converts a boolean to an integer.
|
||||
func BoolUint(b bool) uint {
|
||||
return uint(*(*uint8)(unsafe.Pointer(&b)))
|
||||
}
|
||||
|
||||
// CallerLocation returns the source file and line of the call site of the
// function in which CallerLocation is invoked (i.e. the caller's caller of
// this function).
func CallerLocation() (file string, line int) {
	// Skip two frames: CallerLocation itself and the function that called
	// it, landing on that function's own caller.
	_, file, line, _ = runtime.Caller(2)

	// @todo: I'm sure there's a better way to do this
	// Special-case when CallerLocation is called from main: frame 2 is then
	// the runtime's startup code (runtime/proc.go), so fall back one frame
	// to report a location inside user code.
	if strings.Contains(file, "runtime") && strings.Contains(file, "proc.go") {
		_, file, line, _ = runtime.Caller(1)
	}

	return
}
|
||||
|
||||
// HashLocation returns a hash of file and line, most likely returned from [CallerLocation].
|
||||
func HashLocation(file string, line int) uint64 {
|
||||
const (
|
||||
FNV64_PRIME uint64 = 0x100000001B3
|
||||
FNV64_BIAS uint64 = 0xCBF29CE484222325
|
||||
)
|
||||
|
||||
h := FNV64_BIAS
|
||||
for _, c := range file {
|
||||
h = (h ^ uint64(c)) * FNV64_PRIME
|
||||
}
|
||||
|
||||
h = (h ^ uint64(line)) * FNV64_PRIME
|
||||
return h
|
||||
}
|
||||
Loading…
Reference in a new issue