Compare commits
No commits in common. "master" and "main" have entirely different histories.
32 changed files with 112 additions and 2412 deletions
2
.gitignore
vendored
2
.gitignore
vendored
|
|
@ -1,2 +0,0 @@
|
|||
.DS_Store
|
||||
.idea/
|
||||
3
README
3
README
|
|
@ -1,3 +0,0 @@
|
|||
XX
|
||||
|
||||
Random experiments in Go.
|
||||
|
|
@ -1,91 +0,0 @@
|
|||
package arena
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
|
||||
"git.brut.systems/judah/xx/mem"
|
||||
)
|
||||
|
||||
// New returns a pointer to an Arena allocated value of type T.
|
||||
// If allocation fails, New will panic.
|
||||
//
|
||||
// Note: Accessing the returned value after calling Reset is unsafe and may result in a fault.
|
||||
func New[T any](arena Arena) *T {
|
||||
ptr, err := arena(ACTION_ALLOC, mem.Sizeof[T](), mem.Alignof[T](), nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return (*T)(ptr)
|
||||
}
|
||||
|
||||
// MakeSlice creates an Arena allocated []T with the given capacity and length.
|
||||
// If allocation fails, MakeSlice will panic.
|
||||
//
|
||||
// Note: Accessing the returned slice after calling Reset is unsafe and may result in a fault.
|
||||
func MakeSlice[T any](arena Arena, len, cap int) []T {
|
||||
ptr, err := arena(ACTION_ALLOC, mem.Sizeof[T]()*uintptr(cap), mem.Alignof[T](), nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if len == -1 {
|
||||
len = cap
|
||||
}
|
||||
|
||||
return unsafe.Slice((*T)(ptr), cap)[:len]
|
||||
}
|
||||
|
||||
// Reset restores an Arena to its initial state.
|
||||
//
|
||||
// Note: Accessing memory returned by an Arena after calling Reset is unsafe and may result in a fault.
|
||||
func Reset(arena Arena) {
|
||||
if _, err := arena(ACTION_RESET, 0, 0, nil); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Save returns the restorable state of an Arena.
|
||||
// The returned value is internal to the particular Arena and should not be modified.
|
||||
func Save(arena Arena) (watermark uintptr) {
|
||||
if _, err := arena(ACTION_SAVE, 0, 0, &watermark); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Restore restores an Arena to a previously saved state.
|
||||
func Restore(arena Arena, watermark uintptr) {
|
||||
if _, err := arena(ACTION_RESTORE, 0, 0, &watermark); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Arena represents a memory allocator.
//
// An Arena is a single procedure dispatched on Action: ACTION_ALLOC
// allocates size bytes with the given alignment and returns a pointer
// to them; the other actions communicate positions through watermark.
// Actions that do not produce memory return a nil pointer.
type Arena func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error)

// Action is a list of distinct events an Arena may respond to.
type Action int

// NOTE(review): ALL_CAPS constant names are unconventional Go (MixedCaps
// is idiomatic), but renaming them would break every caller, so they are
// kept as-is.
const (
	ACTION_ALLOC   Action = iota // allocate size bytes with the given alignment
	ACTION_RESET                 // restore the arena to its initial state
	ACTION_SAVE                  // store the current position into *watermark
	ACTION_RESTORE               // roll back to the position in *watermark
)
|
||||
|
||||
func (a Action) String() string {
|
||||
switch a {
|
||||
case ACTION_ALLOC:
|
||||
return "ALLOC"
|
||||
case ACTION_RESET:
|
||||
return "RESET"
|
||||
case ACTION_SAVE:
|
||||
return "SAVE"
|
||||
case ACTION_RESTORE:
|
||||
return "RESTORE"
|
||||
default:
|
||||
panic("unreachable")
|
||||
}
|
||||
}
|
||||
|
|
@ -1,30 +0,0 @@
|
|||
package arena_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"git.brut.systems/judah/xx/arena"
|
||||
"git.brut.systems/judah/xx/mem"
|
||||
"git.brut.systems/judah/xx/testx"
|
||||
)
|
||||
|
||||
// TestMakeSlice verifies MakeSlice's len/cap handling, that appends within
// capacity stay in arena memory, and that growing past capacity reallocates
// outside the arena.
func TestMakeSlice(t *testing.T) {
	a := arena.Linear(1024 * mem.Kilobyte)
	defer arena.Reset(a)

	s := arena.MakeSlice[int](a, 99, 100)
	testx.Expect(t, len(s) == 99, "len = %d, expected 99", len(s))
	testx.Expect(t, cap(s) == 100, "cap = %d, expected 100", cap(s))

	p := &s[0]

	s[2] = 0xCAFE_DECAF
	// Appending within capacity must not move the backing memory.
	s = append(s, 2)

	testx.Expect(t, p == &s[0], "p = %p, expected %p", p, &s[0])

	p = &s[0]
	s = append(s, 3) // cause a reallocation

	// The reallocated slice lives outside the arena, so the base moves.
	testx.Expect(t, p != &s[0], "p = %p, expected %p", p, &s[0])
}
|
||||
365
arena/arenas.go
365
arena/arenas.go
|
|
@ -1,365 +0,0 @@
|
|||
package arena
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/bits"
|
||||
"runtime"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"git.brut.systems/judah/xx/mem"
|
||||
)
|
||||
|
||||
// Fixed is a simple bump allocator that uses the given buffer.
//
// Fixed will NOT resize when it runs out of memory.
func Fixed(data []byte) Arena {
	if len(data) == 0 || len(data) != cap(data) {
		panic("fixed: length & capacity must be equal and greater than zero")
	}

	// offset is the bump pointer: the index of the first free byte in data.
	var offset uintptr
	return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
		switch a {
		case ACTION_ALLOC:
			// NOTE(review): this rounds the *size* up to a multiple of
			// align rather than aligning the current offset. When
			// allocations with different alignments are mixed (e.g. one
			// byte followed by an 8-byte-aligned value), the returned
			// pointer can be misaligned — confirm whether callers only
			// mix same-alignment types, or whether the offset should be
			// aligned forward instead.
			aligned := mem.AlignForward(size, align)
			if offset+aligned > uintptr(cap(data)) {
				return nil, errors.New("fixed: out of memory")
			}

			ptr := &data[offset]
			offset += aligned
			return unsafe.Pointer(ptr), nil
		case ACTION_RESET:
			// Zero the whole buffer so stale pointers read zeros, not garbage.
			clear(data)
			offset = 0
		case ACTION_SAVE:
			if watermark == nil {
				return nil, errors.New("fixed: cannot save to nil watermark")
			}

			*watermark = offset
		case ACTION_RESTORE:
			if watermark == nil {
				return nil, errors.New("fixed: cannot restore nil watermark")
			}

			// Zero only the span being released by the rollback.
			clear(data[*watermark:offset])
			offset = *watermark
		default:
			panic("fixed: unimplemented action - " + a.String())
		}

		return nil, nil
	}
}
|
||||
|
||||
// Linear is a simple bump allocator with a fixed amount of backing memory.
|
||||
func Linear(capacity_in_bytes uintptr) Arena {
|
||||
return Fixed(make([]byte, capacity_in_bytes))
|
||||
}
|
||||
|
||||
// Ring is an Arena that only allocates values of the given type.
|
||||
// When capacity is exceeded, previous allocations will be reused to accommodate new ones
|
||||
//
|
||||
// Note: Allocating different types from the same Pool is unsafe and may cause memory corruption.
|
||||
func Ring[T any](capacity uintptr) Arena {
|
||||
if capacity <= 0 {
|
||||
panic("pool: capacity must be greater than zero")
|
||||
}
|
||||
|
||||
pointers := make([]T, 0, capacity)
|
||||
return func(a Action, _, _ uintptr, watermark *uintptr) (unsafe.Pointer, error) {
|
||||
switch a {
|
||||
case ACTION_ALLOC:
|
||||
if len(pointers) == cap(pointers) {
|
||||
pointers = pointers[:0]
|
||||
}
|
||||
|
||||
pointers = append(pointers, mem.ZeroValue[T]())
|
||||
return unsafe.Pointer(&pointers[len(pointers)-1]), nil
|
||||
case ACTION_RESET:
|
||||
clear(pointers)
|
||||
pointers = pointers[:0]
|
||||
case ACTION_SAVE:
|
||||
if watermark == nil {
|
||||
return nil, errors.New("pool: cannot save to nil watermark")
|
||||
}
|
||||
|
||||
*watermark = uintptr(len(pointers))
|
||||
case ACTION_RESTORE:
|
||||
if watermark == nil {
|
||||
return nil, errors.New("pool: cannot restore nil watermark")
|
||||
}
|
||||
|
||||
clear(pointers[*watermark:])
|
||||
pointers = pointers[:*watermark]
|
||||
default:
|
||||
panic("pool: unimplemented action - " + a.String())
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Chunked is an Arena that groups allocations by size.
//
// Each allocation is rounded up to a power of two and served from a
// size-class-specific list of fixed-size chunks, so all allocations in
// a chunk have the same (rounded) size.
func Chunked(max_allocs_per_chunk uintptr) Arena {
	type chunk struct {
		data   []byte  // backing storage for this chunk
		offset uintptr // bump pointer within data
		saved  uintptr // offset captured by the last ACTION_SAVE
	}

	// groups[i] holds the chunks serving allocations of exactly 1<<i bytes.
	groups := make([][]chunk, 64)
	return func(a Action, size, align uintptr, _ *uintptr) (unsafe.Pointer, error) {
		switch a {
		case ACTION_ALLOC:
			// Round the aligned size up to the next power of two to pick
			// a size class; zero-sized allocations use the 1-byte class.
			aligned := mem.AlignForward(size, align)
			if aligned == 0 {
				aligned = 1
			}
			aligned = 1 << bits.Len(uint(aligned-1))

			// The class index is log2 of the rounded size; grow the group
			// table on demand for very large classes.
			idx := bits.TrailingZeros(uint(aligned))
			if idx >= len(groups) {
				groups = append(groups, make([][]chunk, idx-len(groups)+1)...)
			}

			// Allocate from the newest chunk in this class, creating the
			// first chunk on demand.
			group := groups[idx]
			if len(group) == 0 {
				group = append(group, chunk{
					data: make([]byte, aligned*max_allocs_per_chunk),
				})
			}

			c := &group[len(group)-1]
			if c.offset+aligned > uintptr(len(c.data)) {
				// Chunk is full; start a new one. Old chunks keep their
				// storage, so previously returned pointers stay valid.
				group = append(group, chunk{
					data: make([]byte, aligned*max_allocs_per_chunk),
				})

				c = &group[len(group)-1]
			}

			ptr := &c.data[c.offset]
			c.offset += aligned
			groups[idx] = group

			return unsafe.Pointer(ptr), nil
		case ACTION_RESET:
			// Keep the chunks (and their capacity) but zero and rewind them.
			for _, g := range groups {
				for i := range len(g) {
					g[i].offset = 0
					g[i].saved = 0
					clear(g[i].data)
				}
			}
		case ACTION_SAVE:
			// NOTE(review): SAVE/RESTORE ignore the watermark parameter
			// and keep a single saved position per chunk, so saves do not
			// nest — confirm this matches how Save/Restore are meant to
			// compose with this arena.
			for _, g := range groups {
				for i := range len(g) {
					g[i].saved = g[i].offset
				}
			}
		case ACTION_RESTORE:
			// NOTE(review): unlike Fixed, RESTORE does not zero the
			// released span — restored-over memory keeps its old contents.
			for _, g := range groups {
				for i := range len(g) {
					g[i].offset = g[i].saved
				}
			}
		default:
			panic("chunked: unimplemented action - " + a.String())
		}

		return nil, nil
	}
}
|
||||
|
||||
// Paged is a linear arena that allocates pages of virtual memory.
|
||||
// The memory allocated is only committed to physical memory as it is used,
|
||||
// so total_reserved_in_bytes should is the total amount of addressable memory to reserve.
|
||||
//
|
||||
// Note: resetting a Paged arena will cause the currently commited memory to be decommited (i.e. unmapped from physical memory).
|
||||
func Paged(page_size, total_reserved_in_bytes uintptr) Arena {
|
||||
var (
|
||||
committed uintptr
|
||||
offset uintptr
|
||||
)
|
||||
|
||||
// @note(judah): Because, runtime.AddCleanup requires a Go-allocated
|
||||
// pointer, we need to keep *something* around so we don't leak.
|
||||
type cleanupwrapper struct {
|
||||
data []byte
|
||||
}
|
||||
|
||||
data, err := mem.Reserve(total_reserved_in_bytes)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("paged: failed to reserve address space - %s", err))
|
||||
}
|
||||
|
||||
base := new(cleanupwrapper{data})
|
||||
|
||||
// @todo(judah): is this needed?
|
||||
runtime.AddCleanup(base, func(memory []byte) {
|
||||
if err := mem.Release(memory); err != nil {
|
||||
panic(fmt.Sprintf("paged: failed to release memory - %s", err))
|
||||
}
|
||||
}, data)
|
||||
|
||||
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
|
||||
switch a {
|
||||
case ACTION_ALLOC:
|
||||
aligned := mem.AlignForward(size, align)
|
||||
if offset+aligned > total_reserved_in_bytes {
|
||||
return nil, errors.New("paged: out of addressable memory")
|
||||
}
|
||||
|
||||
if offset+aligned > committed {
|
||||
required := offset + aligned
|
||||
to_commit := mem.AlignForward(required, page_size)
|
||||
|
||||
if err := mem.Commit(base.data[committed:to_commit], mem.AccessRead|mem.AccessWrite); err != nil {
|
||||
return nil, fmt.Errorf("paged: failed to commit memory - %w", err)
|
||||
}
|
||||
|
||||
committed = to_commit
|
||||
}
|
||||
|
||||
ptr := &base.data[offset]
|
||||
offset += aligned
|
||||
return unsafe.Pointer(ptr), nil
|
||||
|
||||
case ACTION_RESET:
|
||||
if committed > 0 {
|
||||
if err := mem.Decommit(base.data[:mem.AlignForward(committed, page_size)]); err != nil {
|
||||
return nil, fmt.Errorf("paged: failed to decommit memory - %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
offset = 0
|
||||
committed = 0
|
||||
return nil, nil
|
||||
|
||||
// @todo(judah): should save/restore also decommit memory?
|
||||
|
||||
case ACTION_SAVE:
|
||||
if watermark == nil {
|
||||
return nil, errors.New("paged: cannot save to nil watermark")
|
||||
}
|
||||
|
||||
*watermark = offset
|
||||
return nil, nil
|
||||
|
||||
case ACTION_RESTORE:
|
||||
if watermark == nil {
|
||||
return nil, errors.New("paged: cannot restore nil watermark")
|
||||
}
|
||||
|
||||
clear(base.data[*watermark:offset])
|
||||
offset = *watermark
|
||||
|
||||
return nil, nil
|
||||
|
||||
default:
|
||||
panic("paged: unimplemented action - " + a.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Nil is an Arena that always returns an error.
|
||||
//
|
||||
// Note: This is useful for tracking usage locations
|
||||
func Nil() Arena {
|
||||
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
|
||||
return nil, errors.New("use of nil allocator")
|
||||
}
|
||||
}
|
||||
|
||||
// Region wraps an Arena, restoring it to its previous state when Reset is called.
|
||||
func Region(arena Arena) Arena {
|
||||
watermark := Save(arena)
|
||||
return func(a Action, size, align uintptr, wm *uintptr) (unsafe.Pointer, error) {
|
||||
if a == ACTION_RESET {
|
||||
Restore(arena, watermark)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return arena(a, size, align, wm)
|
||||
}
|
||||
}
|
||||
|
||||
// Split wraps two [[Arena]]s, dispatching allocations to a particular one based on the requested size.
|
||||
func Split(split_size uintptr, smaller, larger Arena) Arena {
|
||||
var watermarks [2]uintptr
|
||||
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
|
||||
switch a {
|
||||
case ACTION_ALLOC:
|
||||
if size <= split_size {
|
||||
return smaller(a, size, align, watermark)
|
||||
}
|
||||
return larger(a, size, align, watermark)
|
||||
case ACTION_RESET:
|
||||
Reset(smaller)
|
||||
Reset(larger)
|
||||
case ACTION_SAVE:
|
||||
watermarks[0] = Save(smaller)
|
||||
watermarks[1] = Save(larger)
|
||||
case ACTION_RESTORE:
|
||||
Restore(smaller, watermarks[0])
|
||||
Restore(larger, watermarks[1])
|
||||
default:
|
||||
panic("split: unimplemented action - " + a.String())
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Logger wraps an Arena, logging its usage locations.
|
||||
func Logger(arena Arena) Arena {
|
||||
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
|
||||
// We expect allocators to be used via the high-level API, so we grab the caller location relative to that.
|
||||
// @todo(judah): can we determine this dynamically?
|
||||
_, file, line, ok := runtime.Caller(2)
|
||||
if !ok {
|
||||
file = "<unknown>"
|
||||
line = 0
|
||||
}
|
||||
|
||||
log.Printf("%s:%d - %s (size: %d, align: %d, watermark: %p)", file, line, a, size, align, watermark)
|
||||
return arena(a, size, align, watermark)
|
||||
}
|
||||
}
|
||||
|
||||
// Concurrent wraps an Arena, ensuring it is safe for concurrent use.
|
||||
func Concurrent(arena Arena) Arena {
|
||||
mtx := new(sync.Mutex)
|
||||
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
|
||||
mtx.Lock()
|
||||
ptr, err := arena(a, size, align, watermark)
|
||||
mtx.Unlock()
|
||||
return ptr, err
|
||||
}
|
||||
}
|
||||
|
||||
// Pinned wraps an Arena, ensuring the memory returned is stable until Reset is called.
|
||||
//
|
||||
// The memory returned by Pinned is safe to pass over cgo boundaries.
|
||||
func Pinned(arena Arena) Arena {
|
||||
var pinner runtime.Pinner
|
||||
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
|
||||
ptr, err := arena(a, size, align, watermark)
|
||||
if err != nil {
|
||||
return ptr, err
|
||||
}
|
||||
|
||||
if a == ACTION_RESET {
|
||||
pinner.Unpin()
|
||||
} else {
|
||||
pinner.Pin(ptr)
|
||||
}
|
||||
|
||||
return ptr, err
|
||||
}
|
||||
}
|
||||
|
|
@ -1,279 +0,0 @@
|
|||
package arena_test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"runtime"
|
||||
"testing"
|
||||
"unsafe"
|
||||
|
||||
"git.brut.systems/judah/xx/arena"
|
||||
"git.brut.systems/judah/xx/mem"
|
||||
)
|
||||
|
||||
// BenchmarkAlloc_New_Small: baseline — heap-allocate small values via new().
func BenchmarkAlloc_New_Small(b *testing.B) {
	var last *int
	for i := range b.N {
		v := new(int)
		*v = i
		last = v

		// Drop the reference periodically so the GC can reclaim,
		// mirroring the arena variants' periodic Reset.
		if i%1000 == 0 {
			last = nil
		}
	}

	runtime.KeepAlive(last)
}

// BenchmarkAlloc_Closure_Small: closure-based arena, small values.
func BenchmarkAlloc_Closure_Small(b *testing.B) {
	alloc := arena.Ring[int](16)

	var last *int
	for i := range b.N {
		v := arena.New[int](alloc)
		*v = i
		last = v

		if i%1000 == 0 {
			arena.Reset(alloc)
		}
	}

	runtime.KeepAlive(last)
}

// BenchmarkAlloc_Interface_Small: interface-based allocator, small values.
func BenchmarkAlloc_Interface_Small(b *testing.B) {
	alloc := NewLinear(16 * mem.Kilobyte)

	var last *int
	for i := range b.N {
		v := New[int](alloc)
		*v = i
		last = v

		if i%1000 == 0 {
			Reset(alloc)
		}
	}

	runtime.KeepAlive(last)
}

// large is big enough (9 words) to exercise a non-tiny allocation path.
type large struct{ a, b, c, d, e, f, g, h, i int }

// BenchmarkAlloc_New_Large: baseline — heap-allocate large values via new().
func BenchmarkAlloc_New_Large(b *testing.B) {
	var last *large
	for i := range b.N {
		v := new(large)
		v.e = i
		last = v

		if i%1000 == 0 {
			last = nil
		}
	}

	runtime.KeepAlive(last)
}

// BenchmarkAlloc_Closure_Large: closure-based arena, large values.
func BenchmarkAlloc_Closure_Large(b *testing.B) {
	alloc := arena.Linear(128 * mem.Kilobyte)

	var last *large
	for i := range b.N {
		v := arena.New[large](alloc)
		v.e = i
		last = v

		if i%1000 == 0 {
			arena.Reset(alloc)
		}
	}

	runtime.KeepAlive(last)
}

// BenchmarkAlloc_Interface_Large: interface-based allocator, large values.
func BenchmarkAlloc_Interface_Large(b *testing.B) {
	alloc := NewLinear(128 * mem.Kilobyte)

	var last *large
	for i := range b.N {
		v := New[large](alloc)
		v.e = i
		last = v

		if i%1000 == 0 {
			Reset(alloc)
		}
	}

	runtime.KeepAlive(last)
}

// BenchmarkAlloc_Closure_HotPath: closure-based arena, alternating sizes
// with a Reset every iteration (worst-case dispatch overhead).
func BenchmarkAlloc_Closure_HotPath(b *testing.B) {
	alloc := arena.Linear(256)

	var (
		lastlarge *large
		lastsmall *int
	)
	for i := range b.N {
		if i%2 == 0 {
			lastsmall = arena.New[int](alloc)
		} else {
			lastlarge = arena.New[large](alloc)
		}

		arena.Reset(alloc)
	}

	runtime.KeepAlive(lastlarge)
	runtime.KeepAlive(lastsmall)
}

// BenchmarkAlloc_Interface_HotPath: interface-based counterpart of the above.
func BenchmarkAlloc_Interface_HotPath(b *testing.B) {
	alloc := NewLinear(256)

	var (
		lastlarge *large
		lastsmall *int
	)
	for i := range b.N {
		if i%2 == 0 {
			lastsmall = New[int](alloc)
		} else {
			lastlarge = New[large](alloc)
		}

		Reset(alloc)
	}

	runtime.KeepAlive(lastlarge)
	runtime.KeepAlive(lastsmall)
}

// BenchmarkAlloc_Closure_Wrapped: three layers of wrapping to measure
// per-layer closure dispatch cost.
func BenchmarkAlloc_Closure_Wrapped(b *testing.B) {
	alloc := arena.Pinned(arena.Pinned(arena.Pinned(arena.Linear(256))))

	var (
		lastlarge *large
		lastsmall *int
	)
	for i := range b.N {
		if i%2 == 0 {
			lastsmall = arena.New[int](alloc)
		} else {
			lastlarge = arena.New[large](alloc)
		}

		arena.Reset(alloc)
	}

	runtime.KeepAlive(lastlarge)
	runtime.KeepAlive(lastsmall)
}

// BenchmarkAlloc_Interface_Wrapped: interface-based counterpart of the above.
func BenchmarkAlloc_Interface_Wrapped(b *testing.B) {
	alloc := NewPinned(NewPinned(NewPinned(NewLinear(256))))

	var (
		lastlarge *large
		lastsmall *int
	)
	for i := range b.N {
		if i%2 == 0 {
			lastsmall = New[int](alloc)
		} else {
			lastlarge = New[large](alloc)
		}

		Reset(alloc)
	}

	runtime.KeepAlive(lastlarge)
	runtime.KeepAlive(lastsmall)
}
|
||||
|
||||
// Allocator is the interface-based counterpart to arena.Arena, used to
// benchmark interface dispatch against closure dispatch.
type Allocator interface {
	Proc(a arena.Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error)
}

// New allocates a zeroed T from the given Allocator, panicking on failure.
// Mirrors arena.New for the interface-based API.
func New[T any](a Allocator) *T {
	ptr, err := a.Proc(arena.ACTION_ALLOC, mem.Sizeof[T](), mem.Alignof[T](), nil)
	if err != nil {
		panic(err)
	}

	return (*T)(ptr)
}

// Reset restores the Allocator to its initial state, panicking on failure.
// Mirrors arena.Reset for the interface-based API.
func Reset(a Allocator) {
	if _, err := a.Proc(arena.ACTION_RESET, 0, 0, nil); err != nil {
		panic(err)
	}
}
|
||||
|
||||
type Linear struct {
|
||||
data []byte
|
||||
maxsize uintptr
|
||||
offset uintptr
|
||||
}
|
||||
|
||||
func NewLinear(maxsize uintptr) *Linear {
|
||||
return &Linear{
|
||||
data: make([]byte, maxsize),
|
||||
maxsize: maxsize,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *Linear) Proc(a arena.Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
|
||||
switch a {
|
||||
case arena.ACTION_ALLOC:
|
||||
aligned := mem.AlignForward(size, align)
|
||||
if l.offset+aligned > l.maxsize {
|
||||
return nil, errors.New("linear: out of memory")
|
||||
}
|
||||
|
||||
ptr := &l.data[l.offset]
|
||||
l.offset += aligned
|
||||
return unsafe.Pointer(ptr), nil
|
||||
|
||||
case arena.ACTION_RESET:
|
||||
clear(l.data)
|
||||
l.offset = 0
|
||||
|
||||
case arena.ACTION_SAVE:
|
||||
*watermark = l.offset
|
||||
case arena.ACTION_RESTORE:
|
||||
l.offset = *watermark
|
||||
|
||||
default:
|
||||
panic("unimplemented action: " + a.String())
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type Pinned struct {
|
||||
arena Allocator
|
||||
pinner runtime.Pinner
|
||||
}
|
||||
|
||||
func NewPinned(arena Allocator) *Pinned {
|
||||
return &Pinned{arena: arena}
|
||||
}
|
||||
|
||||
func (p *Pinned) Proc(a arena.Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
|
||||
ptr, err := p.arena.Proc(a, size, align, watermark)
|
||||
if err != nil {
|
||||
return ptr, err
|
||||
}
|
||||
|
||||
if a == arena.ACTION_RESET {
|
||||
p.pinner.Unpin()
|
||||
} else {
|
||||
p.pinner.Pin(ptr)
|
||||
}
|
||||
|
||||
return ptr, err
|
||||
}
|
||||
|
|
@ -1,122 +0,0 @@
|
|||
package arena_test
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
"unsafe"
|
||||
|
||||
"git.brut.systems/judah/xx/arena"
|
||||
"git.brut.systems/judah/xx/mem"
|
||||
"git.brut.systems/judah/xx/testx"
|
||||
)
|
||||
|
||||
// TestArenas_ThatShouldPanicWhenOOM verifies that New panics when the
// underlying arena cannot satisfy the allocation (too small, or Nil).
func TestArenas_ThatShouldPanicWhenOOM(t *testing.T) {
	arenas := []arena.Arena{
		arena.Linear(1), // 1 byte cannot hold an int
		arena.Nil(),     // always errors
	}

	for _, a := range arenas {
		testx.ShouldPanic(t, func() {
			_ = arena.New[int](a)
		})
	}
}

// TestArenas_ThatShouldClearAfterReset verifies that Reset zeroes
// previously handed-out memory for the arenas that promise it.
func TestArenas_ThatShouldClearAfterReset(t *testing.T) {
	arenas := []arena.Arena{
		arena.Linear(16),
		arena.Chunked(16),
		arena.Ring[uint16](2),
	}

	for _, a := range arenas {
		x := arena.New[uint16](a)
		y := arena.New[uint16](a)
		*x, *y = 100, 200
		arena.Reset(a)

		// Stale pointers must now read zeros, not the old values.
		testx.Expect(t, *x == 0, "x = %d, expected 0", *x)
		testx.Expect(t, *y == 0, "y = %d, expected 0", *y)
	}
}

// TestArenas_ThatShouldReuseMemoryAfterReset verifies that allocations
// after a Reset land at the same addresses as before it.
func TestArenas_ThatShouldReuseMemoryAfterReset(t *testing.T) {
	arenas := []arena.Arena{
		arena.Linear(16),
		arena.Chunked(16),
		arena.Ring[uint16](2),
	}

	for _, a := range arenas {
		x1 := arena.New[uint16](a)
		y1 := arena.New[uint16](a)

		arena.Reset(a)

		x2 := arena.New[uint16](a)
		y2 := arena.New[uint16](a)

		testx.Expect(t, x1 == x2, "x1 = %p, x2 = %p", x1, x2)
		testx.Expect(t, y1 == y2, "y1 = %p, y2 = %p", y1, y2)
	}
}

// TestArenas_WithRegion verifies that resetting a Region rolls the
// wrapped arena back to where it was when the Region was created,
// leaving earlier allocations intact.
func TestArenas_WithRegion(t *testing.T) {
	arenas := []arena.Arena{
		arena.Linear(256),
		arena.Chunked(256),
		arena.Ring[uint16](16),
	}

	// One allocation per arena before any Region exists.
	var baseptrs []*uint16
	for i, a := range arenas {
		v := arena.New[uint16](a)
		*v = uint16(i)
		baseptrs = append(baseptrs, v)
	}

	// Allocate through a Region, then reset it — only the Region's
	// allocations should be rolled back.
	for _, a := range arenas {
		a := arena.Region(a)
		for range 10 {
			_ = arena.New[uint16](a)
		}
		arena.Reset(a)
	}

	for i, a := range arenas {
		testx.Expect(t, *baseptrs[i] == uint16(i), "baseptrs[%d] = %d, expected %d", i, *baseptrs[i], i)

		// The next allocation should sit immediately after the original.
		base := uintptr(unsafe.Pointer(baseptrs[i]))
		next := uintptr(unsafe.Pointer(arena.New[uint16](a)))
		testx.Expect(t, next-base == mem.Sizeof[uint16](), "delta was %d", next-base)
	}
}

// TestConcurrent verifies that Concurrent serializes allocations: after
// two goroutines allocate 4 uint8s and 4 uint16s, the bump pointer has
// advanced exactly 4*1 + 4*2 = 12 bytes.
func TestConcurrent(t *testing.T) {
	a := arena.Concurrent(arena.Linear(16))

	// Zero-sized allocation to observe the current bump position.
	base, err := a(arena.ACTION_ALLOC, 0, 1, nil)
	testx.Expect(t, err == nil, "ACTION_ALLOC failed: %v", err)

	var wg sync.WaitGroup
	wg.Go(func() {
		_ = arena.New[uint8](a)
		_ = arena.New[uint8](a)
		_ = arena.New[uint8](a)
		_ = arena.New[uint8](a)
	})

	wg.Go(func() {
		_ = arena.New[uint16](a)
		_ = arena.New[uint16](a)
		_ = arena.New[uint16](a)
		_ = arena.New[uint16](a)
	})

	wg.Wait()

	after, err := a(arena.ACTION_ALLOC, 0, 1, nil)
	testx.Expect(t, err == nil, "ACTION_ALLOC failed: %v", err)
	testx.Expect(t, uintptr(after)-uintptr(base) == 12, "diff is: %v", uintptr(after)-uintptr(base))
}
|
||||
|
|
@ -1,9 +0,0 @@
|
|||
//go:build !XX_DISABLE_ASSERT
|
||||
|
||||
package xx
|
||||
|
||||
// Assert panics when cond is false. It is compiled out entirely when the
// XX_DISABLE_ASSERT build tag is set (see the sibling no-op file).
func Assert(cond bool) {
	if cond {
		return
	}
	panic("assertion failed")
}
|
||||
|
|
@ -1,7 +0,0 @@
|
|||
//go:build XX_DISABLE_ASSERT
|
||||
|
||||
package xx
|
||||
|
||||
// Assert is a no-op: assertions are compiled out under the
// XX_DISABLE_ASSERT build tag.
func Assert(cond bool) {}

// Unreachable is a no-op: assertions are compiled out under the
// XX_DISABLE_ASSERT build tag.
func Unreachable() {}
|
||||
|
|
@ -1,112 +0,0 @@
|
|||
package bucket
|
||||
|
||||
import (
|
||||
"iter"
|
||||
)
|
||||
|
||||
// DefaultElementsPerBucket is the bucket capacity used when the Array is
// initialized without an explicit capacity.
const DefaultElementsPerBucket = 32

// Array is a resizable array whose values will never move in memory.
// This means it is safe to take a pointer to a value within the array
// while continuing to append to it.
type Array[T any] struct {
	last                int         // index of the bucket currently being filled
	elements_per_bucket int         // fixed capacity of every bucket
	buckets             []bucket[T] // each bucket is allocated once and never grown
}

// Init prepares the Array with the default bucket capacity.
func (s *Array[T]) Init() {
	s.InitWithCapacity(DefaultElementsPerBucket)
}

// InitWithCapacity prepares the Array with the given per-bucket capacity,
// discarding any existing contents. Non-positive capacities fall back to
// DefaultElementsPerBucket.
func (s *Array[T]) InitWithCapacity(elements_per_bucket int) {
	if elements_per_bucket <= 0 {
		elements_per_bucket = DefaultElementsPerBucket
	}

	s.elements_per_bucket = elements_per_bucket

	// Keep the outer slice's storage but start over with one empty bucket.
	s.buckets = s.buckets[:0]
	s.buckets = append(s.buckets, make(bucket[T], 0, s.elements_per_bucket))
	s.last = 0
}

// Reset drops all buckets; the next Append lazily re-initializes the Array.
func (s *Array[T]) Reset() {
	s.buckets = s.buckets[:0]
	s.last = 0
}
|
||||
|
||||
func (s *Array[T]) Append(value T) *T {
|
||||
if len(s.buckets) == 0 {
|
||||
s.Init()
|
||||
}
|
||||
|
||||
if len(s.buckets[s.last]) == cap(s.buckets[s.last]) {
|
||||
s.buckets = append(s.buckets, make(bucket[T], 0, s.elements_per_bucket))
|
||||
s.last += 1
|
||||
}
|
||||
|
||||
s.buckets[s.last] = append(s.buckets[s.last], value)
|
||||
return &s.buckets[s.last][len(s.buckets[s.last])-1]
|
||||
}
|
||||
|
||||
func (s *Array[T]) AppendMany(values ...T) (first *T) {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
first = s.Append(values[0])
|
||||
|
||||
if len(values) > 1 {
|
||||
for _, v := range values[1:] {
|
||||
s.Append(v)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Array[T]) Get(index int) *T {
|
||||
b := s.buckets[index/s.elements_per_bucket]
|
||||
return &b[index%s.elements_per_bucket]
|
||||
}
|
||||
|
||||
func (s *Array[T]) Set(index int, value T) {
|
||||
*s.Get(index) = value
|
||||
}
|
||||
|
||||
func (s *Array[T]) Len() int {
|
||||
return s.Cap() - (cap(s.buckets[s.last]) - len(s.buckets[s.last]))
|
||||
}
|
||||
|
||||
func (s *Array[T]) Cap() int {
|
||||
return len(s.buckets) * s.elements_per_bucket
|
||||
}
|
||||
|
||||
func (s *Array[T]) Pointers() iter.Seq2[int, *T] {
|
||||
return func(yield func(int, *T) bool) {
|
||||
for bi := range s.buckets {
|
||||
startIdx := bi * s.elements_per_bucket
|
||||
for i := range s.buckets[bi] {
|
||||
if !yield(startIdx+i, &s.buckets[bi][i]) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Array[T]) Values() iter.Seq2[int, T] {
|
||||
return func(yield func(int, T) bool) {
|
||||
for bi, b := range s.buckets {
|
||||
startIdx := bi * s.elements_per_bucket
|
||||
for i := range b {
|
||||
if !yield(startIdx+i, b[i]) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type bucket[T any] = []T
|
||||
|
|
@ -1,70 +0,0 @@
|
|||
package bucket_test
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"git.brut.systems/judah/xx/containerx/bucket"
|
||||
"git.brut.systems/judah/xx/testx"
|
||||
)
|
||||
|
||||
// TestArray_StableWithGC verifies that pointers returned by Append remain
// valid — and keep pointing at the same values — across many appends and
// forced GC cycles.
func TestArray_StableWithGC(t *testing.T) {
	type valuewithptr struct {
		value int
		ptr   *int
	}

	var arr bucket.Array[valuewithptr]
	aptr := arr.Append(valuewithptr{value: 10, ptr: nil})
	bptr := arr.Append(valuewithptr{value: 20, ptr: &aptr.value})

	// Grow the array across many buckets, forcing a GC each time to
	// shake out any element that might have been moved or collected.
	const N = 1000
	for i := range N {
		arr.Append(valuewithptr{value: i})
		runtime.GC()
	}

	testx.Expect(t, arr.Get(0) == aptr)
	testx.Expect(t, arr.Get(1) == bptr)
	testx.Expect(t, arr.Len() == N+2, "len was %d", arr.Len())
	testx.Expect(t, bptr.ptr != nil && bptr.value == 20)
	testx.Expect(t, bptr.ptr == &aptr.value, "%p vs. %p", bptr.ptr, &aptr.value)
}

// BenchmarkArray_RandomAccess measures Get on a pre-filled array.
func BenchmarkArray_RandomAccess(b *testing.B) {
	var arr bucket.Array[int]
	for i := range b.N {
		arr.Append(i * i)
	}

	b.ResetTimer()
	for i := range b.N {
		arr.Get(i % b.N)
	}
}

// BenchmarkArray_Append measures Append; the second fill after Reset
// re-initializes the bucket storage from scratch.
func BenchmarkArray_Append(b *testing.B) {
	var arr bucket.Array[int]
	for i := range b.N {
		arr.Append(i * i)
	}

	arr.Reset()
	for i := range b.N {
		arr.Append(i * i)
	}
}

// BenchmarkArray_Iteration measures the Values iterator over a pre-filled
// array; sum consumes each value so the loop body is not empty.
func BenchmarkArray_Iteration(b *testing.B) {
	var arr bucket.Array[int]
	for i := range b.N {
		arr.Append(i * i)
	}

	b.ResetTimer()

	sum := 0
	for _, v := range arr.Values() {
		sum += v
	}
}
|
||||
|
|
@ -1,174 +0,0 @@
|
|||
package xar
|
||||
|
||||
import (
|
||||
"iter"
|
||||
"math/bits"
|
||||
"unsafe"
|
||||
|
||||
"git.brut.systems/judah/xx"
|
||||
)
|
||||
|
||||
const (
	// DefaultChunkSizeShift gives a first-chunk capacity of 1<<4 = 16 elements.
	DefaultChunkSizeShift = 4
)

// Xar is an implementation of Andrew Reece's Xar (Exponential Array).
//
// See: https://www.youtube.com/watch?v=i-h95QIGchY
type Xar[T any] struct {
	shift uint8 // log2 of the first chunk's capacity; 0 means "not yet initialized"
	count uint64 // number of live elements
	chunks [][]T // exponentially sized chunks; elements never move once placed
}

// Init prepares the Xar with the default chunk size.
func (x *Xar[T]) Init() {
	x.InitWithSize(DefaultChunkSizeShift)
}

// InitWithSize prepares the Xar with a first-chunk capacity of
// 1 << size_shift, clearing any existing contents first.
func (x *Xar[T]) InitWithSize(size_shift uint8) {
	if len(x.chunks) != 0 {
		x.Reset()
	}

	x.shift = size_shift
}

// Reset zeroes all chunks and drops the element count to zero.
// Chunk storage is retained for reuse.
func (x *Xar[T]) Reset() {
	for _, c := range x.chunks {
		clear(c)
	}

	x.count = 0
}
|
||||
|
||||
// Append stores value at the end of the Xar and returns a stable pointer
// to its slot. Chunks are never reallocated, so returned pointers remain
// valid across later appends.
func (x *Xar[T]) Append(value T) *T {
	// shift == 0 means the zero value was never initialized.
	if x.shift == 0 {
		x.Init()
	}

	chunk_idx, idx_in_chunk, chunk_cap := x.getChunk(x.count)
	x.count += 1
	if chunk_idx >= uint64(len(x.chunks)) {
		// First write into a new chunk: allocate it at full capacity so
		// its elements never move.
		x.chunks = append(x.chunks, make([]T, chunk_cap))
	}

	slot := &x.chunks[chunk_idx][idx_in_chunk]
	*slot = value
	return slot
}

// AppendMany appends all values in order and returns a pointer to the
// first stored value, or nil when called with no values.
func (x *Xar[T]) AppendMany(values ...T) *T {
	if len(values) == 0 {
		return nil
	}

	first := x.Append(values[0])
	if len(values) > 1 {
		for _, v := range values[1:] {
			x.Append(v)
		}
	}

	return first
}
|
||||
|
||||
func (x *Xar[T]) Get(index int) *T {
|
||||
chunk_idx, idx_in_chunk, _ := x.getChunk(uint64(index))
|
||||
return &x.chunks[chunk_idx][idx_in_chunk]
|
||||
}
|
||||
|
||||
func (x *Xar[T]) Set(index int, value T) {
|
||||
*x.Get(index) = value
|
||||
}
|
||||
|
||||
// Remove deletes the element at index by moving the last element into its
// slot (swap-remove): O(1), but element order is not preserved.
//
// NOTE(review): index is not bounds-checked here, and calling Remove on an
// empty Xar underflows count — confirm callers never do either.
func (x *Xar[T]) Remove(index int) {
	x.Set(index, *x.Get(int(x.count - 1)))
	x.count -= 1
}
|
||||
|
||||
// Len returns the number of elements currently stored.
func (x *Xar[T]) Len() int {
	return int(x.count)
}
|
||||
|
||||
func (x *Xar[T]) Cap() (l int) {
|
||||
for _, c := range x.chunks {
|
||||
l += cap(c)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (x *Xar[T]) Pointers() iter.Seq2[int, *T] {
|
||||
return func(yield func(int, *T) bool) {
|
||||
idx := -1
|
||||
for chunk_idx, idx_in_chunk := range x.iter() {
|
||||
idx += 1
|
||||
if !yield(idx, &x.chunks[chunk_idx][idx_in_chunk]) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Xar[T]) Values() iter.Seq2[int, T] {
|
||||
return func(yield func(int, T) bool) {
|
||||
idx := -1
|
||||
for chunk_idx, idx_in_chunk := range x.iter() {
|
||||
idx += 1
|
||||
if !yield(idx, x.chunks[chunk_idx][idx_in_chunk]) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Xar[T]) iter() iter.Seq2[uint64, uint64] {
|
||||
return func(yield func(uint64, uint64) bool) {
|
||||
chunk_size := 1 << x.shift
|
||||
outer:
|
||||
for chunk_idx := range x.chunks {
|
||||
for idx_in_chunk := range chunk_size - 1 {
|
||||
if uint64(chunk_idx+idx_in_chunk) >= uint64(x.count) {
|
||||
break outer
|
||||
}
|
||||
|
||||
if !yield(uint64(chunk_idx), uint64(idx_in_chunk)) {
|
||||
break outer
|
||||
}
|
||||
}
|
||||
|
||||
chunk_size <<= xx.BoolUint(chunk_idx > 0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// getChunk maps a flat element index to (chunk index, index within that
// chunk, capacity of that chunk).
//
// Layout: chunk 0 holds the first 1<<shift elements, chunk 1 the next
// 1<<shift, and every subsequent chunk doubles in capacity.
func (x *Xar[T]) getChunk(index uint64) (chunk_idx uint64, idx_in_chunk uint64, chunk_cap uint64) {
	if true /* branchless */ {
		var (
			// Index expressed in units of the base chunk size.
			i_shift = index >> x.shift
			// True once the index falls past chunk 0.
			i_shift2 = i_shift != 0
			// Highest set bit of i_shift; the |1 makes msb 0 for
			// indexes landing in chunk 0 or chunk 1.
			msb = msb64(i_shift | 1)
			// Branchless bool -> 0/1 (a Go bool is one byte, true == 1).
			b = uint64(*(*uint8)(unsafe.Pointer(&i_shift2)))
		)

		chunk_idx = msb + b
		idx_in_chunk = index - (b << (msb + uint64(x.shift)))
		chunk_cap = 1 << (msb + uint64(x.shift))
	} else {
		// Reference (branching) implementation of the same mapping,
		// kept for documentation; never executed.
		idx_in_chunk = index
		chunk_cap = 1 << x.shift

		i_shift := index >> x.shift
		if i_shift > 0 {
			chunk_idx = msb64(i_shift | 1)
			chunk_cap = 1 << (chunk_idx + uint64(x.shift))
			idx_in_chunk -= chunk_cap
			chunk_idx += 1
		}
	}

	return
}
|
||||
|
||||
// msb64 returns the position of the most significant set bit of x
// (wraps to a huge value for x == 0, matching 63-LeadingZeros64).
func msb64(x uint64) uint64 {
	return uint64(bits.Len64(x) - 1)
}
|
||||
|
|
@ -1,105 +0,0 @@
|
|||
package xar_test
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"git.brut.systems/judah/xx/containerx/xar"
|
||||
"git.brut.systems/judah/xx/testx"
|
||||
)
|
||||
|
||||
// TestXar_StableWithGC verifies that pointers returned by Append remain
// valid while the GC runs between appends (elements are never moved).
func TestXar_StableWithGC(t *testing.T) {
	type valuewithptr struct {
		value int
		ptr   *int
	}

	var x xar.Xar[valuewithptr]
	x.InitWithSize(8)

	aptr := x.Append(valuewithptr{value: 10, ptr: nil})
	bptr := x.Append(valuewithptr{value: 20, ptr: &aptr.value})

	// Force many GC cycles while the array grows across chunks.
	const N = 1000
	for i := range N {
		x.Append(valuewithptr{value: i})
		runtime.GC()
	}

	// NOTE(review): aptr is element 0 and bptr element 1, so the first
	// expectation below (x.Get(0) == bptr) looks like it should compare
	// against aptr instead — confirm intent.
	testx.Expect(t, x.Get(0) == bptr)
	testx.Expect(t, x.Get(1) == bptr)
	testx.Expect(t, x.Len() == N+2, "len was %d", x.Len())
	testx.Expect(t, bptr.ptr != nil && bptr.value == 20)
	testx.Expect(t, bptr.ptr == &aptr.value, "%p vs. %p", bptr.ptr, &aptr.value)
}
|
||||
|
||||
func TestXar_ResetAndReuse(t *testing.T) {
|
||||
var x xar.Xar[int]
|
||||
start := x.Append(60)
|
||||
x.AppendMany(10, 20, 30, 40, 50)
|
||||
|
||||
x.Reset()
|
||||
runtime.GC()
|
||||
|
||||
testx.Expect(t, x.Cap() != 0)
|
||||
testx.Expect(t, x.Len() == 0)
|
||||
|
||||
x.Append(0xFF)
|
||||
x.Append(0xFC)
|
||||
x.Append(0xFB)
|
||||
|
||||
testx.Expect(t, x.Get(0) == start)
|
||||
testx.Expect(t, x.Len() == 3)
|
||||
}
|
||||
|
||||
func TestXar_Iterators(t *testing.T) {
|
||||
var x xar.Xar[int]
|
||||
x.AppendMany(0, 1, 2, 3, 4, 5)
|
||||
|
||||
iterations := 0
|
||||
for i, v := range x.Values() {
|
||||
iterations += 1
|
||||
testx.Expect(t, v == i, "v: %d, i: %d", v, i)
|
||||
}
|
||||
|
||||
testx.Expect(t, iterations == x.Len())
|
||||
}
|
||||
|
||||
func BenchmarkXar_Append(b *testing.B) {
|
||||
var x xar.Xar[int]
|
||||
for i := range b.N {
|
||||
x.Append(i * i)
|
||||
}
|
||||
|
||||
x.Reset()
|
||||
for i := range b.N {
|
||||
x.Append(i * i)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkXar_RandomAccess(b *testing.B) {
|
||||
var x xar.Xar[int]
|
||||
for i := range b.N {
|
||||
x.Append(i * i)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := range b.N {
|
||||
x.Get(i % b.N)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkXar_Iteration(b *testing.B) {
|
||||
var x xar.Xar[int]
|
||||
for i := range b.N {
|
||||
x.Append(i * i)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
sum := 0
|
||||
for _, v := range x.Values() {
|
||||
sum += v
|
||||
}
|
||||
}
|
||||
4
go.mod
4
go.mod
|
|
@ -1,5 +1,3 @@
|
|||
module git.brut.systems/judah/xx
|
||||
|
||||
go 1.26.0
|
||||
|
||||
require github.com/ebitengine/purego v0.9.1
|
||||
go 1.25.0
|
||||
|
|
|
|||
2
go.sum
2
go.sum
|
|
@ -1,2 +0,0 @@
|
|||
github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A=
|
||||
github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
154
mem/mem.go
154
mem/mem.go
|
|
@ -1,154 +0,0 @@
|
|||
package mem
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
Kilobyte = 1 << (10 * (iota + 1))
|
||||
Megabyte
|
||||
Gigabyte
|
||||
Terabyte
|
||||
)
|
||||
|
||||
// Sizeof returns the size (in bytes) of the given type.
//
// Not to be confused with [unsafe.Sizeof] which returns the size of a type via an expression.
func Sizeof[T any]() uintptr {
	var zero T
	return unsafe.Sizeof(zero)
}
|
||||
|
||||
// Alignof returns the alignment (in bytes) of the given type.
//
// Not to be confused with [unsafe.Alignof] which returns the alignment of a type via an expression.
func Alignof[T any]() uintptr {
	var zero T
	return unsafe.Alignof(zero)
}
|
||||
|
||||
// ZeroValue returns the zero value of a given type.
func ZeroValue[T any]() (_ T) {
	var zero T
	return zero
}
|
||||
|
||||
// BitCast performs a bit conversion between two types of the same size.
|
||||
//
|
||||
// BitCast panics if the sizes of the types differ.
|
||||
func BitCast[TOut any, TIn any](value *TIn) TOut {
|
||||
if Sizeof[TOut]() != Sizeof[TIn]() {
|
||||
panic("bitcast: sizes of types must match")
|
||||
}
|
||||
return *((*TOut)(unsafe.Pointer(value)))
|
||||
}
|
||||
|
||||
// BitCastValue performs a bit conversion between two types of the same size.
|
||||
//
|
||||
// BitCastValue panics if the sizes of the types differ.
|
||||
func BitCastValue[TOut any, TIn any](value TIn) TOut {
|
||||
if Sizeof[TOut]() != Sizeof[TIn]() {
|
||||
panic("bitcast: sizes of types must match")
|
||||
}
|
||||
return *((*TOut)(unsafe.Pointer(&value)))
|
||||
}
|
||||
|
||||
// UnsafeCast performs a bit conversion between two types without checking if their sizes match.
func UnsafeCast[TOut any, TIn any](value *TIn) TOut {
	out := (*TOut)(unsafe.Pointer(value))
	return *out
}
|
||||
|
||||
// UnsafeCastValue performs a bit conversion between two types without checking if their sizes match.
func UnsafeCastValue[TOut any, TIn any](value TIn) TOut {
	out := (*TOut)(unsafe.Pointer(&value))
	return *out
}
|
||||
|
||||
// Copy copies size number of bytes from src into dst.
|
||||
//
|
||||
// Returns dst.
|
||||
func Copy(dst, src unsafe.Pointer, size uintptr) unsafe.Pointer {
|
||||
copy(unsafe.Slice((*byte)(dst), size), unsafe.Slice((*byte)(src), size))
|
||||
return dst
|
||||
}
|
||||
|
||||
// Clear overwrites 'count' number of bytes in 'dst' with a particular value.
|
||||
//
|
||||
// Returns dst.
|
||||
func Clear(dst unsafe.Pointer, value byte, count uintptr) unsafe.Pointer {
|
||||
b := (*byte)(dst)
|
||||
for range count { // @todo: loop unroll/maybe use asm?
|
||||
*b = value
|
||||
b = (*byte)(unsafe.Add(dst, 1))
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Zero overwrites 'count' number of bytes in 'dst' with zeros.
|
||||
//
|
||||
// Returns dst.
|
||||
func Zero(dst unsafe.Pointer, count uintptr) unsafe.Pointer {
|
||||
return Clear(dst, 0, count)
|
||||
}
|
||||
|
||||
// AlignForward rounds address up to the next multiple of alignment.
//
// alignment must be a non-zero power of two; otherwise AlignForward panics.
func AlignForward(address uintptr, alignment uintptr) uintptr {
	if alignment == 0 || alignment&(alignment-1) != 0 {
		panic("alignforward: alignment must be a power of two")
	}
	mask := alignment - 1
	return (address + mask) &^ mask
}
|
||||
|
||||
// AlignBackward rounds address down to the previous multiple of alignment.
//
// alignment must be a non-zero power of two; otherwise AlignBackward panics.
func AlignBackward(address uintptr, alignment uintptr) uintptr {
	if alignment == 0 || alignment&(alignment-1) != 0 {
		panic("alignbackward: alignment must be a power of two")
	}
	mask := alignment - 1
	return address &^ mask
}
|
||||
|
||||
// Aligned reports whether address is a multiple of alignment.
//
// alignment must be a non-zero power of two; otherwise Aligned panics.
func Aligned(address uintptr, alignment uintptr) bool {
	if alignment == 0 || alignment&(alignment-1) != 0 {
		panic("aligned: alignment must be a power of two")
	}
	return address%alignment == 0
}
|
||||
|
||||
// ExtendSlice returns the given slice with its length grown by amount,
// leaving the capacity intact. Panics if the new length would exceed the
// slice's capacity.
func ExtendSlice[T any](slice []T, amount uintptr) []T {
	newLen := amount + uintptr(len(slice))
	if newLen > uintptr(cap(slice)) {
		panic("extendslice: cannot extend slice past its capacity")
	}

	return slice[:newLen:cap(slice)]
}
|
||||
|
||||
// Access describes memory access permissions.
|
||||
type Access int
|
||||
|
||||
const (
|
||||
AccessNone Access = 1 << iota
|
||||
AccessRead
|
||||
AccessWrite
|
||||
AccessExecute
|
||||
)
|
||||
|
||||
// Reserve returns a slice of bytes pointing to uncommitted virtual memory.
|
||||
// The length and capacity of the slice will be total_address_space bytes.
|
||||
//
|
||||
// The underlying memory of the slice must be comitted to phyiscal memory before being accessed (see: Commit).
|
||||
//
|
||||
// Use Release to return the virtual address space back to the operating system.
|
||||
func Reserve(total_address_space uintptr) ([]byte, error) { return reserve(total_address_space) }
|
||||
|
||||
// Release returns reserved virtual address space back to the operating system.
|
||||
//
|
||||
// Note: Any committed memory within its address space will be freed as well.
|
||||
func Release(reserved []byte) error { return release(reserved) }
|
||||
|
||||
// Commit maps virtual memory to physical memory.
|
||||
func Commit(reserved []byte, access Access) error { return commit(reserved, access) }
|
||||
|
||||
// Decommit unmaps committed memory, leaving the underlying addresss space intact.
|
||||
//
|
||||
// Decommitted memory can be re-committed at a later time using Commit.
|
||||
//
|
||||
// Note: Accessing the memory after calling Decommit is unsafe and may cause a panic.
|
||||
func Decommit(committed []byte) (err error) { return decommit(committed) }
|
||||
|
|
@ -1,78 +0,0 @@
|
|||
package mem_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"unsafe"
|
||||
|
||||
"git.brut.systems/judah/xx/mem"
|
||||
"git.brut.systems/judah/xx/testx"
|
||||
)
|
||||
|
||||
func TestBitCast(t *testing.T) {
|
||||
a := uint32(0xFFFF_FFFF)
|
||||
b := mem.BitCast[float32](&a)
|
||||
c := mem.BitCast[uint32](&b)
|
||||
if a != c {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
v := uint8(0xFF)
|
||||
d := mem.BitCast[int8](&v)
|
||||
if d != -1 {
|
||||
t.Fail()
|
||||
}
|
||||
e := mem.BitCast[uint8](&d)
|
||||
if e != 255 {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func TestAllocationPrimitives(t *testing.T) {
|
||||
t.Run("reserve, unreserve", func(t *testing.T) {
|
||||
data, err := mem.Reserve(1 * mem.Gigabyte)
|
||||
testx.Expect(t, err == nil, "mem.Reserve returned an error - %s", err)
|
||||
|
||||
testx.Expect(t, len(data) == 1*mem.Gigabyte, "len was %d", len(data))
|
||||
testx.Expect(t, cap(data) == 1*mem.Gigabyte, "len was %d", cap(data))
|
||||
|
||||
err = mem.Release(data)
|
||||
testx.Expect(t, err == nil, "mem.Unreserve returned an error - %s", err)
|
||||
})
|
||||
|
||||
t.Run("commit", func(t *testing.T) {
|
||||
data, err := mem.Reserve(1 * mem.Gigabyte)
|
||||
testx.Expect(t, err == nil, "mem.Reserve returned an error - %s", err)
|
||||
|
||||
err = mem.Commit(data, mem.AccessRead|mem.AccessWrite)
|
||||
testx.Expect(t, err == nil, "mem.Commit returned an error - %s", err)
|
||||
|
||||
for i := range data {
|
||||
data[i] = byte(i * i)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("decommit", func(t *testing.T) {
|
||||
data, err := mem.Reserve(1 * mem.Gigabyte)
|
||||
testx.Expect(t, err == nil, "mem.Reserve returned an error - %s", err)
|
||||
|
||||
err = mem.Commit(data, mem.AccessRead|mem.AccessWrite)
|
||||
testx.Expect(t, err == nil, "mem.Commit returned an error - %s", err)
|
||||
|
||||
before := uintptr(unsafe.Pointer(&data[0]))
|
||||
|
||||
err = mem.Decommit(data)
|
||||
testx.Expect(t, err == nil, "mem.Decommit returned an error - %s", err)
|
||||
|
||||
// accessing data before recommitting it will fail
|
||||
|
||||
err = mem.Commit(data, mem.AccessRead|mem.AccessWrite)
|
||||
testx.Expect(t, err == nil, "mem.Commit returned an error - %s", err)
|
||||
|
||||
after := uintptr(unsafe.Pointer(&data[0]))
|
||||
testx.Expect(t, before == after, "base pointers did not match between after recommit %d != %d", before, after)
|
||||
|
||||
for i := range data {
|
||||
data[i] = byte(i * i)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
@ -1,68 +0,0 @@
|
|||
//go:build unix
|
||||
|
||||
package mem
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// reserve asks the kernel for total_address_space bytes of private,
// anonymous, PROT_NONE address space. Pages are not usable until commit
// grants access via mprotect.
func reserve(total_address_space uintptr) ([]byte, error) {
	data, err := syscall.Mmap(-1, 0, int(total_address_space), syscall.PROT_NONE, syscall.MAP_PRIVATE|syscall.MAP_ANON)
	if err != nil {
		return nil, err
	}

	return data, nil
}
|
||||
|
||||
func release(reserved []byte) error {
|
||||
return syscall.Munmap(reserved)
|
||||
}
|
||||
|
||||
func commit(reserved []byte, access Access) error {
|
||||
return syscall.Mprotect(reserved, access_to_prot(access))
|
||||
}
|
||||
|
||||
// decommit revokes access with PROT_NONE, then hints the kernel that the
// pages may be reclaimed (MADV_DONTNEED) while keeping the address range
// itself reserved.
func decommit(committed []byte) (err error) {
	err = syscall.Mprotect(committed, syscall.PROT_NONE)
	if err != nil {
		return
	}

	return madvise(committed, syscall.MADV_DONTNEED)
}
|
||||
|
||||
var _zero uintptr
|
||||
|
||||
// madvise is a hand-rolled wrapper over the raw madvise(2) syscall,
// mirroring the generated syscall-package style — presumably because
// syscall does not export Madvise on every target; confirm before
// replacing with a library call.
func madvise(b []byte, advice int) (err error) {
	var _p0 unsafe.Pointer
	if len(b) > 0 {
		_p0 = unsafe.Pointer(&b[0])
	} else {
		// The kernel still needs a valid pointer for a zero-length slice.
		_p0 = unsafe.Pointer(&_zero)
	}

	_, _, e := syscall.Syscall(syscall.SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice))
	if e != 0 {
		err = syscall.Errno(e)
	}

	return
}
|
||||
|
||||
func access_to_prot(access Access) (prot int) {
|
||||
prot = syscall.PROT_NONE
|
||||
|
||||
if access&AccessRead != 0 {
|
||||
prot |= syscall.PROT_READ
|
||||
}
|
||||
if access&AccessWrite != 0 {
|
||||
prot |= syscall.PROT_WRITE
|
||||
}
|
||||
if access&AccessExecute != 0 {
|
||||
prot |= syscall.PROT_EXEC
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
|
@ -1,30 +0,0 @@
|
|||
//go:build wasm
|
||||
|
||||
package mem
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func reserve(total_address_space uintptr) ([]byte, error) {
|
||||
data := make([]byte, total_address_space)
|
||||
p.Pin(unsafe.SliceData(data))
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func release(_ []byte) error {
|
||||
p.Unpin()
|
||||
return nil
|
||||
}
|
||||
|
||||
func commit(_ []byte, _ Access) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func decommit(committed []byte) (err error) {
|
||||
clear(committed)
|
||||
return nil
|
||||
}
|
||||
|
||||
var p runtime.Pinner
|
||||
|
|
@ -1,88 +0,0 @@
|
|||
//go:build windows
|
||||
|
||||
package mem
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func reserve(total_address_space uintptr) ([]byte, error) {
|
||||
addr, _, err := _VirtualAlloc.Call(0, total_address_space, _MEM_RESERVE, _PAGE_NOACCESS)
|
||||
if addr == 0 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return unsafe.Slice((*byte)(unsafe.Pointer(addr)), total_address_space), nil
|
||||
}
|
||||
|
||||
func release(reserved []byte) error {
|
||||
res, _, err := _VirtualFree.Call(uintptr(unsafe.Pointer(&reserved[0])), 0, _MEM_RELEASE)
|
||||
if res == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func commit(reserved []byte, access Access) error {
|
||||
ret, _, err := _VirtualAlloc.Call(
|
||||
uintptr(unsafe.Pointer(&reserved[0])),
|
||||
uintptr(len(reserved)),
|
||||
_MEM_COMMIT,
|
||||
uintptr(access_to_prot(access)),
|
||||
)
|
||||
if ret == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func decommit(committed []byte) error {
|
||||
ret, _, err := _VirtualFree.Call(
|
||||
uintptr(unsafe.Pointer(&committed[0])),
|
||||
uintptr(len(committed)),
|
||||
_MEM_DECOMMIT,
|
||||
)
|
||||
if ret == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
kernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||
_VirtualAlloc = kernel32.NewProc("VirtualAlloc")
|
||||
_VirtualFree = kernel32.NewProc("VirtualFree")
|
||||
)
|
||||
|
||||
const (
|
||||
_MEM_COMMIT = 0x1000
|
||||
_MEM_RESERVE = 0x2000
|
||||
_MEM_DECOMMIT = 0x4000
|
||||
_MEM_RELEASE = 0x8000
|
||||
|
||||
_PAGE_NOACCESS = 0x01
|
||||
_PAGE_READONLY = 0x02
|
||||
_PAGE_READWRITE = 0x04
|
||||
|
||||
_PAGE_EXECUTE_READ = 0x20
|
||||
_PAGE_EXECUTE_READWRITE = 0x40
|
||||
)
|
||||
|
||||
func access_to_prot(access Access) uint32 {
|
||||
switch access {
|
||||
case AccessRead | AccessWrite | AccessExecute:
|
||||
return _PAGE_EXECUTE_READWRITE
|
||||
case AccessRead | AccessExecute:
|
||||
return _PAGE_EXECUTE_READ
|
||||
case AccessRead | AccessWrite:
|
||||
return _PAGE_READWRITE
|
||||
case AccessRead:
|
||||
return _PAGE_READONLY
|
||||
default:
|
||||
return _PAGE_NOACCESS
|
||||
}
|
||||
}
|
||||
|
|
@ -1,78 +0,0 @@
|
|||
// Package osthread allows functions to be called on the main operating system thread.
|
||||
//
|
||||
// Usage:
|
||||
//
|
||||
// func main() {
|
||||
// osthread.Start(Entrypoint) // Initialize osthread and calls Entrypoint. Blocks until Entrypoint returns.
|
||||
// }
|
||||
//
|
||||
// func Entrypoint() {
|
||||
// osthread.Call(FuncA) // Call FuncA on the main operating system thread, block until it returns
|
||||
// // ...
|
||||
// osthread.Go(FuncB) // Schedule FuncB to be called on the main operating system thread
|
||||
// // ...
|
||||
// }
|
||||
package osthread
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// Start allows arbitrary functions to be run on the main operating system thread.
//
// Start must be called from the program's main function. Once called, it blocks until entrypoint returns.
//
// NOTE(review): once entrypoint returns, any functions still sitting in the
// queue are dropped without being run — confirm that is acceptable.
func Start(entrypoint func()) {
	// The package init locked this goroutine to the main OS thread;
	// release it once we are done dispatching.
	defer runtime.UnlockOSThread()

	done := make(chan any)

	// Immediately queue entrypoint
	go func() {
		defer func() {
			done <- nil
		}()

		entrypoint()
	}()

	// Call functions in our queue until entrypoint returns.
	// These functions are called on the main operating system thread.
	for {
		select {
		case fn := <-queue:
			fn()
		case <-done:
			return
		}
	}
}
|
||||
|
||||
// Go schedules a function to be run on the main operating system thread, returning immediately.
//
// Note: the send blocks once the queue's buffer (GOMAXPROCS entries, see
// init) is full and Start's dispatch loop is not draining it.
func Go(fn func()) {
	queue <- fn
}
|
||||
|
||||
// Call schedules a function to be run on the main operating system thread, blocking until it returns.
func Call(fn func()) {
	// Already on the main thread: run directly. Queueing here would
	// deadlock, since this goroutine is the one draining the queue.
	if onmainthread() {
		fn()
	} else {
		done := make(chan any)
		queue <- func() {
			// Signal completion even if fn panics.
			defer func() {
				done <- nil
			}()

			fn()
		}

		<-done
	}
}
|
||||
|
||||
var queue chan func()
|
||||
|
||||
func init() {
|
||||
runtime.LockOSThread()
|
||||
queue = make(chan func(), runtime.GOMAXPROCS(0))
|
||||
}
|
||||
|
|
@ -1,7 +0,0 @@
|
|||
//go:build !(windows || linux || darwin)
|
||||
|
||||
package osthread
|
||||
|
||||
func onmainthread() bool {
|
||||
return false
|
||||
}
|
||||
|
|
@ -1,33 +0,0 @@
|
|||
//go:build darwin
|
||||
|
||||
package osthread
|
||||
|
||||
import "github.com/ebitengine/purego/objc"
|
||||
|
||||
func onmainthread() bool {
|
||||
return objc.Send[bool](objc.ID(cls_nsthread), sel_isMainThread)
|
||||
}
|
||||
|
||||
var (
|
||||
cls_nsthread objc.Class
|
||||
cls_nsapplication objc.Class
|
||||
cls_nsapp objc.Class
|
||||
|
||||
sel_isMainThread objc.SEL
|
||||
sel_sharedApplication objc.SEL
|
||||
sel_run objc.SEL
|
||||
)
|
||||
|
||||
func init() {
|
||||
cls_nsthread = objc.GetClass("NSThread")
|
||||
cls_nsapplication = objc.GetClass("NSApplication")
|
||||
cls_nsapp = objc.GetClass("NSApp")
|
||||
|
||||
sel_isMainThread = objc.RegisterName("isMainThread")
|
||||
sel_sharedApplication = objc.RegisterName("sharedApplication")
|
||||
sel_run = objc.RegisterName("run")
|
||||
|
||||
// Just a bit of magic
|
||||
objc.ID(cls_nsapplication).Send(sel_sharedApplication)
|
||||
objc.ID(cls_nsapp).Send(sel_run)
|
||||
}
|
||||
|
|
@ -1,26 +0,0 @@
|
|||
//go:build linux
|
||||
|
||||
package osthread
|
||||
|
||||
import (
|
||||
"github.com/ebitengine/purego"
|
||||
)
|
||||
|
||||
func onmainthread() bool {
|
||||
return getpid() == gettid()
|
||||
}
|
||||
|
||||
var (
|
||||
getpid func() int32
|
||||
gettid func() int32
|
||||
)
|
||||
|
||||
func init() {
|
||||
libc, err := purego.Dlopen("libc.so.6", purego.RTLD_GLOBAL|purego.RTLD_NOW)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
purego.RegisterLibFunc(&getpid, libc, "getpid")
|
||||
purego.RegisterLibFunc(&gettid, libc, "gettid")
|
||||
}
|
||||
|
|
@ -1,25 +0,0 @@
|
|||
//go:build windows
|
||||
|
||||
package osthread
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"github.com/ebitengine/purego"
|
||||
)
|
||||
|
||||
func onmainthread() bool {
|
||||
return mainThreadId == getCurrentThreadId()
|
||||
}
|
||||
|
||||
var (
|
||||
mainThreadId int32
|
||||
getCurrentThreadId func() int32
|
||||
)
|
||||
|
||||
func init() {
|
||||
kernel32 := syscall.NewLazyDLL("kernel32.dll").Handle()
|
||||
purego.RegisterLibFunc(&getCurrentThreadId, kernel32, "GetCurrentThreadId")
|
||||
|
||||
mainThreadId = getCurrentThreadId() // init is always called on the main thread
|
||||
}
|
||||
|
|
@ -1,87 +0,0 @@
|
|||
package pointer
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"unsafe"
|
||||
|
||||
"git.brut.systems/judah/xx/mem"
|
||||
)
|
||||
|
||||
type Pinned[T any] struct {
|
||||
base unsafe.Pointer
|
||||
pinner runtime.Pinner
|
||||
}
|
||||
|
||||
func Pin[T any](ptr *T) (r Pinned[T]) {
|
||||
r.pinner.Pin(ptr)
|
||||
r.base = unsafe.Pointer(ptr)
|
||||
return
|
||||
}
|
||||
|
||||
func Cast[TOut, TIn any](p Pinned[TIn]) Pinned[TOut] {
|
||||
return Pinned[TOut]{
|
||||
base: unsafe.Pointer(p.base),
|
||||
pinner: p.pinner,
|
||||
}
|
||||
}
|
||||
|
||||
// Unpin releases the pin held by the underlying pinner.
//
// NOTE(review): the receiver is a value, so p.base = nil below mutates only
// this copy — the caller's Pinned keeps its old base address. Only the
// shared pinner's Unpin takes effect. Confirm whether a pointer receiver
// was intended.
func (p Pinned[T]) Unpin() {
	p.pinner.Unpin()
	p.base = nil
}
|
||||
|
||||
func (p Pinned[T]) Pointer() unsafe.Pointer {
|
||||
return p.base
|
||||
}
|
||||
|
||||
func (p Pinned[T]) Address() uintptr {
|
||||
return uintptr(p.base)
|
||||
}
|
||||
|
||||
func (p Pinned[T]) Nil() bool {
|
||||
return p.base == nil
|
||||
}
|
||||
|
||||
func (p Pinned[T]) Add(amount uintptr) Pinned[T] {
|
||||
return Pinned[T]{
|
||||
base: unsafe.Pointer(uintptr(p.base) + amount),
|
||||
pinner: p.pinner,
|
||||
}
|
||||
}
|
||||
|
||||
func (p Pinned[T]) Sub(amount uintptr) Pinned[T] {
|
||||
return Pinned[T]{
|
||||
base: unsafe.Pointer(uintptr(p.base) - amount),
|
||||
pinner: p.pinner,
|
||||
}
|
||||
}
|
||||
|
||||
func (p Pinned[T]) Aligned() bool {
|
||||
return mem.Aligned(uintptr(p.base), mem.Alignof[T]())
|
||||
}
|
||||
|
||||
func (p Pinned[T]) AlignForward() Pinned[T] {
|
||||
return Pinned[T]{
|
||||
base: unsafe.Pointer(mem.AlignForward(uintptr(p.base), mem.Alignof[T]())),
|
||||
pinner: p.pinner,
|
||||
}
|
||||
}
|
||||
|
||||
func (p Pinned[T]) AlignBackward() Pinned[T] {
|
||||
return Pinned[T]{
|
||||
base: unsafe.Pointer(mem.AlignBackward(uintptr(p.base), mem.Alignof[T]())),
|
||||
pinner: p.pinner,
|
||||
}
|
||||
}
|
||||
|
||||
func (p Pinned[T]) Load() T {
|
||||
return *(*T)(p.base)
|
||||
}
|
||||
|
||||
func (p Pinned[T]) Store(value T) {
|
||||
*(*T)(p.base) = value
|
||||
}
|
||||
|
||||
func (p Pinned[T]) Nth(index int) T {
|
||||
return p.Add(uintptr(index) * mem.Sizeof[T]()).Load()
|
||||
}
|
||||
|
|
@ -1,131 +0,0 @@
|
|||
package pointer_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"git.brut.systems/judah/xx/pointer"
|
||||
)
|
||||
|
||||
func TestPointer_AlignForward(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
offset uintptr
|
||||
}{
|
||||
{"align 8 bytes", 1},
|
||||
{"align 16 bytes", 3},
|
||||
{"align 32 bytes", 7},
|
||||
{"align 64 bytes", 15},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
value := int32(789)
|
||||
pinned := pointer.Pin(&value)
|
||||
defer pinned.Unpin()
|
||||
|
||||
// Add offset to misalign
|
||||
misaligned := pinned.Add(tt.offset)
|
||||
aligned := misaligned.AlignForward()
|
||||
|
||||
// Check alignment
|
||||
if !aligned.Aligned() {
|
||||
t.Errorf("Address %d is not aligned", aligned.Address())
|
||||
}
|
||||
|
||||
// Check it's forward aligned (greater or equal)
|
||||
if aligned.Address() < misaligned.Address() {
|
||||
t.Errorf("Forward aligned address %d should be >= original %d", aligned.Address(), misaligned.Address())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPointer_AlignBackward(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
offset uintptr
|
||||
}{
|
||||
{"align 8 bytes", 5},
|
||||
{"align 16 bytes", 10},
|
||||
{"align 32 bytes", 20},
|
||||
{"align 64 bytes", 40},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
value := int32(321)
|
||||
pinned := pointer.Pin(&value)
|
||||
defer pinned.Unpin()
|
||||
|
||||
// Add offset to misalign
|
||||
misaligned := pinned.Add(tt.offset)
|
||||
aligned := misaligned.AlignBackward()
|
||||
|
||||
// Check alignment
|
||||
if !aligned.Aligned() {
|
||||
t.Errorf("Address %d is not aligned", aligned.Address())
|
||||
}
|
||||
|
||||
// Check it's backward aligned (less or equal)
|
||||
if aligned.Address() > misaligned.Address() {
|
||||
t.Errorf("Backward aligned address %d should be <= original %d", aligned.Address(), misaligned.Address())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPointer_Nth(t *testing.T) {
|
||||
// Test with int32 array
|
||||
arr := []int32{10, 20, 30, 40, 50}
|
||||
pinned := pointer.Pin(&arr[0])
|
||||
defer pinned.Unpin()
|
||||
|
||||
for i := 0; i < len(arr); i++ {
|
||||
value := pinned.Nth(i)
|
||||
if value != arr[i] {
|
||||
t.Errorf("Index %d: expected %d, got %d", i, arr[i], value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPointer_NthFloat64(t *testing.T) {
|
||||
// Test with a float64 array
|
||||
arr := []float64{1.1, 2.2, 3.3, 4.4, 5.5}
|
||||
pinned := pointer.Pin(&arr[0])
|
||||
defer pinned.Unpin()
|
||||
|
||||
for i := 0; i < len(arr); i++ {
|
||||
value := pinned.Nth(i)
|
||||
if value != arr[i] {
|
||||
t.Errorf("Index %d: expected %f, got %f", i, arr[i], value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPointerArithmeticChain(t *testing.T) {
|
||||
value := int32(888)
|
||||
pinned := pointer.Pin(&value)
|
||||
defer pinned.Unpin()
|
||||
|
||||
// Test chaining operations
|
||||
result := pinned.Add(16).Add(8).Sub(4)
|
||||
expected := pinned.Address() + 16 + 8 - 4
|
||||
|
||||
if result.Address() != expected {
|
||||
t.Errorf("Expected address %d, got %d", expected, result.Address())
|
||||
}
|
||||
}
|
||||
|
||||
func TestPointer_Cast(t *testing.T) {
|
||||
value := int32(123)
|
||||
|
||||
i32 := pointer.Pin(&value)
|
||||
defer i32.Unpin()
|
||||
|
||||
f32 := pointer.Cast[float32](i32)
|
||||
f32.Store(3.14)
|
||||
|
||||
if value == 123 {
|
||||
t.Errorf("Value should have been changed")
|
||||
}
|
||||
}
|
||||
149
spmd/spmd.go
149
spmd/spmd.go
|
|
@ -1,149 +0,0 @@
|
|||
// Package spmd contains useful primitives for SPMD programs.
|
||||
//
|
||||
// Usage:
|
||||
//
|
||||
// func main() {
|
||||
// // Create N execution lanes (<= 0 for GOMAXPROCS lanes),
|
||||
// // and run 'Compute' across N lanes.
|
||||
// spmd.Run(-1, Compute)
|
||||
// }
|
||||
//
|
||||
// func Compute(lane spmd.Lane) {
|
||||
// log.Printf("Lane %d/%d is executing", lane.Index, lane.Count)
|
||||
//
|
||||
// // Execute this code on a lane locked to the main thread (aka lane.Index == 0)
|
||||
// // One lane will always be locked to the main thread
|
||||
// if lane.Main() {
|
||||
// data, err := os.ReadFile(...)
|
||||
// if err != nil {
|
||||
// panic(err)
|
||||
// }
|
||||
//
|
||||
// // Send data to all lanes ("DATA" can be any value)
|
||||
// lane.Store("DATA", string(data))
|
||||
// }
|
||||
//
|
||||
// // Wait until all lanes are at this point
|
||||
// lane.Sync()
|
||||
//
|
||||
// // Load stored data
|
||||
// data := lane.Load("DATA").(string)
|
||||
//
|
||||
// // Get lane-specific access range for data
|
||||
// lo, hi := lane.Range(len(data))
|
||||
// for i := lo; i < hi; i++ {
|
||||
// // ...
|
||||
// }
|
||||
// }
|
||||
package spmd
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"git.brut.systems/judah/xx/osthread"
|
||||
)
|
||||
|
||||
// Run will start executing the given function across N execution lanes,
|
||||
// blocking until they have all finished executing.
|
||||
//
|
||||
// If nLanes is <= 0, GOMAXPROCS will be used.
|
||||
//
|
||||
// Run must be called from the program's main function.
|
||||
func Run(nLanes int, fn func(lane Lane)) {
|
||||
if nLanes <= 0 {
|
||||
nLanes = runtime.GOMAXPROCS(0)
|
||||
}
|
||||
|
||||
osthread.Start(func() {
|
||||
s := new(state)
|
||||
s.cond.L = &s.mtx
|
||||
s.total = uint64(nLanes)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for i := range s.total {
|
||||
if i == 0 { // Lane 0 is always on the main thread
|
||||
wg.Add(1)
|
||||
osthread.Go(func() {
|
||||
fn(Lane{state: s, Index: uint32(i), Count: uint32(s.total)})
|
||||
wg.Done()
|
||||
})
|
||||
} else { // Everyone else gets scheduled like usual
|
||||
wg.Go(func() {
|
||||
fn(Lane{state: s, Index: uint32(i), Count: uint32(s.total)})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
})
|
||||
}
|
||||
|
||||
type state struct {
|
||||
mtx sync.Mutex
|
||||
cond sync.Cond
|
||||
waiting atomic.Uint64
|
||||
total uint64
|
||||
userdata sync.Map
|
||||
}
|
||||
|
||||
type Lane struct {
|
||||
state *state
|
||||
Index uint32
|
||||
Count uint32
|
||||
}
|
||||
|
||||
// Main returns if the lane is locked to the main thread.
// Lane 0 is always the main-thread lane (see Run).
func (l Lane) Main() bool {
	return l.Index == 0
}
|
||||
|
||||
// Sync pauses the current lane until all lanes are at the same sync point.
func (l Lane) Sync() {
	l.state.mtx.Lock()
	defer l.state.mtx.Unlock()

	// The last lane to arrive resets the barrier counter and wakes
	// every waiting lane.
	if l.state.waiting.Add(1) >= l.state.total {
		l.state.waiting.Store(0)
		l.state.cond.Broadcast()
		return
	}

	// NOTE(review): sync.Cond documentation requires Wait to be called in
	// a loop that rechecks the condition; a single Wait here assumes no
	// early wakeups — confirm this barrier cannot release lanes
	// prematurely across consecutive Sync calls.
	l.state.cond.Wait()
}
|
||||
|
||||
// Store sends 'value' to all lanes.
|
||||
//
|
||||
// Store can be called concurrently.
|
||||
func (l Lane) Store(key, value any) {
|
||||
l.state.userdata.Store(key, value)
|
||||
}
|
||||
|
||||
// Load fetches a named value, returning nil if it does not exist.
|
||||
//
|
||||
// Load can be called concurrently.
|
||||
func (l Lane) Load(key any) any {
|
||||
v, ok := l.state.userdata.Load(key)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
// Range returns a lane's data range for the given length.
|
||||
func (l Lane) Range(length int) (lo, hi uint) {
|
||||
size := uint(length) / uint(l.state.total)
|
||||
rem := uint(length) % uint(l.state.total)
|
||||
|
||||
if uint(l.Index) < rem {
|
||||
lo = uint(l.Index) * (size + 1)
|
||||
hi = lo + size + 1
|
||||
} else {
|
||||
lo = uint(l.Index)*size + rem
|
||||
hi = lo + size
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
|
@ -1,24 +0,0 @@
|
|||
package testx
|
||||
|
||||
import "testing"
|
||||
|
||||
func Expect(t *testing.T, cond bool, message ...any) {
|
||||
t.Helper()
|
||||
|
||||
if !cond {
|
||||
if len(message) == 0 {
|
||||
message = append(message, "expectation failed")
|
||||
}
|
||||
|
||||
str := message[0].(string)
|
||||
t.Fatalf(str, message[1:]...)
|
||||
}
|
||||
}
|
||||
|
||||
// ShouldPanic fails the test unless calling f panics.
//
// The deferred recover swallows the expected panic; when f returns normally,
// control reaches t.Fatal and the test fails.
func ShouldPanic(t *testing.T, f func()) {
	t.Helper()

	defer func() {
		recover()
	}()

	f()
	t.Fatal("expected panic")
}
|
||||
49
utils.go
Normal file
49
utils.go
Normal file
|
|
@ -0,0 +1,49 @@
|
|||
package xx
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// New heap-allocates a copy of expr and returns a pointer to it.
func New[T any](expr T) *T {
	// expr is already a copy of the caller's value; taking its address makes
	// the compiler move it to the heap — equivalent to new(T) plus assignment.
	return &expr
}
|
||||
|
||||
// Bitcast performs a bit conversion between two types of the same size.
|
||||
//
|
||||
// Bitcast panics if the sizes of the types differ.
|
||||
func Bitcast[TOut any, TIn any](value TIn) TOut {
|
||||
if SizeOf[TOut]() != SizeOf[TIn]() {
|
||||
panic("bitcast: sizes of types must match")
|
||||
}
|
||||
return *((*TOut)(unsafe.Pointer(&value)))
|
||||
}
|
||||
|
||||
// Copy copies SizeOf[TDst]() bytes from src into dst (a shallow, byte-wise copy).
// Returns dst.
//
// Copy panics if TSrc is smaller than TDst.
func Copy[TDst any, TSrc any](dst *TDst, src *TSrc) *TDst {
	// src must be at least as large as dst so that reading SizeOf[TDst] bytes
	// from it stays in bounds.
	if SizeOf[TSrc]() < SizeOf[TDst]() {
		panic("copy: size of src must be >= dst")
	}
	MemCopy(unsafe.Pointer(dst), unsafe.Pointer(src), SizeOf[TDst]())
	return dst
}
|
||||
|
||||
// MemCopy copies size number of bytes from src into dst.
|
||||
// Returns dst.
|
||||
func MemCopy(dst, src unsafe.Pointer, size uintptr) unsafe.Pointer {
|
||||
copy(unsafe.Slice((*byte)(dst), size), unsafe.Slice((*byte)(src), size))
|
||||
return dst
|
||||
}
|
||||
|
||||
// SizeOf returns the size in bytes of the given type.
//
// Not to be confused with [unsafe.Sizeof], which operates on an expression
// rather than a type parameter.
func SizeOf[T any]() uintptr {
	return unsafe.Sizeof(*new(T))
}
|
||||
62
utils_test.go
Normal file
62
utils_test.go
Normal file
|
|
@ -0,0 +1,62 @@
|
|||
package xx_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"unsafe"
|
||||
|
||||
"git.brut.systems/judah/xx"
|
||||
)
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
a := xx.New(uint32(1024))
|
||||
if *a != 1024 {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if unsafe.Sizeof(*a) != xx.SizeOf[uint32]() {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
b := xx.New(struct{ x, y, z float32 }{10, 20, 30})
|
||||
if b.x != 10 {
|
||||
t.Fail()
|
||||
}
|
||||
if b.y != 20 {
|
||||
t.Fail()
|
||||
}
|
||||
if b.z != 30 {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
c := xx.New(b)
|
||||
if c == &b {
|
||||
t.Fail()
|
||||
}
|
||||
if (*c).x != 10 {
|
||||
t.Fail()
|
||||
}
|
||||
if (*c).y != 20 {
|
||||
t.Fail()
|
||||
}
|
||||
if (*c).z != 30 {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func TestBitcast(t *testing.T) {
|
||||
a := uint32(0xFFFF_FFFF)
|
||||
b := xx.Bitcast[float32](a)
|
||||
c := xx.Bitcast[uint32](b)
|
||||
if a != c {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
d := xx.Bitcast[int8](uint8(0xFF))
|
||||
if d != -1 {
|
||||
t.Fail()
|
||||
}
|
||||
e := xx.Bitcast[uint8](d)
|
||||
if e != 255 {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
60
xx.go
60
xx.go
|
|
@ -1,60 +0,0 @@
|
|||
package xx
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"strings"
|
||||
"unsafe"
|
||||
|
||||
"git.brut.systems/judah/xx/mem"
|
||||
)
|
||||
|
||||
// Copy copies mem.Sizeof[TDst]() bytes from src into dst (a shallow, byte-wise copy).
// Returns dst.
//
// Copy panics if TSrc is smaller than TDst.
func Copy[TDst any, TSrc any](dst *TDst, src *TSrc) *TDst {
	// src must be at least as large as dst so that reading Sizeof[TDst] bytes
	// from it stays in bounds.
	if mem.Sizeof[TSrc]() < mem.Sizeof[TDst]() {
		panic("copy: size of src must be >= dst")
	}
	mem.Copy(unsafe.Pointer(dst), unsafe.Pointer(src), mem.Sizeof[TDst]())
	return dst
}
|
||||
|
||||
// Clone returns a newly allocated shallow copy of the given value.
|
||||
func Clone[T any](value *T) *T {
|
||||
return Copy(new(T), value)
|
||||
}
|
||||
|
||||
// BoolUint converts a boolean to an integer: true reports 1, false reports 0.
//
// Fix: the previous implementation reinterpreted the bool's underlying byte
// via unsafe.Pointer, which depends on an internal representation the Go spec
// does not guarantee. An explicit branch is portable and the compiler lowers
// it to the same branch-free code.
func BoolUint(b bool) uint {
	if b {
		return 1
	}
	return 0
}
|
||||
|
||||
// CallerLocation returns the source location of the function CallerLocation is called in.
func CallerLocation() (file string, line int) {
	// Caller(2) skips this frame and its immediate caller, yielding the
	// caller's caller. NOTE(review): the doc comment above describes the
	// function CallerLocation is called in, which would be Caller(1) —
	// confirm the intended skip depth against actual call sites.
	_, file, line, _ = runtime.Caller(2)

	// @todo: I'm sure there's a better way to do this
	// Special-case when CallerLocation is called from main
	if strings.Contains(file, "runtime") && strings.Contains(file, "proc.go") {
		_, file, line, _ = runtime.Caller(1)
	}

	return
}
|
||||
|
||||
// HashLocation returns a hash of file and line, most likely returned from [CallerLocation].
//
// The hash is 64-bit FNV-1a: the runes of file are folded in first, followed
// by line. The output is deterministic for a given (file, line) pair.
//
// Fix: constants renamed from SCREAMING_SNAKE_CASE to Go's mixedCaps
// convention; the algorithm and output values are unchanged.
func HashLocation(file string, line int) uint64 {
	const (
		fnv64Prime uint64 = 0x100000001B3
		fnv64Bias  uint64 = 0xCBF29CE484222325
	)

	h := fnv64Bias
	// NOTE(review): ranging a string yields runes, so non-ASCII paths are
	// hashed by code point rather than by byte (canonical FNV-1a hashes
	// bytes). Kept as-is to preserve existing hash values.
	for _, c := range file {
		h = (h ^ uint64(c)) * fnv64Prime
	}

	h = (h ^ uint64(line)) * fnv64Prime
	return h
}
|
||||
Loading…
Reference in a new issue