// xx/arena/arenas.go

package arena
import (
"errors"
"fmt"
"log"
"math/bits"
"runtime"
"sync"
"unsafe"
"git.brut.systems/judah/xx/mem"
)
// Fixed is a simple bump allocator that uses the given buffer.
//
// Fixed will NOT resize when it runs out of memory.
func Fixed(data []byte) Arena {
if len(data) == 0 || len(data) != cap(data) {
panic("fixed: length & capacity must be equal and greater than zero")
}
var offset uintptr
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
switch a {
case ACTION_ALLOC:
aligned := mem.AlignForward(size, align)
if offset+aligned > uintptr(cap(data)) {
return nil, errors.New("fixed: out of memory")
}
ptr := &data[offset]
offset += aligned
return unsafe.Pointer(ptr), nil
case ACTION_RESET:
clear(data)
offset = 0
case ACTION_SAVE:
if watermark == nil {
return nil, errors.New("fixed: cannot save to nil watermark")
}
*watermark = offset
case ACTION_RESTORE:
if watermark == nil {
return nil, errors.New("fixed: cannot restore nil watermark")
}
clear(data[*watermark:offset])
offset = *watermark
default:
panic("fixed: unimplemented action - " + a.String())
}
return nil, nil
}
}
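// exampleFixedUsage is an illustrative sketch, not part of the original API.
// It assumes the arena is driven directly through its Action-based signature;
// as noted in Logger below, real callers are expected to use the package's
// higher-level API instead.
func exampleFixedUsage() {
	a := Fixed(make([]byte, 1024))

	// Allocate 8 bytes with 8-byte alignment and store a value through the
	// returned pointer.
	p, err := a(ACTION_ALLOC, 8, 8, nil)
	if err != nil {
		panic(err)
	}
	*(*uint64)(p) = 42

	// Roll a temporary allocation back to a saved watermark, then reset the
	// whole arena (which also zeroes the buffer).
	mark := Save(a)
	_, _ = a(ACTION_ALLOC, 128, 8, nil)
	Restore(a, mark)
	Reset(a)
}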
// Linear is a simple bump allocator with a fixed amount of backing memory.
func Linear(capacity_in_bytes uintptr) Arena {
return Fixed(make([]byte, capacity_in_bytes))
}
// Ring is an Arena that only allocates values of the given type.
// When capacity is exceeded, previous allocations will be reused to accommodate new ones.
//
// Note: Allocating different types from the same Ring is unsafe and may cause memory corruption.
func Ring[T any](capacity uintptr) Arena {
if capacity == 0 {
panic("ring: capacity must be greater than zero")
}
pointers := make([]T, 0, capacity)
return func(a Action, _, _ uintptr, watermark *uintptr) (unsafe.Pointer, error) {
switch a {
case ACTION_ALLOC:
if len(pointers) == cap(pointers) {
pointers = pointers[:0]
}
pointers = append(pointers, mem.ZeroValue[T]())
return unsafe.Pointer(&pointers[len(pointers)-1]), nil
case ACTION_RESET:
clear(pointers)
pointers = pointers[:0]
case ACTION_SAVE:
if watermark == nil {
return nil, errors.New("pool: cannot save to nil watermark")
}
*watermark = uintptr(len(pointers))
case ACTION_RESTORE:
if watermark == nil {
return nil, errors.New("pool: cannot restore nil watermark")
}
clear(pointers[*watermark:])
pointers = pointers[:*watermark]
default:
panic("pool: unimplemented action - " + a.String())
}
return nil, nil
}
}
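// exampleRingUsage is an illustrative sketch, not part of the original API.
// It drives a Ring arena directly through the Action-based signature; Ring
// ignores the size and alignment arguments and always hands out slots of T.
func exampleRingUsage() {
	r := Ring[uint64](4)

	// After the fourth allocation the ring wraps around and starts reusing
	// the earlier slots.
	for i := 0; i < 6; i++ {
		p, err := r(ACTION_ALLOC, unsafe.Sizeof(uint64(0)), unsafe.Alignof(uint64(0)), nil)
		if err != nil {
			panic(err)
		}
		*(*uint64)(p) = uint64(i)
	}
}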
// Chunked is an Arena that groups allocations by size class, rounding each request up to the next power of two.
func Chunked(max_allocs_per_chunk uintptr) Arena {
type chunk struct {
data []byte
offset uintptr
saved uintptr
}
groups := make([][]chunk, 64)
return func(a Action, size, align uintptr, _ *uintptr) (unsafe.Pointer, error) {
switch a {
case ACTION_ALLOC:
aligned := mem.AlignForward(size, align)
if aligned == 0 {
aligned = 1
}
aligned = 1 << bits.Len(uint(aligned-1))
idx := bits.TrailingZeros(uint(aligned))
if idx >= len(groups) {
groups = append(groups, make([][]chunk, idx-len(groups)+1)...)
}
group := groups[idx]
if len(group) == 0 {
group = append(group, chunk{
data: make([]byte, aligned*max_allocs_per_chunk),
})
}
c := &group[len(group)-1]
if c.offset+aligned > uintptr(len(c.data)) {
group = append(group, chunk{
data: make([]byte, aligned*max_allocs_per_chunk),
})
c = &group[len(group)-1]
}
ptr := &c.data[c.offset]
c.offset += aligned
groups[idx] = group
return unsafe.Pointer(ptr), nil
case ACTION_RESET:
for _, g := range groups {
for i := range len(g) {
g[i].offset = 0
g[i].saved = 0
clear(g[i].data)
}
}
case ACTION_SAVE:
for _, g := range groups {
for i := range len(g) {
g[i].saved = g[i].offset
}
}
case ACTION_RESTORE:
for _, g := range groups {
for i := range len(g) {
g[i].offset = g[i].saved
}
}
default:
panic("chunked: unimplemented action - " + a.String())
}
return nil, nil
}
}
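// exampleChunkedUsage is an illustrative sketch, not part of the original
// API: requests of different sizes land in different size-class chunks, and
// a class grows a new chunk once max_allocs_per_chunk slots are used.
func exampleChunkedUsage() {
	a := Chunked(16)

	small, err := a(ACTION_ALLOC, 8, 8, nil) // 8-byte size class
	if err != nil {
		panic(err)
	}
	large, err := a(ACTION_ALLOC, 100, 8, nil) // rounded up to the 128-byte class
	if err != nil {
		panic(err)
	}
	_, _ = small, large

	// Reset zeroes every chunk in every size class and rewinds their offsets.
	Reset(a)
}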
// Paged is a linear arena that allocates pages of virtual memory.
// The memory allocated is only committed to physical memory as it is used,
// so total_reserved_in_bytes is the total amount of addressable memory to reserve.
//
// Note: resetting a Paged arena will cause the currently committed memory to be decommitted (i.e. unmapped from physical memory).
func Paged(page_size, total_reserved_in_bytes uintptr) Arena {
var (
committed uintptr
offset uintptr
)
base, err := mem.Reserve(total_reserved_in_bytes)
if err != nil {
panic(fmt.Sprintf("paged: failed to reserve address space - %s", err))
}
// @todo(judah): is this needed?
// Pass the mapping as the cleanup argument instead of capturing base in the
// closure; otherwise &base stays reachable from the cleanup and it never runs.
runtime.AddCleanup(&base, func(b []byte) {
if err := mem.Release(b); err != nil {
panic(fmt.Sprintf("paged: failed to release memory - %s", err))
}
}, base)
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
switch a {
case ACTION_ALLOC:
aligned := mem.AlignForward(size, align)
if offset+aligned > total_reserved_in_bytes {
return nil, errors.New("paged: out of addressable memory")
}
if offset+aligned > committed {
required := offset + aligned
to_commit := mem.AlignForward(required, page_size)
if err := mem.Commit(base[committed:to_commit], mem.AccessRead|mem.AccessWrite); err != nil {
return nil, fmt.Errorf("paged: failed to commit memory - %w", err)
}
committed = to_commit
}
ptr := &base[offset]
offset += aligned
return unsafe.Pointer(ptr), nil
case ACTION_RESET:
if committed > 0 {
if err := mem.Decommit(base[:mem.AlignForward(committed, page_size)]); err != nil {
return nil, fmt.Errorf("paged: failed to decommit memory - %w", err)
}
}
offset = 0
committed = 0
return nil, nil
// @todo(judah): should save/restore also decommit memory?
case ACTION_SAVE:
if watermark == nil {
return nil, errors.New("paged: cannot save to nil watermark")
}
*watermark = offset
return nil, nil
case ACTION_RESTORE:
if watermark == nil {
return nil, errors.New("paged: cannot restore nil watermark")
}
clear(base[*watermark:offset])
offset = *watermark
return nil, nil
default:
panic("paged: unimplemented action - " + a.String())
}
}
}
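// examplePagedUsage is an illustrative sketch, not part of the original API.
// The 4 KiB page size is an assumption for the example; a real caller would
// query the platform (e.g. os.Getpagesize) instead of hard-coding it.
func examplePagedUsage() {
	const pageSize = 4 << 10
	a := Paged(pageSize, 1<<20) // reserve 1 MiB of address space

	// Physical pages are committed lazily as the offset crosses each page
	// boundary.
	p, err := a(ACTION_ALLOC, 4096, 8, nil)
	if err != nil {
		panic(err)
	}
	_ = p

	// Reset decommits the pages touched so far and rewinds the offset.
	Reset(a)
}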
// Nil is an Arena that always returns an error.
//
// Note: This is useful for tracking down usage locations, since any use surfaces as an error.
func Nil() Arena {
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
return nil, errors.New("use of nil allocator")
}
}
// Region wraps an Arena, restoring it to its previous state when Reset is called.
func Region(arena Arena) Arena {
watermark := Save(arena)
return func(a Action, size, align uintptr, wm *uintptr) (unsafe.Pointer, error) {
if a == ACTION_RESET {
Restore(arena, watermark)
return nil, nil
}
return arena(a, size, align, wm)
}
}
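// exampleRegionUsage is an illustrative sketch, not part of the original API:
// a Region scopes temporary allocations on a parent arena, and resetting the
// Region rolls the parent back to where the Region began instead of wiping it
// entirely.
func exampleRegionUsage() {
	parent := Linear(1 << 16)

	// Long-lived allocation on the parent.
	_, _ = parent(ACTION_ALLOC, 64, 8, nil)

	// Scratch allocations inside the region.
	scratch := Region(parent)
	_, _ = scratch(ACTION_ALLOC, 512, 8, nil)

	// Only the scratch allocations are released; the 64-byte allocation above
	// survives.
	Reset(scratch)
}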
// Split wraps two [Arena]s, dispatching allocations to a particular one based on the requested size.
func Split(split_size uintptr, smaller, larger Arena) Arena {
var watermarks [2]uintptr
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
switch a {
case ACTION_ALLOC:
if size <= split_size {
return smaller(a, size, align, watermark)
}
return larger(a, size, align, watermark)
case ACTION_RESET:
Reset(smaller)
Reset(larger)
case ACTION_SAVE:
watermarks[0] = Save(smaller)
watermarks[1] = Save(larger)
case ACTION_RESTORE:
Restore(smaller, watermarks[0])
Restore(larger, watermarks[1])
default:
panic("split: unimplemented action - " + a.String())
}
return nil, nil
}
}
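// exampleSplitUsage is an illustrative sketch, not part of the original API:
// requests up to 256 bytes go to a small Linear arena, anything bigger goes
// to a Paged arena. The page size and split point are assumptions for the
// example, not recommended values.
func exampleSplitUsage() {
	a := Split(256,
		Linear(64<<10),       // smaller allocations
		Paged(4<<10, 16<<20), // larger allocations
	)

	tiny, err := a(ACTION_ALLOC, 32, 8, nil) // served by the Linear arena
	if err != nil {
		panic(err)
	}
	big, err := a(ACTION_ALLOC, 4096, 8, nil) // served by the Paged arena
	if err != nil {
		panic(err)
	}
	_, _ = tiny, big

	Reset(a) // resets both halves
}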
// Logger wraps an Arena, logging its usage locations.
func Logger(arena Arena) Arena {
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
// We expect allocators to be used via the high-level API, so we grab the caller location relative to that.
// @todo(judah): can we determine this dynamically?
_, file, line, ok := runtime.Caller(2)
if !ok {
file = "<unknown>"
line = 0
}
log.Printf("%s:%d - %s (size: %d, align: %d, watermark: %p)", file, line, a, size, align, watermark)
return arena(a, size, align, watermark)
}
}
// Concurrent wraps an Arena, ensuring it is safe for concurrent use.
func Concurrent(arena Arena) Arena {
mtx := new(sync.Mutex)
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
mtx.Lock()
defer mtx.Unlock()
return arena(a, size, align, watermark)
}
}
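// exampleConcurrentUsage is an illustrative sketch, not part of the original
// API: wrapping an arena with Concurrent serializes every action behind a
// mutex so multiple goroutines can allocate from it safely.
func exampleConcurrentUsage() {
	a := Concurrent(Linear(1 << 20))

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if _, err := a(ACTION_ALLOC, 64, 8, nil); err != nil {
				panic(err)
			}
		}()
	}
	wg.Wait()
}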
// Pinned wraps an Arena, ensuring the memory returned is stable until Reset is called.
//
// The memory returned by Pinned is safe to pass over cgo boundaries.
func Pinned(arena Arena) Arena {
var pinner runtime.Pinner
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
ptr, err := arena(a, size, align, watermark)
if err != nil {
return ptr, err
}
switch a {
case ACTION_RESET:
pinner.Unpin()
case ACTION_ALLOC:
// Only allocations return a pointer to pin; ACTION_SAVE and ACTION_RESTORE
// return nil, and pinning a nil pointer would panic.
if ptr != nil {
pinner.Pin(ptr)
}
}
return ptr, err
}
}
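// examplePinnedUsage is an illustrative sketch, not part of the original API:
// every pointer handed out by the wrapped arena is pinned so it can be passed
// across cgo boundaries, and resetting the arena unpins everything at once.
func examplePinnedUsage() {
	a := Pinned(Linear(4 << 10))

	p, err := a(ACTION_ALLOC, 16, 8, nil)
	if err != nil {
		panic(err)
	}
	_ = p // stable (and safe to hand to C code) until the arena is reset

	// Unpins all previously pinned pointers and rewinds the underlying arena.
	Reset(a)
}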