arena: improvements

Judah Caruso 2025-12-14 14:13:51 -07:00
parent f19a2ad302
commit 150a982f78
3 changed files with 141 additions and 56 deletions

View file

@ -2,8 +2,8 @@ package arena
import (
"errors"
"fmt"
"log"
"math/bits"
"runtime"
"sync"
"unsafe"
@ -12,21 +12,21 @@ import (
)
// Linear is a simple bump allocator with a fixed amount of backing memory.
func Linear(max_size uintptr) Arena {
if max_size <= 0 {
panic("linear: max_size must be greater than zero")
func Linear(capacity_in_bytes uintptr) Arena {
if capacity_in_bytes <= 0 {
panic("linear: capacity_in_bytes must be greater than zero")
}
var (
data = make([]byte, max_size)
data = make([]byte, capacity_in_bytes)
offset uintptr
)
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
switch a {
case ACTION_ALLOC:
aligned := mem.AlignForward(size, align)
if offset+aligned > max_size {
return nil, fmt.Errorf("linear: out of memory - %d bytes requested, %d bytes free", size, max_size-offset)
if offset+aligned > capacity_in_bytes {
return nil, errors.New("linear: out of memory")
}
ptr := &data[offset]
@ -36,8 +36,17 @@ func Linear(max_size uintptr) Arena {
clear(data)
offset = 0
case ACTION_SAVE:
if watermark == nil {
return nil, errors.New("linear: cannot save to nil watermark")
}
*watermark = offset
case ACTION_RESTORE:
if watermark == nil {
return nil, errors.New("linear: cannot restore nil watermark")
}
clear(data[*watermark:offset])
offset = *watermark
default:
panic("linear: unimplemented action - " + a.String())
@ -47,26 +56,39 @@ func Linear(max_size uintptr) Arena {
}
}
// Pool is an Arena that only allocates values of the given type.
// Ring is an Arena that only allocates values of the given type.
// When capacity is exceeded, previous allocations will be reused to accommodate new ones.
//
// Note: Allocating different types from the same Ring is unsafe and may cause memory corruption.
func Pool[T any](base_capacity uintptr) Arena {
if base_capacity <= 0 {
panic("pool: base_capacity must be greater than zero")
func Ring[T any](capacity uintptr) Arena {
if capacity <= 0 {
panic("pool: capacity must be greater than zero")
}
pointers := make([]T, 0, base_capacity)
pointers := make([]T, 0, capacity)
return func(a Action, _, _ uintptr, watermark *uintptr) (unsafe.Pointer, error) {
switch a {
case ACTION_ALLOC:
if len(pointers) == cap(pointers) {
pointers = pointers[:0]
}
pointers = append(pointers, mem.ZeroValue[T]())
return unsafe.Pointer(&pointers[len(pointers)-1]), nil
case ACTION_RESET:
clear(pointers)
pointers = pointers[:0]
case ACTION_SAVE:
if watermark == nil {
return nil, errors.New("pool: cannot save to nil watermark")
}
*watermark = uintptr(len(pointers))
case ACTION_RESTORE:
if watermark == nil {
return nil, errors.New("pool: cannot restore nil watermark")
}
clear(pointers[*watermark:])
pointers = pointers[:*watermark]
default:
@ -78,69 +100,67 @@ func Pool[T any](base_capacity uintptr) Arena {
}
// Chunked is an Arena that groups allocations by size.
func Chunked(chunk_size uintptr) Arena {
if chunk_size <= 0 {
panic("chunked: chunk_size must be greater than zero")
}
// @todo(judah): this can be drastically improved.
func Chunked(max_allocs_per_chunk uintptr) Arena {
type chunk struct {
data []byte
offset uintptr
saved uintptr
}
groups := make(map[uintptr][]chunk)
return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
groups := make([][]chunk, 64)
return func(a Action, size, align uintptr, _ *uintptr) (unsafe.Pointer, error) {
switch a {
case ACTION_ALLOC:
aligned := mem.AlignForward(size, align)
group, ok := groups[aligned]
if !ok {
group = make([]chunk, 0, 16)
group = append(group, chunk{
data: make([]byte, chunk_size),
})
if aligned == 0 {
aligned = 1
}
aligned = 1 << bits.Len(uint(aligned-1))
groups[aligned] = group
idx := bits.TrailingZeros(uint(aligned))
if idx >= len(groups) {
groups = append(groups, make([][]chunk, idx-len(groups)+1)...)
}
group := groups[idx]
if len(group) == 0 {
group = append(group, chunk{
data: make([]byte, aligned*max_allocs_per_chunk),
})
}
c := &group[len(group)-1]
if c.offset+aligned > chunk_size {
if c.offset+aligned > uintptr(len(c.data)) {
group = append(group, chunk{
data: make([]byte, chunk_size),
data: make([]byte, aligned*max_allocs_per_chunk),
})
c = &group[len(group)-1]
groups[aligned] = group
}
ptr := &c.data[c.offset]
c.offset += aligned
groups[idx] = group
return unsafe.Pointer(ptr), nil
case ACTION_RESET:
for _, g := range groups {
for i := range len(g) {
c := &g[i]
c.offset = 0
c.saved = 0
clear(c.data)
g[i].offset = 0
g[i].saved = 0
clear(g[i].data)
}
}
case ACTION_SAVE:
for _, g := range groups {
for i := range len(g) {
c := &g[i]
c.saved = c.offset
g[i].saved = g[i].offset
}
}
case ACTION_RESTORE:
for _, g := range groups {
for i := range len(g) {
c := &g[i]
c.offset = c.saved
g[i].offset = g[i].saved
}
}
default:
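
The rewritten Chunked arena drops the old size-keyed map and instead buckets allocations by size class: the aligned request is rounded up to the next power of two, and the exponent of that power is the index into the groups slice. A minimal sketch of the arithmetic, using only the standard library (sizeClass is a hypothetical helper name, not part of the package):

```go
package main

import (
	"fmt"
	"math/bits"
)

// sizeClass mirrors the bucketing math used by the Chunked arena:
// round the aligned size up to the next power of two, then take the
// exponent (trailing zero bits) as the index into the groups slice.
func sizeClass(aligned uint) (rounded uint, idx int) {
	if aligned == 0 {
		aligned = 1
	}
	rounded = 1 << bits.Len(aligned-1) // 24 -> 32, 64 -> 64
	idx = bits.TrailingZeros(rounded)  // 32 -> 5, 64 -> 6
	return rounded, idx
}

func main() {
	for _, n := range []uint{1, 8, 24, 64, 100} {
		r, i := sizeClass(n)
		fmt.Printf("request %3d -> class %3d (groups[%d])\n", n, r, i)
	}
}
```

Because groups starts as make([][]chunk, 64), every size class representable in a 64-bit word already has a slot, so on 64-bit targets the growth branch in ACTION_ALLOC mostly acts as a safety net.

For orientation, here is a hedged usage sketch of the three closure-based arenas this file defines, driven through the arena.New and arena.Reset helpers that the benchmarks below call (the module import path is a placeholder, and error handling inside New is assumed to match the benchmarks' single-value usage):

```go
package main

import (
	"fmt"

	"example.com/yourmodule/arena" // import path is a placeholder
)

type vec3 struct{ x, y, z float64 }

func main() {
	// Linear: a fixed 1 KiB bump allocator; allocation fails once it is full.
	linear := arena.Linear(1024)
	n := arena.New[int](linear)
	*n = 42

	// Ring: room for 16 ints; once full, the oldest slots are recycled.
	ring := arena.Ring[int](16)
	for i := 0; i < 32; i++ {
		*arena.New[int](ring) = i
	}

	// Chunked: allocations are grouped by size class, 8 per chunk.
	chunked := arena.Chunked(8)
	v := arena.New[vec3](chunked)
	v.z = 1.5

	fmt.Println(*n, v.z)

	// Reset zeroes each arena's memory and makes it reusable.
	arena.Reset(linear)
	arena.Reset(ring)
	arena.Reset(chunked)
}
```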

View file

@ -2,7 +2,6 @@ package arena_test
import (
"errors"
"fmt"
"runtime"
"testing"
"unsafe"
@ -27,7 +26,7 @@ func BenchmarkAlloc_New_Small(b *testing.B) {
}
func BenchmarkAlloc_Closure_Small(b *testing.B) {
alloc := arena.Pool[int](16)
alloc := arena.Ring[int](16)
var last *int
for i := range b.N {
@ -48,12 +47,12 @@ func BenchmarkAlloc_Interface_Small(b *testing.B) {
var last *int
for i := range b.N {
v := New[int](&alloc)
v := New[int](alloc)
*v = i
last = v
if i%1000 == 0 {
Reset(&alloc)
Reset(alloc)
}
}
@ -99,12 +98,12 @@ func BenchmarkAlloc_Interface_Large(b *testing.B) {
var last *large
for i := range b.N {
v := New[large](&alloc)
v := New[large](alloc)
v.e = i
last = v
if i%1000 == 0 {
Reset(&alloc)
Reset(alloc)
}
}
@ -112,7 +111,7 @@ func BenchmarkAlloc_Interface_Large(b *testing.B) {
}
func BenchmarkAlloc_Closure_HotPath(b *testing.B) {
alloc := arena.Chunked(1 * mem.Kilobyte)
alloc := arena.Linear(256)
var (
lastlarge *large
@ -133,7 +132,7 @@ func BenchmarkAlloc_Closure_HotPath(b *testing.B) {
}
func BenchmarkAlloc_Interface_HotPath(b *testing.B) {
alloc := NewLinear(8 * mem.Kilobyte)
alloc := NewLinear(256)
var (
lastlarge *large
@ -141,12 +140,54 @@ func BenchmarkAlloc_Interface_HotPath(b *testing.B) {
)
for i := range b.N {
if i%2 == 0 {
lastsmall = New[int](&alloc)
lastsmall = New[int](alloc)
} else {
lastlarge = New[large](&alloc)
lastlarge = New[large](alloc)
}
Reset(&alloc)
Reset(alloc)
}
runtime.KeepAlive(lastlarge)
runtime.KeepAlive(lastsmall)
}
func BenchmarkAlloc_Closure_Wrapped(b *testing.B) {
alloc := arena.Pinned(arena.Pinned(arena.Pinned(arena.Linear(256))))
var (
lastlarge *large
lastsmall *int
)
for i := range b.N {
if i%2 == 0 {
lastsmall = arena.New[int](alloc)
} else {
lastlarge = arena.New[large](alloc)
}
arena.Reset(alloc)
}
runtime.KeepAlive(lastlarge)
runtime.KeepAlive(lastsmall)
}
func BenchmarkAlloc_Interface_Wrapped(b *testing.B) {
alloc := NewPinned(NewPinned(NewPinned(NewLinear(256))))
var (
lastlarge *large
lastsmall *int
)
for i := range b.N {
if i%2 == 0 {
lastsmall = New[int](alloc)
} else {
lastlarge = New[large](alloc)
}
Reset(alloc)
}
runtime.KeepAlive(lastlarge)
@ -178,8 +219,8 @@ type Linear struct {
offset uintptr
}
func NewLinear(maxsize uintptr) Linear {
return Linear{
func NewLinear(maxsize uintptr) *Linear {
return &Linear{
data: make([]byte, maxsize),
maxsize: maxsize,
}
@ -190,7 +231,7 @@ func (l *Linear) Proc(a arena.Action, size, align uintptr, watermark *uintptr) (
case arena.ACTION_ALLOC:
aligned := mem.AlignForward(size, align)
if l.offset+aligned > l.maxsize {
return nil, errors.New(fmt.Sprintf("Linear: out of memory - %d bytes requested, (%d/%d) bytes available", size, l.maxsize-l.offset, l.maxsize))
return nil, errors.New("linear: out of memory")
}
ptr := &l.data[l.offset]
@ -212,3 +253,27 @@ func (l *Linear) Proc(a arena.Action, size, align uintptr, watermark *uintptr) (
return nil, nil
}
type Pinned struct {
arena Allocator
pinner runtime.Pinner
}
func NewPinned(arena Allocator) *Pinned {
return &Pinned{arena: arena}
}
func (p *Pinned) Proc(a arena.Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
ptr, err := p.arena.Proc(a, size, align, watermark)
if err != nil {
return ptr, err
}
if a == arena.ACTION_RESET {
p.pinner.Unpin()
} else {
p.pinner.Pin(ptr)
}
return ptr, err
}
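
The Pinned helper shows how the interface-style allocators compose: a wrapper forwards every action to an inner Allocator's Proc and adds behavior around it (here, pinning each returned pointer and unpinning on reset). As a further illustration of the same pattern, below is a hypothetical counting wrapper sketched in the style of these test helpers; the Allocator interface shape is assumed from how Pinned uses it, and the import path is a placeholder:

```go
package arena_test

import (
	"unsafe"

	"example.com/yourmodule/arena" // import path is a placeholder
)

// Allocator mirrors the interface the test helpers implement (assumed shape).
type Allocator interface {
	Proc(a arena.Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error)
}

// Counting is a hypothetical wrapper: it forwards every action to an inner
// allocator and tracks how many allocations have been handed out since the
// last reset, without the underlying arena knowing about it.
type Counting struct {
	inner Allocator
	Live  int
}

func NewCounting(inner Allocator) *Counting {
	return &Counting{inner: inner}
}

func (c *Counting) Proc(a arena.Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
	ptr, err := c.inner.Proc(a, size, align, watermark)
	if err != nil {
		return ptr, err
	}
	switch a {
	case arena.ACTION_ALLOC:
		c.Live++
	case arena.ACTION_RESET:
		c.Live = 0
	}
	return ptr, err
}
```

Composed the same way as the Wrapped benchmarks, e.g. NewCounting(NewPinned(NewLinear(256))), it counts allocations between resets purely by observing the action stream.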

View file

@ -27,7 +27,7 @@ func TestArenas_ThatShouldClearAfterReset(t *testing.T) {
arenas := []arena.Arena{
arena.Linear(16),
arena.Chunked(16),
arena.Pool[uint16](2),
arena.Ring[uint16](2),
}
for _, a := range arenas {
@ -45,7 +45,7 @@ func TestArenas_ThatShouldReuseMemoryAfterReset(t *testing.T) {
arenas := []arena.Arena{
arena.Linear(16),
arena.Chunked(16),
arena.Pool[uint16](2),
arena.Ring[uint16](2),
}
for _, a := range arenas {
@ -66,7 +66,7 @@ func TestArenas_WithRegion(t *testing.T) {
arenas := []arena.Arena{
arena.Linear(256),
arena.Chunked(256),
arena.Pool[uint16](16),
arena.Ring[uint16](16),
}
var baseptrs []*uint16