arena: tests + improvements

parent 843730e11b
commit e2cec86c39

6 changed files with 346 additions and 156 deletions
@@ -1,78 +0,0 @@
-package alloc
-
-import (
-	"unsafe"
-
-	"git.brut.systems/judah/xx/mem"
-)
-
-// New returns a pointer to an Allocator allocated value of type T.
-//
-// Note: If allocation fails, New will panic.
-func New[T any](alloc Allocator) *T {
-	ptr, err := alloc(ActionAlloc, mem.SizeOf[T](), mem.AlignOf[T](), nil)
-	if err != nil {
-		panic(err)
-	}
-
-	return (*T)(ptr)
-}
-
-// Reset restores an Allocator to its initial state.
-//
-// Note: Use of memory allocated by an Allocator after calling Reset is unsafe.
-func Reset(alloc Allocator) {
-	if _, err := alloc(ActionReset, 0, 0, nil); err != nil {
-		panic(err)
-	}
-}
-
-// Save returns the current state of an Allocator.
-//
-// Note: The value returned is internal to the particular Allocator Save was called on.
-// The value should not be modified.
-func Save(alloc Allocator) (watermark uintptr) {
-	if _, err := alloc(ActionSave, 0, 0, &watermark); err != nil {
-		panic(err)
-	}
-
-	return
-}
-
-// Restore restores an Allocator to a previously saved state.
-func Restore(alloc Allocator, watermark uintptr) {
-	if _, err := alloc(ActionRestore, 0, 0, &watermark); err != nil {
-		panic(err)
-	}
-}
-
-// Allocator represents a memory allocator.
-type Allocator func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error)
-
-// Action is a list of distinct events an Allocator may respond to.
-type Action int
-
-const (
-	ActionAlloc Action = iota
-	ActionReset
-	ActionSave
-	ActionRestore
-	ActionReport
-)
-
-func (a Action) String() string {
-	switch a {
-	case ActionAlloc:
-		return "Alloc"
-	case ActionReset:
-		return "Reset"
-	case ActionSave:
-		return "Save"
-	case ActionRestore:
-		return "Restore"
-	case ActionReport:
-		return "Report"
-	default:
-		panic("unreachable")
-	}
-}
arena/arena.go (new file, +87 lines)

@@ -0,0 +1,87 @@
+package arena
+
+import (
+	"unsafe"
+
+	"git.brut.systems/judah/xx/mem"
+)
+
+// New returns a pointer to an Arena allocated value of type T.
+// If allocation fails, New will panic.
+//
+// Note: Accessing the returned value after calling Reset is unsafe and may result in a fault.
+func New[T any](arena Arena) *T {
+	ptr, err := arena(ACTION_ALLOC, mem.Sizeof[T](), mem.Alignof[T](), nil)
+	if err != nil {
+		panic(err)
+	}
+
+	return (*T)(ptr)
+}
+
+// MakeSlice creates an Arena allocated []T with the given length and capacity.
+// If allocation fails, MakeSlice will panic.
+//
+// Note: Accessing the returned slice after calling Reset is unsafe and may result in a fault.
+func MakeSlice[T any](arena Arena, len, cap int) []T {
+	ptr, err := arena(ACTION_ALLOC, mem.Sizeof[T]()*uintptr(cap), mem.Alignof[T](), nil)
+	if err != nil {
+		panic(err)
+	}
+
+	return unsafe.Slice((*T)(ptr), cap)[:len]
+}
+
+// Reset restores an Arena to its initial state.
+//
+// Note: Accessing memory returned by an Arena after calling Reset is unsafe and may result in a fault.
+func Reset(arena Arena) {
+	if _, err := arena(ACTION_RESET, 0, 0, nil); err != nil {
+		panic(err)
+	}
+}
+
+// Save returns the restorable state of an Arena.
+// The returned value is internal to the particular Arena and should not be modified.
+func Save(arena Arena) (watermark uintptr) {
+	if _, err := arena(ACTION_SAVE, 0, 0, &watermark); err != nil {
+		panic(err)
+	}
+
+	return
+}
+
+// Restore restores an Arena to a previously saved state.
+func Restore(arena Arena, watermark uintptr) {
+	if _, err := arena(ACTION_RESTORE, 0, 0, &watermark); err != nil {
+		panic(err)
+	}
+}
+
+// Arena represents a memory allocator.
+type Arena func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error)
+
+// Action is a list of distinct events an Arena may respond to.
+type Action int
+
+const (
+	ACTION_ALLOC Action = iota
+	ACTION_RESET
+	ACTION_SAVE
+	ACTION_RESTORE
+)
+
+func (a Action) String() string {
+	switch a {
+	case ACTION_ALLOC:
+		return "ALLOC"
+	case ACTION_RESET:
+		return "RESET"
+	case ACTION_SAVE:
+		return "SAVE"
+	case ACTION_RESTORE:
+		return "RESTORE"
+	default:
+		panic("unreachable")
+	}
+}
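For orientation, a minimal usage sketch of the new API as it appears in this diff (not part of the commit; it assumes the Linear arena and mem.Kilobyte shown elsewhere in this change, and that the requests fit the backing buffer):

package main

import (
	"fmt"

	"git.brut.systems/judah/xx/arena"
	"git.brut.systems/judah/xx/mem"
)

func main() {
	// Back everything with a 64 KiB bump allocator.
	a := arena.Linear(64 * mem.Kilobyte)
	defer arena.Reset(a)

	// Single values and slices come out of the same arena.
	n := arena.New[int](a)
	*n = 42
	buf := arena.MakeSlice[byte](a, 0, 256)

	// Save/Restore bracket a batch of temporary allocations.
	mark := arena.Save(a)
	tmp := arena.New[[128]byte](a)
	_ = tmp
	arena.Restore(a, mark) // tmp's memory may be handed out again after this point

	fmt.Println(*n, cap(buf)) // 42 256
}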
arena/arena_test.go (new file, +30 lines)

@@ -0,0 +1,30 @@
+package arena_test
+
+import (
+	"testing"
+
+	"git.brut.systems/judah/xx/arena"
+	"git.brut.systems/judah/xx/mem"
+	"git.brut.systems/judah/xx/testx"
+)
+
+func TestMakeSlice(t *testing.T) {
+	a := arena.Linear(1024 * mem.Kilobyte)
+	defer arena.Reset(a)
+
+	s := arena.MakeSlice[int](a, 99, 100)
+	testx.Expect(t, len(s) == 99, "len = %d, expected 99", len(s))
+	testx.Expect(t, cap(s) == 100, "cap = %d, expected 100", cap(s))
+
+	p := &s[0]
+
+	s[2] = 0xCAFE_DECAF
+	s = append(s, 2)
+
+	testx.Expect(t, p == &s[0], "p = %p, expected %p", p, &s[0])
+
+	p = &s[0]
+	s = append(s, 3) // cause a reallocation
+
+	testx.Expect(t, p != &s[0], "p = %p, expected %p", p, &s[0])
+}
@@ -1,4 +1,4 @@
-package alloc
+package arena

 import (
 	"errors"
@@ -12,8 +12,8 @@ import (
 )

 // Linear is a simple bump allocator with a fixed amount of backing memory.
-func Linear(max_size uintptr) Allocator {
-	if max_size == 0 {
+func Linear(max_size uintptr) Arena {
+	if max_size <= 0 {
 		panic("linear: max_size must be greater than zero")
 	}

@@ -23,7 +23,7 @@ func Linear(max_size uintptr) Allocator {
 	)
 	return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
 		switch a {
-		case ActionAlloc:
+		case ACTION_ALLOC:
 			aligned := mem.AlignForward(size, align)
 			if offset+aligned > max_size {
 				return nil, fmt.Errorf("linear: out of memory - %d bytes requested, %d bytes free", size, max_size-offset)
@@ -33,13 +33,13 @@ func Linear(max_size uintptr) Allocator {
 			offset += aligned
 			return unsafe.Pointer(ptr), nil

-		case ActionReset:
+		case ACTION_RESET:
 			clear(data)
 			offset = 0

-		case ActionSave:
+		case ACTION_SAVE:
 			*watermark = offset
-		case ActionRestore:
+		case ACTION_RESTORE:
 			offset = *watermark

 		default:
@@ -50,29 +50,29 @@ func Linear(max_size uintptr) Allocator {
 	}
 }

-// Pool is an Allocator that only allocates values of a single type.
+// Pool is an Arena that only allocates values of the given type.
 //
 // Note: Allocating different types from the same Pool is unsafe and may cause memory corruption.
-func Pool[T any](base_capacity uintptr) Allocator {
-	if base_capacity == 0 {
+func Pool[T any](base_capacity uintptr) Arena {
+	if base_capacity <= 0 {
 		panic("pool: base_capacity must be greater than zero")
 	}

 	pointers := make([]T, 0, base_capacity)
 	return func(a Action, _, _ uintptr, watermark *uintptr) (unsafe.Pointer, error) {
 		switch a {
-		case ActionAlloc:
+		case ACTION_ALLOC:
 			pointers = append(pointers, mem.ZeroValue[T]())
 			return unsafe.Pointer(&pointers[len(pointers)-1]), nil

-		case ActionReset:
+		case ACTION_RESET:
 			clear(pointers)
 			pointers = pointers[:0]

-		case ActionSave:
+		case ACTION_SAVE:
 			*watermark = uintptr(len(pointers))

-		case ActionRestore:
+		case ACTION_RESTORE:
 			clear(pointers[*watermark:])
 			pointers = pointers[:*watermark]

@@ -83,28 +83,28 @@ func Pool[T any](base_capacity uintptr) Allocator {
 	}
 }

-// Chunked is an Allocator that groups allocations by size.
-func Chunked(chunk_size uintptr) Allocator {
-	if chunk_size == 0 {
+// Chunked is an Arena that groups allocations by size.
+func Chunked(chunk_size uintptr) Arena {
+	if chunk_size <= 0 {
 		panic("chunked: chunk_size must be greater than zero")
 	}

 	type chunk struct {
 		data   []byte
 		offset uintptr
 		saved  uintptr
 	}

 	groups := make(map[uintptr][]chunk)
 	return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
 		switch a {
-		case ActionAlloc:
+		case ACTION_ALLOC:
 			aligned := mem.AlignForward(size, align)
 			group, ok := groups[aligned]
 			if !ok {
 				group = make([]chunk, 0, 16)
 				group = append(group, chunk{
 					data:   make([]byte, chunk_size),
 					offset: 0,
 				})

 				groups[aligned] = group
@@ -114,7 +114,6 @@ func Chunked(chunk_size uintptr) Allocator {
 			if c.offset+aligned > chunk_size {
 				group = append(group, chunk{
 					data: make([]byte, chunk_size),
-					offset: 0,
 				})

 				c = &group[len(group)-1]
@@ -126,17 +125,31 @@ func Chunked(chunk_size uintptr) Allocator {

 			return unsafe.Pointer(ptr), nil

-		case ActionReset:
+		case ACTION_RESET:
 			for _, g := range groups {
 				for i := range len(g) {
 					c := &g[i]
 					c.offset = 0
+					c.saved = 0
 					clear(c.data)
 				}
 			}

-		case ActionSave:
-		case ActionRestore:
+		case ACTION_SAVE:
+			for _, g := range groups {
+				for i := range len(g) {
+					c := &g[i]
+					c.saved = c.offset
+				}
+			}
+
+		case ACTION_RESTORE:
+			for _, g := range groups {
+				for i := range len(g) {
+					c := &g[i]
+					c.offset = c.saved
+				}
+			}

 		default:
 			panic("unimplemented action: " + a.String())
@@ -146,40 +159,56 @@ func Chunked(chunk_size uintptr) Allocator {
 	}
 }

-// Nil is an Allocator that always returns an error.
+// Nil is an Arena that always returns an error.
 //
 // Note: This is useful for tracking usage locations
-func Nil() Allocator {
+func Nil() Arena {
 	return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
 		return nil, errors.New("use of nil allocator")
 	}
 }

-// Temporary wraps an Allocator, restoring it to its previous state when Reset is called.
-func Temporary(alloc Allocator) Allocator {
-	watermark := Save(alloc)
+// Region wraps an Arena, restoring it to its previous state when Reset is called.
+func Region(arena Arena) Arena {
+	watermark := Save(arena)
 	return func(a Action, size, align uintptr, wm *uintptr) (unsafe.Pointer, error) {
-		if a == ActionReset {
-			Restore(alloc, watermark)
+		if a == ACTION_RESET {
+			Restore(arena, watermark)
 			return nil, nil
 		}

-		return alloc(a, size, align, wm)
+		return arena(a, size, align, wm)
 	}
 }

-// Split wraps two [[Allocator]]s, dispatching actions based on the size of the allocation.
-func Split(split_size uintptr, smaller, larger Allocator) Allocator {
+// Split wraps two [[Arena]]s, dispatching allocations to a particular one based on the requested size.
+func Split(split_size uintptr, smaller, larger Arena) Arena {
 	return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
 		switch a {
+		case ACTION_ALLOC:
 			if size <= split_size {
 				return smaller(a, size, align, watermark)
 			}
 			return larger(a, size, align, watermark)

+		case ACTION_RESET:
 			Reset(smaller)
 			Reset(larger)

+		case ACTION_SAVE:
 			panic("split: saving is not supported")
+		case ACTION_RESTORE:
 			panic("split: restoring is not supported")

 		default:
 		}

 		return nil, nil
 	}
 }

-// Logger wraps an Allocator, logging its usage locations.
-func Logger(alloc Allocator) Allocator {
+// Logger wraps an Arena, logging its usage locations.
+func Logger(arena Arena) Arena {
 	return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
 		// We expect allocators to be used via the high-level API, so we grab the caller location relative to that.
 		// @todo(judah): can we determine this dynamically?
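A rough sketch of how the Region and Split wrappers compose in practice (illustrative only, not part of the commit; the sizes and the TestWrapperSketch name are arbitrary, and Region is deliberately not stacked on Split here because Split panics on ACTION_SAVE):

package arena_test

import (
	"testing"

	"git.brut.systems/judah/xx/arena"
	"git.brut.systems/judah/xx/mem"
)

func TestWrapperSketch(t *testing.T) {
	// Split routes each allocation by size: requests of up to 256 bytes go to
	// the first arena, larger ones to the second.
	split := arena.Split(256,
		arena.Linear(64*mem.Kilobyte),
		arena.Chunked(4*mem.Kilobyte),
	)
	_ = arena.New[[64]byte](split)  // served by the Linear arena
	_ = arena.New[[512]byte](split) // served by the Chunked arena
	arena.Reset(split)              // resets both underlying arenas

	// Region scopes a batch of allocations on a longer-lived arena; Reset rolls
	// the base arena back to the saved watermark instead of clearing it.
	base := arena.Linear(64 * mem.Kilobyte)
	keep := arena.New[int](base)
	*keep = 1

	scratch := arena.Region(base)
	_ = arena.New[[128]byte](scratch)
	arena.Reset(scratch) // base is back to the state right after `keep` was allocated

	if *keep != 1 {
		t.Fatalf("keep = %d, expected 1", *keep)
	}
}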
@@ -190,33 +219,33 @@ func Logger(alloc Allocator) Allocator {
 		}

 		log.Printf("%s:%d - %s (size: %d, align: %d, watermark: %p)", file, line, a, size, align, watermark)
-		return alloc(a, size, align, watermark)
+		return arena(a, size, align, watermark)
 	}
 }

-// Concurrent wraps an Allocator, ensuring it is safe for concurrent use.
-func Concurrent(alloc Allocator) Allocator {
+// Concurrent wraps an Arena, ensuring it is safe for concurrent use.
+func Concurrent(arena Arena) Arena {
 	mtx := new(sync.Mutex)
 	return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
 		mtx.Lock()
-		ptr, err := alloc(a, size, align, watermark)
+		ptr, err := arena(a, size, align, watermark)
 		mtx.Unlock()
 		return ptr, err
 	}
 }

-// Pinned wraps an Allocator, ensuring the memory returned is stable until Reset is called.
+// Pinned wraps an Arena, ensuring the memory returned is stable until Reset is called.
 //
 // The memory returned by Pinned is safe to pass over cgo boundaries.
-func Pinned(alloc Allocator) Allocator {
+func Pinned(arena Arena) Arena {
 	var pinner runtime.Pinner
 	return func(a Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
-		ptr, err := alloc(a, size, align, watermark)
+		ptr, err := arena(a, size, align, watermark)
 		if err != nil {
 			return ptr, err
 		}

-		if a == ActionReset {
+		if a == ACTION_RESET {
 			pinner.Unpin()
 		} else {
 			pinner.Pin(ptr)
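All of these wrappers share the same shape: a closure that intercepts some Actions and forwards the rest to the wrapped Arena. A hedged sketch of a user-defined wrapper in that style (the arenautil package, Counting, and its count parameter are hypothetical names, not part of this commit):

package arenautil

import (
	"unsafe"

	"git.brut.systems/judah/xx/arena"
)

// Counting wraps an Arena, incrementing *count for every ACTION_ALLOC it sees
// before forwarding the action to the wrapped arena unchanged.
func Counting(a arena.Arena, count *int) arena.Arena {
	return func(action arena.Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
		if action == arena.ACTION_ALLOC {
			*count++
		}
		return a(action, size, align, watermark)
	}
}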
@@ -1,4 +1,4 @@
-package alloc_test
+package arena_test

 import (
 	"errors"
@@ -7,7 +7,7 @@ import (
 	"testing"
 	"unsafe"

-	"git.brut.systems/judah/xx/alloc"
+	"git.brut.systems/judah/xx/arena"
 	"git.brut.systems/judah/xx/mem"
 )

@@ -27,16 +27,16 @@ func BenchmarkAlloc_New_Small(b *testing.B) {
 }

 func BenchmarkAlloc_Closure_Small(b *testing.B) {
-	allocator := alloc.Pool[int](16)
+	alloc := arena.Pool[int](16)

 	var last *int
 	for i := range b.N {
-		v := alloc.New[int](allocator)
+		v := arena.New[int](alloc)
 		*v = i
 		last = v

 		if i%1000 == 0 {
-			alloc.Reset(allocator)
+			arena.Reset(alloc)
 		}
 	}

@@ -44,16 +44,16 @@ func BenchmarkAlloc_Closure_Small(b *testing.B) {
 }

 func BenchmarkAlloc_Interface_Small(b *testing.B) {
-	allocator := NewLinear(16 * mem.Kilobyte)
+	alloc := NewLinear(16 * mem.Kilobyte)

 	var last *int
 	for i := range b.N {
-		v := New[int](&allocator)
+		v := New[int](&alloc)
 		*v = i
 		last = v

 		if i%1000 == 0 {
-			Reset(&allocator)
+			Reset(&alloc)
 		}
 	}

@@ -78,16 +78,16 @@ func BenchmarkAlloc_New_Large(b *testing.B) {
 }

 func BenchmarkAlloc_Closure_Large(b *testing.B) {
-	allocator := alloc.Linear(128 * mem.Kilobyte)
+	alloc := arena.Linear(128 * mem.Kilobyte)

 	var last *large
 	for i := range b.N {
-		v := alloc.New[large](allocator)
+		v := arena.New[large](alloc)
 		v.e = i
 		last = v

 		if i%1000 == 0 {
-			alloc.Reset(allocator)
+			arena.Reset(alloc)
 		}
 	}

@@ -95,16 +95,16 @@ func BenchmarkAlloc_Closure_Large(b *testing.B) {
 }

 func BenchmarkAlloc_Interface_Large(b *testing.B) {
-	allocator := NewLinear(128 * mem.Kilobyte)
+	alloc := NewLinear(128 * mem.Kilobyte)

 	var last *large
 	for i := range b.N {
-		v := New[large](&allocator)
+		v := New[large](&alloc)
 		v.e = i
 		last = v

 		if i%1000 == 0 {
-			Reset(&allocator)
+			Reset(&alloc)
 		}
 	}

@@ -112,7 +112,7 @@ func BenchmarkAlloc_Interface_Large(b *testing.B) {
 }

 func BenchmarkAlloc_Closure_HotPath(b *testing.B) {
-	allocator := alloc.Chunked(1 * mem.Kilobyte)
+	alloc := arena.Chunked(1 * mem.Kilobyte)

 	var (
 		lastlarge *large
@@ -120,12 +120,12 @@ func BenchmarkAlloc_Closure_HotPath(b *testing.B) {
 	)
 	for i := range b.N {
 		if i%2 == 0 {
-			lastsmall = alloc.New[int](allocator)
+			lastsmall = arena.New[int](alloc)
 		} else {
-			lastlarge = alloc.New[large](allocator)
+			lastlarge = arena.New[large](alloc)
 		}

-		alloc.Reset(allocator)
+		arena.Reset(alloc)
 	}

 	runtime.KeepAlive(lastlarge)
@@ -133,7 +133,7 @@ func BenchmarkAlloc_Closure_HotPath(b *testing.B) {
 }

 func BenchmarkAlloc_Interface_HotPath(b *testing.B) {
-	allocator := NewLinear(8 * mem.Kilobyte)
+	alloc := NewLinear(8 * mem.Kilobyte)

 	var (
 		lastlarge *large
@@ -141,12 +141,12 @@ func BenchmarkAlloc_Interface_HotPath(b *testing.B) {
 	)
 	for i := range b.N {
 		if i%2 == 0 {
-			lastsmall = New[int](&allocator)
+			lastsmall = New[int](&alloc)
 		} else {
-			lastlarge = New[large](&allocator)
+			lastlarge = New[large](&alloc)
 		}

-		Reset(&allocator)
+		Reset(&alloc)
 	}

 	runtime.KeepAlive(lastlarge)
@@ -154,11 +154,11 @@ func BenchmarkAlloc_Interface_HotPath(b *testing.B) {
 }

 type Allocator interface {
-	Proc(a alloc.Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error)
+	Proc(a arena.Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error)
 }

 func New[T any](a Allocator) *T {
-	ptr, err := a.Proc(alloc.ActionAlloc, mem.SizeOf[T](), mem.AlignOf[T](), nil)
+	ptr, err := a.Proc(arena.ACTION_ALLOC, mem.Sizeof[T](), mem.Alignof[T](), nil)
 	if err != nil {
 		panic(err)
 	}
@@ -167,7 +167,7 @@ func New[T any](a Allocator) *T {
 }

 func Reset(a Allocator) {
-	if _, err := a.Proc(alloc.ActionReset, 0, 0, nil); err != nil {
+	if _, err := a.Proc(arena.ACTION_RESET, 0, 0, nil); err != nil {
 		panic(err)
 	}
 }
@@ -185,9 +185,9 @@ func NewLinear(maxsize uintptr) Linear {
 	}
 }

-func (l *Linear) Proc(a alloc.Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
+func (l *Linear) Proc(a arena.Action, size, align uintptr, watermark *uintptr) (unsafe.Pointer, error) {
 	switch a {
-	case alloc.ActionAlloc:
+	case arena.ACTION_ALLOC:
 		aligned := mem.AlignForward(size, align)
 		if l.offset+aligned > l.maxsize {
 			return nil, errors.New(fmt.Sprintf("Linear: out of memory - %d bytes requested, (%d/%d) bytes available", size, l.maxsize-l.offset, l.maxsize))
@@ -197,13 +197,13 @@ func (l *Linear) Proc(a alloc.Action, size, align uintptr, watermark *uintptr) (
 		l.offset += aligned
 		return unsafe.Pointer(ptr), nil

-	case alloc.ActionReset:
+	case arena.ACTION_RESET:
 		clear(l.data)
 		l.offset = 0

-	case alloc.ActionSave:
+	case arena.ACTION_SAVE:
 		*watermark = l.offset
-	case alloc.ActionRestore:
+	case arena.ACTION_RESTORE:
 		l.offset = *watermark

 	default:
arena/arenas_test.go (new file, +122 lines)

@@ -0,0 +1,122 @@
+package arena_test
+
+import (
+	"sync"
+	"testing"
+	"unsafe"
+
+	"git.brut.systems/judah/xx/arena"
+	"git.brut.systems/judah/xx/mem"
+	"git.brut.systems/judah/xx/testx"
+)
+
+func TestArenas_ThatShouldPanicWhenOOM(t *testing.T) {
+	arenas := []arena.Arena{
+		arena.Linear(1),
+		arena.Nil(),
+	}
+
+	for _, a := range arenas {
+		testx.ShouldPanic(t, func() {
+			_ = arena.New[int](a)
+		})
+	}
+}
+
+func TestArenas_ThatShouldClearAfterReset(t *testing.T) {
+	arenas := []arena.Arena{
+		arena.Linear(16),
+		arena.Chunked(16),
+		arena.Pool[uint16](2),
+	}
+
+	for _, a := range arenas {
+		x := arena.New[uint16](a)
+		y := arena.New[uint16](a)
+		*x, *y = 100, 200
+		arena.Reset(a)
+
+		testx.Expect(t, *x == 0, "x = %d, expected 0", *x)
+		testx.Expect(t, *y == 0, "y = %d, expected 0", *y)
+	}
+}
+
+func TestArenas_ThatShouldReuseMemoryAfterReset(t *testing.T) {
+	arenas := []arena.Arena{
+		arena.Linear(16),
+		arena.Chunked(16),
+		arena.Pool[uint16](2),
+	}
+
+	for _, a := range arenas {
+		x1 := arena.New[uint16](a)
+		y1 := arena.New[uint16](a)
+
+		arena.Reset(a)
+
+		x2 := arena.New[uint16](a)
+		y2 := arena.New[uint16](a)
+
+		testx.Expect(t, x1 == x2, "x1 = %p, x2 = %p", x1, x2)
+		testx.Expect(t, y1 == y2, "y1 = %p, y2 = %p", y1, y2)
+	}
+}
+
+func TestArenas_WithRegion(t *testing.T) {
+	arenas := []arena.Arena{
+		arena.Linear(256),
+		arena.Chunked(256),
+		arena.Pool[uint16](16),
+	}
+
+	var baseptrs []*uint16
+	for i, a := range arenas {
+		v := arena.New[uint16](a)
+		*v = uint16(i)
+		baseptrs = append(baseptrs, v)
+	}
+
+	for _, a := range arenas {
+		a := arena.Region(a)
+		for range 10 {
+			_ = arena.New[uint16](a)
+		}
+		arena.Reset(a)
+	}
+
+	for i, a := range arenas {
+		testx.Expect(t, *baseptrs[i] == uint16(i), "baseptrs[%d] = %d, expected %d", i, *baseptrs[i], i)
+
+		base := uintptr(unsafe.Pointer(baseptrs[i]))
+		next := uintptr(unsafe.Pointer(arena.New[uint16](a)))
+		testx.Expect(t, next-base == mem.Sizeof[uint16](), "delta was %d", next-base)
+	}
+}
+
+func TestConcurrent(t *testing.T) {
+	a := arena.Concurrent(arena.Linear(16))
+
+	base, err := a(arena.ACTION_ALLOC, 0, 1, nil)
+	testx.Expect(t, err == nil, "ACTION_ALLOC failed: %v", err)
+
+	var wg sync.WaitGroup
+	wg.Go(func() {
+		_ = arena.New[uint8](a)
+		_ = arena.New[uint8](a)
+		_ = arena.New[uint8](a)
+		_ = arena.New[uint8](a)
+	})
+
+	wg.Go(func() {
+		_ = arena.New[uint16](a)
+		_ = arena.New[uint16](a)
+		_ = arena.New[uint16](a)
+		_ = arena.New[uint16](a)
+	})
+
+	wg.Wait()
+
+	after, err := a(arena.ACTION_ALLOC, 0, 1, nil)
+	testx.Expect(t, err == nil, "ACTION_ALLOC failed: %v", err)
+	testx.Expect(t, uintptr(after)-uintptr(base) == 12, "diff is: %v", uintptr(after)-uintptr(base))
+}