orderless-search/src/arena.c

internal Arena *
arena_initialize(u64 size, b8 growable)
{
  Arena *result = 0;

  void *backing_buffer = platform_memory_reserve(size);
  if (backing_buffer != NULL)
  {
    platform_memory_commit(backing_buffer, ARENA_INITIAL_COMMIT_SIZE);

    result = (Arena*) backing_buffer;
    result->backing_buffer = backing_buffer;
    result->current = result;

    // rhjr: immutable arena header
    result->base_pos =
      memory_align_power_of_two(sizeof(Arena), ARENA_DEFAULT_ALIGNMENT);
    result->offset = result->base_pos;

    result->growable = growable;
    result->size = size;
  }

  // rhjr: arenas should be instantiated early in the program's lifetime, which
  // is why this assertion is also used in production.
  ASSERT(result != 0);
  return result;
}

internal Arena *
arena_initialize_default(void)
{
  Arena *result = arena_initialize(ARENA_DEFAULT_RESERVE_SIZE, 1);
  return result;
}

internal void *
arena_allocate(Arena *arena, u64 size)
{
  Arena *current = arena->current;

  u64 pos_mem =
    memory_align_power_of_two(current->offset, ARENA_DEFAULT_ALIGNMENT);
  u64 pos_new = pos_mem + size;

  // Grow the arena by chaining a new block when the current one is full.
  if (current->size < pos_new && current->growable)
  {
    Arena *new_memory_block;

    if (size > ARENA_DEFAULT_RESERVE_SIZE)
    {
      // rhjr: TODO add support for allocations larger than a single page, in a
      // single allocation call.
      ASSERT(0);
    }
    else
    {
      new_memory_block = arena_initialize_default();
      if (new_memory_block)
      {
        sll_stack_push(arena->current, new_memory_block, prev);

        current = new_memory_block;
        pos_mem =
          memory_align_power_of_two(current->offset, ARENA_DEFAULT_ALIGNMENT);
        pos_new = pos_mem + size;
      }
    }
  }

  void *memory = (void*)((u64) current->backing_buffer + pos_mem);
  current->offset = pos_new;

  platform_memory_commit(memory, size);
  memset(memory, 0, size);

  return memory;
}

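// Sketch for reference: memory_align_power_of_two is defined elsewhere in the
// repository; a conventional round-up-to-alignment helper of this shape is
// assumed by the code above (the actual definition may differ). `align` must
// be a power of two for the mask trick to hold.
//
//   internal u64
//   memory_align_power_of_two(u64 value, u64 align)
//   {
//     return (value + align - 1) & ~(align - 1);
//   }
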
internal void
arena_release(Arena *arena)
{
  // Walk the chain of blocks from the most recently pushed one and return
  // each reservation to the platform.
  for (Arena *node = arena->current, *prev = 0; node != 0; node = prev)
  {
    prev = node->prev;
    platform_memory_release(node, node->size);
  }
}

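// Usage sketch, assuming the types and platform layer above: shows the
// intended call pattern of the arena API. `arena_usage_example` and the
// element count are hypothetical and only for illustration.
internal void
arena_usage_example(void)
{
  // Reserve a default-sized, growable arena.
  Arena *arena = arena_initialize_default();

  // Bump-allocate from the arena; arena_allocate commits and zeroes the
  // returned memory, so callers get zero-initialized storage.
  u64 *numbers = (u64*) arena_allocate(arena, 128 * sizeof(u64));
  numbers[0] = 42;

  // Release every block in the arena's chain at once; individual
  // allocations are never freed on their own.
  arena_release(arena);
}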