// Doesn't need to be aligned to page size
struct Block {
    pub Arr<u8>: arr
    pub bool: allocated
}

// Always aligned to page size
struct Chunk {
    pub u64: cap
    pub *Block: start
}

// Pointer to the block that immediately follows blk_p within its chunk.
inline fn Chunk.next(*Block: blk_p) -> [*Block] {
    blk_p @ as [blk]
    blk_p cast(u64) blk::arr::size + sizeOf(Block) + cast(*Block)
}

fn Chunk.print(Chunk: chunk) {
    "[" print
    chunk::start cast(u64) chunk::cap + cast(u64) as [end]
    0 chunk::start cast(u64)
    while dup end < do {
        cast(*Block) as [n blk_p]
        blk_p @ as [blk]
        " | " print
        //blk::arr::data cast(u64) print " " print
        blk::arr::size print " " print
        blk::allocated print
        " | " print
        n 1 + blk_p Chunk.next cast(u64)
    } drop drop
    "] " print
    chunk::cap println
}

fn Heap.debug_summary() {
    chunks_p @ as [chunks]
    "Heap:" println
    0 while dup n_chunks_p @ < do {
        as [i]
        " * " print
        i chunks Arr.get Chunk.print
        i 1 +
    } drop
}

inline fn PAGE_SIZE() -> [u64] { 4096 }
inline fn bytes_to_pages(u64) -> [u64] { PAGE_SIZE / 1 + }
// Round n (plus room for a Block header) up to a whole number of pages.
inline fn align(u64: n) -> [u64] { n sizeOf(Block) + bytes_to_pages PAGE_SIZE * }

// A chunk is a sequence of Block headers, each followed by its data:
// [(N ptr bool)[ .. data .. ](N ptr bool)[ .. data .. ] ... ]
//       |-------^                 |-------^
// Scan the chunk for a free block that can hold n bytes, splitting it
// when the remainder is large enough to hold another Block.
fn Chunk.try_alloc(u64: n *Chunk: chunk_p) -> [Option<Arr<u8>>] {
    chunk_p @ as [chunk]
    chunk::start cast(u64) chunk::cap + cast(u64) cast(*Block) as [end]
    Option.None::<Arr<u8>> chunk::start as [mut maybe_alloc mut block_p]
    while &maybe_alloc Option.is_none block_p cast(u64) end cast(u64) < land do {
        block_p cast(u64) cast(*u8) as [void_block_p]
        block_p @ as [block]
        block::allocated if {
            Option.None::<Arr<u8>>
            block_p Chunk.next
        } else {
            n sizeOf(Block) + as [required_size]
            block::arr::size required_size < if {
                Option.None::<Arr<u8>>
            } else {
                // check to see if there's space for another block:
                block::arr::size sizeOf(Block) - n >= if {
                    // Update the block.
                    n block::arr::data cast(Arr<u8>) as [arr]
                    arr true cast(Block) block_p !
                    block_p Chunk.next as [next_block_p]
                    block::arr::size required_size -
                    next_block_p cast(u64) sizeOf(Block) + cast(u64) cast(*u8)
                    cast(Arr<u8>) false cast(Block) next_block_p !
                    // create the return result
                    arr Option.Some
                } else {
                    block::arr true cast(Block) block_p !
                    block::arr Option.Some
                }
            }
            block_p Chunk.next
        }
        *block_p !
        *maybe_alloc !
    }
    maybe_alloc
}

// Merge runs of adjacent free blocks into single larger blocks.
fn Chunk.consolidate(*Chunk: chunk_p) {
    chunk_p @ as [chunk]
    chunk::start cast(u64) chunk::cap + as [end]
    chunk::start chunk::start Chunk.next
    while dup cast(u64) end < do {
        as [first_p second_p]
        first_p @ second_p @ as [first second]
        first::allocated lnot second::allocated lnot land if {
            first::arr::size second::arr::size + sizeOf(Block) +
            first::arr::data cast(Arr<u8>) false cast(Block) first_p !
            first_p first_p Chunk.next
        } else {
            second_p second_p Chunk.next
        }
    } drop drop
}

var Chunk[100000]: chunks_p
var u64: n_chunks_p

// Ask the kernel for a new chunk big enough for n bytes (plus a Block
// header) and register it in the global chunk table.
fn mmap_new_chunk(u64: n) -> [*Chunk] {
    n PAGE_SIZE % 0 != if {
        "Size `" print n print "` isn't page-size aligned" println
        1 exit
    }
    0                 // offset
    0                 // fd
    34                // MAP_ANONYMOUS | MAP_PRIVATE
    3                 // PROT_READ | PROT_WRITE
    n sizeOf(Block) + // length
    0 cast(*u8)       // addr
    sys_mmap as [addr]
    addr cast(u64) 0 1 - == if {
        "Error: mmap call failed..." println
        1 exit
    }
    // Initialize the chunk's single, initially free block.
    n sizeOf(Block) -
    addr cast(u64) sizeOf(Block) + cast(*u8)
    cast(Arr<u8>) false cast(Block)
    addr cast(u64) cast(*Block) !
    n addr cast(u64) cast(*Block) cast(Chunk) as [new_chunk]
    n_chunks_p @ chunks_p @ as [n_chunks chunks]
    n_chunks_p @ chunks::size >= if {
        "Global allocator ran out of room for new chunks" println
        " Note: Maxed out on heap memory" println
        1 exit
    }
    new_chunk n_chunks chunks Arr.set
    n_chunks 1 + n_chunks_p !
    // Return a pointer to the newly registered chunk.
    n_chunks chunks Arr.get_ref_mut
}

// Reinterpret an Arr<u8> of raw bytes as an Arr<T>.
fn arr_convert<T>(Arr<u8>: a) -> [Arr<T>] {
    a::size a::data as [bytes ptr]
    bytes sizeOf(T) /
    ptr cast(u64) cast(*T)
    cast(Arr<T>)
}

// Allocate a zero-initialized Arr<T> with `count` elements.
fn malloc<T>(u64: count) -> [Arr<T>] {
    count sizeOf(T) * as [n]
    n sizeOf(Block) + PAGE_SIZE >= if {
        // Large request: give it a dedicated chunk.
        n sizeOf(Block) + align mmap_new_chunk
        n swap Chunk.try_alloc Option.unwrap
    } else {
        // Small request: try the existing chunks first.
        n_chunks_p @ chunks_p @ as [n_chunks chunks]
        Option.None::<Arr<u8>> as [mut maybe_alloc]
        0 while dup n_chunks < &maybe_alloc Option.is_none land do {
            // clear the previous option and capture the index
            as [idx]
            idx chunks Arr.get_ref_mut as [chunk_p]
            n chunk_p Chunk.try_alloc *maybe_alloc !
            idx 1 +
        } drop
        &maybe_alloc Option.is_some if {
            maybe_alloc Option.unwrap
        } else {
            // No existing chunk had room: map a new one.
            n sizeOf(Block) + align mmap_new_chunk as [new_chunk_p]
            n new_chunk_p Chunk.try_alloc Option.unwrap
        }
    } as [arr]
    0u8 arr memset
    arr arr_convert::<T>
}

// Allocate space for a single T and return a pointer to it.
fn malloc_obj<T>() -> [*T] {
    sizeOf(T) malloc::<u8> as [bytes]
    bytes::data cast(u64) cast(*T)
}

// Find the chunk whose address range contains arr's data, if any.
fn find_chunk<T>(Arr<T>: arr) -> [Option<*Chunk>] {
    n_chunks_p @ chunks_p @ as [n_chunks chunks]
    Option.None::<*Chunk> as [mut maybe_blk]
    0 while dup n_chunks < &maybe_blk Option.is_none land do {
        as [n]
        n chunks Arr.get as [chunk]
        arr::data cast(u64) chunk::start cast(u64) >=
        arr::data cast(u64) chunk::start cast(u64) chunk::cap + <=
        land if {
            n chunks Arr.get_ref_mut Option.Some
        } else {
            Option.None::<*Chunk>
        } *maybe_blk !
        n 1 +
    } drop
    maybe_blk
}

// Mark the block backing arr as free and consolidate its chunk.
fn free<T>(Arr<T>: arr) {
    arr find_chunk as [maybe_chunk]
    &maybe_chunk Option.is_some if {
        maybe_chunk Option.unwrap as [chunk_p]
        chunk_p @ as [chunk]
        arr::data cast(u64) sizeOf(Block) - cast(*Block) as [blk_p]
        chunk::start while dup cast(u64) blk_p cast(u64) <= do {
            as [chunk_blk_p]
            chunk_blk_p cast(u64) blk_p cast(u64) == if {
                blk_p @ as [blk]
                blk::arr false cast(Block) blk_p !
                chunk_p Chunk.consolidate
            }
            chunk_blk_p Chunk.next
        } drop
    }
}

// Free the allocation behind a single-object pointer from malloc_obj.
inline fn free_obj<T>(*T: ptr) {
    sizeOf(T) ptr cast(Arr<T>) free
}

// Allocate a new array of new_size elements, copy over the old contents
// (truncating if shrinking), and free the old array.
fn realloc<T>(u64: new_size Arr<T>: arr) -> [Arr<T>] {
    new_size malloc::<T> as [new_arr]
    arr::size new_size < if {
        arr::size
    } else {
        new_size
    }
    arr::data new_arr::data memcpy
    arr free
    new_arr
}
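
// Usage sketch (illustrative only, not part of the allocator): allocate ten
// u64s, store and read back a value, then release the array. `alloc_demo` is
// a hypothetical name; the body only uses malloc/free and the Arr helpers as
// they are used elsewhere in this file, and assumes they behave the same way.
fn alloc_demo() {
    10 malloc::<u64> as [xs]   // zeroed Arr<u64> with 10 elements
    42 0 xs Arr.set            // xs[0] = 42
    0 xs Arr.get println       // prints 42
    xs free                    // return the block to its chunk
}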