use crate::IdAble;
use core::ops::Range;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;

/// Error type returned by the Allocator.
#[derive(Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum AllocError<Id> {
    SizeNotAvailable(Id),
}

/// Allocator holds multiple free ranges of IDs; ranges can be allocated and
/// freed again.
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub(crate) struct Allocator<Id> {
    /// Free regions, keyed by size, mapping to the start of the region.
    available: BTreeMap<Id, Id>,
}

impl<Id: IdAble> Allocator<Id> {
    pub fn new(range: Range<Id>) -> Self {
        let mut available = BTreeMap::new();
        if range.end != range.start {
            available.insert(range.end - range.start, range.start);
        }
        Allocator { available }
    }

    /// Allocates a range of the given size.
    pub fn alloc(&mut self, size: Id) -> Result<Range<Id>, AllocError<Id>> {
        let entry = self.available.range_mut(size..).next();
        match entry {
            None => Err(AllocError::SizeNotAvailable(size)),
            Some((&e_size, &mut e_start)) => {
                let range = e_start..e_start + size;
                // Update the map: remove the old entry and keep the
                // (possibly zero-sized) remainder of the region.
                self.available.remove(&e_size);
                let n_size = e_size - size;
                let n_start = e_start + size;
                self.available.insert(n_size, n_start);
                Ok(range)
            },
        }
    }

    /// Returns a range to the allocator, ready to be allocated again.
    ///
    /// This function might take *longer*, since it tries to keep the internal
    /// btree as small as possible. No verification is done that the range was
    /// originally allocated!
    pub fn free(&mut self, range: Range<Id>) {
        // TODO: implement merging of multiple entries!
        let entry = self.available.iter().find(|(&entry_size, &entry_start)| {
            range.start == entry_start + entry_size // check end of entry
                || range.end == entry_start // check start of entry
        });
        let range_size = range.end - range.start;
        if let Some((&entry_size, &entry_start)) = entry {
            if range.end == entry_start {
                // Prepend to the start of the found entry.
                self.available.remove(&entry_size);
                self.available.insert(entry_size + range_size, range.start);
            } else {
                // Append to the end of the found entry.
                self.available.remove(&entry_size);
                self.available.insert(entry_size + range_size, entry_start);
            }
        } else {
            // Not adjacent to any existing region; insert as a new entry.
            self.available.insert(range_size, range.start);
        }
    }
}

#[allow(dead_code)]
pub(crate) fn count_allocator<Id: IdAble>(alloc: &Allocator<Id>) -> Id {
    let mut result = Id::default();
    for &i in alloc.available.keys() {
        result += i;
    }
    result
}

pub(crate) fn biggest_region_allocator<Id: IdAble>(alloc: &Allocator<Id>) -> Id {
    // The largest free region is the last key in the size-ordered map.
    alloc.available.keys().next_back().copied().unwrap_or_default()
}

// ERROR TYPES //

impl<Id: core::fmt::Debug> core::fmt::Display for AllocError<Id> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            AllocError::SizeNotAvailable(size) => {
                write!(f, "not enough Ids in allocator to allocate {:?} Ids", size)
            },
        }
    }
}

#[cfg(feature = "std")]
impl<Id: core::fmt::Debug> std::error::Error for AllocError<Id> {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        // Generic error; the underlying cause isn't tracked.
        None
    }
}
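// A minimal usage sketch: assuming `Id` is instantiated as `u64` (as in the
// tests below), this walks through an alloc/free round trip in which a freed
// range is merged back into its neighbouring free region. The module name,
// test name and concrete values are illustrative only.
#[cfg(test)]
mod usage_sketch {
    use crate::allocator::*;

    #[test]
    fn alloc_free_roundtrip() {
        let mut alloc = Allocator::new(0u64..100u64);
        // Carve two ranges off the front of the free region.
        let a = alloc.alloc(10).unwrap();
        let b = alloc.alloc(20).unwrap();
        assert_eq!(a, 0u64..10);
        assert_eq!(b, 10u64..30);
        // Returning `b` merges it with the remaining free region,
        // so a larger allocation succeeds afterwards.
        alloc.free(b);
        assert_eq!(alloc.alloc(90), Ok(10u64..100));
    }
}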
#[cfg(test)]
mod tests {
    use crate::allocator::*;

    #[test]
    fn check_errors_empty() {
        let mut alloc = Allocator::new(0u64..10u64);
        assert_eq!(alloc.alloc(3), Ok(0u64..3));
        assert_eq!(alloc.alloc(3), Ok(3u64..6));
        assert_eq!(alloc.alloc(3), Ok(6u64..9));
        assert_eq!(alloc.alloc(1), Ok(9u64..10));
        assert_eq!(alloc.alloc(1), Err(AllocError::SizeNotAvailable(1)));
        assert_eq!(alloc.alloc(1), Err(AllocError::SizeNotAvailable(1)));
    }

    #[test]
    fn check_errors_alloc_too_much() {
        let mut alloc = Allocator::new(0u64..10u64);
        assert_eq!(alloc.alloc(3), Ok(0u64..3));
        assert_eq!(alloc.alloc(3), Ok(3u64..6));
        assert_eq!(alloc.alloc(5), Err(AllocError::SizeNotAvailable(5)));
        assert_eq!(alloc.alloc(3), Ok(6u64..9));
    }

    #[test]
    fn check_empty_free() {
        let mut alloc = Allocator::new(0u64..10u64);
        assert_eq!(alloc.alloc(10), Ok(0u64..10));
        assert_eq!(alloc.alloc(5), Err(AllocError::SizeNotAvailable(5)));
        alloc.free(0..10);
        assert_eq!(alloc.alloc(3), Ok(0u64..3));
    }

    #[test]
    fn check_2_merge_end() {
        let mut alloc = Allocator::new(0u64..10u64);
        alloc.free(10..15);
        assert_eq!(alloc.available.len(), 1);
        assert_eq!(alloc.available.get(&15), Some(&0u64));
        assert_eq!(alloc.alloc(1), Ok(0u64..1));
        assert_eq!(alloc.alloc(12), Ok(1u64..13));
    }

    #[test]
    fn check_2_merge_start() {
        let mut alloc = Allocator::new(5u64..15u64);
        alloc.free(0..5);
        assert_eq!(alloc.available.len(), 1);
        assert_eq!(alloc.available.get(&15), Some(&0u64));
        assert_eq!(alloc.alloc(1), Ok(0u64..1));
        assert_eq!(alloc.alloc(12), Ok(1u64..13));
    }

    #[test]
    fn check_2_entries_smallalloc() {
        let mut alloc = Allocator::new(0u64..10u64);
        alloc.free(20..25);
        assert_eq!(alloc.available.len(), 2);
        assert_eq!(alloc.alloc(3), Ok(20u64..23));
        assert_eq!(alloc.alloc(3), Ok(0u64..3));
    }

    #[test]
    fn check_2_entries_bigalloc() {
        let mut alloc = Allocator::new(0u64..10u64);
        alloc.free(20..25);
        assert_eq!(alloc.available.len(), 2);
        assert_eq!(alloc.alloc(6), Ok(0u64..6));
        assert_eq!(alloc.alloc(3), Ok(6u64..9));
    }

    #[test]
    fn check_errors_empty_merge_end() {
        let mut alloc = Allocator::new(0u64..0u64);
        alloc.free(6..9);
        alloc.free(3..6);
        alloc.free(0..3);
        assert_eq!(alloc.alloc(3), Ok(0u64..3));
    }

    #[test]
    fn count() {
        let mut alloc = Allocator::new(0u64..10u64);
        alloc.free(100..1000);
        alloc.free(1000..10000);
        assert_eq!(count_allocator(&alloc), 9910);
        assert!(alloc.alloc(1).is_ok());
        assert_eq!(count_allocator(&alloc), 9909);
        assert!(alloc.alloc(9000).is_ok());
        assert_eq!(count_allocator(&alloc), 909);
        assert!(alloc.alloc(900).is_ok());
        assert_eq!(count_allocator(&alloc), 9);
        assert!(alloc.alloc(9).is_ok());
        assert_eq!(count_allocator(&alloc), 0);
    }

    #[test]
    fn biggest() {
        let mut alloc = Allocator::new(0u64..10u64);
        alloc.free(100..1000);
        assert_eq!(biggest_region_allocator(&alloc), 900);
        alloc.free(1000..10000);
        assert_eq!(biggest_region_allocator(&alloc), 9900);
        assert!(alloc.alloc(1).is_ok());
        assert_eq!(biggest_region_allocator(&alloc), 9900);
        assert!(alloc.alloc(9000).is_ok());
        assert_eq!(biggest_region_allocator(&alloc), 900);
        alloc.free(20000..20100);
        assert_eq!(biggest_region_allocator(&alloc), 900);
        assert!(alloc.alloc(900).is_ok());
        assert_eq!(biggest_region_allocator(&alloc), 100);
        //println!("{:?}", &alloc.available);
        assert!(alloc.alloc(100).is_ok());
        assert_eq!(biggest_region_allocator(&alloc), 9);
        assert!(alloc.alloc(9).is_ok());
        assert_eq!(biggest_region_allocator(&alloc), 0);
    }

    #[test]
    fn biggest_empty() {
        let alloc = Allocator::new(0u64..0u64);
        assert_eq!(biggest_region_allocator(&alloc), 0);
    }
    #[test]
    fn empty() {
        let alloc = Allocator::new(0u64..0u64);
        assert_eq!(alloc.available.len(), 0);
    }

    // Ignored until merging of multiple entries is implemented
    // (see the TODO in `free`).
    #[test]
    #[ignore]
    fn check_merge_3() {
        let mut alloc = Allocator::new(0u64..10u64);
        alloc.free(20..30);
        assert_eq!(alloc.available.len(), 2);
        alloc.free(10..20);
        assert_eq!(alloc.available.len(), 1);
    }
}