use memo_cache::MemoCache;
use rand_distr::{Distribution, Normal};
use std::{collections::HashMap, thread, time};

fn some_expensive_calculation(_: i32) -> f32 {
    thread::sleep(time::Duration::from_millis(20)); // ...zzzZZzz...
    std::f32::consts::PI
}

fn calculation_wrapper(input: &i32) -> f32 {
    some_expensive_calculation(*input)
}

// NOTE: the concrete error type is not important here; `()` serves as a placeholder.
fn calculation_wrapper_result(input: &i32) -> Result<f32, ()> {
    Ok(some_expensive_calculation(*input))
}

struct Process {
    pub cache1: HashMap<i32, f32>,
    // NOTE: the fixed cache capacity (32 slots) is an illustrative choice.
    pub cache2: MemoCache<i32, f32, 32>,
}

impl Process {
    fn new() -> Self {
        Self {
            cache1: HashMap::new(),
            cache2: MemoCache::new(),
        }
    }

    /// Regular method, taking the calculation penalty, always.
    fn regular(&self, input: i32) -> f32 {
        some_expensive_calculation(input)
    }

    /// Memoized method, using a `HashMap` cache (no retention management).
    fn memoized1(&mut self, input: i32) -> f32 {
        if let Some(value) = self.cache1.get(&input) {
            *value
        } else {
            let result = some_expensive_calculation(input);
            self.cache1.insert(input, result);
            result
        }
    }

    /// Memoized method, using a `MemoCache` cache (using `get` and `insert`).
    fn memoized2a(&mut self, input: i32) -> f32 {
        if let Some(value) = self.cache2.get(&input) {
            *value
        } else {
            let result = some_expensive_calculation(input);
            self.cache2.insert(input, result);
            result
        }
    }

    /// Memoized method, using a `MemoCache` cache (using `get_or_insert_with`).
    fn memoized2b(&mut self, input: i32) -> f32 {
        *self.cache2.get_or_insert_with(&input, calculation_wrapper)
    }

    /// Memoized method, using a `MemoCache` cache (using `get_or_try_insert_with`).
    fn memoized2c(&mut self, input: i32) -> f32 {
        *self
            .cache2
            .get_or_try_insert_with(&input, calculation_wrapper_result)
            .unwrap()
    }
}

fn main() {
    // This test runs three individual test cases:
    //
    // 1. a regular (non-memoized) method,
    // 2. a method memoized using a hash map,
    // 3. a method memoized using a MemoCache cache (three variants).
    //
    // Each of the methods is fed the same series of random input numbers,
    // drawn from a normal distribution, for which it (fake) "calculates" a
    // result value. The memoized methods keep a local cache of result values
    // per input value. The hash map will definitely perform best, but has no
    // retention management -- its memory usage grows with every newly
    // inserted input value. The methods using the MemoCache cache use a
    // fixed-capacity cache and will perform at best as well as the hash map
    // cache version, and in the worst case as badly as the regular
    // (non-memoized) method.

    let mut rng = rand::thread_rng();
    let normal = Normal::new(0.0, 30.0).unwrap();

    // Use the same input data for all tests:
    let inputs = (0..100)
        .map(|_| normal.sample(&mut rng) as i32)
        .collect::<Vec<_>>();

    let mut p = Process::new();

    println!("Running tests..");

    let now = time::Instant::now();
    inputs.iter().fold(0.0, |sum, &i| sum + p.regular(i));
    let d_regular = now.elapsed();

    let now = time::Instant::now();
    inputs.iter().fold(0.0, |sum, &i| sum + p.memoized1(i));
    let d_memoized1 = now.elapsed();

    let now = time::Instant::now();
    inputs.iter().fold(0.0, |sum, &i| sum + p.memoized2a(i));
    let d_memoized2a = now.elapsed();

    p.cache2.clear(); // The next test uses the same cache.

    let now = time::Instant::now();
    inputs.iter().fold(0.0, |sum, &i| sum + p.memoized2b(i));
    let d_memoized2b = now.elapsed();

    p.cache2.clear(); // The next test uses the same cache.

    let now = time::Instant::now();
    inputs.iter().fold(0.0, |sum, &i| sum + p.memoized2c(i));
    let d_memoized2c = now.elapsed();

    println!("Done.\nTiming results:");
    println!("Regular: {} ms", d_regular.as_millis());
    println!("Memoized (hash): {} ms", d_memoized1.as_millis());
    println!("Memoized (MemoCache A): {} ms", d_memoized2a.as_millis());
    println!("Memoized (MemoCache B): {} ms", d_memoized2b.as_millis());
    println!("Memoized (MemoCache C): {} ms", d_memoized2c.as_millis());

    let get_size =
        |capacity| capacity * (std::mem::size_of::<i32>() + std::mem::size_of::<f32>());

    println!("Post-test occupied cache sizes:");
    println!(" Hash: {} bytes", get_size(p.cache1.capacity()));
    println!(" MemoCache: {} bytes", get_size(p.cache2.capacity()));
}