#![allow(non_snake_case)]

//! Test the numerical operations of estimator implementations.
//!
//! [`covariance`], [`information`], [`information_root`], [`ud`] and [`unscented`]
//! linearised estimator implementations are tested.
//! [`sir`] sampled estimator is tested using the same model as for the linearised estimators.
//!
//! Tests are performed with Dyn matrices and matrices with fixed dimensions.

use std::f64;

use nalgebra::allocator::Allocator;
use nalgebra::storage::Storage;
use nalgebra::{Const, DefaultAllocator, Dim, Matrix3, OVector, vector};
use nalgebra::{DVector, Matrix, OMatrix, RealField, Vector2};

use bayes_estimate::estimators::information_root::InformationRootState;
use bayes_estimate::estimators::sir::{SampleState, SampleStateDraw};
use bayes_estimate::estimators::ud::UDState;
use bayes_estimate::estimators::unscented_duplex::UnscentedDuplexState;
use bayes_estimate::models::{InformationState, KalmanState};

use fat_estimators::*;

mod fat_estimators;

#[test]
fn simple_covariance() {
    simple::test_estimator(&mut zero_covariance(simple::D));
    simple::test_estimator(&mut zero_covariance(simple::DYN));
}

#[test]
fn rtheta_covariance() {
    rtheta::test_estimator(&mut zero_covariance(rtheta::D), rtheta::EXPECT_X_LINEAR, rtheta::EXPECT_XX_LINEAR);
    rtheta::test_estimator(&mut zero_covariance(rtheta::DYN), rtheta::EXPECT_X_LINEAR, rtheta::EXPECT_XX_LINEAR);
}

#[test]
fn simple_information() {
    simple::test_estimator(&mut zero_information(simple::D));
    simple::test_estimator(&mut zero_information(simple::DYN));
}

#[test]
fn rtheta_information() {
    rtheta::test_estimator(&mut zero_information(rtheta::D), rtheta::EXPECT_X_LINEAR, rtheta::EXPECT_XX_LINEAR);
    rtheta::test_estimator(&mut zero_information(rtheta::DYN), rtheta::EXPECT_X_LINEAR, rtheta::EXPECT_XX_LINEAR);
}

#[test]
fn simple_information_root() {
    simple::test_estimator(&mut zero_information_root(simple::D));
    simple::test_estimator(&mut zero_information_root(simple::DYN));
}

#[test]
fn rtheta_information_root() {
    rtheta::test_estimator(&mut zero_information_root(rtheta::D), rtheta::EXPECT_X_LINEAR, rtheta::EXPECT_XX_LINEAR);
    rtheta::test_estimator(&mut zero_information_root(rtheta::DYN), rtheta::EXPECT_X_LINEAR, rtheta::EXPECT_XX_LINEAR);
}

#[test]
fn simple_ud() {
    simple::test_estimator(&mut FatUDState {
        ud: zero_ud(simple::D),
        obs_uncorrelated: true,
    });
    simple::test_estimator(&mut FatUDState {
        ud: zero_ud(simple::D),
        obs_uncorrelated: false,
    });
    simple::test_estimator(&mut FatUDState {
        ud: zero_ud(simple::DYN),
        obs_uncorrelated: true,
    });
    simple::test_estimator(&mut FatUDState {
        ud: zero_ud(simple::DYN),
        obs_uncorrelated: false,
    });
}

#[test]
fn rtheta_ud() {
    let expect_x = vector![0.043498, 0.092671, 0.121334];
    // UD is restricted to uncorrelated observation noise when the observation model is non-linear
    rtheta::test_estimator(&mut FatUDState {
        ud: zero_ud(rtheta::D),
        obs_uncorrelated: true,
    }, expect_x, rtheta::EXPECT_XX_LINEAR);
    rtheta::test_estimator(&mut FatUDState {
        ud: zero_ud(rtheta::DYN),
        obs_uncorrelated: true,
    }, expect_x, rtheta::EXPECT_XX_LINEAR);
}

#[test]
fn simple_unscented() {
    simple::test_estimator(&mut zero_unscented(simple::D));
    simple::test_estimator(&mut zero_unscented(simple::DYN));
}

#[test]
fn rtheta_unscented() {
    let expect_x = vector!(0.047163, 0.092084, 0.124233);
    let expect_X = Matrix3::new(
        0.014372, 0.000142, 0.009505,
        0., 0.000357, 0.000439,
        0., 0., 0.016912,
    );
    rtheta::test_estimator(&mut zero_unscented(rtheta::D), expect_x, expect_X);
    rtheta::test_estimator(&mut zero_unscented(rtheta::DYN), expect_x, expect_X);
}
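// The SIR tests below seed a `StdRng` so the Monte Carlo draws, and therefore the
// sampled estimates, are reproducible across runs. A minimal sketch of that
// determinism assumption (illustrative test only; the name is ours):
#[test]
fn seeded_rng_is_deterministic() {
    use rand::Rng;
    let mut a: rand::rngs::StdRng = rand::SeedableRng::seed_from_u64(1u64);
    let mut b: rand::rngs::StdRng = rand::SeedableRng::seed_from_u64(1u64);
    assert_eq!(a.gen::<f64>(), b.gen::<f64>());
}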
#[test]
fn simple_sir() {
    let rng: rand::rngs::StdRng = rand::SeedableRng::seed_from_u64(1u64);
    {
        // Vector2
        let samples = vec![Vector2::<f64>::zeros(); 10000];
        simple::test_estimator(&mut FatSampleState {
            sample: SampleStateDraw {
                state: SampleState::equal_likelihood_samples(samples.clone()),
                rng: &mut rng.clone(),
            },
            systematic_resampler: false,
            kalman_roughening: false,
        });
        simple::test_estimator(&mut FatSampleState {
            sample: SampleStateDraw {
                state: SampleState::equal_likelihood_samples(samples.clone()),
                rng: &mut rng.clone(),
            },
            systematic_resampler: true,
            kalman_roughening: false,
        });
    }
    {
        // DVector
        let samples = vec![DVector::zeros(2); 10000];
        simple::test_estimator(&mut FatSampleState {
            sample: SampleStateDraw {
                state: SampleState::equal_likelihood_samples(samples.clone()),
                rng: &mut rng.clone(),
            },
            systematic_resampler: false,
            kalman_roughening: false,
        });
        simple::test_estimator(&mut FatSampleState {
            sample: SampleStateDraw {
                state: SampleState::equal_likelihood_samples(samples.clone()),
                rng: &mut rng.clone(),
            },
            systematic_resampler: true,
            kalman_roughening: false,
        });
    }
    {
        // DVector with kalman_roughening
        let samples = vec![DVector::zeros(2); 10000];
        simple::test_estimator(&mut FatSampleState {
            sample: SampleStateDraw {
                state: SampleState::equal_likelihood_samples(samples.clone()),
                rng: &mut rng.clone(),
            },
            systematic_resampler: false,
            kalman_roughening: true,
        });
        simple::test_estimator(&mut FatSampleState {
            sample: SampleStateDraw {
                state: SampleState::equal_likelihood_samples(samples.clone()),
                rng: &mut rng.clone(),
            },
            systematic_resampler: true,
            kalman_roughening: true,
        });
    }
}

#[test]
fn rtheta_sir() {
    let expect_x = vector!(0.050184, 0.092121, 0.299925);
    let expect_X = Matrix3::new(
        0.014390, 0.000138, 0.005794,
        0., 0.000356, 0.027531,
        0., 0., 0.035878,
    );
    let rng: rand::rngs::StdRng = rand::SeedableRng::seed_from_u64(1u64);

    // DVector with kalman_roughening
    let samples = vec![DVector::zeros(rtheta::D.value()); 10000];
    let mut state = FatSampleState {
        sample: SampleStateDraw {
            state: SampleState::equal_likelihood_samples(samples.clone()),
            rng: &mut rng.clone(),
        },
        systematic_resampler: false,
        kalman_roughening: true,
    };
    rtheta::test_estimator(&mut state, expect_x, expect_X);
}

fn zero_covariance<D: Dim>(d: D) -> KalmanState<f64, D>
where
    DefaultAllocator: Allocator<f64, D> + Allocator<f64, D, D>,
{
    KalmanState {
        x: OVector::zeros_generic(d, Const::<1>),
        X: OMatrix::zeros_generic(d, d),
    }
}

fn zero_information<D: Dim>(d: D) -> InformationState<f64, D>
where
    DefaultAllocator: Allocator<f64, D> + Allocator<f64, D, D>,
{
    InformationState {
        i: OVector::zeros_generic(d, Const::<1>),
        I: OMatrix::zeros_generic(d, d),
    }
}

fn zero_information_root<D: Dim>(d: D) -> InformationRootState<f64, D>
where
    DefaultAllocator: Allocator<f64, D> + Allocator<f64, D, D>,
{
    InformationRootState {
        r: OVector::zeros_generic(d, Const::<1>),
        R: OMatrix::zeros_generic(d, d),
    }
}

fn zero_ud<D: Dim>(d: D) -> UDState<f64, D>
where
    DefaultAllocator: Allocator<f64, D, D> + Allocator<f64, D>,
{
    UDState::new(OMatrix::<f64, D, D>::zeros_generic(d, d), OVector::zeros_generic(d, Const::<1>))
}

fn zero_unscented<D: Dim>(d: D) -> UnscentedDuplexState<f64, D>
where
    DefaultAllocator: Allocator<f64, D> + Allocator<f64, D, D>,
{
    UnscentedDuplexState {
        kalman: zero_covariance(d),
        kappa: (3 - d.value()) as f64,
    }
}

/// Create a Dyn or Static copy.
fn new_copy<N: RealField + Copy, R: Dim, C: Dim, RR: Dim, CC: Dim, S: Storage<N, RR, CC>>(
    r: R,
    c: C,
    m: Matrix<N, RR, CC, S>,
) -> OMatrix<N, R, C>
where
    DefaultAllocator: Allocator<N, R, C>,
{
    OMatrix::<N, R, C>::from_iterator_generic(r, c, m.view_range(0..r.value(), 0..c.value()).iter().cloned())
}
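// `new_copy` above copies only the top-left `r` x `c` view of its argument, which is
// how the rtheta tests below run both a 2-state and a 3-state variant from the same
// 3-dimensional model matrices. A minimal sketch of that truncation (illustrative
// test only; the name is ours):
#[test]
fn new_copy_truncates_to_requested_dimension() {
    let m = Matrix3::new(
        1., 2., 3.,
        4., 5., 6.,
        7., 8., 9.,
    );
    let c = new_copy(Const::<2>, Const::<2>, m);
    assert_eq!(c, nalgebra::Matrix2::new(1., 2., 4., 5.));
}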
/// Simple test with linear models.
pub mod simple {
    use nalgebra::{allocator::Allocator, DefaultAllocator, Dyn};
    use nalgebra::{vector, Matrix1, Matrix1x2, Matrix2, Matrix2x1, OVector, Vector1, Vector2};
    use nalgebra::{Const, Dim, U1, U2};

    use bayes_estimate::models::{KalmanEstimator, KalmanState};
    use bayes_estimate::noise::{CorrelatedNoise, CoupledNoise};

    use super::fat_estimators::*;
    use super::new_copy;

    pub const D: Const<2> = Const::<2>;
    pub const DYN: Dyn = Dyn(2);

    const DT: f64 = 0.01;
    // Velocity correlation, giving velocity change time constant
    const V_GAMMA: f64 = 1.;
    // Velocity noise, giving mean squared error bound
    const V_NOISE: f64 = 0.2;

    // Filter's initial state uncertainty
    const I_P_NOISE: f64 = 2.;
    const I_V_NOISE: f64 = 0.1;
    // Noise on observing system state
    const OBS_NOISE: f64 = 0.1;

    /// Simple prediction model.
    fn f<D: Dim>(x: &OVector<f64, D>) -> OVector<f64, D>
    where
        DefaultAllocator: Allocator<f64, D>,
    {
        let f_vv: f64 = (-DT * V_GAMMA).exp();
        // Constant velocity model with exponentially correlated velocity
        let mut xp = (*x).clone();
        xp[0] += DT * x[1];
        xp[1] *= f_vv;
        xp
    }

    /// Simple observation model.
    fn h<D: Dim>(x: &OVector<f64, D>) -> Vector1<f64>
    where
        DefaultAllocator: Allocator<f64, D>,
    {
        Vector1::new(x[0])
    }

    fn h_normalize(_z: &mut OVector<f64, Const<1>>, _z0: &OVector<f64, Const<1>>) {}

    /// Numerically test the estimation operations of a TestEstimator.
    ///
    /// Prediction and observation operations are performed and the expected KalmanState is checked.
    pub fn test_estimator<D: Dim>(est: &mut dyn FatEstimator<D>)
    where
        DefaultAllocator: Allocator<f64, D>
            + Allocator<f64, D, D>
            + Allocator<f64, U1, D>
            + Allocator<f64, D, U1>
            + Allocator<f64, U2, D>
            + Allocator<f64, U1>
    {
        let d = est.dim();

        let f_vv: f64 = (-DT * V_GAMMA).exp();
        let linear_pred_model = new_copy(d, d, Matrix2::new(1., DT, 0., f_vv));
        let additive_noise = CoupledNoise {
            q: Vector1::new(DT * ((1. - f_vv) * V_NOISE).powi(2)),
            G: new_copy(d, Const::<1>, Matrix2x1::new(0.0, 1.0)),
        };
        let linear_obs_model = new_copy(Const::<1>, d, Matrix1x2::new(1.0, 0.0));
        let co_obs_noise = CorrelatedNoise {
            Q: Matrix1::new(OBS_NOISE.powi(2)),
        };
        let z = Vector1::new(1000.);

        let init_state: KalmanState<f64, D> = KalmanState {
            x: new_copy(d, Const::<1>, Vector2::new(1000., 1.5)),
            X: new_copy(
                d,
                d,
                Matrix2::new(I_P_NOISE.powi(2), 0.0, 0.0, I_V_NOISE.powi(2)),
            ),
        };
        est.init(&init_state).unwrap();
        let xx = est.kalman_state().unwrap();
        println!("init={:.6}{:.6}", xx.x, xx.X);
        est.trace_state();

        for _c in 0..2 {
            let predict_x = est.state().unwrap();
            let predict_xp = f(&predict_x);
            est.predict_fn(&predict_xp, f, &linear_pred_model, &additive_noise);
            let pp = KalmanEstimator::kalman_state(est).unwrap();
            println!("pred={:.6}{:.6}", pp.x, pp.X);
            est.trace_state();

            est.observe_any(&z, h, h_normalize, &linear_obs_model, &co_obs_noise).unwrap();
            let oo = est.kalman_state().unwrap();
            println!("obs={:.6}{:.6}", oo.x, oo.X);
            est.trace_state();
        }

        est.observe_any(&z, h, h_normalize, &linear_obs_model, &co_obs_noise).unwrap();

        let xx = est.kalman_state().unwrap();
        println!("final={:.6}{:.6}", xx.x, xx.X);

        expect_state(
            &KalmanState::<f64, D> { x: xx.x, X: xx.X },
            0.5 * est.allow_error_by(),
        );
    }
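    // `f` above and the `linear_pred_model` built in `test_estimator` describe the
    // same linear system, so applying either to a state vector must agree. A minimal
    // consistency sketch (illustrative test only; the name is ours):
    #[test]
    fn prediction_model_matches_linearisation() {
        let x = Vector2::new(1000., 1.5);
        let f_vv: f64 = (-DT * V_GAMMA).exp();
        let fx = Matrix2::new(1., DT, 0., f_vv) * x;
        approx::assert_relative_eq!(f(&x), fx);
    }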
    /// Test the KalmanState is as expected.
    fn expect_state<D: Dim>(state: &KalmanState<f64, D>, allow_by: f64)
    where
        DefaultAllocator: Allocator<f64, D> + Allocator<f64, D, D>,
    {
        let expect_x = vector!(1000.004971, 1.470200);
        approx::assert_relative_eq!(state.x[0], expect_x[0], max_relative = 0.00000001 * allow_by);
        approx::assert_relative_eq!(state.x[1], expect_x[1], max_relative = 0.01 * allow_by);

        approx::assert_abs_diff_eq!(state.X[(0, 0)], 0.003331, epsilon = 0.000001 * allow_by);
        approx::assert_abs_diff_eq!(state.X[(0, 1)], 0.000032, epsilon = 0.000001 * allow_by);
        approx::assert_abs_diff_eq!(state.X[(1, 1)], 0.009607, epsilon = 0.000003 * allow_by);
    }
}

/// A non-linear test with range angle observation model.
pub mod rtheta {
    use std::f64::consts::PI;

    use nalgebra::{allocator::Allocator, Const, DefaultAllocator, Dim, Dyn, U1, U2, U3, vector};
    use nalgebra::{matrix, OMatrix, OVector, Vector2};

    use bayes_estimate::models::{KalmanEstimator, KalmanState};
    use bayes_estimate::noise::{CorrelatedNoise, CoupledNoise};

    use super::fat_estimators::*;
    use super::new_copy;

    pub const D: Const<2> = Const::<2>;
    // Test with additional state
    pub const DYN: Dyn = Dyn(3);

    const RANGE_NOISE: f64 = 1.;
    const ANGLE_NOISE: f64 = 0.1 * PI / 180.;
    const Z_CORRELATION: f64 = 0.; // (Un)correlated observation noise

    // Prediction model
    const X_NOISE: f64 = 0.05;
    const Y_NOISE: f64 = 0.09;
    const XY_NOISE_COUPLING: f64 = 0.05;
    const G_COUPLING: f64 = 1.0; // Coupling in additional G terms

    const INIT_X_NOISE: f64 = 0.07;
    const INIT_Y_NOISE: f64 = 0.10;
    const INIT_XY_NOISE_CORRELATION: f64 = 0.; // 0.4;
    const INIT_2_NOISE: f64 = 0.09; // Use zero for singular X
    const INIT_Y2_NOISE_CORRELATION: f64 = 0.5;

    // XY position of target - this is chosen so the observation angle is discontinuous at -pi/+pi
    const TARGET: [f64; 2] = [-11., 0.];

    /// Coupled prediction model.
    fn f<D: Dim>(x: &OVector<f64, D>) -> OVector<f64, D>
    where
        DefaultAllocator: Allocator<f64, D>,
    {
        let xp = x.clone();
        // xp[0] = 0.9 * x[0] + 0.1 * x[1];
        xp
    }

    /// Range, angle observation model.
    fn h<D: Dim>(x: &OVector<f64, D>) -> Vector2<f64>
    where
        DefaultAllocator: Allocator<f64, D>,
    {
        let dx = TARGET[0] - x[0];
        let dy = TARGET[1] - x[1];
        Vector2::new((dx * dx + dy * dy).sqrt(), dy.atan2(dx))
    }

    fn normalize_angle(a: &mut f64, a0: f64) {
        let mut d = (*a - a0) % (2. * PI);
        if d >= PI {
            d -= 2. * PI
        } else if d < -PI {
            d += 2. * PI
        }
        *a = a0 + d;
    }

    fn h_normalize(z: &mut OVector<f64, U2>, z0: &OVector<f64, U2>) {
        normalize_angle(&mut z[1], z0[1]);
    }

    fn H<D: Dim>(x: &OVector<f64, D>) -> OMatrix<f64, U2, D>
    where
        DefaultAllocator: Allocator<f64, D> + Allocator<f64, U2, D>,
    {
        let dx = TARGET[0] - x[0];
        let dy = TARGET[1] - x[1];
        let mut hx = OMatrix::zeros_generic(Const::<2>, x.shape_generic().0);
        let dist_sq = dx * dx + dy * dy;
        let dist = dist_sq.sqrt();
        hx[(0, 0)] = -dx / dist;
        hx[(0, 1)] = -dy / dist;
        hx[(1, 0)] = dy / dist_sq;
        hx[(1, 1)] = -dx / dist_sq;
        hx
    }
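    // `H` above is the analytic Jacobian of the range-angle model `h`. A finite
    // difference sketch of that relationship (illustrative test only; the name,
    // step and tolerance are ours, chosen ad hoc):
    #[test]
    fn observe_jacobian_matches_finite_difference() {
        let x = vector![1., 0.1];
        let hx = H(&x);
        let step = 1e-7;
        for j in 0..2 {
            let mut xp = x;
            xp[j] += step;
            let dh = (h(&xp) - h(&x)) / step;
            approx::assert_abs_diff_eq!(dh[0], hx[(0, j)], epsilon = 1e-5);
            approx::assert_abs_diff_eq!(dh[1], hx[(1, j)], epsilon = 1e-5);
        }
    }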
    /// Numerically test the estimation operations of a TestEstimator.
    ///
    /// Prediction and observation operations are performed and the expected KalmanState is checked.
    pub fn test_estimator<D: Dim>(
        est: &mut dyn FatEstimator<D>,
        expect_x: OVector<f64, U3>,
        expect_X: OMatrix<f64, U3, U3>,
    ) where
        DefaultAllocator: Allocator<f64, D>
            + Allocator<f64, D, D>
            + Allocator<f64, U1, D>
            + Allocator<f64, D, U1>
            + Allocator<f64, U2, D>
            + Allocator<f64, D, U2>
            + Allocator<f64, U2, U2>
            + Allocator<f64, U2>
            + Allocator<f64, U3, U3>
    {
        let d = est.dim();
        let F = new_copy(d, d,
            matrix![
                0.9, 0.1, 0.0;
                0.0, 1.0, 0.0;
                0.0, 0.0, 1.0
            ],
        );
        let additive_noise = CoupledNoise {
            q: Vector2::new(X_NOISE.powi(2), Y_NOISE.powi(2)),
            G: new_copy(d, Const::<2>,
                matrix![
                    1.0, XY_NOISE_COUPLING;
                    XY_NOISE_COUPLING, 1.0;
                    G_COUPLING, G_COUPLING
                ],
            ),
        };
        let co_obs_noise = CorrelatedNoise {
            Q: matrix![
                RANGE_NOISE.powi(2), RANGE_NOISE * ANGLE_NOISE * Z_CORRELATION;
                RANGE_NOISE * ANGLE_NOISE * Z_CORRELATION, ANGLE_NOISE.powi(2)
            ],
        };

        // True position
        const TRUTH: Vector2<f64> = Vector2::new(1., 0.1);

        const INIT_XY_CORRELATION: f64 = INIT_X_NOISE * INIT_Y_NOISE * INIT_XY_NOISE_CORRELATION;
        const INIT_Y2_CORRELATION: f64 = INIT_Y_NOISE * INIT_2_NOISE * INIT_Y2_NOISE_CORRELATION;
        let init_state: KalmanState<f64, D> = KalmanState {
            // Choose initial y so that the estimated position should pass through 0 and thus the observed angle be discontinuous
            x: new_copy(d, Const::<1>, vector![0., -0.05, 0.0]),
            X: new_copy(d, d,
                matrix![
                    INIT_X_NOISE.powi(2), INIT_XY_CORRELATION, 0.;
                    INIT_XY_CORRELATION, INIT_Y_NOISE.powi(2), INIT_Y2_CORRELATION;
                    0., INIT_Y2_CORRELATION, INIT_2_NOISE.powi(2)
                ],
            ),
        };
        est.init(&init_state).unwrap();
        let xx = est.kalman_state().unwrap();
        println!("init={:.6}{:.6}", xx.x, xx.X);
        est.trace_state();

        // 4 predict, observe cycles
        for _c in 0..4 {
            let predict_x = est.state().unwrap();
            let predict_xp = f(&predict_x);
            est.predict_fn(&predict_xp, f, &F, &additive_noise);
            let pp = KalmanEstimator::kalman_state(est).unwrap();

            let z = h(&TRUTH);
            let H = H(&pp.x);
            est.observe_any(&z, h, h_normalize, &H, &co_obs_noise).unwrap();
        }

        let xx = est.kalman_state().unwrap();
        println!("final={:.6}{:.6}", xx.x, xx.X);

        expect_state(
            &KalmanState::<f64, D> { x: xx.x, X: xx.X },
            expect_x,
            expect_X,
            est.allow_error_by(),
        );
    }

    pub const EXPECT_X_LINEAR: OVector<f64, U3> = vector![0.043426, 0.092055, 0.121143];
    pub const EXPECT_XX_LINEAR: OMatrix<f64, U3, U3> = matrix![
        0.009389, 0.000102, 0.008025;
        0., 0.000356, 0.000427;
        0., 0., 0.016931
    ];

    /// Test the KalmanState is as expected.
    fn expect_state<D: Dim>(
        state: &KalmanState<f64, D>,
        expect_x: OVector<f64, U3>,
        expect_X: OMatrix<f64, U3, U3>,
        allow_by: f64,
    ) where
        DefaultAllocator: Allocator<f64, D> + Allocator<f64, D, D>,
    {
        approx::assert_relative_eq!(state.x[0], expect_x[0], max_relative = 0.00001 * allow_by);
        approx::assert_relative_eq!(state.x[1], expect_x[1], max_relative = 0.00001 * allow_by);

        approx::assert_abs_diff_eq!(state.X[(0, 0)], expect_X[(0, 0)], epsilon = 0.000001 * allow_by);
        approx::assert_abs_diff_eq!(state.X[(0, 1)], expect_X[(0, 1)], epsilon = 0.000001 * allow_by);
        approx::assert_abs_diff_eq!(state.X[(1, 1)], expect_X[(1, 1)], epsilon = 0.000001 * allow_by);

        if state.x.nrows() > 2 {
            approx::assert_relative_eq!(state.x[2], expect_x[2], max_relative = 0.00001 * allow_by);
            approx::assert_abs_diff_eq!(state.X[(0, 2)], expect_X[(0, 2)], epsilon = 0.000001 * allow_by);
            approx::assert_abs_diff_eq!(state.X[(1, 2)], expect_X[(1, 2)], epsilon = 0.000003 * allow_by);
            approx::assert_abs_diff_eq!(state.X[(2, 2)], expect_X[(2, 2)], epsilon = 0.000003 * allow_by);
        }
    }
}
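// `zero_unscented` above applies the common unscented heuristic kappa = 3 - n for an
// n-dimensional state. A minimal sketch of that choice (illustrative test only; the
// name is ours):
#[test]
fn unscented_kappa_heuristic() {
    assert_eq!(zero_unscented(simple::D).kappa, 1.0); // n = 2 -> kappa = 1
    assert_eq!(zero_unscented(rtheta::DYN).kappa, 0.0); // n = 3 -> kappa = 0
}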