diff --git a/Cargo.toml b/Cargo.toml
index e41052ddc..c0a6d9abe 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -53,6 +53,8 @@ serde_json = { version = "1.0.60", default-features = false, features = ["alloc"
 smallstr = { version = "0.3.0", default-features = false }
 snafu = { version = "0.7.0", default-features = false }
 unicase = "2.6.0"
+rustfft = "6.1.0"
+assert_approx_eq = "1.1.0"
 
 # std
 image = { version = "0.24.0", features = [
@@ -180,6 +182,10 @@ harness = false
 name = "software_rendering"
 harness = false
 
+[[bench]]
+name = "statistical_pb_chance"
+harness = false
+
 [profile.max-opt]
 inherits = "release"
 lto = true
diff --git a/benches/statistical_pb_chance.rs b/benches/statistical_pb_chance.rs
new file mode 100644
index 000000000..a5a98c965
--- /dev/null
+++ b/benches/statistical_pb_chance.rs
@@ -0,0 +1,47 @@
+use criterion::{criterion_group, criterion_main, Criterion, black_box};
+use livesplit_core::{RealTime, SegmentHistory, Time, TimeSpan, TimingMethod};
+use livesplit_core::analysis::statistical_pb_chance::probability_distribution::ProbabilityDistribution;
+
+
+criterion_main!(benches);
+criterion_group!(benches, compute_probability_8192, add_distributions);
+
+///
+/// Computes a CDF of a probability distribution with 8192 data points
+///
+fn compute_probability_8192(c: &mut Criterion) {
+
+    // initialize the distribution
+    let mut times = SegmentHistory::default();
+
+    times.insert(1, Time::from(RealTime(Some(TimeSpan::from_seconds(1.2)))));
+    times.insert(2, Time::from(RealTime(Some(TimeSpan::from_seconds(6.0)))));
+
+    let dist = ProbabilityDistribution::new(&times, TimingMethod::RealTime,
+                                            10.0, 8192, 0.5);
+
+    c.bench_function("Probability Less than x (8192 points)", move |b| {
+        b.iter(|| dist.probability_below(black_box(5.0)))
+    });
+
+}
+
+///
+/// Benchmarks adding two distributions together
+///
+fn add_distributions(c: &mut Criterion) {
+    // initialize the distribution
+    let mut times = SegmentHistory::default();
+
+    times.insert(1, Time::from(RealTime(Some(TimeSpan::from_seconds(1.2)))));
+    times.insert(2, Time::from(RealTime(Some(TimeSpan::from_seconds(6.0)))));
+
+    let dist = ProbabilityDistribution::new(&times, TimingMethod::RealTime,
+                                            10.0, 8192, 0.5);
+
+    let other = dist.clone();
+
+    c.bench_function("Adding Distributions", |b| {
+        b.iter(|| &black_box(dist.clone()) + &other)
+    });
+}
\ No newline at end of file
diff --git a/src/analysis/mod.rs b/src/analysis/mod.rs
index 8153550da..fc1828dbb 100644
--- a/src/analysis/mod.rs
+++ b/src/analysis/mod.rs
@@ -9,6 +9,7 @@ mod skill_curve;
 pub mod state_helper;
 pub mod sum_of_segments;
 pub mod total_playtime;
+pub mod statistical_pb_chance;
 
 pub use self::skill_curve::SkillCurve;
 pub use self::state_helper::*;
diff --git a/src/analysis/statistical_pb_chance/discontinuous_fourier_transforms.rs b/src/analysis/statistical_pb_chance/discontinuous_fourier_transforms.rs
new file mode 100644
index 000000000..069d06b99
--- /dev/null
+++ b/src/analysis/statistical_pb_chance/discontinuous_fourier_transforms.rs
@@ -0,0 +1,127 @@
+use rustfft::num_complex::Complex;
+
+///
+/// Functions that calculate the Fourier transforms of crucial discontinuous functions (the Dirac delta
+/// and unit step functions)
+///
+
+///
+/// Computes the discrete Fourier transform of a Dirac delta function
+///
+/// # Arguments
+///
+/// * `omega_naught` - The fundamental frequency of the Fourier transform we are computing
+/// * `num_terms` - The number of terms in the Fourier transform to return
+/// * `time` - The point in time at which the delta function is nonzero
+///
+/// # Mathematics
+///
+/// The Fourier transform of a delta function is given by:
+///
+/// ```latex
+/// X[k] = e^{-iωt_{0}}
+/// ```
+///
+/// Where k is the index of the discrete Fourier transform, t0 is the time at which the delta function
+/// is nonzero, ω is the frequency of the k-th term (ω = k * ω0, with ω0 the fundamental frequency) and
+/// i is the complex unit (√(-1))
+///
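+/// As a quick illustrative check (values rounded; they match the expected values noted in the unit
+/// tests for a peak at t0 = 1.2 with ω0 = 2π/10):
+///
+/// ```latex
+/// X[1] = e^{-i·(2π/10)·1.2} = e^{-0.754i} ≈ 0.7290 - 0.6845i
+/// ```
+///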
+pub fn delta_function_dft(omega_naught: f32, num_terms: usize, time: f32) -> Vec<Complex<f32>> {
+
+    // determine the halfway point of the array. This is important because the frequencies in the second
+    // half of the transform are effectively negative frequencies. Since the formula for the Fourier transform
+    // uses the frequency outside of a complex exponential, we need to use the negative
+    // index for the second half of the array.
+    let cutoff = (num_terms as f32 / 2.0).ceil() as usize;
+
+    // initialize the Fourier series
+    let mut fourier_series = vec![Complex::<f32>{re: 0.0, im: 0.0}; num_terms];
+
+    for k in 0..cutoff {
+        let omega = omega_naught * k as f32;
+        // println!("Omega: {}", omega);
+        fourier_series[k] = Complex::<f32>{re: 0.0, im: -omega * time}.exp();
+    }
+
+    for k in cutoff..num_terms {
+        let omega = omega_naught * (k as i64 - num_terms as i64) as f32; // different calculation of omega
+        // println!("Omega: {}", omega);
+        fourier_series[k] = Complex::<f32>{re: 0.0, im: -omega * time}.exp();
+    }
+
+    return fourier_series;
+}
+
+///
+/// Computes the discrete Fourier transform of a unit step function. In particular, this function
+/// returns a step down at the specified point
+///
+/// # Arguments
+///
+/// * `omega_naught` - the fundamental frequency of the Fourier transform we are computing
+/// * `num_terms` - the number of terms in the Fourier transform to return
+/// * `time` - the point in time at which the unit step function drops
+///
+/// # Mathematics
+///
+/// The Fourier transform of a unit step function is given by:
+///
+/// ```latex
+/// x[t] = u[t-t0] <=> X[k] = (𝛅[k] + 1 / (iω)) * e^{-iωt_{0}}
+/// ```
+///
+/// Where k is the index of the discrete Fourier transform, t0 is the point in time at which the
+/// unit step function goes high, ω is the frequency of the k-th term (ω = k * ω0) and i is the complex unit (√(-1))
+///
+/// Unlike the Dirac delta function, when we consider the unit step function, we must note that due to
+/// the cyclic nature of the time domain, our step function must contain both a step up and a step down.
+/// The definition of the unit step function seen above contains only a step up. We must therefore
+/// combine two step functions using the above definition to obtain a true step function.
+///
+/// The unit step function returned by this function is initially 1, then steps down to zero at the point `time`.
+/// The step function must therefore step up between the last element of the array and the first element, i.e.
+/// at the index -0.5. Thus, the Fourier transform returned by this function represents:
+///
+/// ```latex
+/// u[t+0.5] - u[t-t_{0}]
+/// ```
+///
+/// From the linearity of the Fourier transform and the distributive property, we get
+///
+/// ```latex
+/// u[t+0.5] - u[t-t_{0}] <=> X[k] = (𝛅[k] + 1 / (iω)) * e^{-iω(-1/2)} - (𝛅[k] + 1 / (iω)) * e^{-iωt_{0}}
+/// X[k] = (𝛅[k] + 1 / (iω)) * (e^{-iω(-1/2)} - e^{-iωt_{0}})
+/// ```
+///
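+/// One term is worth calling out explicitly (this restates what the implementation below does for k = 0):
+/// since 1/ω is undefined at ω = 0, the DC term is instead taken directly as the integral of the
+/// function, i.e. the width of the region where the step function is 1:
+///
+/// ```latex
+/// X[0] = t_{0} - (-1/2) = t_{0} + 0.5
+/// ```
+///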
+pub fn step_function_dft(omega_naught: f32, num_terms: usize, time: f32) -> Vec<Complex<f32>> {
+    let mut fourier_series = vec![Complex::<f32>{re: 0.0, im: 0.0}; num_terms];
+
+    // determine the halfway point of the array. This is important because the frequencies in the second
+    // half of the transform are effectively negative frequencies. Since the formula for the Fourier transform
+    // uses the frequency outside of a complex exponential (in particular, as 1/ω), we need to use the negative
+    // index for the second half of the array.
+    let cutoff = (num_terms as f32 / 2.0).ceil() as usize;
+
+    // 1/ω is undefined for ω=0, so we need to specify this term manually.
+    // X[0] is equal to the integral of the whole function. In our case, that's a unit step function
+    // that starts at -0.5 and ends at `time`. Therefore, the integral is time - (-0.5) = time + 0.5
+    fourier_series[0] = Complex::<f32>{re: (time + 0.5), im: 0.0};
+
+    for k in 1..cutoff {
+        let omega = omega_naught * k as f32;
+        // println!("Omega: {}", omega);
+        fourier_series[k] = Complex::<f32>{re: 0.0, im: -1.0 / omega} *          // 1/jω
+            (Complex::<f32>{re: 0.0, im: -omega * (-0.5)}.exp() -                // e^{-iω(-1/2)}
+             Complex::<f32>{re: 0.0, im: -omega * (time)}.exp());                // e^{-iωt_{0}}
+    }
+
+    for k in cutoff..num_terms {
+        let omega = omega_naught * (k as i64 - num_terms as i64) as f32; // different calculation of omega
+        // println!("Omega: {}", omega);
+        fourier_series[k] = Complex::<f32>{re: 0.0, im: -1.0 / omega} *          // 1/jω
+            (Complex::<f32>{re: 0.0, im: -omega * (-0.5)}.exp() -                // e^{-iω(-1/2)}
+             Complex::<f32>{re: 0.0, im: -omega * (time)}.exp());                // e^{-iωt_{0}}
+    }
+
+    return fourier_series;
+}
\ No newline at end of file
diff --git a/src/analysis/statistical_pb_chance/mod.rs b/src/analysis/statistical_pb_chance/mod.rs
new file mode 100644
index 000000000..daf2c8b67
--- /dev/null
+++ b/src/analysis/statistical_pb_chance/mod.rs
@@ -0,0 +1,7 @@
+//! This module implements a statistical "PB chance" analysis based on probability distributions of segment times.
+
+pub mod probability_distribution;
+pub mod discontinuous_fourier_transforms;
+
+#[cfg(test)]
+mod tests;
diff --git a/src/analysis/statistical_pb_chance/probability_distribution.rs b/src/analysis/statistical_pb_chance/probability_distribution.rs
new file mode 100644
index 000000000..fd5f6f002
--- /dev/null
+++ b/src/analysis/statistical_pb_chance/probability_distribution.rs
@@ -0,0 +1,314 @@
+use std::f32::consts::TAU;
+use std::ops;
+use std::sync::Arc;
+use rustfft::{FftPlanner, Fft};
+use rustfft::num_complex::{Complex, ComplexFloat};
+
+use crate::SegmentHistory;
+use crate::timing::TimingMethod;
+use crate::analysis::statistical_pb_chance::discontinuous_fourier_transforms::{delta_function_dft, step_function_dft};
+
+/// Utilities for handling probability distributions
+///
+/// # Overview of Probability Distributions
+///
+/// "Probability distributions", or "probability density functions", are essentially continuous histograms. They describe the relationship
+/// between possible times and the probability of obtaining them. The odds that the random variable
+/// will lie between points A and B is the integral from A to B of the probability density function.
+/// In the case of speedrunning, the random variable is the time of a split or the time of the full run.
+/// Therefore, the probability of a speedrunner getting a time between A and B is the integral
+/// from A to B of the probability density function.
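+///
+/// In symbols, if T denotes the (random) final time and p its probability density function, the statement
+/// above is just:
+///
+/// $$P(A \le T \le B) = \int_{A}^{B} p(t)\,dt$$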
+///
+/// # Probability Distributions vs. the Skill Curve
+///
+/// The "Skill curve" used elsewhere is essentially the integral of a probability distribution.
+/// Both methods contain the same information, however the math required to combine probability distributions
+/// can be optimized better than a skill curve can be.
+///
+/// For more details, see [Wikipedia](https://en.wikipedia.org//wiki/Probability_density_function).
+/// 3Blue1Brown also has a [video](https://www.3blue1brown.com/lessons/pdfs) on the subject,
+/// but the central example of that video is irrelevant to what is done here.
+///
+/// # Usage
+///
+/// A probability distribution can be created from a split history object
+///
+/// ```ignore
+/// use livesplit_core::{RealTime, SegmentHistory, TimingMethod};
+/// use livesplit_core::analysis::statistical_pb_chance::probability_distribution::ProbabilityDistribution;
+///
+/// // initialize some segment history
+/// let history: SegmentHistory; // ...
+///
+/// // max_duration: 2 hours = 7200 seconds. The analysis almost always still works if the run lasts longer
+/// // than max_duration; calculations only become inaccurate if the total run time is close to a multiple
+/// // of max_duration (2 hours, 4 hours, 6 hours, etc.)
+/// // num_terms: number of terms in the Fourier transform (fewer terms = faster, more terms = greater precision)
+/// // smoothing_factor: the most recent split makes up 20% of the distribution (20% = 0.2)
+/// let dist = ProbabilityDistribution::new(&history, TimingMethod::RealTime, 7200.0, 8192, 0.2);
+/// ```
+/// (See `ProbabilityDistribution::new` for more details)
+///
+/// Once a probability distribution has been created, additional splits can be added to it.
+///
+/// ```ignore
+/// // adding a time of 2:12.7 to the split with weight 0.2
+/// dist.add_point(2.0 * 60.0 + 12.7, 0.2);
+/// ```
+///
+///
+/// There are two main pieces of functionality associated with probability distributions:
+///
+/// 1) Combining the distributions of all splits in the run to generate the distribution of possible final
+/// times for the run
+/// 2) Using the distribution of the possible final times to determine the probability that the final
+/// time will be below a desired goal or objective.
+///
+/// The distributions of two splits can be added to each other by simply using the overloaded `+` operator
+/// (the `Add` trait)
+///
+/// ```ignore
+/// use livesplit_core::analysis::statistical_pb_chance::probability_distribution::ProbabilityDistribution;
+///
+/// // create from segment history
+/// let cap_kingdom: ProbabilityDistribution;
+/// let cascade_kingdom: ProbabilityDistribution;
+/// let sand_kingdom: ProbabilityDistribution;
+/// let lake_kingdom: ProbabilityDistribution;
+/// // ...
+///
+/// // compute the distributions of the remaining kingdoms
+/// let full_run = cap_kingdom + cascade_kingdom + sand_kingdom + lake_kingdom; // + ...
+/// let after_cap = cascade_kingdom + sand_kingdom + lake_kingdom; // + ...
+/// let after_cascade = sand_kingdom + lake_kingdom; // + ...
+/// // ...
+///
+/// ```
+///
+/// To determine the probability that a given distribution is below some specific amount, simply use
+/// the `probability_below` method
+///
+/// ```ignore
+/// let sub_hour = full_run.probability_below(60.0 * 60.0);
+/// let below_55_minutes_after_cap = after_cap.probability_below(55.0 * 60.0);
+/// ```
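+///
+/// As an illustrative sketch of how this feeds into a "PB chance" number (the goal time, the elapsed
+/// time and the distributions here are hypothetical), the chance of beating a goal from some point in
+/// the run is the probability that the sum of the remaining segments stays below the time budget left:
+///
+/// ```ignore
+/// // time remaining until a 1:02:30.00 goal, given the time already elapsed in the current attempt
+/// let budget = (62.0 * 60.0 + 30.0) - elapsed_so_far;
+///
+/// // distribution of everything that still has to be played
+/// let remaining = &sand_kingdom + &lake_kingdom; // + ...
+///
+/// let pb_chance = remaining.probability_below(budget);
+/// ```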
+///
+/// # Internal Functionality
+///
+/// Instead of tracking the data for the full probability distribution, the [Fourier Transform](https://en.wikipedia.org/wiki/Fourier_transform)
+/// of the distribution is tracked to allow for extremely efficient computations.
+///
+/// The PDF of the sum of two random variables is equal to the convolution of the PDFs of the two random
+/// variables being added. The convolution theorem states that convolution in the time domain corresponds to
+/// multiplication in the frequency domain. Therefore, the distribution of the sum of any two splits is computed
+/// by element-wise complex multiplication.
+///
+/// The probability that a random variable will be less than a given point is a bit more difficult to explain.
+/// We wish to calculate...
+///
+/// $$\int_{-\infty}^{t}p(x)dx$$
+///
+/// ...where p(x) is the probability density function. We observe that the integral is very similar to the
+/// Fourier transform evaluated at the point $$\omega=0$$:
+///
+/// $$\int_{-\infty}^{\infty}p(x)dx = \hat{p}[\omega=0]$$
+///
+/// The only difference between the two is the upper limit of integration. We can now use a trick involving
+/// unit step functions. Multiplying the integrand by a unit step function has the effect of simply changing
+/// the limit of integration, since the step function is zero outside that limit of integration and one within it.
+/// Therefore
+///
+/// $$\int_{-\infty}^{\infty}p(x)u(t-x)dx = \int_{-\infty}^{t}p(x)dx$$
+///
+/// Therefore, if we can multiply the probability distribution by a unit step function and evaluate the resulting
+/// Fourier transform at $$\omega=0$$, we can calculate the integral. Due to the convolution theorem, multiplication
+/// by a unit step function in the time domain results in a convolution operation in the frequency domain.
+/// Convolution is computationally expensive, so we wish to avoid it as much as possible. Luckily, we only need
+/// to evaluate the convolution of the distribution and the unit step function at a single point, $$\omega=0$$.
+/// Therefore, we do not need to compute the whole convolution, saving many computational resources.
+///
+/// From the definition of the discrete convolution,
+///
+/// $$(\hat{p}*\hat{u})[\omega=0] = \sum_{k=0}^{N}\hat{p}[k]\hat{u}[0-k] = \sum_{k=0}^{N}\hat{p}[k]\hat{u}[-k]$$
+///
+/// The `probability_below` function uses this methodology to compute the CDF of the distribution when desired.
+///
+
+#[derive(Clone, Debug)]
+pub struct ProbabilityDistribution {
+
+    max_duration: f32, // the maximum simulated time duration
+    omega_naught: f32, // the fundamental frequency of the Fourier transform of the distribution
+
+    transform: Vec<Complex<f32>>, // Fourier transform of the function
+
+    fft_inverse: Arc<dyn Fft<f32>>
+}
+
+impl ProbabilityDistribution {
+
+    ///
+    /// Initializes a probability distribution given a segment history, weighted by an exponentially weighted
+    /// average with smoothing factor `smoothing_factor`
+    ///
+    /// # Arguments
+    ///
+    /// * `times` - SegmentHistory object containing the history of times on the specified split
+    /// * `method` - The timing method to use to create the distribution
+    /// * `max_duration` - the maximum* duration of the run
+    /// * `num_terms` - the number of points to record data for in the distribution
+    /// * `smoothing_factor` - Smoothing factor of the exponential smoothing applied to the SegmentHistory (See [here](https://en.wikipedia.org/wiki/Exponential_smoothing))
+    ///
+    /// *The time of the run can be larger than this maximum duration and, under most circumstances, the
+    /// analysis still works. Problems arise when the duration of the run is close to the maximum duration
+    /// of the run, or a multiple of it.
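+    ///
+    /// In other words, each time after the first is folded in as an exponentially weighted average. Writing
+    /// `α` for `smoothing_factor` and `δ(t)` for the distribution of the newly added time, the update applied
+    /// per segment (this simply restates what `add_point` below does) is:
+    ///
+    /// ```latex
+    /// dist ← α · δ(t_{new}) + (1 - α) · dist
+    /// ```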
+ /// + pub fn new(times: &SegmentHistory, + method: TimingMethod, + max_duration: f32, num_terms: usize, smoothing_factor: f32) -> Self { + + // create a planner + let mut planner: FftPlanner = FftPlanner::new(); + + // initialize the distribution + let mut dist = ProbabilityDistribution { + max_duration, + omega_naught: TAU / max_duration, + transform: vec![Complex::{re: 0.0, im: 0.0}; num_terms], + fft_inverse: planner.plan_fft_inverse(num_terms) + }; + + // println!("omega naught: {}", dist.omega_naught); + // + // println!("{:?}", dist.transform); + + // go through all the splits and add them to the distribution + for (history_index, split) in times.iter_actual_runs().enumerate() { + + // only add splits to the distribution that use the specified timing method (Real time vs In-Game time) + match split.1[method] { + Some(time) => { + // on the very first segment, there is no weighing, so we just insert the time with no weight + if history_index == 0 { + dist.transform = delta_function_dft(dist.omega_naught, num_terms, time.total_seconds() as f32); + } + else { + dist.add_point(time.total_seconds() as f32, smoothing_factor); + } + } + _ => { + continue; + } + } + } + + // println!("after adding the points"); + // println!("{:?}", dist.transform); + + return dist; + + } + + /// + /// Adds a point to the probability distribution + /// + /// # Arguments + /// + /// * `time` - the time which is to be added to the distribution. + /// * `smoothing_factor` - Smoothing factor of the exponential smoothing applied to the SegmentHistory (See [here](https://en.wikipedia.org/wiki/Exponential_smoothing)) + /// + pub fn add_point(&mut self, time: f32, smoothing_factor: f32){ + + let num_terms = self.transform.len(); + + let dft = delta_function_dft(self.omega_naught, num_terms, time); + + // add the two vectors element wise in the form of an exponentially weighted average + for frequency_index in 0..num_terms { + self.transform[frequency_index] = dft[frequency_index] * smoothing_factor + (1.0 - smoothing_factor) * self.transform[frequency_index]; + } + } + + /// + /// Computes the Probability of the distribution being less than the specified amount (i.e. the + /// Cumulative Distribution Function or CDF). + /// + /// # Arguments + /// + /// * `time` - The point in time below which we wish to know the probability of being + /// + /// # Mathematics + /// + /// the Cumulative Distribution Function is defined as the integral from -∞ to t of the + /// probability density function (PDF) + /// + pub fn probability_below(&self, time: f32) -> f32{ + + // to take the integral, we evaluate the fourier transform at omega = 0. But this gives us the integral from -inf to inf. + // we want to limit our integral, which we can do by multiplying by a unit step function. In the frequency domain, + // this corresponds to taking a convolution. Since we only need to evaluate the result of the convolution at one point, all we're + // really doing is a multiply-add computation + + let step = step_function_dft(self.omega_naught, self.transform.len(), time); + + // to compute the convolution, we multiply the distribution and the step function element wise + // with one in reverse order and add them together. For a given index `i`, the correct product is + // calculated by multiplying element `i` of one function by `N - i` in the other function, where N + // is the number of elements in the arrays. For the case of i = 0 however, this results in us evaluating + // the array at index N, which is out of bounds. 
+
+    ///
+    /// Computes the probability of the distribution being less than the specified amount (i.e. the
+    /// Cumulative Distribution Function or CDF).
+    ///
+    /// # Arguments
+    ///
+    /// * `time` - The point in time below which we wish to know the probability of being
+    ///
+    /// # Mathematics
+    ///
+    /// The Cumulative Distribution Function is defined as the integral from -∞ to t of the
+    /// probability density function (PDF)
+    ///
+    pub fn probability_below(&self, time: f32) -> f32 {
+
+        // to take the integral, we evaluate the Fourier transform at omega = 0. But this gives us the integral from -inf to inf.
+        // we want to limit our integral, which we can do by multiplying by a unit step function. In the frequency domain,
+        // this corresponds to taking a convolution. Since we only need to evaluate the result of the convolution at one point, all we're
+        // really doing is a multiply-add computation
+
+        let step = step_function_dft(self.omega_naught, self.transform.len(), time);
+
+        // to compute the convolution, we multiply the distribution and the step function element wise
+        // with one in reverse order and add the products together. For a given index `i`, the correct product is
+        // calculated by multiplying element `i` of one function by element `N - i` of the other function, where N
+        // is the number of elements in the arrays. For the case of i = 0 however, this results in us evaluating
+        // the array at index N, which is out of bounds. For this reason, we skip the first element and add
+        // its product at the very end.
+        let convolution = (1..step.len()).map(|index| -> Complex<f32> {
+            // println!("{}", self.transform[index] * step[step.len() - index]);
+            return self.transform[index] * step[step.len() - index];
+        });
+
+        let integral = (convolution.sum::<Complex<f32>>() + self.transform[0] * step[0]).abs();
+
+        return f32::max(f32::min(integral / self.max_duration as f32, 1.0), 0.0);
+    }
+
+    ///
+    /// Obtains co-ordinate pairs to plot the probability density function
+    ///
+    pub fn plot(self) -> (Vec<f32>, Vec<f32>) {
+
+        let num_terms = self.transform.len();
+
+        let x_points = (0..num_terms).map(|x| x as f32 * self.max_duration / num_terms as f32).collect();
+
+        // use the inverse fft we own to transform our points
+        let mut inverted = self.transform.clone();
+        self.fft_inverse.process(&mut *inverted);
+
+        // take the magnitude of the y points to convert them to real numbers
+        let y_points = inverted.iter().map(|x| x.abs() / num_terms as f32).collect();
+
+        return (x_points, y_points);
+    }
+
+}
+
+impl<'a, 'b> ops::Add<&'b ProbabilityDistribution> for &'a ProbabilityDistribution {
+    type Output = ProbabilityDistribution;
+
+    ///
+    /// Computes the distribution of the sum of two random variables by convolution.
+    ///
+    /// By the convolution theorem, convolution in the time domain corresponds to multiplication
+    /// in the frequency domain. Therefore, it is only necessary to do an element wise multiplication
+    /// of the Fourier transforms.
+    ///
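+    ///
+    /// Concretely, if the two operands have transforms p̂ and q̂, the transform of the sum is (this is
+    /// exactly what the loop below computes element by element):
+    ///
+    /// ```latex
+    /// \hat{r}[k] = \hat{p}[k] · \hat{q}[k]
+    /// ```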
+    fn add(self, other: &'b ProbabilityDistribution) -> ProbabilityDistribution {
+        // copy self
+        let mut result = self.clone();
+
+        // multiply the elements
+        for i in 0..self.transform.len() {
+            result.transform[i] *= other.transform[i];
+        }
+
+        return result;
+    }
+
+}
+
+impl ops::AddAssign for ProbabilityDistribution {
+
+    fn add_assign(self: &mut ProbabilityDistribution, rhs: ProbabilityDistribution) {
+        for i in 0..self.transform.len() {
+            self.transform[i] *= rhs.transform[i];
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/analysis/statistical_pb_chance/tests.rs b/src/analysis/statistical_pb_chance/tests.rs
new file mode 100644
index 000000000..31c169ab4
--- /dev/null
+++ b/src/analysis/statistical_pb_chance/tests.rs
@@ -0,0 +1,224 @@
+use std::f32::consts::TAU;
+
+use assert_approx_eq::assert_approx_eq;
+use rustfft::{Fft, FftPlanner, num_complex::{Complex, ComplexFloat}};
+
+use std::sync::Arc;
+use crate::analysis::statistical_pb_chance::discontinuous_fourier_transforms::{delta_function_dft, step_function_dft};
+use crate::analysis::statistical_pb_chance::probability_distribution::{ProbabilityDistribution};
+use crate::{RealTime, SegmentHistory, Time, TimeSpan, TimingMethod};
+
+///
+/// Makes sure that the discontinuous Fourier transform yields the correct values
+///
+#[test]
+fn test_ten_element_dirac_delta() {
+
+    let duration = 10;
+
+    // create the delta function, with a peak at t=0
+    let mut delta_fourier = delta_function_dft(TAU / duration as f32,
+                                               duration, 0.0);
+
+    // create the FFT planner
+    let mut planner = FftPlanner::new();
+    let fft = planner.plan_fft_inverse(duration);
+
+    fft.process(&mut delta_fourier);
+
+    // convert to real numbers by taking the magnitude of the complex numbers
+    let magnitudes: Vec<f32> = delta_fourier.iter().map(|x: &Complex<f32>| -> f32 { (x.re().powi(2) + x.im().powi(2)).sqrt() / delta_fourier.len() as f32 }).collect();
+
+    assert_approx_eq!(magnitudes[0], 1.0);
+
+    for i in 1..magnitudes.len() {
+        assert_approx_eq!(magnitudes[i], 0.0);
+    }
+
+}
+
+///
+/// Makes sure that the Fourier transform yields the correct result when the duration does not equal
+/// the number of points
+///
+#[test]
+fn test_dirac_delta_different_duration() {
+
+    let points = 16;
+
+    // create the delta function, with a peak at t=0
+    let mut delta_fourier = delta_function_dft(TAU / 10 as f32,
+                                               points, 0.0);
+
+    // create the FFT planner
+    let mut planner = FftPlanner::new();
+    let fft = planner.plan_fft_inverse(points);
+
+    fft.process(&mut delta_fourier);
+
+    // convert to real numbers by taking the magnitude of the complex numbers
+    let magnitudes: Vec<f32> = delta_fourier.iter().map(|x: &Complex<f32>| -> f32 { x.abs() / delta_fourier.len() as f32 }).collect();
+
+    assert_approx_eq!(magnitudes[0], 1.0);
+
+    for i in 1..magnitudes.len() {
+        assert_approx_eq!(magnitudes[i], 0.0);
+    }
+
+}
+
+///
+/// Makes sure that the Fourier transform yields a sensible result when the delta function peaks at a
+/// non-integer point in time
+///
+#[test]
+fn test_dirac_delta_non_integer() {
+
+    let points = 16;
+
+    // create the delta function, with a peak at t=1.2
+    // [Complex { re: 1.0, im: -0.0 }, Complex { re: 0.72896856, im: -0.6845472 }, Complex { re: 0.06279038, im: -0.9980267 }, Complex { re: -0.6374242, im: -0.7705131 }, Complex { re: -0.9921147, im: -0.12533297 }, Complex { re: -0.8090168, im: 0.58778554 }, Complex { re: -0.18738091, im: 0.98228735 }, Complex { re: 0.535827, im: 0.8443278 }, Complex { re: 0.9685833, im: 0.24868935 }, Complex { re: 0.87630624, im: -0.48175442 }, Complex { re: 0.30901635, im: -0.9510567 }, Complex { re: -0.42577976, im: -0.9048268 }, Complex { re: -0.9297768, im: -0.3681238 }, Complex { re: -0.9297761, im: 0.36812562 }, Complex { re: -0.42577887, im: 0.90482724 }, Complex { re: 0.30901775, im: 0.95105624 }]
+    let mut delta_fourier = delta_function_dft(TAU / 10f32,
+                                               points, 1.2);
+
+    // println!("{:?}", delta_fourier);
+
+    // create the FFT planner
+    let mut planner = FftPlanner::new();
+    let fft = planner.plan_fft_inverse(points);
+
+    fft.process(&mut delta_fourier);
+
+    // convert to real numbers by taking the magnitude of the complex numbers
+    let magnitudes: Vec<f32> = delta_fourier.iter().map(|x: &Complex<f32>| -> f32 { x.abs() / delta_fourier.len() as f32 }).collect();
+
+    // println!("{:?}", magnitudes);
+
+}
+
+fn print_type_of<T>(_: &T) {
+    println!("{}", std::any::type_name::<T>())
+}
+
+#[test]
+fn test_unit_step() {
+    let points = 16;
+
+    let mut heaviside_fourier = step_function_dft(TAU / points as f32, points, 10.5f32);
+
+    let mut planner = FftPlanner::new();
+    let fft: Arc<dyn Fft<f32>> = planner.plan_fft_inverse(points);
+
+
+    fft.process(&mut *heaviside_fourier);
+
+    // convert to real numbers by taking the magnitude of the complex numbers
+    let magnitudes: Vec<f32> = heaviside_fourier.iter().map(|x: &Complex<f32>| -> f32 { x.abs() / points as f32 }).collect();
+
+    for i in 0..11 {
+        assert_approx_eq!(magnitudes[i], 1f32, 0.1); // high tolerance b/c this isn't a precise unit step function
+    }
+
+    for i in 11..points {
+        assert_approx_eq!(magnitudes[i], 0f32, 0.1);
+    }
+
+}
+
+#[test]
+fn test_init_probability_distribution() {
+
+    let mut times = SegmentHistory::default();
+
+    times.insert(1, Time::from(RealTime(Some(TimeSpan::from_seconds(1.2)))));
+    times.insert(2, Time::from(RealTime(Some(TimeSpan::from_seconds(6.0)))));
+
+    let dist = ProbabilityDistribution::new(&times, TimingMethod::RealTime,
+                                            10.0, 256, 0.5);
+
+    // we test to see if our distribution is working by using the CDF
+
+    assert_approx_eq!(dist.probability_below(0.0), 0.0, 0.1);
+    assert_approx_eq!(dist.probability_below(4.0), 0.5, 0.1);
+    assert_approx_eq!(dist.probability_below(5.0), 0.5, 0.1);
+    assert_approx_eq!(dist.probability_below(8.0), 1.0, 0.1);
+
+    // for time in times.iter() {
+    //
+    //     println!("{}", time.1.real_time.expect("time empty").total_seconds());
+    //
+    // }
+}
+
+#[test]
+fn test_inverse_fourier_transform() {
+
+    let mut times = SegmentHistory::default();
+
+    times.insert(1, Time::from(RealTime(Some(TimeSpan::from_seconds(0.625)))));
+    times.insert(2, Time::from(RealTime(Some(TimeSpan::from_seconds(6.25)))));
+
+    let dist = ProbabilityDistribution::new(&times, TimingMethod::RealTime,
+                                            10.0, 16, 0.5);
+
+    // plot the distribution
+    let (x_points, y_points) = dist.plot();
+
+    // make sure all the points except for the two delta functions are zero
+    assert_approx_eq!(y_points[0], 0.0);
+    assert_approx_eq!(y_points[1], 0.5);
+    assert_approx_eq!(y_points[2], 0.0);
+
+    assert_approx_eq!(y_points[9], 0.0);
+    assert_approx_eq!(y_points[10], 0.5);
+    assert_approx_eq!(y_points[11], 0.0);
+
+}
+
+#[test]
+fn add_distributions() {
+
+    let mut times = SegmentHistory::default();
+
+    times.insert(1, Time::from(RealTime(Some(TimeSpan::from_seconds(0.625)))));
+    times.insert(2, Time::from(RealTime(Some(TimeSpan::from_seconds(1.25)))));
+
+    let dist = ProbabilityDistribution::new(&times, TimingMethod::RealTime,
+                                            10.0, 16, 0.5);
+
+    let sum = &dist + &dist;
+
+    let (x_points, y_points) = sum.plot();
+
+    assert_approx_eq!(y_points[0], 0.0);
+    assert_approx_eq!(y_points[1], 0.0);
+    assert_approx_eq!(y_points[2], 0.25);
+    assert_approx_eq!(y_points[3], 0.5);
+    assert_approx_eq!(y_points[4], 0.25);
+    assert_approx_eq!(y_points[5], 0.0);
+    assert_approx_eq!(y_points[6], 0.0);
+
+}
+
+#[test]
+fn add_assign_distributions() {
+
+    let mut times = SegmentHistory::default();
+
+    times.insert(1, Time::from(RealTime(Some(TimeSpan::from_seconds(0.625)))));
+    times.insert(2, Time::from(RealTime(Some(TimeSpan::from_seconds(1.25)))));
+
+    let mut dist = ProbabilityDistribution::new(&times, TimingMethod::RealTime,
+                                                10.0, 16, 0.5);
+
+    dist += dist.clone();
+
+    let (x_points, y_points) = dist.plot();
+
+    assert_approx_eq!(y_points[0], 0.0);
+    assert_approx_eq!(y_points[1], 0.0);
+    assert_approx_eq!(y_points[2], 0.25);
+    assert_approx_eq!(y_points[3], 0.5);
+    assert_approx_eq!(y_points[4], 0.25);
+    assert_approx_eq!(y_points[5], 0.0);
+    assert_approx_eq!(y_points[6], 0.0);
+
+}
\ No newline at end of file
diff --git a/src/analysis/tests/mod.rs b/src/analysis/tests/mod.rs
index 680097a2c..4bac35225 100644
--- a/src/analysis/tests/mod.rs
+++ b/src/analysis/tests/mod.rs
@@ -1 +1 @@
-mod empty_run;
+mod empty_run;
\ No newline at end of file
diff --git a/src/lib.rs b/src/lib.rs
index 838e7685f..6d1ed107a 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -50,6 +50,7 @@
 //! ```
 
 extern crate alloc;
+extern crate core;
 
 mod platform;