.github/workflows/build-tests.yml (2 changes: 2 additions & 0 deletions)

@@ -17,6 +17,8 @@ jobs:
 
     steps:
     - uses: actions/checkout@v4
+    - name: format
+      run: cargo fmt --check
     - name: Build
      run: cargo build --verbose
     - name: Run tests default
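The new step makes formatting a CI gate: cargo fmt --check exits with a non-zero status when any file differs from rustfmt's output, so the job fails on unformatted code and contributors need to run cargo fmt locally before pushing.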
src/approximation/linear_approximation.rs (113 changes: 55 additions & 58 deletions)

@@ -3,105 +3,101 @@ use crate::numerical_derivative::derivator::DerivatorMultiVariable;
 use num_complex::ComplexFloat;
 
 #[derive(Debug)]
-pub struct LinearApproximationResult<T: ComplexFloat, const NUM_VARS: usize>
-{
+pub struct LinearApproximationResult<T: ComplexFloat, const NUM_VARS: usize> {
     pub intercept: T,
-    pub coefficients: [T; NUM_VARS]
+    pub coefficients: [T; NUM_VARS],
 }
 
 #[derive(Debug)]
-pub struct LinearApproximationPredictionMetrics<T: ComplexFloat>
-{
+pub struct LinearApproximationPredictionMetrics<T: ComplexFloat> {
     pub mean_absolute_error: T::Real,
     pub mean_squared_error: T::Real,
     pub root_mean_squared_error: T::Real,
     pub r_squared: T::Real,
-    pub adjusted_r_squared: T::Real
+    pub adjusted_r_squared: T::Real,
 }
 
-impl<T: ComplexFloat, const NUM_VARS: usize> LinearApproximationResult<T, NUM_VARS>
-{
+impl<T: ComplexFloat, const NUM_VARS: usize> LinearApproximationResult<T, NUM_VARS> {
     ///Helper function if you don't care about the details and just want the predictor directly
-    pub fn get_prediction_value(&self, args: &[T; NUM_VARS]) -> T
-    {
+    pub fn get_prediction_value(&self, args: &[T; NUM_VARS]) -> T {
         let mut result = self.intercept;
-        for (iter, arg) in args.iter().enumerate().take(NUM_VARS)
-        {
-            result = result + self.coefficients[iter]**arg;
+        for (iter, arg) in args.iter().enumerate().take(NUM_VARS) {
+            result = result + self.coefficients[iter] * *arg;
         }
 
         return result;
     }
 
     //get prediction metrics by feeding a list of points and the original function
-    pub fn get_prediction_metrics<const NUM_POINTS: usize>(&self, points: &[[T; NUM_VARS]; NUM_POINTS], original_function: &dyn Fn(&[T; NUM_VARS]) -> T) -> LinearApproximationPredictionMetrics<T>
-    {
+    pub fn get_prediction_metrics<const NUM_POINTS: usize>(
+        &self,
+        points: &[[T; NUM_VARS]; NUM_POINTS],
+        original_function: &dyn Fn(&[T; NUM_VARS]) -> T,
+    ) -> LinearApproximationPredictionMetrics<T> {
         //let num_points = NUM_POINTS as f64;
         let mut mae = T::zero();
         let mut mse = T::zero();
 
-        for point in points.iter().take(NUM_POINTS)
-        {
-
+        for point in points.iter().take(NUM_POINTS) {
             let predicted_y = self.get_prediction_value(point);
 
             mae = mae + (predicted_y - original_function(point));
             mse = mse + num_complex::ComplexFloat::powi(predicted_y - original_function(point), 2);
         }
 
-        mae = mae/T::from(NUM_POINTS).unwrap();
-        mse = mse/T::from(NUM_POINTS).unwrap();
+        mae = mae / T::from(NUM_POINTS).unwrap();
+        mse = mse / T::from(NUM_POINTS).unwrap();
 
         let rmse = mse.sqrt().abs();
 
         let mut r2_numerator = T::zero();
         let mut r2_denominator = T::zero();
 
-        for point in points.iter().take(NUM_POINTS)
-        {
+        for point in points.iter().take(NUM_POINTS) {
             let predicted_y = self.get_prediction_value(point);
 
-            r2_numerator = r2_numerator + num_complex::ComplexFloat::powi(predicted_y - original_function(point), 2);
-            r2_denominator = r2_numerator + num_complex::ComplexFloat::powi(mae - original_function(point), 2);
+            r2_numerator = r2_numerator
+                + num_complex::ComplexFloat::powi(predicted_y - original_function(point), 2);
+            r2_denominator =
+                r2_numerator + num_complex::ComplexFloat::powi(mae - original_function(point), 2);
         }
 
-        let r2 = T::one() - (r2_numerator/r2_denominator);
+        let r2 = T::one() - (r2_numerator / r2_denominator);
 
-        let r2_adj = T::one() - (T::one() - r2)*(T::from(NUM_POINTS).unwrap())/(T::from(NUM_POINTS).unwrap() - T::from(2.0).unwrap());
+        let r2_adj = T::one()
+            - (T::one() - r2) * (T::from(NUM_POINTS).unwrap())
+                / (T::from(NUM_POINTS).unwrap() - T::from(2.0).unwrap());
 
-        return LinearApproximationPredictionMetrics
-        {
+        return LinearApproximationPredictionMetrics {
             mean_absolute_error: mae.abs(),
             mean_squared_error: mse.abs(),
             root_mean_squared_error: rmse,
             r_squared: r2.abs(),
-            adjusted_r_squared: r2_adj.abs()
+            adjusted_r_squared: r2_adj.abs(),
         };
     }
 }
 
-pub struct LinearApproximator<D: DerivatorMultiVariable>
-{
-    derivator: D
+pub struct LinearApproximator<D: DerivatorMultiVariable> {
+    derivator: D,
 }
 
-impl<D: DerivatorMultiVariable> Default for LinearApproximator<D>
-{
-    fn default() -> Self
-    {
-        return LinearApproximator { derivator: D::default() };
+impl<D: DerivatorMultiVariable> Default for LinearApproximator<D> {
+    fn default() -> Self {
+        return LinearApproximator {
+            derivator: D::default(),
+        };
     }
 }
 
-impl<D: DerivatorMultiVariable> LinearApproximator<D>
-{
-    pub fn from_derivator(derivator: D) -> Self
-    {
-        return LinearApproximator {derivator}
+impl<D: DerivatorMultiVariable> LinearApproximator<D> {
+    pub fn from_derivator(derivator: D) -> Self {
+        return LinearApproximator { derivator };
     }
 
     /// For an n-dimensional approximation, the equation is linearized as:
     /// coefficient[0]*var_1 + coefficient[1]*var_2 + ... + coefficient[n-1]*var_n + intercept
-    /// 
+    ///
     /// NOTE: Returns a Result<T, &'static str>
     /// Possible &'static str are:
     /// NumberOfStepsCannotBeZero -> if the derivative step size is zero
@@ -110,9 +106,9 @@ impl<D: DerivatorMultiVariable> LinearApproximator<D>
     ///```
     ///use multicalc::approximation::linear_approximation::*;
     ///use multicalc::numerical_derivative::finite_difference::MultiVariableSolver;
-    /// 
+    ///
     ///let function_to_approximate = | args: &[f64; 3] | -> f64
-    ///{ 
+    ///{
     /// return args[0] + args[1].powf(2.0) + args[2].powf(3.0);
     ///};
     ///
@@ -123,29 +119,30 @@
     ///assert!(f64::abs(function_to_approximate(&point) - result.get_prediction_value(&point)) < 1e-9);
     /// ```
     /// you can also inspect the results of the approximation. For an n-dimensional approximation, the equation is linearized as
-    /// 
+    ///
     /// [`LinearApproximationResult::intercept`] gives you the required intercept
     /// [`LinearApproximationResult::coefficients`] gives you the required coefficients in order
-    /// 
+    ///
     /// if you don't care about the results and want the predictor directly, use [`LinearApproximationResult::get_prediction_value()`]
     /// you can also inspect the prediction metrics by providing list of points, use [`LinearApproximationResult::get_prediction_metrics()`]
     ///
-    pub fn get<T: ComplexFloat, const NUM_VARS: usize>(&self, function: &dyn Fn(&[T; NUM_VARS]) -> T, point: &[T; NUM_VARS]) -> Result<LinearApproximationResult<T, NUM_VARS>, &'static str>
-    {
+    pub fn get<T: ComplexFloat, const NUM_VARS: usize>(
+        &self,
+        function: &dyn Fn(&[T; NUM_VARS]) -> T,
+        point: &[T; NUM_VARS],
+    ) -> Result<LinearApproximationResult<T, NUM_VARS>, &'static str> {
         let mut slopes_ = [T::zero(); NUM_VARS];
 
         let mut intercept_ = function(point);
 
-        for iter in 0..NUM_VARS
-        {
+        for iter in 0..NUM_VARS {
             slopes_[iter] = self.derivator.get(1, function, &[iter], point)?;
-            intercept_ = intercept_ - slopes_[iter]*point[iter];
+            intercept_ = intercept_ - slopes_[iter] * point[iter];
         }
 
-        return Ok(LinearApproximationResult
-        {
+        return Ok(LinearApproximationResult {
             intercept: intercept_,
-            coefficients: slopes_
+            coefficients: slopes_,
         });
     }
 }
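For context on what the reformatted code does: get() builds the coefficients from the first partial derivatives of the function at the chosen point, and sets the intercept to function(point) minus the sum of coefficient[i]*point[i], so the linear model reproduces the function exactly at the expansion point. The sketch below mirrors the crate's doc example above; it is not part of this diff, and the expansion point, sample points, and metrics call are illustrative values.

// Usage sketch (not part of this PR): assumes the multicalc crate with the
// imports shown in the doc example; numeric values below are illustrative.
use multicalc::approximation::linear_approximation::*;
use multicalc::numerical_derivative::finite_difference::MultiVariableSolver;

fn main() {
    // f(x, y, z) = x + y^2 + z^3, the same function as in the doc example
    let function_to_approximate = |args: &[f64; 3]| -> f64 {
        args[0] + args[1].powf(2.0) + args[2].powf(3.0)
    };

    // Expansion point for the linearization (illustrative)
    let point = [1.0, 2.0, 3.0];

    // Default approximator backed by the finite-difference derivator
    let approximator = LinearApproximator::<MultiVariableSolver>::default();
    let result = approximator.get(&function_to_approximate, &point).unwrap();

    // Exact at the expansion point: intercept + sum(coefficients[i] * point[i]) == f(point)
    assert!(f64::abs(function_to_approximate(&point) - result.get_prediction_value(&point)) < 1e-9);

    // Judge the fit near the expansion point with a few sample points (illustrative)
    let samples = [[1.1, 2.1, 3.1], [0.9, 1.9, 2.9], [1.0, 2.0, 3.0]];
    let metrics = result.get_prediction_metrics(&samples, &function_to_approximate);
    println!("rmse = {}, r^2 = {}", metrics.root_mean_squared_error, metrics.r_squared);
}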
src/approximation/mod.rs (2 changes: 1 addition & 1 deletion)

@@ -2,4 +2,4 @@ pub mod linear_approximation;
 pub mod quadratic_approximation;
 
 #[cfg(test)]
-mod test;
\ No newline at end of file
+mod test;