-- Hoogle documentation, generated by Haddock -- See Hoogle, http://www.haskell.org/hoogle/ -- | Simple linear and quadratic regression -- -- A simple package with a module for -- -- -- -- All specialized to Double. @package regression-simple @version 0.2 -- | Minimal linear algebra lib. module Math.Regression.Simple.LinAlg -- | Addition class Add a zero :: Add a => a add :: Add a => a -> a -> a infixl 6 `add` -- | Identity class Eye a eye :: Eye a => a -- | Multiplication of different things. class Eye a => Mult a b c | a b -> c mult :: Mult a b c => a -> b -> c infixl 7 `mult` -- | Determinant class Eye a => Det a det :: Det a => a -> Double -- | Inverse class Det a => Inv a inv :: Inv a => a -> a -- | Solve linear equation. -- --
--   >>> zerosLin (V2 1 2)
--   -2.0
--   
zerosLin :: V2 -> Double -- | Solve quadratic equation. -- --
--   >>> zerosQuad (V3 2 0 (-1))
--   Right (-0.7071067811865476,0.7071067811865476)
--   
-- --
--   >>> zerosQuad (V3 2 0 1)
--   Left ((-0.0) :+ (-0.7071067811865476),(-0.0) :+ 0.7071067811865476)
--   
-- -- Double root is not treated separately: -- --
--   >>> zerosQuad (V3 1 0 0)
--   Right (-0.0,0.0)
--   
-- --
--   >>> zerosQuad (V3 1 (-2) 1)
--   Right (1.0,1.0)
--   
zerosQuad :: V3 -> Either (Complex Double, Complex Double) (Double, Double) -- | Find an optimum point. -- --
--   >>> optimaQuad (V3 1 (-2) 0)
--   1.0
--   
-- -- compare to -- --
--   >>> zerosQuad (V3 1 (-2) 0)
--   Right (0.0,2.0)
--   
optimaQuad :: V3 -> Double -- | 2d vector. Strict pair of Doubles. -- -- Also used to represent linear polynomial: V2 a b -- <math>. data V2 V2 :: !Double -> !Double -> V2 -- | 2×2 matrix. data M22 M22 :: !Double -> !Double -> !Double -> !Double -> M22 -- | Symmetric 2x2 matrix. data SM22 SM22 :: !Double -> !Double -> !Double -> SM22 -- | 3d vector. Strict triple of Doubles. -- -- Also used to represent quadratic polynomial: V3 a b c -- <math>. data V3 V3 :: !Double -> !Double -> !Double -> V3 -- | 3×3 matrix. data M33 M33 :: !Double -> !Double -> !Double -> !Double -> !Double -> !Double -> !Double -> !Double -> !Double -> M33 -- | Symmetric 3×3 matrix. data SM33 SM33 :: !Double -> !Double -> !Double -> !Double -> !Double -> !Double -> SM33 instance GHC.Show.Show Math.Regression.Simple.LinAlg.V2 instance GHC.Classes.Eq Math.Regression.Simple.LinAlg.V2 instance GHC.Show.Show Math.Regression.Simple.LinAlg.M22 instance GHC.Classes.Eq Math.Regression.Simple.LinAlg.M22 instance GHC.Show.Show Math.Regression.Simple.LinAlg.SM22 instance GHC.Classes.Eq Math.Regression.Simple.LinAlg.SM22 instance GHC.Show.Show Math.Regression.Simple.LinAlg.V3 instance GHC.Classes.Eq Math.Regression.Simple.LinAlg.V3 instance GHC.Show.Show Math.Regression.Simple.LinAlg.M33 instance GHC.Classes.Eq Math.Regression.Simple.LinAlg.M33 instance GHC.Show.Show Math.Regression.Simple.LinAlg.SM33 instance GHC.Classes.Eq Math.Regression.Simple.LinAlg.SM33 instance Control.DeepSeq.NFData Math.Regression.Simple.LinAlg.SM33 instance Math.Regression.Simple.LinAlg.Add Math.Regression.Simple.LinAlg.SM33 instance Math.Regression.Simple.LinAlg.Eye Math.Regression.Simple.LinAlg.SM33 instance Math.Regression.Simple.LinAlg.Det Math.Regression.Simple.LinAlg.SM33 instance Math.Regression.Simple.LinAlg.Inv Math.Regression.Simple.LinAlg.SM33 instance Math.Regression.Simple.LinAlg.Mult GHC.Types.Double Math.Regression.Simple.LinAlg.SM33 Math.Regression.Simple.LinAlg.SM33 instance Math.Regression.Simple.LinAlg.Mult 
Math.Regression.Simple.LinAlg.SM33 Math.Regression.Simple.LinAlg.V3 Math.Regression.Simple.LinAlg.V3 instance Control.DeepSeq.NFData Math.Regression.Simple.LinAlg.M33 instance Math.Regression.Simple.LinAlg.Add Math.Regression.Simple.LinAlg.M33 instance Math.Regression.Simple.LinAlg.Eye Math.Regression.Simple.LinAlg.M33 instance Math.Regression.Simple.LinAlg.Det Math.Regression.Simple.LinAlg.M33 instance Math.Regression.Simple.LinAlg.Inv Math.Regression.Simple.LinAlg.M33 instance Math.Regression.Simple.LinAlg.Mult GHC.Types.Double Math.Regression.Simple.LinAlg.M33 Math.Regression.Simple.LinAlg.M33 instance Math.Regression.Simple.LinAlg.Mult Math.Regression.Simple.LinAlg.M33 Math.Regression.Simple.LinAlg.V3 Math.Regression.Simple.LinAlg.V3 instance Control.DeepSeq.NFData Math.Regression.Simple.LinAlg.V3 instance Math.Regression.Simple.LinAlg.Add Math.Regression.Simple.LinAlg.V3 instance Math.Regression.Simple.LinAlg.Mult GHC.Types.Double Math.Regression.Simple.LinAlg.V3 Math.Regression.Simple.LinAlg.V3 instance Control.DeepSeq.NFData Math.Regression.Simple.LinAlg.SM22 instance Math.Regression.Simple.LinAlg.Add Math.Regression.Simple.LinAlg.SM22 instance Math.Regression.Simple.LinAlg.Eye Math.Regression.Simple.LinAlg.SM22 instance Math.Regression.Simple.LinAlg.Det Math.Regression.Simple.LinAlg.SM22 instance Math.Regression.Simple.LinAlg.Inv Math.Regression.Simple.LinAlg.SM22 instance Math.Regression.Simple.LinAlg.Mult GHC.Types.Double Math.Regression.Simple.LinAlg.SM22 Math.Regression.Simple.LinAlg.SM22 instance Math.Regression.Simple.LinAlg.Mult Math.Regression.Simple.LinAlg.SM22 Math.Regression.Simple.LinAlg.V2 Math.Regression.Simple.LinAlg.V2 instance Control.DeepSeq.NFData Math.Regression.Simple.LinAlg.M22 instance Math.Regression.Simple.LinAlg.Add Math.Regression.Simple.LinAlg.M22 instance Math.Regression.Simple.LinAlg.Eye Math.Regression.Simple.LinAlg.M22 instance Math.Regression.Simple.LinAlg.Det Math.Regression.Simple.LinAlg.M22 instance 
Math.Regression.Simple.LinAlg.Inv Math.Regression.Simple.LinAlg.M22 instance Math.Regression.Simple.LinAlg.Mult GHC.Types.Double Math.Regression.Simple.LinAlg.M22 Math.Regression.Simple.LinAlg.M22 instance Math.Regression.Simple.LinAlg.Mult Math.Regression.Simple.LinAlg.M22 Math.Regression.Simple.LinAlg.V2 Math.Regression.Simple.LinAlg.V2 instance Math.Regression.Simple.LinAlg.Mult Math.Regression.Simple.LinAlg.M22 Math.Regression.Simple.LinAlg.M22 Math.Regression.Simple.LinAlg.M22 instance Control.DeepSeq.NFData Math.Regression.Simple.LinAlg.V2 instance Math.Regression.Simple.LinAlg.Add Math.Regression.Simple.LinAlg.V2 instance Math.Regression.Simple.LinAlg.Mult GHC.Types.Double Math.Regression.Simple.LinAlg.V2 Math.Regression.Simple.LinAlg.V2 instance Math.Regression.Simple.LinAlg.Inv GHC.Types.Double instance Math.Regression.Simple.LinAlg.Det GHC.Types.Double instance Math.Regression.Simple.LinAlg.Mult GHC.Types.Double GHC.Types.Double GHC.Types.Double instance Math.Regression.Simple.LinAlg.Eye GHC.Types.Double instance Math.Regression.Simple.LinAlg.Add GHC.Types.Double -- | math-functions has KBN-Babuška-Neumaier summation algorithm -- as well in Numeric.Sum module. module Numeric.KBN -- | KBN summation accumulator. data KBN KBN :: !Double -> !Double -> KBN zeroKBN :: KBN getKBN :: KBN -> Double -- | KBN summation algorithm. -- --
--   >>> sumKBN (replicate 10 0.1)
--   1.0
--   
-- --
--   >>> Data.List.foldl' (+) 0 (replicate 10 0.1) :: Double
--   0.9999999999999999
--   
-- --
--   >>> sumKBN [1, 1e100, 1, -1e100]
--   2.0
--   
-- --
--   >>> Data.List.foldl' (+) 0 [1, 1e100, 1, -1e100]
--   0.0
--   
sumKBN :: Foldable f => f Double -> Double -- | Add a Double to KBN accumulator. addKBN :: KBN -> Double -> KBN instance GHC.Show.Show Numeric.KBN.KBN instance Control.DeepSeq.NFData Numeric.KBN.KBN -- | regression-simple provides (hopefully) simple regression -- functions. -- -- The linear :: Foldable f => (a -> (Double, Double)) -- -> f a -> V2 is the simplest one. -- -- There are variants with weights, y-errors, and x and y-errors. In -- addition, package includes Levenberg–Marquardt algorithm -- implementation to fit arbitrary functions (with one, two or three -- parameters), as long as you can give their partial derivatives as well -- (ad package is handy for that). -- -- For multiple independent variable ordinary least squares or -- Levenberg-Marquard with functions with > 3 parameter you should -- look elsewhere. -- -- Package has been tested to return similar results as fit -- functionality in gnuplot (L-M doesn't always converge to -- exactly the same points in parameter space). module Math.Regression.Simple -- | Linear regression. -- --
--   >>> let input1 = [(0, 1), (1, 3), (2, 5)]
--   
--   >>> PP $ linear id input1
--   V2 2.0000 1.00000
--   
-- --
--   >>> let input2 = [(0.1, 1.2), (1.3, 3.1), (1.9, 4.9), (3.0, 7.1), (4.1, 9.0)]
--   
--   >>> PP $ linear id input2
--   V2 2.0063 0.88685
--   
linear :: Foldable f => (a -> (Double, Double)) -> f a -> V2 -- | Like linear but returns complete Fit. -- -- To get confidence intervals you should multiply the errors by -- quantile (studentT (n - 2)) ci' from statistics -- package or similar. For big n using value 1 gives 68% -- interval and using value 2 gives 95% confidence interval. See -- https://en.wikipedia.org/wiki/Student%27s_t-distribution#Table_of_selected_values -- (quantile calculates one-sided values, you need two-sided, -- thus adjust ci value). -- -- The first input is perfect fit: -- --
--   >>> let fit = linearFit id input1
--   
--   >>> PP fit
--   Fit (V2 2.0000 1.00000) (V2 0.00000 0.00000) 1 0.00000
--   
-- -- The second input is quite good: -- --
--   >>> PP $ linearFit id input2
--   Fit (V2 2.0063 0.88685) (V2 0.09550 0.23826) 3 0.25962
--   
-- -- But the third input isn't so much, standard error of a slope parameter -- is 20%. -- --
--   >>> let input3 = [(0, 2), (1, 3), (2, 6), (3, 11)]
--   
--   >>> PP $ linearFit id input3
--   Fit (V2 3.0000 1.00000) (V2 0.63246 1.1832) 2 4.0000
--   
linearFit :: Foldable f => (a -> (Double, Double)) -> f a -> Fit V2 -- | Weighted linear regression. -- --
--   >>> let input2 = [(0.1, 1.2), (1.3, 3.1), (1.9, 4.9), (3.0, 7.1), (4.1, 9.0)]
--   
--   >>> PP $ linearFit id input2
--   Fit (V2 2.0063 0.88685) (V2 0.09550 0.23826) 3 0.25962
--   
-- --
--   >>> let input2w = [(0.1, 1.2, 1), (1.3, 3.1, 1), (1.9, 4.9, 1), (3.0, 7.1, 1/4), (4.1, 9.0, 1/4)]
--   
--   >>> PP $ linearWithWeights id input2w
--   Fit (V2 2.0060 0.86993) (V2 0.12926 0.23696) 3 0.22074
--   
linearWithWeights :: Foldable f => (a -> (Double, Double, Double)) -> f a -> Fit V2 -- | Linear regression with y-errors. -- --
--   >>> let input2y = [(0.1, 1.2, 0.12), (1.3, 3.1, 0.31), (1.9, 4.9, 0.49), (3.0, 7.1, 0.71), (4.1, 9.0, 1.9)]
--   
--   >>> let fit = linearWithYerrors id input2y
--   
--   >>> PP fit
--   Fit (V2 1.9104 0.98302) (V2 0.13006 0.10462) 3 2.0930
--   
-- -- When we know actual y-errors, we can calculate the Q-value using -- statistics package: -- --
--   >>> import qualified Statistics.Distribution            as S
--   
--   >>> import qualified Statistics.Distribution.ChiSquared as S
--   
--   >>> S.cumulative (S.chiSquared (fitNDF fit)) (fitWSSR fit)
--   0.446669639443138
--   
-- -- or using math-functions -- --
--   >>> import Numeric.SpecFunctions (incompleteGamma)
--   
--   >>> incompleteGamma (fromIntegral (fitNDF fit) / 2) (fitWSSR fit / 2)
--   0.446669639443138
--   
-- -- It is not uncommon to deem acceptable on equal terms any models with, -- say, Q > 0.001. If Q is too large, too near to 1, it is most likely -- caused by overestimating the y-errors. linearWithYerrors :: Foldable f => (a -> (Double, Double, Double)) -> f a -> Fit V2 -- | Iterative linear regression with x and y errors. -- -- Orear, J. (1982). Least squares when both variables have -- uncertainties. American Journal of Physics, 50(10), 912–916. -- doi:10.1119/1.12972 -- --
--   >>> let input2xy = [(0.1, 1.2, 0.01, 0.12), (1.3, 3.1, 0.13, 0.31), (1.9, 4.9, 0.19, 0.49), (3.0, 7.1, 0.3, 0.71), (4.1, 9.0, 0.41, 1.9)]
--   
--   >>> let fit :| fits = linearWithXYerrors id input2xy
--   
-- -- First fit is done using linearWithYerrors: -- --
--   >>> PP fit
--   Fit (V2 1.9104 0.98302) (V2 0.13006 0.10462) 3 2.0930
--   
-- -- After that the effective variance is used to refine the fit, just a -- few iterations is often enough: -- --
--   >>> PP $ take 3 fits
--   Fit (V2 1.9092 0.99251) (V2 0.12417 0.08412) 3 1.2992
--   Fit (V2 1.9092 0.99250) (V2 0.12418 0.08414) 3 1.2998
--   Fit (V2 1.9092 0.99250) (V2 0.12418 0.08414) 3 1.2998
--   
linearWithXYerrors :: Foldable f => (a -> (Double, Double, Double, Double)) -> f a -> NonEmpty (Fit V2) -- | Calculate linear fit from LinRegAcc. linearFit' :: LinRegAcc -> Fit V2 -- | Linear regression accumulator. data LinRegAcc LinRegAcc :: {-# UNPACK #-} !Int -> {-# UNPACK #-} !KBN -> {-# UNPACK #-} !KBN -> {-# UNPACK #-} !KBN -> {-# UNPACK #-} !KBN -> {-# UNPACK #-} !KBN -> {-# UNPACK #-} !KBN -> LinRegAcc -- | <math> [lra_n] :: LinRegAcc -> {-# UNPACK #-} !Int -- | <math> [lra_w] :: LinRegAcc -> {-# UNPACK #-} !KBN -- | <math> [lra_x] :: LinRegAcc -> {-# UNPACK #-} !KBN -- | <math> [lra_x2] :: LinRegAcc -> {-# UNPACK #-} !KBN -- | <math> [lra_y] :: LinRegAcc -> {-# UNPACK #-} !KBN -- | <math> [lra_xy] :: LinRegAcc -> {-# UNPACK #-} !KBN -- | <math> [lra_y2] :: LinRegAcc -> {-# UNPACK #-} !KBN -- | All-zeroes LinRegAcc. zeroLinRegAcc :: LinRegAcc -- | Add a point to linreg accumulator. addLinReg :: LinRegAcc -> Double -> Double -> LinRegAcc -- | Add a weighted point to linreg accumulator. addLinRegW :: LinRegAcc -> Double -> Double -> Double -> LinRegAcc -- | Quadratic regression. -- --
--   >>> let input1 = [(0, 1), (1, 3), (2, 5)]
--   
--   >>> quadratic id input1
--   V3 0.0 2.0 1.0
--   
-- --
--   >>> let input2 = [(0.1, 1.2), (1.3, 3.1), (1.9, 4.9), (3.0, 7.1), (4.1, 9.0)]
--   
--   >>> PP $ quadratic id input2
--   V3 (-0.00589) 2.0313 0.87155
--   
-- --
--   >>> let input3 = [(0, 2), (1, 3), (2, 6), (3, 11)]
--   
--   >>> PP $ quadratic id input3
--   V3 1.00000 0.00000 2.0000
--   
quadratic :: Foldable f => (a -> (Double, Double)) -> f a -> V3 -- | Like quadratic but returns complete Fit. -- --
--   >>> PP $ quadraticFit id input2
--   Fit (V3 (-0.00589) 2.0313 0.87155) (V3 0.09281 0.41070 0.37841) 2 0.25910
--   
-- --
--   >>> PP $ quadraticFit id input3
--   Fit (V3 1.00000 0.00000 2.0000) (V3 0.00000 0.00000 0.00000) 1 0.00000
--   
quadraticFit :: Foldable f => (a -> (Double, Double)) -> f a -> Fit V3 -- | Weighted quadratic regression. -- --
--   >>> let input2w = [(0.1, 1.2, 1), (1.3, 3.1, 1), (1.9, 4.9, 1), (3.0, 7.1, 1/4), (4.1, 9.0, 1/4)]
--   
--   >>> PP $ quadraticWithWeights id input2w
--   Fit (V3 0.02524 1.9144 0.91792) (V3 0.10775 0.42106 0.35207) 2 0.21484
--   
quadraticWithWeights :: Foldable f => (a -> (Double, Double, Double)) -> f a -> Fit V3 -- | Quadratic regression with y-errors. -- --
--   >>> let input2y = [(0.1, 1.2, 0.12), (1.3, 3.1, 0.31), (1.9, 4.9, 0.49), (3.0, 7.1, 0.71), (4.1, 9.0, 0.9)]
--   
--   >>> PP $ quadraticWithYerrors id input2y
--   Fit (V3 0.08776 1.6667 1.0228) (V3 0.10131 0.31829 0.11917) 2 1.5398
--   
quadraticWithYerrors :: Foldable f => (a -> (Double, Double, Double)) -> f a -> Fit V3 -- | Iterative quadratic regression with x and y errors. -- -- Orear, J. (1982). Least squares when both variables have -- uncertainties. American Journal of Physics, 50(10), 912–916. -- doi:10.1119/1.12972 quadraticWithXYerrors :: Foldable f => (a -> (Double, Double, Double, Double)) -> f a -> NonEmpty (Fit V3) -- | Calculate quadratic fit from QuadRegAcc. quadraticFit' :: QuadRegAcc -> Fit V3 -- | Quadratic regression accumulator. data QuadRegAcc QuadRegAcc :: {-# UNPACK #-} !Int -> {-# UNPACK #-} !KBN -> {-# UNPACK #-} !KBN -> {-# UNPACK #-} !KBN -> {-# UNPACK #-} !KBN -> {-# UNPACK #-} !KBN -> {-# UNPACK #-} !KBN -> {-# UNPACK #-} !KBN -> {-# UNPACK #-} !KBN -> {-# UNPACK #-} !KBN -> QuadRegAcc -- | <math> [qra_n] :: QuadRegAcc -> {-# UNPACK #-} !Int -- | <math> [qra_w] :: QuadRegAcc -> {-# UNPACK #-} !KBN -- | <math> [qra_x] :: QuadRegAcc -> {-# UNPACK #-} !KBN -- | <math> [qra_x2] :: QuadRegAcc -> {-# UNPACK #-} !KBN -- | <math> [qra_x3] :: QuadRegAcc -> {-# UNPACK #-} !KBN -- | <math> [qra_x4] :: QuadRegAcc -> {-# UNPACK #-} !KBN -- | <math> [qra_y] :: QuadRegAcc -> {-# UNPACK #-} !KBN -- | <math> [qra_xy] :: QuadRegAcc -> {-# UNPACK #-} !KBN -- | <math> [qra_x2y] :: QuadRegAcc -> {-# UNPACK #-} !KBN -- | <math> [qra_y2] :: QuadRegAcc -> {-# UNPACK #-} !KBN -- | All-zeroes QuadRegAcc. zeroQuadRegAcc :: QuadRegAcc -- | Add a point to quadreg accumulator. addQuadReg :: QuadRegAcc -> Double -> Double -> QuadRegAcc -- | Add a weighted point to quadreg accumulator. addQuadRegW :: QuadRegAcc -> Double -> Double -> Double -> QuadRegAcc -- | Convert QuadRegAcc to LinRegAcc. -- -- Using this we can try quadratic and linear fits with a single data -- scan. quadRegAccToLin :: QuadRegAcc -> LinRegAcc -- | Levenberg–Marquardt for functions with one parameter. -- -- See levenbergMarquardt2 for examples, this is very similar. -- -- For example we can fit <math>, its derivative is <math>. 
-- --
--   >>> let scale a (x, y) = (y, a * x + 1, x)
--   
--   >>> PP $ NE.last $ levenbergMarquardt1 scale 1 input2
--   Fit 1.9685 0.04735 4 0.27914
--   
-- -- Not bad, but worse than linear fit which fits the intercept point too. levenbergMarquardt1 :: Foldable f => (Double -> a -> (Double, Double, Double)) -> Double -> f a -> NonEmpty (Fit Double) -- | levenbergMarquardt1 with weights. levenbergMarquardt1WithWeights :: Foldable f => (Double -> a -> (Double, Double, Double, Double)) -> Double -> f a -> NonEmpty (Fit Double) -- | levenbergMarquardt1 with Y-errors. levenbergMarquardt1WithYerrors :: Foldable f => (Double -> a -> (Double, Double, Double, Double)) -> Double -> f a -> NonEmpty (Fit Double) -- | levenbergMarquardt1 with XY-errors. levenbergMarquardt1WithXYerrors :: Foldable f => (Double -> a -> (Double, Double, Double, Double, Double, Double)) -> Double -> f a -> NonEmpty (Fit Double) -- | Levenberg–Marquardt for functions with two parameters. -- -- You can use this sledgehammer to do a linear fit: -- --
--   >>> let lin (V2 a b) (x, y) = (y, a * x + b, V2 x 1)
--   
-- -- We can then use levenbergMarquardt2 to find a fit: -- --
--   >>> PP $ levenbergMarquardt2 lin (V2 1 1) input2
--   Fit (V2 1.00000 1.00000) (V2 1.0175 2.5385) 3 29.470
--   Fit (V2 1.0181 1.0368) (V2 0.98615 2.4602) 3 27.681
--   Fit (V2 1.1557 1.2988) (V2 0.75758 1.8900) 3 16.336
--   Fit (V2 1.5463 1.6577) (V2 0.29278 0.73043) 3 2.4400
--   Fit (V2 1.9129 1.1096) (V2 0.11033 0.27524) 3 0.34645
--   Fit (V2 2.0036 0.89372) (V2 0.09552 0.23830) 3 0.25970
--   Fit (V2 2.0063 0.88687) (V2 0.09550 0.23826) 3 0.25962
--   Fit (V2 2.0063 0.88685) (V2 0.09550 0.23826) 3 0.25962
--   
-- -- This is the same result what linearFit returns: -- --
--   >>> PP $ linearFit id input2
--   Fit (V2 2.0063 0.88685) (V2 0.09550 0.23826) 3 0.25962
--   
-- --

Using AD

-- -- You can use ad to calculate derivatives for you. -- --
--   >>> import qualified Numeric.AD.Mode.Reverse.Double as AD
--   
-- -- We need a (Traversable) homogeneous triple to represent the two -- parameters and x: -- --
--   >>> data H3 a = H3 a a a deriving (Functor, Foldable, Traversable)
--   
-- -- Then we define a function ad can operate with: -- --
--   >>> let linearF (H3 a b x) = a * x + b
--   
-- -- which we can use to fit the curve in generic way: -- --
--   >>> let lin' (V2 a b) (x, y) = case AD.grad' linearF (H3 a b x) of (f, H3 da db _f') -> (y, f, V2 da db)
--   
--   >>> PP $ levenbergMarquardt2 lin' (V2 1 1) input2
--   Fit (V2 1.00000 1.00000) (V2 1.0175 2.5385) 3 29.470
--   Fit (V2 1.0181 1.0368) (V2 0.98615 2.4602) 3 27.681
--   Fit (V2 1.1557 1.2988) (V2 0.75758 1.8900) 3 16.336
--   Fit (V2 1.5463 1.6577) (V2 0.29278 0.73043) 3 2.4400
--   Fit (V2 1.9129 1.1096) (V2 0.11033 0.27524) 3 0.34645
--   Fit (V2 2.0036 0.89372) (V2 0.09552 0.23830) 3 0.25970
--   Fit (V2 2.0063 0.88687) (V2 0.09550 0.23826) 3 0.25962
--   Fit (V2 2.0063 0.88685) (V2 0.09550 0.23826) 3 0.25962
--   
-- --

Non-polynomial example

-- -- We can fit other curves too, for example an example from Wikipedia -- https://en.wikipedia.org/wiki/Gauss%E2%80%93Newton_algorithm#Example -- --
--   >>> let rateF (H3 vmax km s) = (vmax * s) / (km + s)
--   
--   >>> let rateF' (V2 vmax km) (x, y) = case AD.grad' rateF (H3 vmax km x) of (f, H3 vmax' km' _) -> (y, f, V2 vmax' km')
--   
--   >>> let input = zip [0.038,0.194,0.425,0.626,1.253,2.500,3.740] [0.050,0.127,0.094,0.2122,0.2729,0.2665,0.3317]
--   
--   >>> PP $ levenbergMarquardt2 rateF' (V2 0.9 0.2) input
--   Fit (V2 0.90000 0.20000) (V2 0.43304 0.43936) 5 1.4455
--   Fit (V2 0.83306 0.25278) (V2 0.39164 0.49729) 5 1.0055
--   Fit (V2 0.59437 0.43508) (V2 0.21158 0.53403) 5 0.18832
--   Fit (V2 0.39687 0.56324) (V2 0.05723 0.25666) 5 0.01062
--   Fit (V2 0.36289 0.56104) (V2 0.04908 0.24007) 5 0.00784
--   Fit (V2 0.36190 0.55662) (V2 0.04887 0.23843) 5 0.00784
--   Fit (V2 0.36184 0.55629) (V2 0.04885 0.23830) 5 0.00784
--   Fit (V2 0.36184 0.55627) (V2 0.04885 0.23829) 5 0.00784
--   
-- -- We get the same result as in the article: 0.362 and 0.556 -- -- The algorithm terminates when a scaling parameter <math> becomes -- larger than 1e20 or smaller than 1e-20, or relative WSSR change is -- smaller than 1e-10, or sum-of-squared-residuals candidate becomes -- NaN (i.e. when it would start to produce garbage). You may -- want to terminate sooner, Numerical Recipes suggests to stop when WSSR -- decreases by a negligible amount absolutely or fractionally. levenbergMarquardt2 :: Foldable f => (V2 -> a -> (Double, Double, V2)) -> V2 -> f a -> NonEmpty (Fit V2) -- | levenbergMarquardt2 with weights. -- -- Because levenbergMarquardt2 is an iterative algorithm, not only -- can we use it to fit curves with known y-errors -- (levenbergMarquardt2WithYerrors), but also with both x and -- y-errors (levenbergMarquardt2WithXYerrors). levenbergMarquardt2WithWeights :: Foldable f => (V2 -> a -> (Double, Double, V2, Double)) -> V2 -> f a -> NonEmpty (Fit V2) -- | levenbergMarquardt2 with Y-errors. levenbergMarquardt2WithYerrors :: Foldable f => (V2 -> a -> (Double, Double, V2, Double)) -> V2 -> f a -> NonEmpty (Fit V2) -- | levenbergMarquardt2 with XY-errors. levenbergMarquardt2WithXYerrors :: Foldable f => (V2 -> a -> (Double, Double, V2, Double, Double, Double)) -> V2 -> f a -> NonEmpty (Fit V2) -- | Levenberg–Marquardt for functions with three parameters. -- -- See levenbergMarquardt2 for examples, this is very similar. -- --
--   >>> let quad (V3 a b c) (x, y) = (y, a * x * x + b * x + c, V3 (x * x) x 1)
--   
--   >>> PP $ NE.last $ levenbergMarquardt3 quad (V3 2 2 2) input3
--   Fit (V3 1.00000 0.00000 2.0000) (V3 0.00000 0.00000 0.00000) 1 0.00000
--   
-- -- Same as quadratic fit, just less direct: -- --
--   >>> PP $ quadraticFit id input3
--   Fit (V3 1.00000 0.00000 2.0000) (V3 0.00000 0.00000 0.00000) 1 0.00000
--   
levenbergMarquardt3 :: Foldable f => (V3 -> a -> (Double, Double, V3)) -> V3 -> f a -> NonEmpty (Fit V3) -- | levenbergMarquardt3 with weights. levenbergMarquardt3WithWeights :: Foldable f => (V3 -> a -> (Double, Double, V3, Double)) -> V3 -> f a -> NonEmpty (Fit V3) -- | levenbergMarquardt3 with Y-errors. levenbergMarquardt3WithYerrors :: Foldable f => (V3 -> a -> (Double, Double, V3, Double)) -> V3 -> f a -> NonEmpty (Fit V3) -- | levenbergMarquardt3 with XY-errors. levenbergMarquardt3WithXYerrors :: Foldable f => (V3 -> a -> (Double, Double, V3, Double, Double, Double)) -> V3 -> f a -> NonEmpty (Fit V3) -- | Result of a curve fit. data Fit v Fit :: !v -> !v -> !Int -> !Double -> Fit v -- | fit parameters [fitParams] :: Fit v -> !v -- | asymptotic standard errors, assuming a good fit [fitErrors] :: Fit v -> !v -- | number of degrees of freedom [fitNDF] :: Fit v -> !Int -- | sum of squares of residuals [fitWSSR] :: Fit v -> !Double -- | 2d vector. Strict pair of Doubles. -- -- Also used to represent linear polynomial: V2 a b -- <math>. data V2 V2 :: !Double -> !Double -> V2 -- | 3d vector. Strict triple of Doubles. -- -- Also used to represent quadratic polynomial: V3 a b c -- <math>. data V3 V3 :: !Double -> !Double -> !Double -> V3 instance GHC.Show.Show Math.Regression.Simple.LM1Acc instance GHC.Show.Show Math.Regression.Simple.LM2Acc instance GHC.Show.Show Math.Regression.Simple.LM3Acc instance GHC.Show.Show v => GHC.Show.Show (Math.Regression.Simple.Fit v) instance GHC.Show.Show Math.Regression.Simple.LinRegAcc instance GHC.Show.Show Math.Regression.Simple.QuadRegAcc instance Control.DeepSeq.NFData Math.Regression.Simple.QuadRegAcc instance Control.DeepSeq.NFData Math.Regression.Simple.LinRegAcc instance Control.DeepSeq.NFData v => Control.DeepSeq.NFData (Math.Regression.Simple.Fit v)