-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/

-- | Various iterative algorithms for optimization of nonlinear functions.
--
-- This library implements numerical algorithms to optimize nonlinear
-- functions. Optimization means that we try to find a minimum of the
-- function. Currently all algorithms guarantee only that local minima
-- will be found, not global ones.
--
-- Almost any continuously differentiable function f : R^n -> R may be
-- optimized by this library. Any further restrictions are listed in the
-- modules that need them.
--
-- We use the vector package to represent vectors and matrices, although
-- it would be possible to use something like hmatrix easily.
--
-- Currently only the CG_DESCENT method is implemented.
@package nonlinear-optimization
@version 0.3.2


-- | This module implements the algorithms described by Hager and Zhang
-- [1]. We use bindings to the CG_DESCENT library by the same authors,
-- version 3.0 from 18/05/2008 [2]. The library code is also licensed
-- under the terms of the GPL.
--
module Numeric.Optimization.Algorithms.HagerZhang05

-- | Run the CG_DESCENT optimizer and try to minimize the function.
optimize :: (Vector v Double) => Parameters -> Double -> v Double -> Function t1 -> Gradient t2 -> Maybe (Combined t3) -> IO (Vector Double, Result, Statistics)

-- | Function calculating the value of the objective function f at a
-- point x.
data Function t
VFunction :: (v Double -> Double) -> Function Simple
MFunction :: (forall m. (PrimMonad m, Functor m) => PointMVector m -> m Double) -> Function Mutable

-- | Function calculating the value of the gradient of the objective
-- function f at a point x.
--
-- The MGradient constructor uses a function receiving as parameters the
-- point x being evaluated (which should not be modified) and the vector
-- where the gradient should be written.
data Gradient t
VGradient :: (v Double -> v Double) -> Gradient Simple
MGradient :: (forall m. (PrimMonad m, Functor m) => PointMVector m -> GradientMVector m -> m ()) -> Gradient Mutable

-- | Function calculating both the value of the objective function f and
-- its gradient at a point x.
data Combined t
VCombined :: (v Double -> (Double, v Double)) -> Combined Simple
MCombined :: (forall m. (PrimMonad m, Functor m) => PointMVector m -> GradientMVector m -> m Double) -> Combined Mutable

-- | Mutable vector representing the point where the function/gradient
-- is being evaluated. This vector should not be modified.
type PointMVector m = MVector (PrimState m) Double

-- | Mutable vector representing where the gradient should be written.
type GradientMVector m = MVector (PrimState m) Double

-- | Phantom type for simple pure functions.
data Simple

-- | Phantom type for functions using mutable data.
data Mutable
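-- A minimal usage sketch of the Simple (pure) interface, not part of the
-- generated listing. It assumes Data.Vector.Unboxed from the vector
-- package and takes the Double argument of optimize to be the gradient
-- tolerance grad_tol used by the stop rule; the example minimizes
-- f(x, y) = (x - 1)^2 + (y - 2)^2 starting from (0, 0):
--
--   import qualified Data.Vector.Unboxed as U
--   import Numeric.Optimization.Algorithms.HagerZhang05
--
--   -- Objective function; its unique minimum is at (1, 2).
--   objective :: U.Vector Double -> Double
--   objective v = (x - 1)^2 + (y - 2)^2
--     where x = v U.! 0
--           y = v U.! 1
--
--   -- Gradient of the objective: (2*(x - 1), 2*(y - 2)).
--   gradient :: U.Vector Double -> U.Vector Double
--   gradient v = U.fromList [2 * (x - 1), 2 * (y - 2)]
--     where x = v U.! 0
--           y = v U.! 1
--
--   main :: IO ()
--   main = do
--     (solution, result, stats) <-
--       optimize defaultParameters     -- default optimizer settings
--                1e-7                  -- grad_tol
--                (U.fromList [0, 0])   -- initial guess
--                (VFunction objective) -- pure objective function
--                (VGradient gradient)  -- pure gradient
--                Nothing               -- no combined function/gradient
--     print (solution, result, finalValue stats)
--
-- The Mutable constructors (MFunction, MGradient, MCombined) play the
-- same role but work in place on PointMVector/GradientMVector.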
data Result
-- | Convergence tolerance was satisfied.
ToleranceStatisfied :: Result
-- | Change in function value was less than funcEpsilon * |f|.
FunctionChange :: Result
-- | Total iterations exceeded maxItersFac * n.
MaxTotalIter :: Result
-- | Slope was always negative in line search.
NegativeSlope :: Result
-- | Number of secant iterations exceeded nsecant.
MaxSecantIter :: Result
-- | Search direction not a descent direction.
NotDescent :: Result
-- | Line search fails in initial interval.
LineSearchFailsInitial :: Result
-- | Line search fails during bisection.
LineSearchFailsBisection :: Result
-- | Line search fails during interval update.
LineSearchFailsUpdate :: Result
-- | Debug tolerance was on and the test failed (see debugTol).
DebugTol :: Result
-- | Function value became NaN.
FunctionValueNaN :: Result
-- | Initial function value was NaN.
StartFunctionValueNaN :: Result

-- | Statistics given after the process finishes.
data Statistics
Statistics :: Double -> Double -> CInt -> CInt -> CInt -> Statistics
-- | Value of the function at the solution.
finalValue :: Statistics -> Double
-- | Maximum absolute component of the gradient at the solution.
gradNorm :: Statistics -> Double
-- | Total number of iterations.
totalIters :: Statistics -> CInt
-- | Total number of function evaluations.
funcEvals :: Statistics -> CInt
-- | Total number of gradient evaluations.
gradEvals :: Statistics -> CInt

-- | Default parameters. See the documentation for Parameters and
-- TechParameters to see what the defaults are.
defaultParameters :: Parameters

-- | Parameters given to the optimizer.
data Parameters
Parameters :: Bool -> Bool -> Verbose -> LineSearch -> Double -> StopRules -> EstimateError -> Maybe Double -> Maybe Double -> Maybe Double -> Double -> CInt -> CInt -> Double -> Double -> Double -> TechParameters -> Parameters
-- | Print final statistics to stdout. Defaults to True.
printFinal :: Parameters -> Bool
-- | Print parameters to stdout before starting. Defaults to False.
printParams :: Parameters -> Bool
-- | How verbose we should be while computing. Everything is printed to
-- stdout. Defaults to Quiet.
verbose :: Parameters -> Verbose
-- | What kind of line search should be used. Defaults to AutoSwitch 1e-3.
lineSearch :: Parameters -> LineSearch
-- | Factor in [0, 1] used to compute average cost magnitude C_k as
-- follows:
--
--   Q_k = 1 + (qdecay)Q_{k-1},   Q_0 = 0
--   C_k = C_{k-1} + (|f_k| - C_{k-1})/Q_k
--   
--
-- Defaults to 0.7.
qdecay :: Parameters -> Double

-- | Stop rules that define when the iterations should end. Defaults to
-- DefaultStopRule 0.
stopRules :: Parameters -> StopRules

-- | How to calculate the estimated error in the function value. Defaults
-- to RelativeEpsilon 1e-6.
estimateError :: Parameters -> EstimateError

-- | When to attempt quadratic interpolation in line search. If Nothing,
-- then never try a quadratic interpolation step. If Just cutoff, then
-- attempt quadratic interpolation in line search when
-- |f_{k+1} - f_k| / f_k <= cutoff. Defaults to Just 1e-12.
quadraticStep :: Parameters -> Maybe Double

-- | If Just tol, then always check that f_{k+1} - f_k <= tol * C_k.
-- Otherwise, if Nothing, then no checking of function values is done.
-- Defaults to Nothing.
debugTol :: Parameters -> Maybe Double

-- | If Just step, then use step as the initial step of the line search.
-- Otherwise, if Nothing, then the initial step is programmatically
-- calculated. Defaults to Nothing.
initialStep :: Parameters -> Maybe Double

-- | Defines the maximum number of iterations. The process is aborted
-- when maxItersFac * n iterations are done, where n is the number of
-- dimensions. Defaults to infinity.
maxItersFac :: Parameters -> Double

-- | Maximum number of times the bracketing interval grows or shrinks in
-- the line search. Defaults to 50.
nexpand :: Parameters -> CInt

-- | Maximum number of secant iterations in line search. Defaults to 50.
nsecant :: Parameters -> CInt

-- | Restart the conjugate gradient method after restartFac * n
-- iterations. Defaults to 1.
restartFac :: Parameters -> Double

-- | Stop when -alpha * dphi0, the estimated change in function value, is
-- less than funcEpsilon * |f|. Defaults to 0.
funcEpsilon :: Parameters -> Double

-- | After encountering NaN while calculating the step length, growth
-- factor when searching for a bracketing interval. Defaults to 1.3.
nanRho :: Parameters -> Double

-- | Technical parameters which you probably should not touch.
techParameters :: Parameters -> TechParameters

-- | How verbose we should be.
data Verbose
-- | Do not output anything to stdout, which most of the time is good.
Quiet :: Verbose
-- | Print what work is being done on each iteration.
Verbose :: Verbose
-- | Print information about every step; may be useful for
-- troubleshooting.
VeryVerbose :: Verbose

-- | Line search methods that may be used.
data LineSearch
-- | Use approximate Wolfe line search.
ApproximateWolfe :: LineSearch
-- | Use ordinary Wolfe line search, and switch to approximate Wolfe when
--
--   |f_{k+1} - f_k| < AWolfeFac * C_k
--   
--
-- where C_k is the average size of the cost and AWolfeFac is the
-- parameter to this constructor.
AutoSwitch :: Double -> LineSearch

-- | Stop rules used to decide when to stop iterating.
data StopRules
-- | DefaultStopRule stop_fac stops when
--
--   |g_k|_infty <= max(grad_tol, |g_0|_infty * stop_fac)
--   
--
-- where |g_i|_infty is the maximum absolute component of the gradient
-- at the i-th step.
DefaultStopRule :: Double -> StopRules

-- | AlternativeStopRule stops when
--
--   |g_k|_infty <= grad_tol * (1 + |f_k|)
--   
AlternativeStopRule :: StopRules

-- | How to calculate the estimated error in the function value.
data EstimateError
-- | AbsoluteEpsilon eps estimates the error as eps.
AbsoluteEpsilon :: Double -> EstimateError
-- | RelativeEpsilon eps estimates the error as eps * C_k.
RelativeEpsilon :: Double -> EstimateError

-- | Technical parameters which you probably should not touch. You should
-- read the papers of CG_DESCENT to understand how you can tune these
-- parameters.
data TechParameters
TechParameters :: Double -> Double -> Double -> Double -> Double -> Double -> Double -> Double -> TechParameters
-- | Wolfe line search parameter. Defaults to 0.1.
techDelta :: TechParameters -> Double
-- | Wolfe line search parameter. Defaults to 0.9.
techSigma :: TechParameters -> Double
-- | Decay factor for bracket interval width. Defaults to 0.66.
techGamma :: TechParameters -> Double
-- | Growth factor when searching for initial bracketing interval.
-- Defaults to 5.
techRho :: TechParameters -> Double
-- | Lower bound for the conjugate gradient update parameter beta_k is
-- techEta * ||d||_2. Defaults to 0.01.
techEta :: TechParameters -> Double
-- | Factor used in starting guess for iteration 1. Defaults to 0.01.
techPsi0 :: TechParameters -> Double
-- | In performing a QuadStep, we evaluate the function at psi1 *
-- previous step. Defaults to 0.1.
techPsi1 :: TechParameters -> Double
-- | When starting a new CG iteration, our initial guess for the line
-- search stepsize is psi2 * previous step. Defaults to 2.
techPsi2 :: TechParameters -> Double

instance Eq EstimateError
instance Ord EstimateError
instance Show EstimateError
instance Read EstimateError
instance Eq StopRules
instance Ord StopRules
instance Show StopRules
instance Read StopRules
instance Eq LineSearch
instance Ord LineSearch
instance Show LineSearch
instance Read LineSearch
instance Eq Verbose
instance Ord Verbose
instance Show Verbose
instance Read Verbose
instance Enum Verbose
instance Eq TechParameters
instance Ord TechParameters
instance Show TechParameters
instance Read TechParameters
instance Eq Parameters
instance Ord Parameters
instance Show Parameters
instance Read Parameters
instance Eq Statistics
instance Ord Statistics
instance Show Statistics
instance Read Statistics
instance Eq Result
instance Ord Result
instance Show Result
instance Read Result
instance Enum Result
instance Storable Parameters
instance Storable Statistics
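-- A sketch of overriding a few optimizer settings, not part of the
-- generated listing. Parameters is a plain record (its selectors are
-- listed above), so starting from defaultParameters and using record
-- update syntax is enough; the values below are illustrative, not
-- recommendations:
--
--   import Numeric.Optimization.Algorithms.HagerZhang05
--
--   -- Start from the defaults and override selected fields.
--   myParams :: Parameters
--   myParams = defaultParameters
--     { verbose     = Verbose           -- report work done per iteration
--     , lineSearch  = ApproximateWolfe  -- always use approximate Wolfe
--     , stopRules   = DefaultStopRule 0 -- stop when |g_k|_infty <= grad_tol
--     , maxItersFac = 100               -- abort after 100 * n iterations
--     }
--
-- It would then be passed to optimize in place of defaultParameters, for
-- example: optimize myParams 1e-7 x0 fun grad Nothing.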