-- Hoogle documentation, generated by Haddock -- See Hoogle, http://www.haskell.org/hoogle/ -- | Automatic Differentiation -- -- Forward, reverse, and higher-order automatic differentiation -- combinators with a common API. -- -- Type-level "branding" is used to prevent the end user from confusing -- infinitesimals. @package ad @version 0.27 -- | Forward mode automatic differentiation module Numeric.AD.Forward grad :: (Traversable f, Num a) => FU f a -> f a -> f a grad' :: (Traversable f, Num a) => FU f a -> f a -> (a, f a) gradWith :: (Traversable f, Num a) => (a -> a -> b) -> FU f a -> f a -> f b gradWith' :: (Traversable f, Num a) => (a -> a -> b) -> FU f a -> f a -> (a, f b) jacobian :: (Traversable f, Traversable g, Num a) => FF f g a -> f a -> g (f a) jacobian' :: (Traversable f, Traversable g, Num a) => FF f g a -> f a -> g (a, f a) jacobianWith :: (Traversable f, Traversable g, Num a) => (a -> a -> b) -> FF f g a -> f a -> g (f b) jacobianWith' :: (Traversable f, Traversable g, Num a) => (a -> a -> b) -> FF f g a -> f a -> g (a, f b) -- | A fast, simple transposed Jacobian computed with forward-mode AD. jacobianT :: (Traversable f, Functor g, Num a) => FF f g a -> f a -> f (g a) -- | A fast, simple transposed Jacobian computed with forward-mode AD. jacobianWithT :: (Traversable f, Functor g, Num a) => (a -> a -> b) -> FF f g a -> f a -> f (g b) -- | Compute the product of a vector with the Hessian using -- forward-on-forward-mode AD. hessianProduct :: (Traversable f, Num a) => FU f a -> f (a, a) -> f a -- | Compute the gradient and hessian product using forward-on-forward-mode -- AD. hessianProduct' :: (Traversable f, Num a) => FU f a -> f (a, a) -> f (a, a) -- | The diff function calculates the first derivative of a -- scalar-to-scalar function by forward-mode AD -- --
--   diff sin == cos
--   
diff :: (Num a) => UU a -> a -> a -- | The d'UU function calculates the result and first derivative -- of scalar-to-scalar function by Forward AD -- --
--   d' sin == sin &&& cos
--   d' f = f &&& d f
--   
diff' :: (Num a) => UU a -> a -> (a, a) -- | The diffF function calculates the first derivative of -- scalar-to-nonscalar function by Forward AD diffF :: (Functor f, Num a) => UF f a -> a -> f a -- | The diffF' function calculates the result and first derivative -- of a scalar-to-non-scalar function by Forward AD diffF' :: (Functor f, Num a) => UF f a -> a -> f (a, a) du :: (Functor f, Num a) => FU f a -> f (a, a) -> a du' :: (Functor f, Num a) => FU f a -> f (a, a) -> (a, a) duF :: (Functor f, Functor g, Num a) => FF f g a -> f (a, a) -> g a duF' :: (Functor f, Functor g, Num a) => FF f g a -> f (a, a) -> g (a, a) -- | The dUM function calculates the first derivative of -- scalar-to-scalar monadic function by Forward AD diffM :: (Monad m, Num a) => UF m a -> a -> m a -- | The d'UM function calculates the result and first derivative -- of a scalar-to-scalar monadic function by Forward AD diffM' :: (Monad m, Num a) => UF m a -> a -> m (a, a) type UU a = forall s. (Mode s) => AD s a -> AD s a type UF f a = forall s. (Mode s) => AD s a -> f (AD s a) type FU f a = forall s. (Mode s) => f (AD s a) -> AD s a type FF f g a = forall s. (Mode s) => f (AD s a) -> g (AD s a) -- | AD serves as a common wrapper for different Mode -- instances, exposing a traditional numerical tower. Universal -- quantification is used to limit the actions in user code to machinery -- that will return the same answers under all AD modes, allowing us to -- use modes interchangeably as both the type level "brand" and -- dictionary, providing a common API. newtype AD f a AD :: f a -> AD f a runAD :: AD f a -> f a class (Lifted t) => Mode t lift :: (Mode t, Num a) => a -> t a (<+>) :: (Mode t, Num a) => t a -> t a -> t a (*^) :: (Mode t, Num a) => a -> t a -> t a (^*) :: (Mode t, Num a) => t a -> a -> t a (^/) :: (Mode t, Fractional a) => t a -> a -> t a zero :: (Mode t, Num a) => t a -- | Mixed-Mode Automatic Differentiation. 
-- -- For reverse mode AD we use System.Mem.StableName.StableName -- to recover sharing information from the tape to avoid combinatorial -- explosion, and thus run asymptotically faster than it could without -- such sharing information, but the use of side-effects contained herein -- is benign. module Numeric.AD.Reverse -- | The grad function calculates the gradient of a -- non-scalar-to-scalar function with Reverse AD in a single pass. grad :: (Traversable f, Num a) => FU f a -> f a -> f a -- | The grad' function calculates the result and gradient of a -- non-scalar-to-scalar function with Reverse AD in a single pass. grad' :: (Traversable f, Num a) => FU f a -> f a -> (a, f a) -- | grad g f function calculates the gradient of a -- non-scalar-to-scalar function f with reverse-mode AD in a -- single pass. The gradient is combined element-wise with the argument -- using the function g. -- --
--   grad == gradWith (\_ dx -> dx)
--   id == gradWith const
--   
gradWith :: (Traversable f, Num a) => (a -> a -> b) -> FU f a -> f a -> f b -- | grad' g f calculates the result and gradient of a -- non-scalar-to-scalar function f with Reverse AD in a -- single pass the gradient is combined element-wise with the argument -- using the function g. -- --
--   grad' == gradWith' (\_ dx -> dx)
--   
gradWith' :: (Traversable f, Num a) => (a -> a -> b) -> FU f a -> f a -> (a, f b) -- | An alias for gradF jacobian :: (Traversable f, Functor g, Num a) => FF f g a -> f a -> g (f a) -- | An alias for gradF' jacobian' :: (Traversable f, Functor g, Num a) => FF f g a -> f a -> g (a, f a) -- | An alias for gradWithF. jacobianWith :: (Traversable f, Functor g, Num a) => (a -> a -> b) -> FF f g a -> f a -> g (f b) -- | An alias for gradWithF' jacobianWith' :: (Traversable f, Functor g, Num a) => (a -> a -> b) -> FF f g a -> f a -> g (a, f b) -- | Compute the hessian via the jacobian of the gradient. gradient is -- computed in reverse mode and then the jacobian is computed in reverse -- mode. -- -- However, since the 'grad f :: f a -> f a' is square this -- is not as fast as using the forward-mode Jacobian of a reverse mode -- gradient provided by Numeric.AD.hessian in Numeric.AD. hessian :: (Traversable f, Num a) => FU f a -> f a -> f (f a) -- | Compute the hessian via the reverse-mode jacobian of the reverse-mode -- gradient of a non-scalar-to-scalar monadic action. -- -- While this is less efficient than Numeric.AD.hessianTensor -- from Numeric.AD or Numeric.AD.Forward.hessianTensor -- from Numeric.AD.Forward, the type signature is more permissive -- with regards to the output non-scalar, and it may be more efficient if -- only a few coefficients of the result are consumed. hessianM :: (Traversable f, Monad m, Num a) => FF f m a -> f a -> m (f (f a)) -- | Compute the order 3 Hessian tensor on a non-scalar-to-non-scalar -- function via the forward-mode Jacobian of the mixed-mode Jacobian of -- the function. -- -- While this is less efficient than Numeric.AD.hessianTensor -- from Numeric.AD or Numeric.AD.Forward.hessianTensor -- from Numeric.AD.Forward, the type signature is more permissive -- with regards to the output non-scalar, and it may be more efficient if -- only a few coefficients of the result are consumed. 
hessianTensor :: (Traversable f, Functor g, Num a) => FF f g a -> f a -> g (f (f a)) diff :: (Num a) => UU a -> a -> a -- | The d' function calculates the value and derivative, as a -- pair, of a scalar-to-scalar function. diff' :: (Num a) => UU a -> a -> (a, a) diffF :: (Functor f, Num a) => UF f a -> a -> f a diffF' :: (Functor f, Num a) => UF f a -> a -> f (a, a) diffM :: (Monad m, Num a) => UF m a -> a -> m a diffM' :: (Monad m, Num a) => UF m a -> a -> m (a, a) gradM :: (Traversable f, Monad m, Num a) => FF f m a -> f a -> m (f a) gradM' :: (Traversable f, Monad m, Num a) => FF f m a -> f a -> m (a, f a) gradWithM :: (Traversable f, Monad m, Num a) => (a -> a -> b) -> FF f m a -> f a -> m (f b) gradWithM' :: (Traversable f, Monad m, Num a) => (a -> a -> b) -> FF f m a -> f a -> m (a, f b) -- | The gradF function calculates the jacobian of a -- non-scalar-to-non-scalar function with reverse AD lazily in m -- passes for m outputs. gradF :: (Traversable f, Functor g, Num a) => FF f g a -> f a -> g (f a) -- | The gradF' function calculates both the result and the Jacobian -- of a nonscalar-to-nonscalar function, using m invocations of -- reverse AD, where m is the output dimensionality. Applying -- fmap snd to the result will recover the result of -- gradF gradF' :: (Traversable f, Functor g, Num a) => FF f g a -> f a -> g (a, f a) -- | 'gradWithF g f' calculates the Jacobian of a non-scalar-to-non-scalar -- function f with reverse AD lazily in m passes for -- m outputs. -- -- Instead of returning the Jacobian matrix, the elements of the matrix -- are combined with the input using the g. -- --
--   gradF == gradWithF (\_ dx -> dx)
--   gradWithF const == (\f x -> const x <$> f x)
--   
gradWithF :: (Traversable f, Functor g, Num a) => (a -> a -> b) -> FF f g a -> f a -> g (f b) -- | gradWithF g f' calculates both the result and the Jacobian of a -- nonscalar-to-nonscalar function f, using m -- invocations of reverse AD, where m is the output -- dimensionality. Applying fmap snd to the result will recover -- the result of gradWithF -- -- Instead of returning the Jacobian matrix, the elements of the matrix -- are combined with the input using the g. -- --
--   jacobian' == gradWithF' (\_ dx -> dx)
--   
gradWithF' :: (Traversable f, Functor g, Num a) => (a -> a -> b) -> FF f g a -> f a -> g (a, f b) type UU a = forall s. (Mode s) => AD s a -> AD s a type UF f a = forall s. (Mode s) => AD s a -> f (AD s a) type FU f a = forall s. (Mode s) => f (AD s a) -> AD s a type FF f g a = forall s. (Mode s) => f (AD s a) -> g (AD s a) -- | AD serves as a common wrapper for different Mode -- instances, exposing a traditional numerical tower. Universal -- quantification is used to limit the actions in user code to machinery -- that will return the same answers under all AD modes, allowing us to -- use modes interchangeably as both the type level "brand" and -- dictionary, providing a common API. newtype AD f a AD :: f a -> AD f a runAD :: AD f a -> f a class (Lifted t) => Mode t lift :: (Mode t, Num a) => a -> t a (<+>) :: (Mode t, Num a) => t a -> t a -> t a (*^) :: (Mode t, Num a) => a -> t a -> t a (^*) :: (Mode t, Num a) => t a -> a -> t a (^/) :: (Mode t, Fractional a) => t a -> a -> t a zero :: (Mode t, Num a) => t a -- | Higher order derivatives via a "dual number tower". 
module Numeric.AD.Tower taylor :: (Fractional a) => UU a -> a -> a -> [a] taylor0 :: (Fractional a) => UU a -> a -> a -> [a] maclaurin :: (Fractional a) => UU a -> a -> [a] maclaurin0 :: (Fractional a) => UU a -> a -> [a] diff :: (Num a) => UU a -> a -> a diff' :: (Num a) => UU a -> a -> (a, a) diffs :: (Num a) => UU a -> a -> [a] diffs0 :: (Num a) => UU a -> a -> [a] diffsF :: (Functor f, Num a) => UF f a -> a -> f [a] diffs0F :: (Functor f, Num a) => UF f a -> a -> f [a] du :: (Functor f, Num a) => FU f a -> f (a, a) -> a du' :: (Functor f, Num a) => FU f a -> f (a, a) -> (a, a) dus :: (Functor f, Num a) => FU f a -> f [a] -> [a] dus0 :: (Functor f, Num a) => FU f a -> f [a] -> [a] duF :: (Functor f, Functor g, Num a) => FF f g a -> f (a, a) -> g a duF' :: (Functor f, Functor g, Num a) => FF f g a -> f (a, a) -> g (a, a) dusF :: (Functor f, Functor g, Num a) => FF f g a -> f [a] -> g [a] dus0F :: (Functor f, Functor g, Num a) => FF f g a -> f [a] -> g [a] diffsM :: (Monad m, Num a) => UF m a -> a -> m [a] diffs0M :: (Monad m, Num a) => UF m a -> a -> m [a] type UU a = forall s. (Mode s) => AD s a -> AD s a type UF f a = forall s. (Mode s) => AD s a -> f (AD s a) type FU f a = forall s. (Mode s) => f (AD s a) -> AD s a type FF f g a = forall s. (Mode s) => f (AD s a) -> g (AD s a) class (Lifted t) => Mode t lift :: (Mode t, Num a) => a -> t a (<+>) :: (Mode t, Num a) => t a -> t a -> t a (*^) :: (Mode t, Num a) => a -> t a -> t a (^*) :: (Mode t, Num a) => t a -> a -> t a (^/) :: (Mode t, Fractional a) => t a -> a -> t a zero :: (Mode t, Num a) => t a -- | AD serves as a common wrapper for different Mode -- instances, exposing a traditional numerical tower. Universal -- quantification is used to limit the actions in user code to machinery -- that will return the same answers under all AD modes, allowing us to -- use modes interchangeably as both the type level "brand" and -- dictionary, providing a common API. 
newtype AD f a AD :: f a -> AD f a runAD :: AD f a -> f a module Numeric.AD.Newton -- | The findZero function finds a zero of a scalar function using -- Newton's method; its output is a stream of increasingly accurate -- results. (Modulo the usual caveats.) -- -- Examples: -- --
--   take 10 $ findZero (\x->x^2-4) 1  -- converges to 2.0
--   
-- --
--   import Data.Complex
--   take 10 $ findZero ((+1).(^2)) (1 :+ 1)  -- converges to (0 :+ 1)
--   
findZero :: (Fractional a) => UU a -> a -> [a] findZeroM :: (Monad m, Fractional a) => UF m a -> a -> MList m a -- | The inverseNewton function inverts a scalar function using -- Newton's method; its output is a stream of increasingly accurate -- results. (Modulo the usual caveats.) -- -- Example: -- --
--   take 10 $ inverse sqrt 1 (sqrt 10)  -- converges to 10
--   
inverse :: (Fractional a) => UU a -> a -> a -> [a] inverseM :: (Monad m, Fractional a) => UF m a -> a -> a -> MList m a -- | The fixedPoint function find a fixedpoint of a scalar function -- using Newton's method; its output is a stream of increasingly accurate -- results. (Modulo the usual caveats.) -- --
--   take 10 $ fixedPoint cos 1 -- converges to 0.7390851332151607
--   
fixedPoint :: (Fractional a) => UU a -> a -> [a] fixedPointM :: (Monad m, Fractional a) => UF m a -> a -> MList m a -- | The extremum function finds an extremum of a scalar function -- using Newton's method; produces a stream of increasingly accurate -- results. (Modulo the usual caveats.) -- --
--   take 10 $ extremum cos 1 -- converges to 0
--   
extremum :: (Fractional a) => UU a -> a -> [a] extremumM :: (Monad m, Fractional a) => UF m a -> a -> MList m a -- | The gradientDescent function performs a multivariate -- optimization, based on the naive-gradient-descent in the file -- stalingrad/examples/flow-tests/pre-saddle-1a.vlad from the -- VLAD compiler Stalingrad sources. Its output is a stream of -- increasingly accurate results. (Modulo the usual caveats.) -- -- It uses reverse mode automatic differentiation to compute the -- gradient. gradientDescent :: (Traversable f, Fractional a, Ord a) => FU f a -> f a -> [f a] gradientDescentM :: (Traversable f, Monad m, Fractional a, Ord a) => FF f m a -> f a -> MList m (f a) gradientAscent :: (Traversable f, Fractional a, Ord a) => FU f a -> f a -> [f a] gradientAscentM :: (Traversable f, Monad m, Fractional a, Ord a) => FF f m a -> f a -> MList m (f a) type UU a = forall s. (Mode s) => AD s a -> AD s a type UF f a = forall s. (Mode s) => AD s a -> f (AD s a) type FU f a = forall s. (Mode s) => f (AD s a) -> AD s a type FF f g a = forall s. (Mode s) => f (AD s a) -> g (AD s a) -- | AD serves as a common wrapper for different Mode -- instances, exposing a traditional numerical tower. Universal -- quantification is used to limit the actions in user code to machinery -- that will return the same answers under all AD modes, allowing us to -- use modes interchangeably as both the type level "brand" and -- dictionary, providing a common API. newtype AD f a AD :: f a -> AD f a runAD :: AD f a -> f a class (Lifted t) => Mode t lift :: (Mode t, Num a) => a -> t a (<+>) :: (Mode t, Num a) => t a -> t a -> t a (*^) :: (Mode t, Num a) => a -> t a -> t a (^*) :: (Mode t, Num a) => t a -> a -> t a (^/) :: (Mode t, Fractional a) => t a -> a -> t a zero :: (Mode t, Num a) => t a -- | A cofree comonad/f-branching stream for use in returning towers of -- gradients. 
module Numeric.AD.Stream data (:>) f a (:<) :: a -> f (f :> a) -> :> f a class (Functor f) => Comonad f extract :: (Comonad f) => (f :> a) -> a duplicate :: (Comonad f) => (f :> a) -> (f :> (f :> a)) extend :: (Comonad f) => ((f :> a) -> b) -> (f :> a) -> (f :> b) unfold :: (Functor f) => (a -> (b, f a)) -> a -> (f :> b) tails :: (f :> a) -> f (f :> a) -- | Mixed-Mode Automatic Differentiation. -- -- Each combinator exported from this module chooses an appropriate AD -- mode. module Numeric.AD -- | The grad function calculates the gradient of a -- non-scalar-to-scalar function with Reverse AD in a single pass. grad :: (Traversable f, Num a) => FU f a -> f a -> f a -- | The grad' function calculates the result and gradient of a -- non-scalar-to-scalar function with Reverse AD in a single pass. grad' :: (Traversable f, Num a) => FU f a -> f a -> (a, f a) -- | grad g f function calculates the gradient of a -- non-scalar-to-scalar function f with reverse-mode AD in a -- single pass. The gradient is combined element-wise with the argument -- using the function g. -- --
--   grad == gradWith (\_ dx -> dx)
--   id == gradWith const
--   
gradWith :: (Traversable f, Num a) => (a -> a -> b) -> FU f a -> f a -> f b -- | grad' g f calculates the result and gradient of a -- non-scalar-to-scalar function f with Reverse AD in a -- single pass the gradient is combined element-wise with the argument -- using the function g. -- --
--   grad' == gradWith' (\_ dx -> dx)
--   
gradWith' :: (Traversable f, Num a) => (a -> a -> b) -> FU f a -> f a -> (a, f b) -- | Calculate the Jacobian of a non-scalar-to-non-scalar function, -- automatically choosing between forward and reverse mode AD based on -- the number of inputs and outputs. -- -- If you need to support functions where the output is only a -- Functor or Monad, consider -- Numeric.AD.Reverse.jacobian or gradM from -- Numeric.AD.Reverse. jacobian :: (Traversable f, Traversable g, Num a) => FF f g a -> f a -> g (f a) -- | Calculate both the answer and Jacobian of a non-scalar-to-non-scalar -- function, automatically choosing between forward- and reverse- mode AD -- based on the relative, number of inputs and outputs. -- -- If you need to support functions where the output is only a -- Functor or Monad, consider -- Numeric.AD.Reverse.jacobian' or gradM' from -- Numeric.AD.Reverse. jacobian' :: (Traversable f, Traversable g, Num a) => FF f g a -> f a -> g (a, f a) -- | jacobianWith g f calculates the Jacobian of a -- non-scalar-to-non-scalar function, automatically choosing between -- forward and reverse mode AD based on the number of inputs and outputs. -- -- The resulting Jacobian matrix is then recombined element-wise with the -- input using g. -- -- If you need to support functions where the output is only a -- Functor or Monad, consider -- Numeric.AD.Reverse.jacobianWith or gradWithM from -- Numeric.AD.Reverse. jacobianWith :: (Traversable f, Traversable g, Num a) => (a -> a -> b) -> FF f g a -> f a -> g (f b) -- | jacobianWith' g f calculates the answer and Jacobian -- of a non-scalar-to-non-scalar function, automatically choosing between -- forward and reverse mode AD based on the number of inputs and outputs. -- -- The resulting Jacobian matrix is then recombined element-wise with the -- input using g. -- -- If you need to support functions where the output is only a -- Functor or Monad, consider -- Numeric.AD.Reverse.jacobianWith' or gradWithM' from -- Numeric.AD.Reverse. 
jacobianWith' :: (Traversable f, Traversable g, Num a) => (a -> a -> b) -> FF f g a -> f a -> g (a, f b) gradM :: (Traversable f, Monad m, Num a) => FF f m a -> f a -> m (f a) gradM' :: (Traversable f, Monad m, Num a) => FF f m a -> f a -> m (a, f a) gradWithM :: (Traversable f, Monad m, Num a) => (a -> a -> b) -> FF f m a -> f a -> m (f b) gradWithM' :: (Traversable f, Monad m, Num a) => (a -> a -> b) -> FF f m a -> f a -> m (a, f b) -- | The gradF function calculates the jacobian of a -- non-scalar-to-non-scalar function with reverse AD lazily in m -- passes for m outputs. gradF :: (Traversable f, Functor g, Num a) => FF f g a -> f a -> g (f a) -- | The gradF' function calculates both the result and the Jacobian -- of a nonscalar-to-nonscalar function, using m invocations of -- reverse AD, where m is the output dimensionality. Applying -- fmap snd to the result will recover the result of -- gradF gradF' :: (Traversable f, Functor g, Num a) => FF f g a -> f a -> g (a, f a) -- | 'gradWithF g f' calculates the Jacobian of a non-scalar-to-non-scalar -- function f with reverse AD lazily in m passes for -- m outputs. -- -- Instead of returning the Jacobian matrix, the elements of the matrix -- are combined with the input using the g. -- --
--   gradF == gradWithF (\_ dx -> dx)
--   gradWithF const == (\f x -> const x <$> f x)
--   
gradWithF :: (Traversable f, Functor g, Num a) => (a -> a -> b) -> FF f g a -> f a -> g (f b) -- | gradWithF g f' calculates both the result and the Jacobian of a -- nonscalar-to-nonscalar function f, using m -- invocations of reverse AD, where m is the output -- dimensionality. Applying fmap snd to the result will recover -- the result of gradWithF -- -- Instead of returning the Jacobian matrix, the elements of the matrix -- are combined with the input using the g. -- --
--   jacobian' == gradWithF' (\_ dx -> dx)
--   
gradWithF' :: (Traversable f, Functor g, Num a) => (a -> a -> b) -> FF f g a -> f a -> g (a, f b) -- | A fast, simple transposed Jacobian computed with forward-mode AD. jacobianT :: (Traversable f, Functor g, Num a) => FF f g a -> f a -> f (g a) -- | A fast, simple transposed Jacobian computed with forward-mode AD. jacobianWithT :: (Traversable f, Functor g, Num a) => (a -> a -> b) -> FF f g a -> f a -> f (g b) -- | Compute the hessian via the jacobian of the gradient. gradient is -- computed in reverse mode and then the jacobian is computed in forward -- mode. hessian :: (Traversable f, Num a) => FU f a -> f a -> f (f a) -- | Compute the order 3 Hessian tensor on a non-scalar-to-non-scalar -- function via the forward-mode Jacobian of the mixed-mode Jacobian of -- the function. hessianTensor :: (Traversable f, Traversable g, Num a) => FF f g a -> f a -> g (f (f a)) -- | hessianProduct f wv computes the product of the -- hessian H of a non-scalar-to-scalar function f at -- w = fst $ wv with a vector v = snd $ -- wv using "Pearlmutter's method" from -- http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.29.6143, -- which states: -- --
--   H v = (d/dr) grad_w (w + r v) | r = 0
--   
-- -- Or in other words, we take the directional derivative of the gradient. hessianProduct :: (Traversable f, Num a) => FU f a -> f (a, a) -> f a -- | hessianProduct' f wv computes both the gradient of a -- non-scalar-to-scalar f at w = fst $ wv -- and the product of the hessian H at w with a vector -- v = snd $ wv using "Pearlmutter's method". The outputs -- are returned wrapped in the same functor. -- --
--   H v = (d/dr) grad_w (w + r v) | r = 0
--   
-- -- Or in other words, we take the directional derivative of the gradient. hessianProduct' :: (Traversable f, Num a) => FU f a -> f (a, a) -> f (a, a) -- | The diff function calculates the first derivative of a -- scalar-to-scalar function by forward-mode AD -- --
--   diff sin == cos
--   
diff :: (Num a) => UU a -> a -> a -- | The diffF function calculates the first derivative of -- scalar-to-nonscalar function by Forward AD diffF :: (Functor f, Num a) => UF f a -> a -> f a -- | The d'UU function calculates the result and first derivative -- of scalar-to-scalar function by Forward AD -- --
--   d' sin == sin &&& cos
--   d' f = f &&& d f
--   
diff' :: (Num a) => UU a -> a -> (a, a) -- | The diffF' function calculates the result and first derivative -- of a scalar-to-non-scalar function by Forward AD diffF' :: (Functor f, Num a) => UF f a -> a -> f (a, a) diffs :: (Num a) => UU a -> a -> [a] diffsF :: (Functor f, Num a) => UF f a -> a -> f [a] diffs0 :: (Num a) => UU a -> a -> [a] diffs0F :: (Functor f, Num a) => UF f a -> a -> f [a] du :: (Functor f, Num a) => FU f a -> f (a, a) -> a du' :: (Functor f, Num a) => FU f a -> f (a, a) -> (a, a) duF :: (Functor f, Functor g, Num a) => FF f g a -> f (a, a) -> g a duF' :: (Functor f, Functor g, Num a) => FF f g a -> f (a, a) -> g (a, a) dus :: (Functor f, Num a) => FU f a -> f [a] -> [a] dus0 :: (Functor f, Num a) => FU f a -> f [a] -> [a] dusF :: (Functor f, Functor g, Num a) => FF f g a -> f [a] -> g [a] dus0F :: (Functor f, Functor g, Num a) => FF f g a -> f [a] -> g [a] taylor :: (Fractional a) => UU a -> a -> a -> [a] taylor0 :: (Fractional a) => UU a -> a -> a -> [a] maclaurin :: (Fractional a) => UU a -> a -> [a] maclaurin0 :: (Fractional a) => UU a -> a -> [a] -- | The dUM function calculates the first derivative of -- scalar-to-scalar monadic function by Forward AD diffM :: (Monad m, Num a) => UF m a -> a -> m a -- | The d'UM function calculates the result and first derivative -- of a scalar-to-scalar monadic function by Forward AD diffM' :: (Monad m, Num a) => UF m a -> a -> m (a, a) type UU a = forall s. (Mode s) => AD s a -> AD s a type UF f a = forall s. (Mode s) => AD s a -> f (AD s a) type FU f a = forall s. (Mode s) => f (AD s a) -> AD s a type FF f g a = forall s. (Mode s) => f (AD s a) -> g (AD s a) -- | AD serves as a common wrapper for different Mode -- instances, exposing a traditional numerical tower. 
Universal -- quantification is used to limit the actions in user code to machinery -- that will return the same answers under all AD modes, allowing us to -- use modes interchangeably as both the type level "brand" and -- dictionary, providing a common API. newtype AD f a AD :: f a -> AD f a runAD :: AD f a -> f a class (Lifted t) => Mode t lift :: (Mode t, Num a) => a -> t a (<+>) :: (Mode t, Num a) => t a -> t a -> t a (*^) :: (Mode t, Num a) => a -> t a -> t a (^*) :: (Mode t, Num a) => t a -> a -> t a (^/) :: (Mode t, Fractional a) => t a -> a -> t a zero :: (Mode t, Num a) => t a -- | Allows the choice of AD Mode to be specified at the term level -- for benchmarking or more complicated usage patterns. module Numeric.AD.Directed grad :: (Traversable f, Num a) => Direction -> FU f a -> f a -> f a grad' :: (Traversable f, Num a) => Direction -> FU f a -> f a -> (a, f a) jacobian :: (Traversable f, Traversable g, Num a) => Direction -> FF f g a -> f a -> g (f a) jacobian' :: (Traversable f, Traversable g, Num a) => Direction -> FF f g a -> f a -> g (a, f a) diff :: (Num a) => Direction -> UU a -> a -> a diff' :: (Num a) => Direction -> UU a -> a -> (a, a) type UU a = forall s. (Mode s) => AD s a -> AD s a type UF f a = forall s. (Mode s) => AD s a -> f (AD s a) type FU f a = forall s. (Mode s) => f (AD s a) -> AD s a type FF f g a = forall s. (Mode s) => f (AD s a) -> g (AD s a) data Direction Forward :: Direction Reverse :: Direction Tower :: Direction Mixed :: Direction class (Lifted t) => Mode t lift :: (Mode t, Num a) => a -> t a (<+>) :: (Mode t, Num a) => t a -> t a -> t a (*^) :: (Mode t, Num a) => a -> t a -> t a (^*) :: (Mode t, Num a) => t a -> a -> t a (^/) :: (Mode t, Fractional a) => t a -> a -> t a zero :: (Mode t, Num a) => t a -- | AD serves as a common wrapper for different Mode -- instances, exposing a traditional numerical tower. 
Universal -- quantification is used to limit the actions in user code to machinery -- that will return the same answers under all AD modes, allowing us to -- use modes interchangeably as both the type level "brand" and -- dictionary, providing a common API. newtype AD f a AD :: f a -> AD f a runAD :: AD f a -> f a instance Show Direction instance Eq Direction instance Ord Direction instance Read Direction instance Bounded Direction instance Enum Direction instance Ix Direction