-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/
-- | Reverse Automatic Differentiation.
--
-- Reverse-Mode Automatic Differentiation via overloading. Existential
-- type "branding" is used to prevent sensitivity confusion.
@package rad
@version 0.1.3
-- | Reverse Mode Automatic Differentiation via overloading to perform
-- nonstandard interpretation that replaces original numeric type with a
-- bundle that contains a value of the original type and the tape that
-- will be used to recover the value of the sensitivity.
--
-- This package uses StableNames internally to recover sharing
-- information from the tape to avoid combinatorial explosion, and thus
-- runs asymptotically faster than it could without such sharing
-- information, but the use of side-effects contained herein is benign.
--
-- The API has been built to be close to the design of
-- Numeric.FAD from the fad package by Barak
-- Pearlmutter and Jeffrey Mark Siskind and contains portions of that
-- code, with minor liberties taken.
module Numeric.RAD
data RAD s a
-- | The lift function injects a primal number into the RAD data
-- type with a 0 derivative. If reverse-mode AD numbers formed a monad,
-- then lift would be return.
lift :: a -> RAD s a
-- | The diffUU function calculates the first derivative of a
-- scalar-to-scalar function.
diffUU :: (Num a) => (forall s. RAD s a -> RAD s a) -> a -> a
-- | The diffUF function calculates the first derivative of
-- scalar-to-nonscalar function.
diffUF :: (Functor f, Num a) => (forall s. RAD s a -> f (RAD s a)) -> a -> f a
-- | The diff2UU function calculates the value and derivative, as a
-- pair, of a scalar-to-scalar function.
diff2UU :: (Num a) => (forall s. RAD s a -> RAD s a) -> a -> (a, a)
-- | Note that the signature differs from that used in Numeric.FAD, because
-- while you can always unzip an arbitrary functor, not all
-- functors can be zipped.
diff2UF :: (Functor f, Num a) => (forall s. RAD s a -> f (RAD s a)) -> a -> f (a, a)
-- | The diff function is a synonym for diffUU.
diff :: (Num a) => (forall s. RAD s a -> RAD s a) -> a -> a
-- | The diff2 function is a synonym for diff2UU.
diff2 :: (Num a) => (forall s. RAD s a -> RAD s a) -> a -> (a, a)
-- | The jacobian function calculates the Jacobian of a
-- nonscalar-to-nonscalar function, using m invocations of reverse AD,
-- where m is the output dimensionality. When the output dimensionality
-- is significantly greater than the input dimensionality you should use
-- Numeric.FAD.jacobian instead.
jacobian :: (Traversable f, Functor g, Num a) => (forall s. f (RAD s a) -> g (RAD s a)) -> f a -> g (f a)
-- | The jacobian2 function calculates both the result and the
-- Jacobian of a nonscalar-to-nonscalar function, using m invocations of
-- reverse AD, where m is the output dimensionality. 'fmap snd' on the
-- result will recover the result of jacobian.
jacobian2 :: (Traversable f, Functor g, Num a) => (forall s. f (RAD s a) -> g (RAD s a)) -> f a -> g (a, f a)
grad :: (Traversable f, Num a) => (forall s. f (RAD s a) -> RAD s a) -> f a -> f a
grad2 :: (Traversable f, Num a) => (forall s. f (RAD s a) -> RAD s a) -> f a -> (a, f a)
-- | The zeroNewton function finds a zero of a scalar function using
-- Newton's method; its output is a stream of increasingly accurate
-- results. (Modulo the usual caveats.)
--
-- TEST CASE: take 10 $ zeroNewton (\x->x^2-4) 1 -- converge to
-- 2.0
--
-- TEST CASE :module Data.Complex Numeric.RAD take 10 $ zeroNewton
-- ((+1).(^2)) (1 :+ 1) -- converge to (0 :+ 1)
zeroNewton :: (Fractional a) => (forall s. RAD s a -> RAD s a) -> a -> [a]
-- | The inverseNewton function inverts a scalar function using
-- Newton's method; its output is a stream of increasingly accurate
-- results. (Modulo the usual caveats.)
--
-- TEST CASE: take 10 $ inverseNewton sqrt 1 (sqrt 10) -- converge to
-- 10
inverseNewton :: (Fractional a) => (forall s. RAD s a -> RAD s a) -> a -> a -> [a]
-- | The fixedPointNewton function finds a fixed point of a scalar
-- function using Newton's method; its output is a stream of increasingly
-- accurate results. (Modulo the usual caveats.)
fixedPointNewton :: (Fractional a) => (forall s. RAD s a -> RAD s a) -> a -> [a]
-- | The extremumNewton function finds an extremum of a scalar
-- function using Newton's method; produces a stream of increasingly
-- accurate results. (Modulo the usual caveats.)
extremumNewton :: (Fractional a) => (forall s t. RAD t (RAD s a) -> RAD t (RAD s a)) -> a -> [a]
-- | The argminNaiveGradient function performs a multivariate
-- optimization, based on the naive-gradient-descent in the file
-- stalingrad/examples/flow-tests/pre-saddle-1a.vlad from the
-- VLAD compiler Stalingrad sources. Its output is a stream of
-- increasingly accurate results. (Modulo the usual caveats.) This is
-- O(n) faster than Numeric.FAD.argminNaiveGradient
argminNaiveGradient :: (Fractional a, Ord a) => (forall s. [RAD s a] -> RAD s a) -> [a] -> [[a]]
instance Monad S
instance (Floating a) => Floating (RAD s a)
instance (Fractional a) => Fractional (RAD s a)
instance (RealFrac a) => RealFrac (RAD s a)
instance (RealFloat a) => RealFloat (RAD s a)
instance (Real a) => Real (RAD s a)
instance (Num a) => Num (RAD s a)
instance (Num a, Enum a) => Enum (RAD s a)
instance (Bounded a) => Bounded (RAD s a)
instance (Ord a) => Ord (RAD s a)
instance (Eq a) => Eq (RAD s a)
instance MuRef (RAD s a)
instance (Show a) => Show (RAD s a)