-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/
-- | A Haskell library for inference using Gaussian processes
--
-- A Haskell library implementing algorithms for supervised learning,
-- roughly corresponding to chapters 1 to 5 of Gaussian Processes for
-- Machine Learning by Carl Rasmussen and Christopher Williams, The
-- MIT Press 2006. In particular, algorithms are provided for regression
-- and for two-class classification using either the Laplace or EP
-- approximation.
@package HasGP
@version 0.1
-- | Parser implemented using the Parsec library for reading from files in
-- the format used by SVMLight.
--
-- Currently assumes your file is a text file in Unix format. The extra
-- characters in Windows text files confuse it.
--
-- Copyright (C) 2011 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Parsers.SvmLight
-- | Parse a file in SvmLight format and print some information about it.
analyse :: String -> IO ()
-- | Read examples from a file in SvmLight format and produce a
-- corresponding matrix and vector, for a classification problem.
-- Includes checks that all examples have the same number of attributes,
-- and that the file does in fact correspond to a classification problem.
getMatrixExamplesFromFileC :: String -> IO (Matrix Double, Vector Double)
-- | We often need to iterate some update equation until convergence is
-- detected. This module uses the State monad to provide a very general
-- way of expressing computations of this kind.
--
-- Copyright (C) Sean Holden 2011. sbh11@cl.cam.ac.uk
module HasGP.Support.Iterate
-- | iterateOnce takes a function to update a state and another to compute
-- a value associated with a given state.
--
-- It returns a state transformer performing the corresponding update -
-- that is, one iteration.
iterateOnce :: (s -> s) -> (s -> a) -> State s a
-- | iterateToConvergence takes a state transformer typically generated
-- using iterateOnce, a convergence test that compares two values
-- associated with the current and next states returning True if we've
-- converged, and an initial value.
--
-- It returns a state transformer that performs iteration until
-- convergence. When run from an initial state it returns the state at
-- convergence and the corresponding value.
iterateToConvergence :: State s a -> (a -> a -> Bool) -> a -> State s a
-- | The same as iterateToConvergence, but takes the state update and state
-- value functions directly, so the resulting state transformer only
-- requires a start state to be run.
iterateToConvergence' :: (s -> s) -> (s -> a) -> (a -> a -> Bool) -> State s a
-- | The same as iterateToConvergence, but does one update to obtain an
-- initial value and continues from there. Consequently, no initial value
-- is required, but you do one extra update.
iterateToConvergence'' :: State s a -> (a -> a -> Bool) -> State s a
-- | MainTypes is a module in the HasGP Gaussian process library. It
-- implements basic types for the entire library.
--
-- Note: some more specific classes and types are defined elsewhere, in
-- particular in HasGP.Likelihood and HasGP.Covariance.
--
-- Copyright (C) 2011 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Types.MainTypes
-- | These are defined to make functions more readable.
type DVector = Vector Double
type DMatrix = Matrix Double
type Input = Vector Double
type Inputs = Matrix Double
type CovarianceMatrix = Matrix Double
type Targets = Vector Double
type Outputs = Vector Double
-- | HasGP Gaussian Process Library. This module contains assorted
-- functions that support GP calculations but are more general-purpose
-- than GP-specific.
--
-- Copyright (C) 2011 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Support.Functions
square :: Double -> Double
trace :: DMatrix -> Double
-- | Standard delta function - 0/1 valued.
delta :: Eq a => a -> a -> Double
-- | Standard delta function - boolean valued.
deltaBool :: Eq a => a -> a -> Bool
-- | General sigmoid function with variable slope.
generalSigmoid :: Double -> Double -> Double
-- | Standard sigmoid function.
sigmoid :: Double -> Double
-- | Integral of Gaussian density of mean 0 and variance 1 from -infinity
-- to x
phiIntegral :: Double -> Double
-- | Value of Gaussian density function for mean 0 and variance 1.
n :: Double -> Double
-- | DANGER! You can't compute the ratio (n x) / (phiIntegral x) directly,
-- as although it has sensible values for negative x the denominator gets
-- small so fast that you quickly get Infinity turning up. GSL has the
-- inverse Mill's function/hazard function for the Gaussian distribution,
-- and the ratio is equal to hazard(-x).
nOverPhi :: Double -> Double
-- | DANGER! See nOverPhi - you have to compute this carefully as well.
logPhi :: Double -> Double
-- | HasGP Gaussian Process Library. This module contains assorted
-- functions that support the construction of matrices from functions.
--
-- Copyright (C) 2011 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Support.MatrixFunction
-- | Take two vectors and a function. The vectors contain inputs 1 and 2.
-- The function maps a pair of inputs to a value. Produce a matrix
-- containing the values of the function at the relevant points.
makeMatrixFromFunction2 :: (DVector -> Double) -> DVector -> DVector -> DMatrix
-- | Take a function and a matrix of instance vectors. Apply the function
-- to each possible pair of instance vectors and return the result as a
-- matrix.
makeMatrixFromPairs2 :: (DVector -> DVector -> Double) -> DMatrix -> DMatrix
-- | Same as makeMatrixFromPairs2 but the function returns a vector. In
-- this case the output is a list of matrices, one for each element of
-- the function value.
makeMatricesFromPairs :: (DVector -> DVector -> DVector) -> DMatrix -> [DMatrix]
-- | HasGP Gaussian Process Library. This module contains assorted
-- functions that support GP calculations and are specifically related to
-- linear algebra.
--
-- Copyright (C) 2011 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Support.Linear
-- | Sum the elements in a vector.
sumVector :: DVector -> Double
-- | Sum of elements in a vector, divided by an Int.
sumVectorDiv :: Int -> DVector -> Double
-- | Length of a vector.
lengthV :: Normed a b => a b -> RealOf b
-- | Generate a vector equal to the first column of a matrix.
toVector :: Matrix Double -> Vector Double
-- | Replace the element at a specified position in a vector. NOTE: hmatrix
-- numbers from 0, which is odd. This numbers from 1. The result is
-- returned by overwriting v. This is implemented via runSTVector because
-- the increase in efficiency is HUGE.
replaceInVector :: DVector -> Int -> Double -> DVector
-- | Efficiently pre multiply by a diagonal matrix (passed as a vector)
preMultiply :: DVector -> DMatrix -> DMatrix
-- | Efficiently post multiply by a diagonal matrix (passed as a vector)
postMultiply :: DMatrix -> DVector -> DMatrix
-- | Compute x^T A x when A is diagonal. The second argument is the
-- diagonal of A.
xAxDiag :: DVector -> DVector -> Double
-- | Compute the diagonal only of the product of two square matrices
abDiagOnly :: DMatrix -> DMatrix -> DVector
-- | Compute ABA where A is diagonal. The first argument is the diagonal of
-- A.
abaDiagDiag :: DVector -> DMatrix -> DMatrix
-- | Compute aBa where a is a vector and B is a matrix
abaVV :: DVector -> DMatrix -> Double
-- | HasGP Gaussian Process Library. This module contains assorted
-- functions that support the efficient solution of sets of linear
-- equations
--
-- Copyright (C) 2011 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Support.Solve
-- | It's not clear whether the use of linearSolve from HMatrix will induce
-- a performance hit when the matrix is upper or lower triangular. Pro:
-- it's a call to something presumably from LaPack. Con: we've got some
-- structure that should allow us to make it O(n^2) instead of O(n^3).
--
-- To do: try some timed runs to see if these are needed.
--
-- Solve an upper triangular system.
upperSolve :: DMatrix -> DVector -> DVector
-- | Solve a lower triangular system.
lowerSolve :: DMatrix -> DVector -> DVector
-- | Used by lowerSolve.
--
-- Used by upperSolve.
--
-- Compute the value of x_n when solving a lower triangular set of
-- equations Mx=y. It is assumed that all values x_i where i < n are
-- already in the vector x and that the rest of the elements of x are 0.
computeNthElement :: DVector -> Double -> Int -> DVector -> DVector
-- | General solver for linear equations of the relevant kind.
--
-- First parameter is either upperSolve or lowerSolve. Next two
-- parameters are the upper/lower triangular matrix from the Cholesky
-- decomposition, then another matrix. Returns the solution as a matrix.
generalSolve :: (DMatrix -> DVector -> DVector) -> DMatrix -> DMatrix -> DMatrix
-- | Find the inverse of a matrix from its Cholesky decomposition
cholSolve :: DMatrix -> DMatrix
-- | Gaussian Process Library. This module contains assorted functions that
-- support random number generation and the construction of basic
-- standard training sets.
--
-- Note: these are mostly calls to functions now (but not originally)
-- supplied by HMatrix. Originally different random sources were used,
-- hence the current format.
--
-- Copyright (C) 2011 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Support.Random
-- | Make a random matrix. Elements are uniformly distributed between
-- specified bounds. Returns the matrix and a new generator.
uniformMatrix :: Int -> (Double, Double) -> Int -> Int -> DMatrix
-- | Produce vectors with normally distributed, independent elements of
-- zero mean and specified variance.
normalVectorSimple :: Int -> Double -> Int -> DVector
-- | Produce lists with normally distributed independent elements of zero
-- mean and specified variance.
normalList :: Int -> Double -> Int -> [Double]
-- | Produce normally distributed vectors with mean and covariance
-- specified.
normalVector :: Int -> DVector -> DMatrix -> DVector
-- | Make a matrix with normally distributed, independent elements of zero
-- mean and specified variance.
normalMatrix :: Int -> Double -> Int -> Int -> DMatrix
-- | Gaussian Process Library. This module contains assorted functions that
-- support the computation of covariance, constructing covariance
-- matrices etc.
--
-- Covariance functions store log parameters. Functions are needed to
-- return the covariance and its derivative. Derivatives are with respect
-- to the actual parameters, NOT their logs.
--
-- Copyright (C) 2011 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Covariance.Basic
class CovarianceFunction a
trueHyper :: CovarianceFunction a => a -> DVector
covariance :: CovarianceFunction a => a -> DVector -> DVector -> Double
dCovarianceDParameters :: CovarianceFunction a => a -> DVector -> DVector -> DVector
makeCovarianceFromList :: CovarianceFunction a => a -> [Double] -> a
makeListFromCovariance :: CovarianceFunction a => a -> [Double]
-- | Construct a matrix of covariances from a covariance and a design
-- matrix.
covarianceMatrix :: CovarianceFunction c => c -> Inputs -> CovarianceMatrix
-- | Constructs the column vector required when a new input is included.
-- Constructed as a matrix to avoid further work elsewhere.
covarianceWithPoint :: CovarianceFunction c => c -> Inputs -> Input -> DVector
-- | covarianceWithPoint applied to a list of points to produce a list of
-- vectors.
covarianceWithPoints :: CovarianceFunction c => c -> Inputs -> [Input] -> [DVector]
-- | HasGP Gaussian Process Library. This module contains the class
-- definition for log likelihoods.
--
-- Copyright (C) 2011 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Likelihood.Basic
-- | The following allows arbitrary likelihoods with or without parameters
-- to be wrapped up with their derivatives (with respect to f) and passed
-- to a function.
class LogLikelihood b
likelihood :: LogLikelihood b => b -> Double -> Double -> Double
dLikelihood :: LogLikelihood b => b -> Double -> Double -> Double
ddLikelihood :: LogLikelihood b => b -> Double -> Double -> Double
dddLikelihood :: LogLikelihood b => b -> Double -> Double -> Double
-- | ClassificationLaplace is a module in the HasGP Gaussian Process
-- library. It implements basic Gaussian Process Classification for two
-- classes using the Laplace approximation. For details see
-- www.gaussianprocesses.org.
--
-- Copyright (C) 2011 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Classification.Laplace.ClassificationLaplace
-- | Computing the Laplace approximation requires us to deal with quite a
-- lot of information. To keep things straightforward we wrap this up in
-- a type.
--
-- The value associated with a state includes f, evidence, objective,
-- derivative of the objective, the vector a needed to compute the
-- derivative of the evidence, and the number of iterations.
data LaplaceValue
LaplaceValue :: DVector -> Double -> Double -> DVector -> DVector -> Int -> LaplaceValue
fValue :: LaplaceValue -> DVector
eValue :: LaplaceValue -> Double
psiValue :: LaplaceValue -> Double
dPsiValue :: LaplaceValue -> DVector
aValue :: LaplaceValue -> DVector
count :: LaplaceValue -> Int
-- | A convergence test is a function that takes two consecutive values
-- during iteration and works out whether you've converged or not.
type LaplaceConvergenceTest = LaplaceValue -> LaplaceValue -> Bool
-- | Iteration to convergence is much nicer if the state is hidden using
-- the State monad.
--
-- This uses a general function from HasGP.Support.Iterate to implement
-- the learning algorithm. Convergence testing is done using a user
-- supplied function.
gpCLaplaceLearn :: LogLikelihood l => CovarianceMatrix -> Targets -> l -> LaplaceConvergenceTest -> LaplaceValue
-- | Converts pairs of fStar and V produced by the prediction functions to
-- actual probabilities, assuming the cumulative Gaussian likelihood was
-- used.
convertToP_CG :: (Double, Double) -> Double
-- | Predict using a GP classifier based on the Laplace approximation.
--
-- Produces fStar and V rather than the actual probability as further
-- approximations are then required to compute this.
gpCLaplacePredict :: (CovarianceFunction cF, LogLikelihood l) => DVector -> Inputs -> Targets -> CovarianceMatrix -> cF -> l -> Input -> (Double, Double)
-- | Predict using a GP classifier based on the Laplace approximation.
--
-- The same as gpLaplacePredict but applies to a collection of new inputs
-- supplied as the rows of a matrix.
--
-- Produces a list of pairs of fStar and V rather than the actual
-- probabilities as further approximations are then required to compute
-- these.
gpCLaplacePredict' :: (CovarianceFunction cF, LogLikelihood l) => DVector -> Inputs -> Targets -> CovarianceMatrix -> cF -> l -> Inputs -> [(Double, Double)]
-- | Compute the log marginal likelihood and its first derivative for the
-- Laplace approximation for GP classification.
--
-- The convergence test input tests for convergence when using
-- gpClassificationLaplaceLearn. Note that a covariance function contains
-- its own parameters and can compute its own derivative so theta does
-- not need to be passed separately.
--
-- Outputs the NEGATIVE log marginal likelihood and a vector of its
-- derivatives. The derivatives are with respect to the actual, NOT log
-- parameters.
gpCLaplaceLogEvidence :: (CovarianceFunction cF, LogLikelihood l) => Inputs -> Targets -> cF -> l -> LaplaceConvergenceTest -> (Double, DVector)
-- | A version of gpClassificationLaplaceEvidence that's usable by the
-- conjugate gradient function included in the hmatrix library. Computes
-- the log evidence and its first derivative for the Laplace
-- approximation for GP classification. The issue is that while it makes
-- sense for a covariance function to be implemented as a class so that
-- any can easily be used, we need to supply evidence and its derivatives
-- directly as functions of the hyperparameters, and these have to be
-- supplied as vectors of Doubles. The solution is to include a function
-- in the CovarianceFunction class that takes a list and returns a new
-- covariance function of the required type having the specified
-- hyperparameters.
--
-- Parameters: The same parameters as gpClassifierLaplaceEvidence, plus
-- the list of hyperparameters. Outputs: negative log marginal likelihood
-- and a vector of its first derivatives.
--
-- In addition to the above, this assumes that we want derivatives with
-- respect to log parameters and so converts using df/d log p = p df/dp.
gpCLaplaceLogEvidenceList :: (CovarianceFunction cF, LogLikelihood l) => Inputs -> Targets -> cF -> l -> LaplaceConvergenceTest -> [Double] -> (Double, DVector)
-- | This is the same as gpCLaplaceLogEvidenceList but takes a vector
-- instead of a list.
gpCLaplaceLogEvidenceVec :: (CovarianceFunction cF, LogLikelihood l) => Inputs -> Targets -> cF -> l -> LaplaceConvergenceTest -> DVector -> (Double, DVector)
-- | Gaussian Process Library. This module contains the definition of the
-- standard squared exponential covariance function.
--
-- Copyright (C) 2008-11 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Covariance.SquaredExp
data SquaredExponential
SquaredExponential :: Double -> Double -> SquaredExponential
-- | log sigma_f^2
f :: SquaredExponential -> Double
-- | log l
l :: SquaredExponential -> Double
instance CovarianceFunction SquaredExponential
-- | Gaussian Process Library. This module contains the definition of the
-- standard squared exponential covariance function, extended for use
-- with Automatic Relevance Determination.
--
-- s_f^2 exp (-1/2 (x_1 - x_2)^T M (x_1 - x_2))
--
-- Parameters: s_f^2 and vector containing the diagonal of M. M is diag
-- (1/l_1^2,...,1/l_?^2)
--
-- Copyright (C) 2011 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Covariance.SquaredExpARD
data SquaredExponentialARD
SquaredExponentialARD :: Double -> DVector -> SquaredExponentialARD
fARD :: SquaredExponentialARD -> Double
m :: SquaredExponentialARD -> DVector
instance CovarianceFunction SquaredExponentialARD
-- | BishopData is a module in the HasGP Gaussian Process library. It
-- contains functions to generate toy data as used in Neural Networks
-- for Pattern Recognition, by Chris Bishop.
--
-- There is one difference between this data and that in the book.
-- Namely: this data is adjusted to have zero mean, making it easier to
-- use in the demonstrations.
--
-- Copyright (C) 2011 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Data.BishopData
h :: Double -> Double
bishopData :: (Inputs, Targets)
-- | Normalise is a module in the HasGP Gaussian process library. It
-- contains functions for performing basic normalisation tasks on
-- training examples, and for computing assorted standard statistics.
--
-- Copyright (C) 2011 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Data.Normalise
-- | Compute the mean for each attribute in a set of examples.
exampleMean :: Inputs -> DVector
-- | Compute the variance for each attribute in a set of examples.
exampleVariance :: Inputs -> DVector
-- | Compute the mean and variance for each attribute in a set of examples.
exampleMeanVariance :: Inputs -> (DVector, DVector)
-- | Normalise a set of examples to have specified mean and variance.
normaliseMeanVariance :: DVector -> DVector -> Inputs -> Inputs
-- | The same as normaliseMeanVariance but every column (attribute) is
-- normalised in the same way.
normaliseMeanVarianceSimple :: Double -> Double -> Inputs -> Inputs
-- | Normalise a set of examples to have specified maximum and minimum.
normaliseBetweenLimits :: Double -> Double -> Inputs -> Inputs
-- | Find the columns of a matrix in which all values are equal.
findRedundantAttributes :: Inputs -> [Bool]
-- | List column numbers for redundant attributes.
listRedundantAttributes :: Inputs -> [Int]
-- | Remove any redundant columns from a matrix.
removeRedundantAttributes :: Inputs -> Inputs
-- | Specify a list of columns (matrix numbered from 1). Produce a matrix
-- with ONLY those columns in the order specified in the list.
retainAttributes :: [Int] -> Inputs -> Inputs
-- | Compute the numbers for the confusion matrix. It is assumed that
-- classes are +1 (positive) and -1 (negative). Result is (a,b,c,d): a -
-- correct negatives b - predict positive when correct is negative c -
-- predict negative when correct is positive d - correct positives
confusionMatrix :: Targets -> Outputs -> (Double, Double, Double, Double)
-- | Print the confusion matrix and some other statistics
printConfusionMatrix :: Targets -> Outputs -> IO ()
-- | Assuming the labels are +1 or -1, count how many there are of each.
countLabels :: Targets -> IO ()
-- | Gaussian Process Library - functions for producing data sets From
-- Rasmussen and Williams, Gaussian Processes for Machine
-- Learning.
--
-- Copyright (C) 2011 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Data.RWData1
-- | Generate training data for a simple classification problem as in
-- Rasmussen/Williams, page 62.
simpleClassificationData :: Int -> (DMatrix, DVector)
-- | HasGP Gaussian Process Library. This module contains the definition
-- for the standard log Phi likelihood.
--
-- Copyright (C) 2011 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Likelihood.LogPhi
-- | Value and first three derivatives of log Phi with respect to its
-- parameter f. log p(y|f) = log Phi (yf) where y is +1 or -1.
data LogPhi
LogPhi :: LogPhi
instance LogLikelihood LogPhi
-- | Regression is a module in the HasGP Gaussian process library. It
-- implements basic Gaussian process regression. For the technical
-- details see www.gaussianprocesses.org.
--
-- Copyright (C) 2011 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Regression.Regression
-- | Compute the main quantities required to do regression, specifically:
-- the Cholesky decomposition L of the covariance matrix, and the
-- parameters alpha such that L L^t y = alpha.
gpRMain :: CovarianceFunction cF => cF -> Double -> Inputs -> Targets -> (DMatrix, DVector)
-- | Compute the expected value and variance for a collection of new points
-- supplied as the rows of a matrix. Differs from gpRPredict' as l and
-- alpha need to be computed in advance.
gpRPredict :: CovarianceFunction cF => cF -> DMatrix -> DVector -> Inputs -> Targets -> Inputs -> (DVector, DVector)
-- | Compute the expected value and variance for a collection of new points
-- supplied as the rows of a matrix.
gpRPredict' :: CovarianceFunction cF => cF -> Double -> Inputs -> Targets -> Inputs -> (DVector, DVector)
-- | Compute the log of the marginal likelihood.
gpRLogEvidence :: DMatrix -> DVector -> Targets -> Double
-- | Compute the gradient of the log marginal likelihood. Output contains
-- derivative with respect to noise variance followed by the derivatives
-- with respect to the hyperparameters in the covariance function.
gpRGradLogEvidence :: CovarianceFunction cF => cF -> Double -> DMatrix -> DVector -> Inputs -> DVector
-- | Given the log parameters and other necessary inputs, compute the
-- NEGATIVE of the log marginal likelihood and its derivatives with
-- respect to the LOG hyperparameters.
gpRLogHyperToEvidence :: CovarianceFunction cF => cF -> Inputs -> Targets -> DVector -> (Double, DVector)
-- | Demonstration of Gaussian process regression using the simple data
-- from Neural Networks for Pattern Recognition, by Chris Bishop.
-- This version estimates the hyperparameters using the optimization
-- algorithm from HMatrix.
--
-- For details of the algorithms involved see www.gaussianprocesses.org.
--
-- Copyright (C) 2011 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Demos.RegressionDemo1
-- | HasGP Gaussian Process Library. This module contains the definition
-- for the standard log logistic likelihood function.
--
-- Copyright (C) 2011 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Likelihood.LogLogistic
-- | Value and first three derivatives of log sigmoid with respect to its
-- parameter f. log p(y|f) = log sigmoid (yf) where y is +1 or -1.
data LogLogistic
LogLogistic :: LogLogistic
instance LogLikelihood LogLogistic
-- | ClassificationEP is a module in the HasGP Gaussian Process library. It
-- implements basic Gaussian Process Classification for two classes using
-- the EP approximation. Targets should be +1/-1.
--
-- Copyright (C) 2011 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Classification.EP.ClassificationEP
-- | A convergence test for EP usually depends on the evidence and the
-- number of iterations so far. This allows us to specify completely
-- arbitrary convergence tests.
data EPValue
-- | By passing a function with this type we can specify arbitrary
-- convergence tests.
type EPConvergenceTest = EPValue -> EPValue -> Bool
-- | When updating a single site at a time you keep track of var, tauTilde,
-- mu, nuTilde, TauMinus, and MuMinus.
data EPSiteState
-- | We hide the state used in performing EP using the state monad. We need
-- to include a random number generator and the number of iterations.
type EPState = (EPSiteState, StdGen, Int)
-- | If we're updating sites in a random order then we need access to the
-- random number generator.
type SiteOrder = State EPState [Int]
-- | We're often going to want to update sites in a random order. So we
-- need a state transformer that takes the current state (which includes
-- a random number generator) and produces a random permutation.
generateRandomSiteOrder :: SiteOrder
-- | For completeness: just in case you want to update sites in a
-- non-random manner, this state transformer does exactly that.
generateFixedSiteOrder :: SiteOrder
-- | Compute the approximation to the log marginal likelihood.
gpClassifierEPEvidence :: CovarianceMatrix -> Targets -> DMatrix -> EPSiteState -> Double
-- | The learning algorithm. Takes an arbitrary function for convergence
-- testing.
gpClassifierEPLearn :: CovarianceMatrix -> Targets -> SiteOrder -> EPConvergenceTest -> (EPValue, EPState)
-- | Prediction with GP classifiers based on EP learning. Takes a matrix in
-- which each row is an example to be classified.
gpClassifierEPPredict :: CovarianceFunction c => EPSiteState -> Inputs -> Targets -> CovarianceMatrix -> c -> Inputs -> DVector
-- | Compute the log evidence and its first derivative for the EP
-- approximation for GP classification. Targets should be +1/-1. Outputs
-- the -log marginal likelihood and a vector of its derivatives.
gpClassifierEPLogEvidence :: CovarianceFunction c => c -> Inputs -> Targets -> SiteOrder -> EPConvergenceTest -> (Double, DVector)
-- | Essentially the same as gpClassifierEPLogEvidence, but makes a
-- covariance function using the hyperparameters supplied in a list and
-- passes it on.
gpClassifierEPLogEvidenceList :: CovarianceFunction c => Inputs -> Targets -> c -> SiteOrder -> EPConvergenceTest -> [Double] -> (Double, DVector)
-- | Essentially the same as gpClassifierEPLogEvidence, but makes a
-- covariance function using the hyperparameters supplied in a vector and
-- passes it on.
gpClassifierEPLogEvidenceVec :: CovarianceFunction c => Inputs -> Targets -> c -> SiteOrder -> EPConvergenceTest -> DVector -> (Double, DVector)
-- | Demonstration of Gaussian process classification using the
-- 1-dimensional problem from Rasmussen and Williams' book.
--
-- This demo compares the Laplace and EP approximation approaches.
--
-- For details of the algorithms involved see www.gaussianprocesses.org.
--
-- Copyright (C) 2011 Sean Holden. sbh11@cl.cam.ac.uk.
module HasGP.Demos.ClassificationDemo1
-- | This function defines when iteration stops for the Laplace version.
stopLaplace :: LaplaceConvergenceTest
-- | This function defines when iteration stops for the EP version.
stopEP :: EPConvergenceTest
module HasGP.Demos.ClassificationDemo2
-- | This function defines when iteration stops.
stopEP :: EPConvergenceTest