-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/
-- | Sparse matrix linear-equation solver
--
-- Sparse matrix linear-equation solver, using the conjugate gradient
-- algorithm. Note that the technique only applies to matrices that are:
--
--
-- - Symmetric
-- - Positive-definite
--
--
-- See http://en.wikipedia.org/wiki/Conjugate_gradient_method for
-- details.
--
-- The conjugate gradient method can handle very large sparse matrices,
-- where direct methods (such as LU decomposition) are far too expensive
-- to be useful in practice.
@package conjugateGradient
@version 1.0
-- | (The linear equation solver library is hosted at
-- http://github.com/LeventErkok/conjugateGradient. Comments, bug
-- reports, and patches are always welcome.)
--
-- Sparse matrix linear-equation solver, using the conjugate gradient
-- algorithm. Note that the technique only applies to matrices that are:
--
--
-- - Symmetric
-- - Positive-definite
--
--
-- See http://en.wikipedia.org/wiki/Conjugate_gradient_method for
-- details.
--
-- The conjugate gradient method can handle very large sparse matrices,
-- where direct methods (such as LU decomposition) are far too expensive
-- to be useful in practice.
module Math.ConjugateGradient
-- | A sparse vector containing elements of type a. (For our purposes, the
-- elements will be either Floats or Doubles.) For efficiency, only the
-- non-zero elements should be stored.
type SV a = IntMap a
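--
-- A minimal sketch of building a sparse vector, assuming the IntMap in
-- question is Data.IntMap from the containers package (the values below
-- are made up for illustration):
--
-- > import qualified Data.IntMap as IM
-- > import Math.ConjugateGradient
-- >
-- > -- Non-zero entries at indices 0 and 2; zero entries are simply absent.
-- > v :: SV Double
-- > v = IM.fromList [(0, 3.0), (2, -1.5)]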
-- | A sparse matrix is an int-map containing sparse row-vectors. Again, for
-- efficiency, only rows that contain a non-zero element should be
-- present.
type SM a = IntMap (SV a)
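--
-- A sketch of a small sparse matrix, stored row by row. The values are
-- hypothetical; the 2x2 matrix [[4,1],[1,3]] happens to be symmetric and
-- positive-definite, as required by the solver below:
--
-- > aM :: SM Double
-- > aM = IM.fromList [ (0, IM.fromList [(0, 4), (1, 1)])
-- >                  , (1, IM.fromList [(0, 1), (1, 3)]) ]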
-- | Multiply a sparse-vector by a scalar.
sMulSV :: RealFloat a => a -> SV a -> SV a
-- | Multiply a sparse-matrix by a scalar.
sMulSM :: RealFloat a => a -> SM a -> SM a
-- | Add two sparse vectors.
addSV :: RealFloat a => SV a -> SV a -> SV a
-- | Subtract two sparse vectors.
subSV :: RealFloat a => SV a -> SV a -> SV a
-- | Dot product of two sparse vectors.
dotSV :: RealFloat a => SV a -> SV a -> a
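--
-- A quick sketch of the vector operations above, continuing the
-- hypothetical v from the SV example (the commented results follow from
-- the documented semantics):
--
-- > w :: SV Double
-- > w = IM.fromList [(1, 2.0), (2, 4.0)]
-- >
-- > v2 = 2 `sMulSV` v    -- {0 -> 6.0, 2 -> -3.0}
-- > vw = v `addSV`  w    -- {0 -> 3.0, 1 -> 2.0, 2 -> 2.5}
-- > vd = v `subSV`  w    -- {0 -> 3.0, 1 -> -2.0, 2 -> -5.5}
-- > dp = v `dotSV`  w    -- -6.0 = 3*0 + 0*2 + (-1.5)*4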
-- | Multiply a sparse matrix (n x n) by a sparse vector (n x 1), obtaining a
-- sparse vector (n x 1).
mulSMV :: RealFloat a => SM a -> SV a -> SV a
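--
-- Sketch: multiplying the hypothetical 2x2 matrix aM from the SM example
-- by a vector (expected result worked out by hand):
--
-- > x0 :: SV Double
-- > x0 = IM.fromList [(0, 1.0), (1, 2.0)]
-- >
-- > ax = aM `mulSMV` x0   -- {0 -> 4*1 + 1*2 = 6.0, 1 -> 1*1 + 3*2 = 7.0}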
-- | Norm of a sparse vector. (Square-root of its dot-product with
-- itself.)
normSV :: RealFloat a => SV a -> a
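--
-- As the description says, the norm is the square root of the vector's
-- dot-product with itself; for the hypothetical w above:
--
-- > nw = normSV w   -- sqrt (2*2 + 4*4) ~= 4.472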
-- | Conjugate Gradient Solver for the system Ax=b. See:
-- http://en.wikipedia.org/wiki/Conjugate_gradient_method.
--
-- NB. Assumptions on the input:
--
--
-- - The A matrix is symmetric and positive definite.
-- - The indices start from 0 and go consecutively up to
-- n-1. (Only indices with non-zero values/rows need to be
-- present, of course.)
--
--
-- For efficiency reasons, we do not check for either property. (If these
-- assumptions are violated, the algorithm will still produce a result,
-- but not the one you expected!)
--
-- We run the Conjugate-Gradient iteration until either 10^6 iterations
-- have been performed or the error factor drops below 1e-10. The error
-- factor is defined as the difference between the norms of the current
-- and previous solutions as the iteration proceeds. See
-- http://en.wikipedia.org/wiki/Conjugate_gradient_method#Convergence_properties_of_the_conjugate_gradient_method
-- for a discussion of the convergence properties of this algorithm.
solveCG :: (RandomGen g, RealFloat a, Random a) => g -> Int -> SM a -> SV a -> (a, SV a)
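--
-- A self-contained usage sketch (not from the original docs), solving the
-- small symmetric positive-definite system A x = b with A = [[4,1],[1,3]]
-- and b = [1,2], whose exact solution is x = [1/11, 7/11] ~= [0.0909, 0.6364]:
--
-- > import qualified Data.IntMap as IM
-- > import System.Random (newStdGen)
-- > import Math.ConjugateGradient
-- >
-- > main :: IO ()
-- > main = do g <- newStdGen
-- >           let aM = IM.fromList [ (0, IM.fromList [(0, 4), (1, 1)])
-- >                                , (1, IM.fromList [(0, 1), (1, 3)]) ]
-- >               b  = IM.fromList [(0, 1), (1, 2)] :: SV Double
-- >               (err, x) = solveCG g 2 aM b
-- >           print (err, IM.toList x)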
-- | Display a solution in a human-readable form. Needless to say, only use
-- this function when the system is small enough to fit nicely on the
-- screen.
showSolution :: RealFloat a => Int -> Int -> Int -> SM a -> SV a -> SV a -> String
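--
-- A display sketch continuing the solveCG example above. The meaning of the
-- three Int arguments is not spelled out here, so the choice below (display
-- precision and the system dimensions) is an assumption, not taken from the
-- documentation:
--
-- > -- inside the main of the solveCG sketch, instead of the print;
-- > -- argument order is assumed, adjust to the actual API if it differs:
-- > putStrLn $ showSolution 4 2 2 aM b x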