-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/
-- | Sparse matrix linear-equation solver
--
-- Sparse matrix linear-equation solver, using the conjugate gradient
-- algorithm. Note that the technique only applies to matrices that are
-- symmetric and positive-definite. See
-- http://en.wikipedia.org/wiki/Conjugate_gradient_method for
-- details.
--
-- The conjugate gradient method can handle very large sparse matrices,
-- where direct methods (such as LU decomposition) are way too expensive
-- to be useful in practice. Such large sparse matrices arise naturally
-- in many engineering problems, such as in ASIC placement algorithms and
-- when solving partial differential equations.
@package conjugateGradient
@version 1.4
-- | (The linear equation solver library is hosted at
-- http://github.com/LeventErkok/conjugateGradient. Comments, bug
-- reports, and patches are always welcome.)
--
-- Sparse matrix linear-equation solver, using the conjugate gradient
-- algorithm. Note that the technique only applies to matrices that are:
--
--
-- - Symmetric
-- - Positive-definite
--
--
-- See http://en.wikipedia.org/wiki/Conjugate_gradient_method for
-- details.
--
-- The conjugate gradient method can handle very large sparse matrices,
-- where direct methods (such as LU decomposition) are way too expensive
-- to be useful in practice. Such large sparse matrices arise naturally
-- in many engineering problems, such as in ASIC placement algorithms and
-- when solving partial differential equations.
--
-- Here's an example usage, for the simple system:
--
--
-- 4x + y = 1
-- x + 3y = 2
--
--
--
-- >>> import Data.IntMap
--
-- >>> import System.Random
--
-- >>> let a = (2, fromList [(0, fromList [(0, 4), (1, 1)]), (1, fromList [(0, 1), (1, 3)])]) :: SM Double
--
-- >>> let b = fromList [(0, 1), (1, 2)] :: SV Double
--
-- >>> let g = mkStdGen 12345
--
-- >>> let x = solveCG g a b
--
-- >>> putStrLn $ showSolution 4 a b x
-- A | x = b
-- --------------+----------------
-- 4.0000 1.0000 | 0.0909 = 1.0000
-- 1.0000 3.0000 | 0.6364 = 2.0000
--
module Math.ConjugateGradient
-- | A sparse vector containing elements of type a. For our
-- purposes, the elements will be either Floats or Doubles.
-- Only the indices that contain non-0 elements should be given
-- for efficiency purposes. (Nothing will break if you put in elements
-- that are 0's, it's just not as efficient.)
type SV a = IntMap a
-- | A sparse matrix is an int-map containing sparse row-vectors:
--
--
-- - The first element, n, is the number of rows in the
-- matrix, including those with all 0 elements.
-- - The matrix is implicitly assumed to be nxn, indexed by
-- keys (0, 0) to (n-1, n-1).
-- - When constructing a sparse-matrix, only put in rows that have a
-- non-0 element in them for efficiency.
-- - Note that you have to give all the non-0 elements: Even though the
-- matrix must be symmetric for the algorithm to work, the matrix should
-- contain all the non-0 elements, not just the upper (or the
-- lower)-triangle.
-- - Make sure the keys of the int-map are a subset of [0 ..
-- n-1], both for the row-indices and the indices of the vectors
-- representing the sparse-rows.
--
type SM a = (Int, IntMap (SV a))
-- | Multiply a sparse-vector by a scalar.
sMulSV :: RealFloat a => a -> SV a -> SV a
-- | Multiply a sparse-matrix by a scalar.
sMulSM :: RealFloat a => a -> SM a -> SM a
-- | Add two sparse vectors.
addSV :: RealFloat a => SV a -> SV a -> SV a
-- | Subtract two sparse vectors.
subSV :: RealFloat a => SV a -> SV a -> SV a
-- | Dot product of two sparse vectors.
dotSV :: RealFloat a => SV a -> SV a -> a
-- | Multiply a sparse matrix (nxn) with a sparse vector (nx1), obtaining a
-- sparse vector (nx1).
mulSMV :: RealFloat a => SM a -> SV a -> SV a
-- | Norm of a sparse vector. (Square-root of its dot-product with itself.)
normSV :: RealFloat a => SV a -> a
-- | Conjugate Gradient Solver for the system Ax=b. See:
-- http://en.wikipedia.org/wiki/Conjugate_gradient_method.
--
-- NB. Assumptions on the input:
--
--
-- - The A matrix is symmetric and positive definite.
-- - The indices start from 0 and go consecutively up to
-- n-1. (Only non-0 value/row indices have to be
-- present, of course.)
--
--
-- For efficiency reasons, we do not check for either property. (If these
-- assumptions are violated, the algorithm will still produce a result,
-- but not the one you expected!)
--
-- We perform at most 10^6 iterations of the Conjugate-Gradient
-- algorithm, stopping early when the error factor is less than 1e-10. The
-- error factor is defined as the difference of the norm of the current
-- solution from the last one, as we go through the iteration. See
-- http://en.wikipedia.org/wiki/Conjugate_gradient_method#Convergence_properties_of_the_conjugate_gradient_method
-- for a discussion on the convergence properties of this algorithm.
--
-- The solver can throw an error if it does not converge by 10^6
-- iterations. This is typically an indication that the input matrix is
-- not symmetric positive definite.
solveCG :: (RandomGen g, RealFloat a, Random a) => g -> SM a -> SV a -> SV a
-- | Display a solution in a human-readable form. Needless to say, only use
-- this method when the system is small enough to fit nicely on the
-- screen.
showSolution :: RealFloat a => Int -> SM a -> SV a -> SV a -> String