-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/

-- | Eigen C++ library (linear algebra: matrices, sparse matrices, vectors,
--   numerical solvers).
--
-- This module provides Haskell bindings for the Eigen C++ library.
--
-- Eigen is versatile.
--
-- >>> let a = fromList [[1,2],[3,4]] :: MatrixXf
--
-- >>> a
-- Matrix 2x2
-- 1.0 2.0
-- 3.0 4.0
--
-- >>> map (*10) a
-- Matrix 2x2
-- 10.0 20.0
-- 30.0 40.0
map :: Elem a b => (a -> a) -> Matrix a b -> Matrix a b

-- | Apply a given function to each element of the matrix.
--
-- Here is an example of how an upper triangular matrix can be implemented:
--
-- >>> let a = fromList [[1,2,3],[4,5,6],[7,8,9]] :: MatrixXf
--
-- >>> a
-- Matrix 3x3
-- 1.0 2.0 3.0
-- 4.0 5.0 6.0
-- 7.0 8.0 9.0
--
-- >>> imap (\row col val -> if row <= col then val else 0) a
-- Matrix 3x3
-- 1.0 2.0 3.0
-- 0.0 5.0 6.0
-- 0.0 0.0 9.0
imap :: Elem a b => (Int -> Int -> a -> a) -> Matrix a b -> Matrix a b

-- | Filter elements in the matrix. Filtered elements will be replaced by 0.
filter :: Elem a b => (a -> Bool) -> Matrix a b -> Matrix a b

-- | Filter elements in the matrix, with access to their row and column
-- indices. Filtered elements will be replaced by 0.
ifilter :: Elem a b => (Int -> Int -> a -> Bool) -> Matrix a b -> Matrix a b

-- | Diagonal of the matrix
diagonal :: Elem a b => Matrix a b -> Matrix a b

-- | Transpose of the matrix
transpose :: Elem a b => Matrix a b -> Matrix a b

-- | Inverse of the matrix
--
-- For small fixed sizes up to 4x4, this method uses cofactors. In the
-- general case, this method uses PartialPivLU decomposition.
inverse :: Elem a b => Matrix a b -> Matrix a b

-- | Adjoint of the matrix
adjoint :: Elem a b => Matrix a b -> Matrix a b

-- | Conjugate of the matrix
conjugate :: Elem a b => Matrix a b -> Matrix a b

-- | Normalize the matrix by dividing it by its norm
normalize :: Elem a b => Matrix a b -> Matrix a b

-- | Apply a destructive operation to a matrix. The operation will be
-- performed in place if it is safe to do so and will modify a copy of
-- the matrix otherwise.
modify :: Elem a b => (forall s. MMatrix a b s -> ST s ()) -> Matrix a b -> Matrix a b

-- | Convert a matrix to a different type using a user-provided element converter
convert :: (Elem a b, Elem c d) => (a -> c) -> Matrix a b -> Matrix c d

data TriangularMode

-- | View matrix as a lower triangular matrix.
Lower :: TriangularMode

-- | View matrix as an upper triangular matrix.
Upper :: TriangularMode

-- | View matrix as a lower triangular matrix with zeros on the diagonal.
StrictlyLower :: TriangularMode

-- | View matrix as an upper triangular matrix with zeros on the diagonal.
StrictlyUpper :: TriangularMode

-- | View matrix as a lower triangular matrix with ones on the diagonal.
UnitLower :: TriangularMode

-- | View matrix as an upper triangular matrix with ones on the diagonal.
UnitUpper :: TriangularMode

-- | Triangular view extracted from the current matrix
triangularView :: Elem a b => TriangularMode -> Matrix a b -> Matrix a b

-- | Lower triangle of the matrix. Shortcut for triangularView Lower
lowerTriangle :: Elem a b => Matrix a b -> Matrix a b

-- | Upper triangle of the matrix. Shortcut for triangularView Upper
upperTriangle :: Elem a b => Matrix a b -> Matrix a b

-- | Encode the matrix as a lazy byte string
encode :: Elem a b => Matrix a b -> ByteString

-- | Decode matrix from the lazy byte string
decode :: Elem a b => ByteString -> Matrix a b

-- | Yield a mutable copy of the immutable matrix
thaw :: Elem a b => PrimMonad m => Matrix a b -> m (MMatrix a b (PrimState m))

-- | Yield an immutable copy of the mutable matrix
freeze :: Elem a b => PrimMonad m => MMatrix a b (PrimState m) -> m (Matrix a b)

-- | Unsafely convert an immutable matrix to a mutable one without copying.
-- The immutable matrix may not be used after this operation.
unsafeThaw :: Elem a b => PrimMonad m => Matrix a b -> m (MMatrix a b (PrimState m))

-- | Unsafely convert a mutable matrix to an immutable one without copying.
-- The mutable matrix may not be used after this operation.
unsafeFreeze :: Elem a b => PrimMonad m => MMatrix a b (PrimState m) -> m (Matrix a b)

-- | Pass a pointer to the matrix's data to the IO action. The data may not
-- be modified through the pointer.
unsafeWith :: Elem a b => Matrix a b -> (Ptr b -> CInt -> CInt -> IO c) -> IO c

instance GHC.Read.Read Data.Eigen.Matrix.TriangularMode
instance GHC.Show.Show Data.Eigen.Matrix.TriangularMode
instance GHC.Enum.Enum Data.Eigen.Matrix.TriangularMode
instance GHC.Classes.Eq Data.Eigen.Matrix.TriangularMode
instance (Data.Eigen.Internal.Elem a b, GHC.Show.Show a) => GHC.Show.Show (Data.Eigen.Matrix.Matrix a b)
instance Data.Eigen.Internal.Elem a b => GHC.Num.Num (Data.Eigen.Matrix.Matrix a b)
instance Data.Eigen.Internal.Elem a b => Data.Binary.Class.Binary (Data.Eigen.Matrix.Matrix a b)
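-- A short usage sketch (added for illustration; it is not part of the
-- generated API listing). It combines several of the functions above, and
-- the values are arbitrary:
--
-- import Data.Eigen.Matrix
--
-- example :: MatrixXf
-- example =
--   let a = fromList [[1,2],[3,4]]
--       l = lowerTriangle a       -- same as triangularView Lower a
--       r = decode (encode l)     -- Binary round trip
--   in  transpose r + inverse a   -- (+) comes from the Num instance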
-- | The problem: You have a system of equations that you have written as
-- a single matrix equation
--
--   Ax = b
--
-- where A and b are matrices (b could be a vector, as a special case).
-- You want to find a solution x.
--
-- The solution: You can choose between various decompositions, depending
-- on what your matrix A looks like, and depending on whether you favor
-- speed or accuracy. However, let's start with an example that works in
-- all cases, and is a good compromise:
--
-- import Data.Eigen.Matrix
-- import Data.Eigen.LA
--
-- main = do
--     let
--         a :: MatrixXd
--         a = fromList [[1,2,3], [4,5,6], [7,8,10]]
--         b = fromList [[3],[3],[4]]
--         x = solve ColPivHouseholderQR a b
--     putStrLn "Here is the matrix A:" >> print a
--     putStrLn "Here is the vector b:" >> print b
--     putStrLn "The solution is:" >> print x
--
-- produces the following output
--
-- Here is the matrix A:
-- Matrix 3x3
-- 1.0 2.0 3.0
-- 4.0 5.0 6.0
-- 7.0 8.0 10.0
--
-- Here is the vector b:
-- Matrix 3x1
-- 3.0
-- 3.0
-- 4.0
--
-- The solution is:
-- Matrix 3x1
-- -2.0000000000000004
-- 1.0000000000000018
-- 0.9999999999999989
--
-- Checking if a solution really exists: Only you know what error margin
-- you want to allow for a solution to be considered valid.
--
-- You can compute the relative error using the formula
-- norm (a*x - b) / norm b, or use the relativeError function, which
-- provides the same calculation implemented slightly more efficiently.
module Data.Eigen.LA
-- |
-- Decomposition         Requirements on the matrix          Speed  Accuracy  Rank  Kernel  Image
-- PartialPivLU          Invertible                          ++     +         -     -       -
-- FullPivLU             None                                -      +++       +     +       +
-- HouseholderQR         None                                ++     +         -     -       -
-- ColPivHouseholderQR   None                                +      ++        +     -       -
-- FullPivHouseholderQR  None                                -      +++       +     -       -
-- LLT                   Positive definite                   +++    +         -     -       -
-- LDLT                  Positive or negative semidefinite   +++    ++        -     -       -
-- JacobiSVD             None                                -      +++       +     -       -
--
-- The best way to do least squares solving for square matrices is with an
-- SVD decomposition (JacobiSVD).
data Decomposition

-- | LU decomposition of a matrix with partial pivoting.
PartialPivLU :: Decomposition

-- | LU decomposition of a matrix with complete pivoting.
FullPivLU :: Decomposition

-- | Householder QR decomposition of a matrix.
HouseholderQR :: Decomposition

-- | Householder rank-revealing QR decomposition of a matrix with
-- column-pivoting.
ColPivHouseholderQR :: Decomposition

-- | Householder rank-revealing QR decomposition of a matrix with full
-- pivoting.
FullPivHouseholderQR :: Decomposition

-- | Standard Cholesky decomposition (LL^T) of a matrix.
LLT :: Decomposition

-- | Robust Cholesky decomposition of a matrix with pivoting.
LDLT :: Decomposition

-- | Two-sided Jacobi SVD decomposition of a rectangular matrix.
JacobiSVD :: Decomposition
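-- A brief added sketch of picking a decomposition. It follows the same
-- argument pattern as the module-level example above
-- (solve decomposition a b); the data is arbitrary and chosen only to give
-- an overdetermined system:
--
-- import Data.Eigen.Matrix
-- import Data.Eigen.LA
--
-- leastSquares :: MatrixXd
-- leastSquares =
--   let a = fromList [[1,2],[3,4],[5,6]] :: MatrixXd  -- 3 equations, 2 unknowns
--       b = fromList [[7],[8],[9]]
--   in  solve JacobiSVD a b  -- least-squares solution via SVD (see the table above)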
-- |
-- import Data.Eigen.LA
-- main = print $ linearRegression [
--     [-4.32, 3.02, 6.89],
--     [-3.79, 2.01, 5.39],
--     [-4.01, 2.41, 6.01],
--     [-3.86, 2.09, 5.55],
--     [-4.10, 2.58, 6.32]]
--
-- produces the following output
--
-- ([-2.3466569233817127,-0.2534897541434826,-0.1749653335680988],1.8905965120153139e-3)
linearRegression :: [[Double]] -> ([Double], Double)

instance GHC.Read.Read Data.Eigen.LA.Decomposition
instance GHC.Show.Show Data.Eigen.LA.Decomposition
instance GHC.Enum.Enum Data.Eigen.LA.Decomposition
instance GHC.Classes.Eq Data.Eigen.LA.Decomposition

-- | Some of Eigen's algorithms can exploit the multiple cores present in
-- your hardware. To this end, it is enough to enable OpenMP on your
-- compiler, for instance GCC: -fopenmp. You can control the number of
-- threads that will be used, using either the OpenMP API or Eigen's API,
-- with the following priority:
--
--  0   3   0   0   0
-- 22   0   0   0  17
--  7   5   0   1   0
--  0   0   0   0   0
--  0   0  14   0   8
--
-- and one of its possible sparse, column-major representations:
--
-- values:        22  7  _  3  5  14  _  _  1  _  17  8
-- innerIndices:   1  2  _  0  2   4  _  _  2  _   1  4
-- outerStarts:    0  3  5  8  10  12
-- innerNNZs:      2  2  1  1   2
--
-- Currently the elements of a given inner vector are guaranteed to be
-- always sorted by increasing inner indices. The "_" indicates available
-- free space to quickly insert new elements. Assuming no reallocation is
-- needed, the insertion of a random element is therefore in O(nnz_j),
-- where nnz_j is the number of nonzeros of the respective inner vector.
-- On the other hand, inserting elements with increasing inner indices in
-- a given inner vector is much more efficient since this only requires
-- increasing the respective innerNNZs entry, which is an O(1) operation.
--
-- The case where no empty space is available is a special case, and is
-- referred to as the compressed mode. It corresponds to the widely used
-- Compressed Column (or Row) Storage schemes (CCS or CRS). Any
-- SparseMatrix can be turned into this form by calling the compress
-- function. In this case, one can remark that the innerNNZs array is
-- redundant with outerStarts because we have the equality:
-- InnerNNZs[j] = OuterStarts[j+1] - OuterStarts[j]. Therefore, in
-- practice a call to compress frees this buffer.
--
-- The results of Eigen's operations always produce compressed sparse
-- matrices. On the other hand, the insertion of a new element into a
-- SparseMatrix converts the latter to the uncompressed mode.
--
-- For more information please see the Eigen documentation page.
module Data.Eigen.SparseMatrix

data SparseMatrix a b
[SparseMatrix] :: Elem a b => !(ForeignPtr (CSparseMatrix a b)) -> SparseMatrix a b

-- | Alias for single precision sparse matrix
type SparseMatrixXf = SparseMatrix Float CFloat

-- | Alias for double precision sparse matrix
type SparseMatrixXd = SparseMatrix Double CDouble

-- | Alias for single precision sparse matrix of complex numbers
type SparseMatrixXcf = SparseMatrix (Complex Float) (CComplex CFloat)

-- | Alias for double precision sparse matrix of complex numbers
type SparseMatrixXcd = SparseMatrix (Complex Double) (CComplex CDouble)

-- | Stores the coefficient values of the non-zeros.
values :: Elem a b => SparseMatrix a b -> Vector b

-- | Stores the row (resp. column) indices of the non-zeros.
innerIndices :: Elem a b => SparseMatrix a b -> Vector CInt

-- | Stores for each column (resp. row) the index of the first non-zero in
-- the previous two arrays.
outerStarts :: Elem a b => SparseMatrix a b -> Vector CInt

-- | Stores the number of non-zeros of each column (resp. row). The word
-- inner refers to an inner vector, that is, a column for a column-major
-- matrix or a row for a row-major matrix. The word outer refers to the
-- other direction.
innerNNZs :: Elem a b => SparseMatrix a b -> Maybe (Vector CInt)

-- | Number of columns of the sparse matrix
cols :: Elem a b => SparseMatrix a b -> Int

-- | Number of rows of the sparse matrix
rows :: Elem a b => SparseMatrix a b -> Int

-- | Matrix coefficient at the given row and col
coeff :: Elem a b => Int -> Int -> SparseMatrix a b -> a

-- | Matrix coefficient at the given row and col
(!) :: Elem a b => SparseMatrix a b -> (Int, Int) -> a

-- | Construct a sparse matrix of the given size from the list of triplets
-- (row, col, val)
fromList :: Elem a b => Int -> Int -> [(Int, Int, a)] -> SparseMatrix a b
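-- A small added usage sketch: building the 5x5 matrix from the storage
-- example in the module description above out of (row, col, value)
-- triplets (0-based indices):
--
-- import Data.Eigen.SparseMatrix
--
-- m :: SparseMatrixXd
-- m = fromList 5 5
--     [ (0,1, 3), (1,0,22), (1,4,17)
--     , (2,0, 7), (2,1, 5), (2,3, 1)
--     , (4,2,14), (4,4, 8) ]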
-- | Convert the sparse matrix to a list of triplets (row, col, val).
-- Compressed elements will not be included.
toList :: Elem a b => SparseMatrix a b -> [(Int, Int, a)]

-- | Construct a sparse matrix of the given size from the storable vector
-- of triplets (row, col, val)
fromVector :: Elem a b => Int -> Int -> Vector (CTriplet b) -> SparseMatrix a b

-- | Convert the sparse matrix to a storable vector of triplets (row, col,
-- val). Compressed elements will not be included.
toVector :: Elem a b => SparseMatrix a b -> Vector (CTriplet b)

-- | Construct a sparse matrix from a two-dimensional list of values.
-- Matrix dimensions will be detected automatically. Zero values will be
-- compressed.
fromDenseList :: (Elem a b, Eq a) => [[a]] -> SparseMatrix a b

-- | Convert the sparse matrix to a (rows x cols) dense list of values
toDenseList :: Elem a b => SparseMatrix a b -> [[a]]

-- | Construct a sparse matrix from a dense matrix. Zero elements will be
-- compressed.
fromMatrix :: Elem a b => Matrix a b -> SparseMatrix a b

-- | Construct a dense matrix from a sparse matrix
toMatrix :: Elem a b => SparseMatrix a b -> Matrix a b

-- | For vectors, the l2 norm, and for matrices the Frobenius norm. In both
-- cases, it consists in the square root of the sum of the squares of all
-- the matrix entries. For vectors, this is also equal to the square root
-- of the dot product of the vector with itself.
norm :: Elem a b => SparseMatrix a b -> a

-- | For vectors, the squared l2 norm, and for matrices the squared
-- Frobenius norm. In both cases, it consists in the sum of the squares of
-- all the matrix entries. For vectors, this is also equal to the dot
-- product of the vector with itself.
squaredNorm :: Elem a b => SparseMatrix a b -> a

-- | The l2 norm of the matrix using Blue's algorithm. A Portable Fortran
-- Program to Find the Euclidean Norm of a Vector, ACM TOMS, Vol 4, Issue
-- 1, 1978.
blueNorm :: Elem a b => SparseMatrix a b -> a

-- | Extract a rectangular block from the sparse matrix, defined by
-- startRow startCol blockRows blockCols
block :: Elem a b => Int -> Int -> Int -> Int -> SparseMatrix a b -> SparseMatrix a b

-- | Number of non-zero elements in the sparse matrix
nonZeros :: Elem a b => SparseMatrix a b -> Int

-- | Minor dimension with respect to the storage order
innerSize :: Elem a b => SparseMatrix a b -> Int

-- | Major dimension with respect to the storage order
outerSize :: Elem a b => SparseMatrix a b -> Int

-- | Add two sparse matrices by adding the corresponding entries together.
-- You can use the (+) function as well.
add :: Elem a b => SparseMatrix a b -> SparseMatrix a b -> SparseMatrix a b

-- | Subtract two sparse matrices by subtracting the corresponding entries.
-- You can use the (-) function as well.
sub :: Elem a b => SparseMatrix a b -> SparseMatrix a b -> SparseMatrix a b
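-- Another small added sketch, using the dense-list constructors above; the
-- results shown in comments follow from the documented behaviour (zero
-- values are compressed):
--
-- let s = fromDenseList [[0,3],[22,0]] :: SparseMatrixXd
-- nonZeros s                     -- 2 stored non-zeros
-- toDenseList (block 0 0 1 2 s)  -- [[0.0,3.0]] (first row as a 1x2 block)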
-- | Matrix multiplication. You can use the (*) function as well.
mul :: Elem a b => SparseMatrix a b -> SparseMatrix a b -> SparseMatrix a b

-- | Suppress all nonzeros which are much smaller than the reference, under
-- the tolerance epsilon
pruned :: Elem a b => a -> SparseMatrix a b -> SparseMatrix a b

-- | Multiply the matrix by a given scalar
scale :: Elem a b => a -> SparseMatrix a b -> SparseMatrix a b

-- | Transpose of the sparse matrix
transpose :: Elem a b => SparseMatrix a b -> SparseMatrix a b

-- | Adjoint of the sparse matrix
adjoint :: Elem a b => SparseMatrix a b -> SparseMatrix a b

-- | The matrix in the compressed format
compress :: Elem a b => SparseMatrix a b -> SparseMatrix a b

-- | The matrix in the uncompressed mode
uncompress :: Elem a b => SparseMatrix a b -> SparseMatrix a b

-- | Is this matrix in compressed form?
compressed :: Elem a b => SparseMatrix a b -> Bool

-- | Encode the sparse matrix as a lazy byte string
encode :: Elem a b => SparseMatrix a b -> ByteString

-- | Decode a sparse matrix from the lazy byte string
decode :: Elem a b => ByteString -> SparseMatrix a b

-- | Yield a mutable copy of the immutable matrix
thaw :: Elem a b => SparseMatrix a b -> IO (IOSparseMatrix a b)

-- | Yield an immutable copy of the mutable matrix
freeze :: Elem a b => IOSparseMatrix a b -> IO (SparseMatrix a b)

-- | Unsafely convert an immutable matrix to a mutable one without copying.
-- The immutable matrix may not be used after this operation.
unsafeThaw :: Elem a b => SparseMatrix a b -> IO (IOSparseMatrix a b)

-- | Unsafely convert a mutable matrix to an immutable one without copying.
-- The mutable matrix may not be used after this operation.
unsafeFreeze :: Elem a b => IOSparseMatrix a b -> IO (SparseMatrix a b)

instance (Data.Eigen.Internal.Elem a b, GHC.Show.Show a) => GHC.Show.Show (Data.Eigen.SparseMatrix.SparseMatrix a b)
instance Data.Eigen.Internal.Elem a b => GHC.Num.Num (Data.Eigen.SparseMatrix.SparseMatrix a b)
instance Data.Eigen.Internal.Elem a b => Data.Binary.Class.Binary (Data.Eigen.SparseMatrix.SparseMatrix a b)
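-- An added sketch of the arithmetic and compression functions above (m is
-- the 5x5 example matrix constructed with fromList earlier in this
-- module's examples):
--
-- sparseOps :: SparseMatrixXd -> (Int, Bool)
-- sparseOps m =
--   let m2 = scale 2 m             -- every stored entry doubled
--       p  = mul m2 (transpose m)  -- same as m2 * transpose m via the Num instance
--       c  = compress p
--   in  (nonZeros c, compressed c)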
-- | This documentation is based on the original Eigen page Solving Sparse
-- Linear Systems.
--
-- Eigen currently provides a limited set of built-in MPL2-compatible
-- solvers. They are summarized in the following table:
--
-- Sparse solver      Solver kind                                 Matrix kind       Notes
-- ConjugateGradient  Classic iterative CG                        SPD               Recommended for large symmetric problems (e.g., 3D Poisson eq.)
-- BiCGSTAB           Iterative stabilized bi-conjugate gradient  Square
-- SparseLU           LU factorization                            Square            Optimized for small and large problems with irregular patterns
-- SparseQR           QR factorization                            Any, rectangular  Recommended for least-squares problems, has a basic rank-revealing feature
--
-- All these solvers follow the same general concept. Here is a typical
-- and general example:
--
-- let
--     a :: SparseMatrixXd
--     a = ... -- fill a
--
--     b :: SparseMatrixXd
--     b = ... -- fill b
--
--     validate msg = info >>= (`when` fail msg) . (/= Success)
--
-- -- solve Ax = b
-- runSolverT solver $ do
--     compute a
--     validate "decomposition failed"
--
--     x <- solve b
--     validate "solving failed"
--
--     -- solve for another right hand side
--     x1 <- solve b1
--
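-- A more concrete version of the sketch above, added for illustration. It
-- assumes a and b are already-built SparseMatrixXd values of matching
-- dimensions and uses the ConjugateGradient solver defined further down in
-- this module (a direct solver such as SparseLU COLAMDOrdering can be
-- substituted as the first argument of runSolverT):
--
-- solveCG :: SparseMatrixXd -> SparseMatrixXd -> IO SparseMatrixXd
-- solveCG a b =
--     runSolverT (ConjugateGradient DiagonalPreconditioner) $ do
--         compute a   -- analyzePattern followed by factorize
--         solve b     -- x such that a * x is (approximately) b
--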
-- In the case where multiple problems with the same sparsity pattern have
-- to be solved, the "compute" step can be decomposed as follows:
--
-- runSolverT solver $ do
--     analyzePattern a1
--     factorize a1
--     x1 <- solve b1
--     x2 <- solve b2
--
--     factorize a2
--     x1 <- solve b1
--     x2 <- solve b2
--
-- Finally, each solver provides some specific features, such as
-- determinant, access to the factors, controls of the iterations, and so
-- on.
module Data.Eigen.SparseLA

class Code s => Solver s

-- | For direct methods, the solution is computed at the machine precision.
class Solver s => DirectSolver s

-- | Sometimes, the solution need not be too accurate. In this case, the
-- iterative methods are more suitable and the desired accuracy can be set
-- before the solve step using setTolerance.
class Solver s => IterativeSolver s

-- | Ordering methods for sparse matrices. They are typically used to
-- reduce the number of elements during the sparse matrix decomposition
-- (LLT, LU, QR). Precisely, in a preprocessing step, a permutation matrix
-- P is computed using those ordering methods and applied to the columns
-- of the matrix. Using for instance the sparse Cholesky decomposition, it
-- is expected that the number of nonzero elements in LLT(A*P) will be
-- much smaller than in LLT(A).
data OrderingMethod

-- | The column approximate minimum degree ordering. The matrix should be
-- in column-major and compressed format.
COLAMDOrdering :: OrderingMethod

-- | The natural ordering (identity)
NaturalOrdering :: OrderingMethod

data Preconditioner

-- | A preconditioner based on the diagonal entries
--
-- It allows one to approximately solve A.x = b problems, assuming A is a
-- diagonal matrix. In other words, this preconditioner neglects all
-- off-diagonal entries and, in Eigen's language, solves for:
-- A.diagonal().asDiagonal() . x = b. This preconditioner is suitable for
-- both selfadjoint and general problems. The diagonal entries are
-- pre-inverted and stored in a dense vector.
--
-- A variant that has yet to be implemented would attempt to preserve the
-- norm of each column.
DiagonalPreconditioner :: Preconditioner

-- | A naive preconditioner which approximates any matrix as the identity
-- matrix
IdentityPreconditioner :: Preconditioner

-- | A conjugate gradient solver for sparse self-adjoint problems.
--
-- This class allows solving A.x = b sparse linear problems using a
-- conjugate gradient algorithm. The sparse matrix A must be selfadjoint.
--
-- The maximal number of iterations and the tolerance value can be
-- controlled via the setMaxIterations and setTolerance methods. The
-- defaults are the size of the problem for the maximal number of
-- iterations and epsilon for the tolerance.
data ConjugateGradient
ConjugateGradient :: Preconditioner -> ConjugateGradient

-- | A bi-conjugate gradient stabilized solver for sparse square problems.
--
-- This class allows solving A.x = b sparse linear problems using a
-- bi-conjugate gradient stabilized algorithm. The vectors x and b can be
-- either dense or sparse.
--
-- The maximal number of iterations and the tolerance value can be
-- controlled via the setMaxIterations and setTolerance methods. The
-- defaults are the size of the problem for the maximal number of
-- iterations and epsilon for the tolerance.
data BiCGSTAB
BiCGSTAB :: Preconditioner -> BiCGSTAB

-- | Sparse supernodal LU factorization for general matrices.
--
-- This class implements the supernodal LU factorization for general
-- matrices. It uses the main techniques from the sequential SuperLU
-- package.
-- It transparently handles real and complex arithmetic with single and
-- double precision, depending on the scalar type of your input matrix.
-- The code has been optimized to provide BLAS-3 operations during
-- supernode-panel updates. It benefits directly from the built-in
-- high-performance Eigen BLAS routines. Moreover, when the size of a
-- supernode is very small, the BLAS calls are avoided to enable better
-- optimization by the compiler. For best performance, you should compile
-- it with the NDEBUG flag to avoid the numerous bounds checks on vectors.
--
-- An important parameter of this class is the ordering method. It is used
-- to reorder the columns (and possibly the rows) of the matrix to reduce
-- the number of new elements that are created during numerical
-- factorization. The cheapest method available is COLAMD. See the
-- OrderingMethods module for the list of built-in and external ordering
-- methods.
data SparseLU
SparseLU :: OrderingMethod -> SparseLU

-- | Sparse left-looking rank-revealing QR factorization.
--
-- This class implements a left-looking rank-revealing QR decomposition of
-- sparse matrices. When a column has a norm less than a given tolerance,
-- it is implicitly permuted to the end. The QR factorization thus
-- obtained is given by A*P = Q*R, where R is upper triangular or
-- trapezoidal.
--
-- P is the column permutation which is the product of the fill-reducing
-- and the rank-revealing permutations.
--
-- Q is the orthogonal matrix represented as products of Householder
-- reflectors.
--
-- R is the sparse triangular or trapezoidal matrix. The latter occurs
-- when A is rank-deficient.
data SparseQR
SparseQR :: OrderingMethod -> SparseQR

data ComputationInfo

-- | Computation was successful.
Success :: ComputationInfo

-- | The provided data did not satisfy the prerequisites.
NumericalIssue :: ComputationInfo

-- | Iterative procedure did not converge.
NoConvergence :: ComputationInfo

-- | The inputs are invalid, or the algorithm has been improperly called.
-- When assertions are enabled, such errors trigger an assertion failure.
InvalidInput :: ComputationInfo

type SolverT s a b m = ReaderT (s, ForeignPtr (CSolver a b)) m

runSolverT :: (Solver s, MonadIO m, Elem a b) => s -> SolverT s a b m c -> m c

-- | Initializes the iterative solver for the sparsity pattern of the
-- matrix A for further solving Ax=b problems.
analyzePattern :: (Solver s, MonadIO m, Elem a b) => SparseMatrix a b -> SolverT s a b m ()

-- | Initializes the iterative solver with the numerical values of the
-- matrix A for further solving Ax=b problems.
factorize :: (Solver s, MonadIO m, Elem a b) => SparseMatrix a b -> SolverT s a b m ()

-- | Initializes the iterative solver with the matrix A for further
-- solving Ax=b problems.
--
-- The compute method is equivalent to calling both analyzePattern and
-- factorize.
compute :: (Solver s, MonadIO m, Elem a b) => SparseMatrix a b -> SolverT s a b m ()

-- | An expression of the solution x of Ax=b using the current
-- decomposition of A.
solve :: (Solver s, MonadIO m, Elem a b) => SparseMatrix a b -> SolverT s a b m (SparseMatrix a b)

-- |