Copyright | (c) Justus Sagemüller 2016 |
---|---|
License | GPL v3 |
Maintainer | (@) sagemueller $ geo.uni-koeln.de |
Stability | experimental |
Portability | portable |
Safe Haskell | None |
Language | Haskell2010 |
- newtype LinearFunction s v w = LinearFunction {
- getLinearFunction :: v -> w
- type (-+>) v w = LinearFunction (Scalar w) v w
- type Bilinear v w y = LinearFunction (Scalar v) v (LinearFunction (Scalar v) w y)
- newtype LinearMap s v w = LinearMap {
- getLinearMap :: TensorProduct (DualVector v) w
- type (+>) v w = LinearMap (Scalar v) v w
- (⊕) :: (u +> w) -> (v +> w) -> (u, v) +> w
- (>+<) :: (u +> w) -> (v +> w) -> (u, v) +> w
- adjoint :: (LSpace v, LSpace w, Scalar v ~ Scalar w) => (v +> DualVector w) -+> (w +> DualVector v)
- (<.>^) :: LSpace v => DualVector v -> v -> Scalar v
- newtype Tensor s v w = Tensor {
- getTensorProduct :: TensorProduct v w
- type (⊗) v w = Tensor (Scalar v) v w
- (⊗) :: (LSpace v, LSpace w, Scalar w ~ Scalar v) => v -> w -> v ⊗ w
- newtype Norm v = Norm {
- applyNorm :: v -+> DualVector v
- type Seminorm v = Norm v
- spanNorm :: LSpace v => [DualVector v] -> Seminorm v
- euclideanNorm :: HilbertSpace v => Norm v
- (|$|) :: (LSpace v, Floating (Scalar v)) => Seminorm v -> v -> Scalar v
- normSq :: LSpace v => Seminorm v -> v -> Scalar v
- (<$|) :: LSpace v => Norm v -> v -> DualVector v
- scaleNorm :: LSpace v => Scalar v -> Norm v -> Norm v
- normSpanningSystem :: SimpleSpace v => Norm v -> [DualVector v]
- normSpanningSystem' :: (FiniteDimensional v, IEEE (Scalar v)) => Norm v -> [v]
- type Variance v = Norm (DualVector v)
- spanVariance :: LSpace v => [v] -> Variance v
- dualNorm :: SimpleSpace v => Norm v -> Variance v
- dependence :: (SimpleSpace u, SimpleSpace v, Scalar u ~ Scalar v) => Variance (u, v) -> u +> v
- densifyNorm :: LSpace v => Norm v -> Norm v
- (\$) :: (FiniteDimensional u, FiniteDimensional v, SemiInner v, Scalar u ~ Scalar v, Fractional' (Scalar v)) => (u +> v) -> v -> u
- pseudoInverse :: (FiniteDimensional u, FiniteDimensional v, SemiInner v, Scalar u ~ Scalar v, Fractional' (Scalar v)) => (u +> v) -> v +> u
- roughDet :: (FiniteDimensional v, IEEE (Scalar v)) => (v +> v) -> Scalar v
- eigen :: (FiniteDimensional v, HilbertSpace v, IEEE (Scalar v)) => (v +> v) -> [(Scalar v, v)]
- constructEigenSystem :: (LSpace v, RealFloat (Scalar v)) => Norm v -> Scalar v -> (v -+> v) -> [v] -> [[Eigenvector v]]
- roughEigenSystem :: (FiniteDimensional v, IEEE (Scalar v)) => Norm v -> (v +> v) -> [Eigenvector v]
- finishEigenSystem :: (LSpace v, RealFloat (Scalar v)) => Norm v -> [Eigenvector v] -> [Eigenvector v]
- data Eigenvector v = Eigenvector {
- ev_Eigenvalue :: Scalar v
- ev_Eigenvector :: v
- ev_FunctionApplied :: v
- ev_Deviation :: v
- ev_Badness :: Scalar v
- type LSpace v = (LSpace' v, Num''' (Scalar v))
- class VectorSpace v => TensorSpace v where
- type TensorProduct v w :: *
- zeroTensor :: (LSpace w, Scalar w ~ Scalar v) => v ⊗ w
- toFlatTensor :: v -+> (v ⊗ Scalar v)
- fromFlatTensor :: (v ⊗ Scalar v) -+> v
- addTensors :: (LSpace w, Scalar w ~ Scalar v) => (v ⊗ w) -> (v ⊗ w) -> v ⊗ w
- subtractTensors :: (LSpace v, LSpace w, Num''' (Scalar v), Scalar w ~ Scalar v) => (v ⊗ w) -> (v ⊗ w) -> v ⊗ w
- scaleTensor :: (LSpace w, Scalar w ~ Scalar v) => Bilinear (Scalar v) (v ⊗ w) (v ⊗ w)
- negateTensor :: (LSpace w, Scalar w ~ Scalar v) => (v ⊗ w) -+> (v ⊗ w)
- tensorProduct :: (LSpace w, Scalar w ~ Scalar v) => Bilinear v w (v ⊗ w)
- transposeTensor :: (LSpace w, Scalar w ~ Scalar v) => (v ⊗ w) -+> (w ⊗ v)
- fmapTensor :: (LSpace w, LSpace x, Scalar w ~ Scalar v, Scalar x ~ Scalar v) => Bilinear (w -+> x) (v ⊗ w) (v ⊗ x)
- fzipTensorWith :: (LSpace u, LSpace w, LSpace x, Scalar u ~ Scalar v, Scalar w ~ Scalar v, Scalar x ~ Scalar v) => Bilinear ((w, x) -+> u) (v ⊗ w, v ⊗ x) (v ⊗ u)
- coerceFmapTensorProduct :: Functor p => p v -> Coercion a b -> Coercion (TensorProduct v a) (TensorProduct v b)
- class (TensorSpace v, TensorSpace (DualVector v), Num' (Scalar v), Scalar (DualVector v) ~ Scalar v) => LinearSpace v where
- type DualVector v :: *
- linearId :: v +> v
- idTensor :: LSpace v => v ⊗ DualVector v
- sampleLinearFunction :: (LSpace v, LSpace w, Scalar v ~ Scalar w) => (v -+> w) -+> (v +> w)
- toLinearForm :: LSpace v => DualVector v -+> (v +> Scalar v)
- fromLinearForm :: LSpace v => (v +> Scalar v) -+> DualVector v
- coerceDoubleDual :: Coercion v (DualVector (DualVector v))
- blockVectSpan :: (LSpace w, Scalar w ~ Scalar v) => w -+> (v ⊗ (v +> w))
- blockVectSpan' :: (LSpace v, LSpace w, Num''' (Scalar v), Scalar v ~ Scalar w) => w -+> (v +> (v ⊗ w))
- trace :: LSpace v => (v +> v) -+> Scalar v
- contractTensorMap :: (LSpace w, Scalar w ~ Scalar v) => (v +> (v ⊗ w)) -+> w
- contractMapTensor :: (LSpace w, Scalar w ~ Scalar v) => (v ⊗ (v +> w)) -+> w
- contractFnTensor :: (LSpace v, LSpace w, Scalar w ~ Scalar v) => (v ⊗ (v -+> w)) -+> w
- contractTensorFn :: (LSpace v, LSpace w, Scalar w ~ Scalar v) => (v -+> (v ⊗ w)) -+> w
- contractTensorWith :: (LSpace v, LSpace w, Scalar w ~ Scalar v) => Bilinear (v ⊗ w) (DualVector w) v
- contractLinearMapAgainst :: (LSpace w, Scalar w ~ Scalar v) => Bilinear (v +> w) (w -+> v) (Scalar v)
- applyDualVector :: LSpace v => Bilinear (DualVector v) v (Scalar v)
- applyLinear :: (LSpace w, Scalar w ~ Scalar v) => Bilinear (v +> w) v w
- composeLinear :: (LSpace w, LSpace x, Scalar w ~ Scalar v, Scalar x ~ Scalar v) => Bilinear (w +> x) (v +> w) (v +> x)
- class LSpace v => SemiInner v where
- dualBasisCandidates :: [(Int, v)] -> Forest (Int, DualVector v)
- cartesianDualBasisCandidates :: [DualVector v] -> (v -> [ℝ]) -> [(Int, v)] -> Forest (Int, DualVector v)
- class (LSpace v, LSpace (Scalar v)) => FiniteDimensional v where
- data SubBasis v :: *
- entireBasis :: SubBasis v
- enumerateSubBasis :: SubBasis v -> [v]
- subbasisDimension :: SubBasis v -> Int
- decomposeLinMap :: (LSpace w, Scalar w ~ Scalar v) => (v +> w) -> (SubBasis v, DList w)
- decomposeLinMapWithin :: (LSpace w, Scalar w ~ Scalar v) => SubBasis v -> (v +> w) -> Either (SubBasis v, DList w) (DList w)
- recomposeSB :: SubBasis v -> [Scalar v] -> (v, [Scalar v])
- recomposeSBTensor :: (FiniteDimensional w, Scalar w ~ Scalar v) => SubBasis v -> SubBasis w -> [Scalar v] -> (v ⊗ w, [Scalar v])
- recomposeLinMap :: (LSpace w, Scalar w ~ Scalar v) => SubBasis v -> [w] -> (v +> w, [w])
- recomposeContraLinMap :: (LinearSpace w, Scalar w ~ Scalar v, Functor f) => (f (Scalar w) -> w) -> f (DualVector v) -> v +> w
- recomposeContraLinMapTensor :: (FiniteDimensional u, LinearSpace w, Scalar u ~ Scalar v, Scalar w ~ Scalar v, Functor f) => (f (Scalar w) -> w) -> f (DualVector v ⊗ DualVector u) -> (v ⊗ u) +> w
- uncanonicallyFromDual :: DualVector v -+> v
- uncanonicallyToDual :: v -+> DualVector v
- addV :: AdditiveGroup w => LinearFunction s (w, w) w
- scale :: VectorSpace v => Bilinear (Scalar v) v v
- inner :: InnerSpace v => Bilinear v v (Scalar v)
- flipBilin :: Bilinear v w y -> Bilinear w v y
- bilinearFunction :: (v -> w -> y) -> Bilinear v w y
- type DualSpace v = v +> Scalar v
- riesz :: (FiniteDimensional v, InnerSpace v) => DualVector v -+> v
- coRiesz :: (LSpace v, Num''' (Scalar v), InnerSpace v) => v -+> DualVector v
- showsPrecAsRiesz :: (FiniteDimensional v, InnerSpace v, Show v, HasBasis (Scalar v), Basis (Scalar v) ~ ()) => Int -> DualSpace v -> ShowS
- (.<) :: (FiniteDimensional v, Num''' (Scalar v), InnerSpace v, LSpace w, HasBasis w, Scalar v ~ Scalar w) => Basis w -> v -> v +> w
- type HilbertSpace v = (LSpace v, InnerSpace v, DualVector v ~ v)
- type SimpleSpace v = (FiniteDimensional v, FiniteDimensional (DualVector v), SemiInner v, SemiInner (DualVector v), RealFrac' (Scalar v))
- type Num' s = (Num s, VectorSpace s, Scalar s ~ s)
- type Num'' s = (Num' s, LinearSpace s)
- type Num''' s = (Num s, InnerSpace s, Scalar s ~ s, LSpace' s, DualVector s ~ s)
- type Fractional' s = (Fractional s, Eq s, VectorSpace s, Scalar s ~ s)
- type Fractional'' s = (Fractional' s, LSpace s)
- type RealFrac' s = (IEEE s, HilbertSpace s, Scalar s ~ s)
- type RealFloat' s = (RealFrac' s, Floating s)
- relaxNorm :: SimpleSpace v => Norm v -> [v] -> Norm v
- transformNorm :: (LSpace v, LSpace w, Scalar v ~ Scalar w) => (v +> w) -> Norm w -> Norm v
- transformVariance :: (LSpace v, LSpace w, Scalar v ~ Scalar w) => (v +> w) -> Variance v -> Variance w
- findNormalLength :: RealFrac' s => Norm s -> Maybe s
- normalLength :: RealFrac' s => Norm s -> s
- summandSpaceNorms :: (SimpleSpace u, SimpleSpace v, Scalar u ~ Scalar v) => Norm (u, v) -> (Norm u, Norm v)
- sumSubspaceNorms :: (LSpace u, LSpace v, Scalar u ~ Scalar v) => Norm u -> Norm v -> Norm (u, v)
- sharedNormSpanningSystem :: SimpleSpace v => Norm v -> Norm v -> [(DualVector v, Scalar v)]
Linear maps
This library deals with linear functions, i.e. functions f :: v -> w
that fulfill
f $ μ ^* u ^+^ v  ≡  μ ^* f u ^+^ f v      ∀ u, v :: v;  μ :: Scalar v
Such functions form a cartesian monoidal category (in maths called
VectK).
This is implemented by PreArrow
, which is the
preferred interface for dealing with these mappings. The basic
“matrix operations” are then:
- Identity matrix: id
- Matrix addition: ^+^ (linear maps form an ordinary vector space)
- Matrix-matrix multiplication: <<< (or >>> or .)
- Matrix-vector multiplication: $
- Vertical matrix concatenation: &&&
- Horizontal matrix concatenation: ⊕ (aka >+<)
But linear mappings need not necessarily be implemented as matrices:
Function implementation
newtype LinearFunction s v w Source
A linear map, represented simply as a Haskell function tagged with the type of scalar with respect to which it is linear. Many (sparse) linear mappings can actually be calculated much more efficiently if you don't represent them with any kind of matrix, but just as a function (which is after all, mathematically speaking, what a linear map foremostly is).
However, if you sum up many LinearFunction
s – which you can
simply do with the VectorSpace
instance – they will become ever
slower to calculate, because the summand-functions are actually computed
individually and only the results summed. That's where
LinearMap
is generally preferable.
You can always convert between these equivalent categories using arr
.
LinearFunction | |
|
type (-+>) v w = LinearFunction (Scalar w) v w Source
Infix synonym of LinearFunction
, without explicit mention of the scalar type.
type Bilinear v w y = LinearFunction (Scalar v) v (LinearFunction (Scalar v) w y) Source
A bilinear function is a linear function mapping to a linear function, or equivalently a 2-argument function that's linear in each argument independently. Note that this can not be uncurried to a linear function with a tuple argument (this would not be linear but quadratic).
Tensor implementation
newtype LinearMap s v w Source
The tensor product between one space's dual space and another space is the space spanned by vector–dual-vector pairs, in bra-ket notation written as
m = ∑ |w⟩⟨v|
Any linear mapping can be written as such a (possibly infinite) sum. The
TensorProduct
data structure only stores the linear independent parts
though; for simple finite-dimensional spaces this means e.g.
LinearMap ℝ ℝ³ ℝ³ effectively boils down to an ordinary matrix type,
namely an array of column-vectors |w⟩.
(The ⟨v|
dual-vectors are then simply assumed to come from the canonical basis.)
For bigger spaces, the tensor product may be implemented in a more efficient
sparse structure; this can be defined in the TensorSpace
instance.
LinearMap | |
|
type (+>) v w = LinearMap (Scalar v) v w Source
Infix synonym for LinearMap
, without explicit mention of the scalar type.
(⊕) :: (u +> w) -> (v +> w) -> (u, v) +> w infixr 6 Source
The dual operation to the tuple constructor, or rather to the
&&&
fanout operation: evaluate two (linear) functions in parallel
and sum up the results.
The typical use is to concatenate “row vectors” in a matrix definition.
adjoint :: (LSpace v, LSpace w, Scalar v ~ Scalar w) => (v +> DualVector w) -+> (w +> DualVector v) Source
For real matrices, this boils down to transpose
.
For free complex spaces it also incurs complex conjugation.
The signature can also be understood as
adjoint :: (v +> w) -> (DualVector w +> DualVector v)
Or
adjoint :: (DualVector v +> DualVector w) -> (w +> v)
But not (v+>w) -> (w+>v)
, in general (though in a Hilbert space, this too is
equivalent, via riesz
isomorphism).
Dual vectors
A DualVector v is a linear functional or linear form on the
vector space v,
i.e. it is a linear function from the vector space into its scalar field.
However, these functions form themselves a vector space, known as the dual space.
In particular, the dual space of any InnerSpace
is isomorphic to the
space itself.
(More precisely: the continuous dual space of a
Hilbert space is isomorphic to
that Hilbert space itself; see the riesz
isomorphism.)
As a matter of fact, in many applications, no distinction is made between a
space and its dual. Indeed, we have for the basic LinearSpace
instances DualVector v ~ v, and <.>^
is simply defined as a scalar product.
In this case, a general LinearMap
is just a tensor product / matrix.
However, scalar products are often not as natural as they are made to look:
- A scalar product is only preserved under orthogonal transformations. It is not preserved under scalings, and certainly not under general linear transformations. This is very important in applications such as relativity theory (here, people talk about covariant vs contravariant tensors), but also relevant for more mundane manifolds like sphere surfaces: on such a surface, the natural symmetry transformations generally do not preserve a scalar product you might define.
- There may be more than one meaningful scalar product. For instance, the Sobolev space of weakly differentiable functions also permits the 𝐿² scalar product – each has different and useful properties.
Neither of these is a problem if we keep the dual space a separate type. Effectively, this enables the type system to prevent you from writing code that does not behave naturally (i.e. that depends on a concrete choice of basis / scalar product).
For cases when you do have some given notion of orientation/scale in a vector space
and need it for an algorithm, you can always provide a Norm
, which is essentially
a reified scalar product.
Note that DualVector (DualVector v) ~ v
in any LSpace
: the double-dual
space is naturally isomorphic to the original space, by way of
v <.>^ dv ≡ dv <.>^ v
(<.>^) :: LSpace v => DualVector v -> v -> Scalar v Source
Tensor spaces
Tensor products are most interesting because they can be used to implement linear mappings, but they also form a useful vector space on their own right.
Tensor | |
|
type (⊗) v w = Tensor (Scalar v) v w infixl 7 Source
Infix synonym for Tensor
, without explicit mention of the scalar type.
(⊗) :: (LSpace v, LSpace w, Scalar w ~ Scalar v) => v -> w -> v ⊗ w infixl 7 Source
Infix version of tensorProduct
.
Norms
A norm is a way to quantify the magnitude/length of different vectors, even if they point in different directions.
In an InnerSpace
, a norm is always given by the scalar product,
but there are spaces without a canonical scalar product (or situations
in which this scalar product does not give the metric you want). Hence,
we let the functions like constructEigenSystem
, which depend on a norm
for orthonormalisation, accept a Norm
as an extra argument instead of
requiring InnerSpace
.
A positive (semi)definite symmetric bilinear form. This gives rise to a norm thus:
Norm n |$| v = √(n v <.>^ v)
Strictly speaking, this type is neither strong enough nor general enough to
deserve the name Norm
: it includes proper Seminorm
s (i.e. m|$|v ≡ 0
does
not guarantee v == zeroV
), but not actual norms such as the ℓ₁-norm on ℝⁿ
(Taxicab norm) or the supremum norm.
However, 𝐿₂-like norms are the only ones that can really be formulated without
any basis reference; and guaranteeing positive definiteness through the type
system is scarcely practical.
Norm | |
|
type Seminorm v = Norm v Source
A “norm” that may explicitly be degenerate, with m|$|v ⩵ 0
for some v ≠ zeroV
.
spanNorm :: LSpace v => [DualVector v] -> Seminorm v Source
A seminorm defined by
‖v‖ = √(∑ᵢ ⟨dᵢ|v⟩²)
for some dual vectors dᵢ
. If given a complete basis of the dual space,
this generates a proper Norm
.
If the dᵢ
are a complete orthonormal system, you get the euclideanNorm
(in an inefficient form).
euclideanNorm :: HilbertSpace v => Norm v Source
The canonical standard norm (2-norm) on inner-product / Hilbert spaces.
(|$|) :: (LSpace v, Floating (Scalar v)) => Seminorm v -> v -> Scalar v infixr 0 Source
Use a Norm
to measure the length / norm of a vector.
euclideanNorm |$| v ≡ √(v <.> v)
normSq :: LSpace v => Seminorm v -> v -> Scalar v Source
The squared norm. More efficient than |$|
because that needs to take
the square root.
(<$|) :: LSpace v => Norm v -> v -> DualVector v infixr 0 Source
“Partially apply” a norm, yielding a dual vector (i.e. a linear form that accepts the second argument of the scalar product).
(euclideanNorm <$| v) <.>^ w ≡ v <.> w
scaleNorm :: LSpace v => Scalar v -> Norm v -> Norm v Source
Scale the result of a norm with the absolute of the given number.
scaleNorm μ n |$| v = abs μ * (n|$|v)
Equivalently, this scales the norm's unit ball by the reciprocal of that factor.
normSpanningSystem :: SimpleSpace v => Norm v -> [DualVector v] Source
normSpanningSystem' :: (FiniteDimensional v, IEEE (Scalar v)) => Norm v -> [v] Source
Variances
type Variance v = Norm (DualVector v) Source
A multidimensional variance of points v
with some distribution can be
considered a norm on the dual space, quantifying for a dual vector dv
the
expectation value of (dv.^v)^2
.
spanVariance :: LSpace v => [v] -> Variance v Source
dualNorm :: SimpleSpace v => Norm v -> Variance v Source
A proper norm induces a norm on the dual space – the “reciprocal norm”. (The orthonormal systems of the norm and its dual are mutually conjugate.) The dual norm of a seminorm is undefined.
dependence :: (SimpleSpace u, SimpleSpace v, Scalar u ~ Scalar v) => Variance (u, v) -> u +> v Source
Interpret a variance as a covariance between two subspaces, and
normalise it by the variance on u
. The result is effectively
the linear regression coefficient of a simple regression of the vectors
spanning the variance.
Utility
densifyNorm :: LSpace v => Norm v -> Norm v Source
spanNorm
/ spanVariance
are inefficient if the number of vectors
is similar to the dimension of the space, or even larger than it.
Use this function to optimise the underlying operator to a dense
matrix representation.
Solving linear equations
(\$) :: (FiniteDimensional u, FiniteDimensional v, SemiInner v, Scalar u ~ Scalar v, Fractional' (Scalar v)) => (u +> v) -> v -> u infixr 0 Source
Inverse function application, aka solving of a linear system:
f \$ (f $ v) ≡ v
f $ (f \$ u) ≡ u
If f
does not have full rank, the behaviour is undefined (but we expect
it to be reasonably well-behaved or even give a least-squares solution).
If you want to solve for multiple RHS vectors, be sure to partially apply this operator to the linear map, like
map (f \$
) [v₁, v₂, ...]
Since most of the work is actually done in triangularising the operator, this may be much faster than
[f\$
v₁, f\$
v₂, ...]
pseudoInverse :: (FiniteDimensional u, FiniteDimensional v, SemiInner v, Scalar u ~ Scalar v, Fractional' (Scalar v)) => (u +> v) -> v +> u Source
roughDet :: (FiniteDimensional v, IEEE (Scalar v)) => (v +> v) -> Scalar v Source
Approximation of the determinant.
Eigenvalue problems
eigen :: (FiniteDimensional v, HilbertSpace v, IEEE (Scalar v)) => (v +> v) -> [(Scalar v, v)] Source
Simple automatic finding of the eigenvalues and -vectors of a Hermitian operator, in reasonable approximation.
This works by spanning a QR-stabilised Krylov basis with constructEigenSystem
until it is complete (roughEigenSystem
), and then properly decoupling the
system with finishEigenSystem
(based on two iterations of shifted Givens rotations).
This function is a tradeoff in performance vs. accuracy. Use constructEigenSystem
and finishEigenSystem
directly for more quickly computing a (perhaps incomplete)
approximation, or for more precise results.
:: (LSpace v, RealFloat (Scalar v)) | |
=> Norm v | The notion of orthonormality. |
-> Scalar v | Error bound for deviations from eigen-ness. |
-> (v -+> v) | Operator to calculate the eigensystem of. Must be Hermitian WRT the scalar product defined by the given metric. |
-> [v] | Starting vector(s) for the power method. |
-> [[Eigenvector v]] | Infinite sequence of ever more accurate approximations to the eigensystem of the operator. |
Lazily compute the eigenbasis of a linear map. The algorithm is essentially a hybrid of Lanczos/Arnoldi style Krylov-spanning and QR-diagonalisation, which we don't do separately but interleave at each step.
The size of the eigen-subbasis increases with each step until the space's dimension is reached. (But the algorithm can also be used for infinite-dimensional spaces.)
roughEigenSystem :: (FiniteDimensional v, IEEE (Scalar v)) => Norm v -> (v +> v) -> [Eigenvector v] Source
Find a system of vectors that approximate the eigensystem, in the sense that: each true eigenvalue is represented by an approximate one, and that is closer to the true value than all the other approximate EVs.
This function does not make any guarantees as to how well a single eigenvalue is approximated, though.
finishEigenSystem :: (LSpace v, RealFloat (Scalar v)) => Norm v -> [Eigenvector v] -> [Eigenvector v] Source
data Eigenvector v Source
Eigenvector | |
|
The classes of suitable vector spaces
type LSpace v = (LSpace' v, Num''' (Scalar v)) Source
The workhorse of this package: most functions here work on vector
spaces that fulfill the LSpace v constraint. In summary, this is:
- A VectorSpace whose Scalar is a Num''' (i.e. a number type that has
  itself all the vector-space instances).
- You have an implementation for TensorProduct v w, for any other space w.
- You have a DualVector space that fulfills DualVector (DualVector v) ~ v.
To make a new space of yours an LSpace
, you must define instances of
TensorSpace
and LinearSpace
.
class VectorSpace v => TensorSpace v where Source
zeroTensor, toFlatTensor, fromFlatTensor, addTensors, scaleTensor, negateTensor, tensorProduct, transposeTensor, fmapTensor, fzipTensorWith, coerceFmapTensorProduct
type TensorProduct v w :: * Source
The internal representation of a Tensor
product.
For euclidean spaces, this is generally constructed by replacing each s
scalar field in the v
vector with an entire w
vector. I.e., you have
then a “nested vector” or, if v
is a DualVector
/ “row vector”, a matrix.
zeroTensor :: (LSpace w, Scalar w ~ Scalar v) => v ⊗ w Source
toFlatTensor :: v -+> (v ⊗ Scalar v) Source
fromFlatTensor :: (v ⊗ Scalar v) -+> v Source
addTensors :: (LSpace w, Scalar w ~ Scalar v) => (v ⊗ w) -> (v ⊗ w) -> v ⊗ w Source
subtractTensors :: (LSpace v, LSpace w, Num''' (Scalar v), Scalar w ~ Scalar v) => (v ⊗ w) -> (v ⊗ w) -> v ⊗ w Source
scaleTensor :: (LSpace w, Scalar w ~ Scalar v) => Bilinear (Scalar v) (v ⊗ w) (v ⊗ w) Source
negateTensor :: (LSpace w, Scalar w ~ Scalar v) => (v ⊗ w) -+> (v ⊗ w) Source
tensorProduct :: (LSpace w, Scalar w ~ Scalar v) => Bilinear v w (v ⊗ w) Source
transposeTensor :: (LSpace w, Scalar w ~ Scalar v) => (v ⊗ w) -+> (w ⊗ v) Source
fmapTensor :: (LSpace w, LSpace x, Scalar w ~ Scalar v, Scalar x ~ Scalar v) => Bilinear (w -+> x) (v ⊗ w) (v ⊗ x) Source
fzipTensorWith :: (LSpace u, LSpace w, LSpace x, Scalar u ~ Scalar v, Scalar w ~ Scalar v, Scalar x ~ Scalar v) => Bilinear ((w, x) -+> u) (v ⊗ w, v ⊗ x) (v ⊗ u) Source
coerceFmapTensorProduct :: Functor p => p v -> Coercion a b -> Coercion (TensorProduct v a) (TensorProduct v b) Source
Num''' s => TensorSpace (ZeroDim s) Source | |
(Num''' (Scalar v), LSpace u, LSpace v, (~) * (Scalar u) (Scalar v)) => TensorSpace (u, v) Source | |
(LSpace u, LSpace v, (~) * (Scalar u) s, (~) * (Scalar v) s) => TensorSpace (LinearFunction s u v) Source | |
(Num''' s, LSpace u, LSpace v, (~) * (Scalar u) s, (~) * (Scalar v) s) => TensorSpace (Tensor s u v) Source | |
(Num''' s, LSpace u, LSpace v, (~) * (Scalar u) s, (~) * (Scalar v) s) => TensorSpace (LinearMap s u v) Source |
class (TensorSpace v, TensorSpace (DualVector v), Num' (Scalar v), Scalar (DualVector v) ~ Scalar v) => LinearSpace v where Source
The class of vector spaces v for which LinearMap s v w is
well-implemented.
linearId, coerceDoubleDual, blockVectSpan, contractTensorMap, contractMapTensor, contractLinearMapAgainst, applyDualVector, applyLinear, composeLinear
type DualVector v :: * Source
Suitable representation of a linear map from the space v
to its field.
For the usual euclidean spaces, you can just define
DualVector v = v.
(In this case, a dual vector will be just a “row vector” if you consider
v-vectors as “column vectors”. LinearMap will then effectively have
a matrix layout.)
idTensor :: LSpace v => v ⊗ DualVector v Source
sampleLinearFunction :: (LSpace v, LSpace w, Scalar v ~ Scalar w) => (v -+> w) -+> (v +> w) Source
toLinearForm :: LSpace v => DualVector v -+> (v +> Scalar v) Source
fromLinearForm :: LSpace v => (v +> Scalar v) -+> DualVector v Source
coerceDoubleDual :: Coercion v (DualVector (DualVector v)) Source
blockVectSpan :: (LSpace w, Scalar w ~ Scalar v) => w -+> (v ⊗ (v +> w)) Source
blockVectSpan' :: (LSpace v, LSpace w, Num''' (Scalar v), Scalar v ~ Scalar w) => w -+> (v +> (v ⊗ w)) Source
trace :: LSpace v => (v +> v) -+> Scalar v Source
contractTensorMap :: (LSpace w, Scalar w ~ Scalar v) => (v +> (v ⊗ w)) -+> w Source
contractMapTensor :: (LSpace w, Scalar w ~ Scalar v) => (v ⊗ (v +> w)) -+> w Source
contractFnTensor :: (LSpace v, LSpace w, Scalar w ~ Scalar v) => (v ⊗ (v -+> w)) -+> w Source
contractTensorFn :: (LSpace v, LSpace w, Scalar w ~ Scalar v) => (v -+> (v ⊗ w)) -+> w Source
contractTensorWith :: (LSpace v, LSpace w, Scalar w ~ Scalar v) => Bilinear (v ⊗ w) (DualVector w) v Source
contractLinearMapAgainst :: (LSpace w, Scalar w ~ Scalar v) => Bilinear (v +> w) (w -+> v) (Scalar v) Source
applyDualVector :: LSpace v => Bilinear (DualVector v) v (Scalar v) Source
applyLinear :: (LSpace w, Scalar w ~ Scalar v) => Bilinear (v +> w) v w Source
composeLinear :: (LSpace w, LSpace x, Scalar w ~ Scalar v, Scalar x ~ Scalar v) => Bilinear (w +> x) (v +> w) (v +> x) Source
Num''' s => LinearSpace (ZeroDim s) Source | |
(LinearSpace u, LinearSpace (DualVector u), (~) * (DualVector (DualVector u)) u, LinearSpace v, LinearSpace (DualVector v), (~) * (DualVector (DualVector v)) v, (~) * (Scalar u) (Scalar v), Num''' (Scalar u)) => LinearSpace (u, v) Source | |
(LSpace u, LSpace v, (~) * (Scalar u) s, (~) * (Scalar v) s) => LinearSpace (LinearFunction s u v) Source | |
(Num''' s, LSpace u, LSpace v, (~) * (Scalar u) s, (~) * (Scalar v) s) => LinearSpace (Tensor s u v) Source | |
(Num''' s, LSpace u, LSpace v, (~) * (Scalar u) s, (~) * (Scalar v) s) => LinearSpace (LinearMap s u v) Source |
Orthonormal systems
class LSpace v => SemiInner v where Source
SemiInner
is the class of vector spaces with finite subspaces in which
you can define a basis that can be used to project from the whole space
into the subspace. The usual application is for using a kind of
Galerkin method to
give an approximate solution (see \$
) to a linear equation in a possibly
infinite-dimensional space.
Of course, this also works for spaces which are already finite-dimensional themselves.
dualBasisCandidates :: [(Int, v)] -> Forest (Int, DualVector v) Source
Lazily enumerate choices of a basis of functionals that can be made dual
to the given vectors, in order of preference (which roughly means, large in
the normal direction.) I.e., if the vector 𝑣
is assigned early to the
dual vector 𝑣'
, then (𝑣' $ 𝑣)
should be large and all the other products
comparably small.
The purpose is that we should be able to make this basis orthonormal with a ~Gaussian-elimination approach, in a way that stays numerically stable. This is otherwise known as the choice of a pivot element.
For simple finite-dimensional array-vectors, you can easily define this
method using cartesianDualBasisCandidates
.
(Fractional'' s, SemiInner s) => SemiInner (V0 s) Source | |
(Fractional'' s, Ord s, SemiInner s) => SemiInner (V1 s) Source | |
(Fractional'' s, SemiInner s) => SemiInner (ZeroDim s) Source | |
(SemiInner u, SemiInner v, (~) * (Scalar u) (Scalar v)) => SemiInner (u, v) Source | |
(LSpace u, FiniteDimensional (DualVector u), SemiInner (DualVector u), SemiInner v, FiniteDimensional v, (~) * (Scalar u) s, (~) * (Scalar v) s, RealFrac' s) => SemiInner (Tensor s u v) Source | |
(SemiInner u, FiniteDimensional u, (~) * (Scalar u) s, SemiInner v, FiniteDimensional v, (~) * (Scalar v) s, RealFrac' s) => SemiInner (LinearMap s u v) Source |
cartesianDualBasisCandidates Source
:: [DualVector v] | Set of canonical basis functionals. |
-> (v -> [ℝ]) | Decompose a vector in absolute value components. The list indices should correspond to those in the functional list. |
-> [(Int, v)] -> Forest (Int, DualVector v) | Suitable definition of |
Finite bases
class (LSpace v, LSpace (Scalar v)) => FiniteDimensional v where Source
entireBasis, enumerateSubBasis, decomposeLinMap, decomposeLinMapWithin, recomposeSB, recomposeSBTensor, recomposeLinMap, recomposeContraLinMap, recomposeContraLinMapTensor, uncanonicallyFromDual, uncanonicallyToDual
Whereas Basis
-values refer to a single basis vector, a single
SubBasis
value represents a collection of such basis vectors,
which can be used to associate a vector with a list of coefficients.
For spaces with a canonical finite basis, SubBasis
does not actually
need to contain any information, it can simply have the full finite
basis as its only value. Even for large sparse spaces, it should only
have a very coarse structure that can be shared by many vectors.
entireBasis :: SubBasis v Source
enumerateSubBasis :: SubBasis v -> [v] Source
subbasisDimension :: SubBasis v -> Int Source
decomposeLinMap :: (LSpace w, Scalar w ~ Scalar v) => (v +> w) -> (SubBasis v, DList w) Source
Split up a linear map in “column vectors” WRT some suitable basis.
decomposeLinMapWithin :: (LSpace w, Scalar w ~ Scalar v) => SubBasis v -> (v +> w) -> Either (SubBasis v, DList w) (DList w) Source
Expand in the given basis, if possible. Else yield a superbasis of the given one, in which this is possible, and the decomposition therein.
recomposeSB :: SubBasis v -> [Scalar v] -> (v, [Scalar v]) Source
Assemble a vector from coefficients in some basis. Return any excess coefficients.
recomposeSBTensor :: (FiniteDimensional w, Scalar w ~ Scalar v) => SubBasis v -> SubBasis w -> [Scalar v] -> (v ⊗ w, [Scalar v]) Source
recomposeLinMap :: (LSpace w, Scalar w ~ Scalar v) => SubBasis v -> [w] -> (v +> w, [w]) Source
recomposeContraLinMap :: (LinearSpace w, Scalar w ~ Scalar v, Functor f) => (f (Scalar w) -> w) -> f (DualVector v) -> v +> w Source
Given a function that interprets a coefficient-container as a vector representation, build a linear function mapping to that space.
recomposeContraLinMapTensor :: (FiniteDimensional u, LinearSpace w, Scalar u ~ Scalar v, Scalar w ~ Scalar v, Functor f) => (f (Scalar w) -> w) -> f (DualVector v ⊗ DualVector u) -> (v ⊗ u) +> w Source
uncanonicallyFromDual :: DualVector v -+> v Source
The existence of a finite basis gives us an isomorphism between a space and its dual space. Note that this isomorphism is not natural (i.e. it depends on the actual choice of basis, unlike everything else in this library).
uncanonicallyToDual :: v -+> DualVector v Source
(Num''' s, LinearSpace s) => FiniteDimensional (V0 s) Source | |
(Num''' s, LSpace s) => FiniteDimensional (V1 s) Source | |
(Num''' s, LSpace s) => FiniteDimensional (V2 s) Source | |
(Num''' s, LSpace s) => FiniteDimensional (V3 s) Source | |
(Num''' s, LSpace s) => FiniteDimensional (V4 s) Source | |
Num''' s => FiniteDimensional (ZeroDim s) Source | |
(FiniteDimensional u, FiniteDimensional v, (~) * (Scalar u) (Scalar v)) => FiniteDimensional (u, v) Source | |
(FiniteDimensional u, FiniteDimensional v, (~) * (Scalar u) s, (~) * (Scalar v) s, Fractional' (Scalar v)) => FiniteDimensional (Tensor s u v) Source | |
(LSpace u, FiniteDimensional (DualVector u), FiniteDimensional v, (~) * (Scalar u) s, (~) * (Scalar v) s, Fractional' (Scalar v)) => FiniteDimensional (LinearMap s u v) Source |
Utility
Linear primitives
addV :: AdditiveGroup w => LinearFunction s (w, w) w Source
scale :: VectorSpace v => Bilinear (Scalar v) v v Source
inner :: InnerSpace v => Bilinear v v (Scalar v) Source
bilinearFunction :: (v -> w -> y) -> Bilinear v w y Source
Hilbert space operations
riesz :: (FiniteDimensional v, InnerSpace v) => DualVector v -+> v Source
The Riesz representation theorem provides an isomorphism between a Hilbert space and its (continuous) dual space.
coRiesz :: (LSpace v, Num''' (Scalar v), InnerSpace v) => v -+> DualVector v Source
showsPrecAsRiesz :: (FiniteDimensional v, InnerSpace v, Show v, HasBasis (Scalar v), Basis (Scalar v) ~ ()) => Int -> DualSpace v -> ShowS Source
Functions are generally a pain to display, but since linear functionals
in a Hilbert space can be represented by vectors in that space,
this can be used for implementing a Show
instance.
(.<) :: (FiniteDimensional v, Num''' (Scalar v), InnerSpace v, LSpace w, HasBasis w, Scalar v ~ Scalar w) => Basis w -> v -> v +> w infixl 7 Source
Outer product of a general v
-vector and a basis element from w
.
Note that this operation is in general pretty inefficient; it is
provided mostly to lay out matrix definitions neatly.
Constraint synonyms
type HilbertSpace v = (LSpace v, InnerSpace v, DualVector v ~ v) Source
type SimpleSpace v = (FiniteDimensional v, FiniteDimensional (DualVector v), SemiInner v, SemiInner (DualVector v), RealFrac' (Scalar v)) Source
type Num' s = (Num s, VectorSpace s, Scalar s ~ s) Source
type Num'' s = (Num' s, LinearSpace s) Source
type Num''' s = (Num s, InnerSpace s, Scalar s ~ s, LSpace' s, DualVector s ~ s) Source
type Fractional' s = (Fractional s, Eq s, VectorSpace s, Scalar s ~ s) Source
type Fractional'' s = (Fractional' s, LSpace s) Source
type RealFrac' s = (IEEE s, HilbertSpace s, Scalar s ~ s) Source
type RealFloat' s = (RealFrac' s, Floating s) Source
Misc
relaxNorm :: SimpleSpace v => Norm v -> [v] -> Norm v Source
Modify a norm in such a way that the given vectors lie within its unit ball. (Not optimally – the unit ball may be bigger than necessary.)
transformVariance :: (LSpace v, LSpace w, Scalar v ~ Scalar w) => (v +> w) -> Variance v -> Variance w Source
findNormalLength :: RealFrac' s => Norm s -> Maybe s Source
The unique positive number whose norm is 1 (if the norm is not constant zero).
normalLength :: RealFrac' s => Norm s -> s Source
Unsafe version of findNormalLength
, only works reliably if the norm
is actually positive definite.
summandSpaceNorms :: (SimpleSpace u, SimpleSpace v, Scalar u ~ Scalar v) => Norm (u, v) -> (Norm u, Norm v) Source
sumSubspaceNorms :: (LSpace u, LSpace v, Scalar u ~ Scalar v) => Norm u -> Norm v -> Norm (u, v) Source
sharedNormSpanningSystem :: SimpleSpace v => Norm v -> Norm v -> [(DualVector v, Scalar v)] Source