Safe Haskell | None |
---|---|
Language | Haskell2010 |
Synopsis
- data T typ sh prob = Cons {
- initial :: Vector sh prob
- transition :: Square sh prob
- distribution :: T typ sh prob
- mapStatesShape :: (EmissionProb typ, C sh0, C sh1) => (sh0 -> sh1) -> T typ sh0 prob -> T typ sh1 prob
- emission :: (EmissionProb typ, C sh, Eq sh, Real prob) => T typ sh prob -> Emission typ prob -> Vector sh prob
- forward :: (EmissionProb typ, C sh, Eq sh, Real prob, Emission typ prob ~ emission, Traversable f) => T typ sh prob -> T f emission -> prob
- alpha :: (EmissionProb typ, C sh, Eq sh, Real prob, Emission typ prob ~ emission, Traversable f) => T typ sh prob -> T f emission -> T f (Vector sh prob)
- backward :: (EmissionProb typ, C sh, Eq sh, Real prob, Emission typ prob ~ emission, Traversable f) => T typ sh prob -> T f emission -> prob
- beta :: (EmissionProb typ, C sh, Eq sh, Real prob, Emission typ prob ~ emission, Traversable f) => T typ sh prob -> f emission -> T f (Vector sh prob)
- alphaBeta :: (EmissionProb typ, C sh, Eq sh, Real prob, Emission typ prob ~ emission, Traversable f) => T typ sh prob -> T f emission -> (prob, T f (Vector sh prob), T f (Vector sh prob))
- biscaleTransition :: (EmissionProb typ, C sh, Eq sh, Real prob) => T typ sh prob -> Emission typ prob -> Vector sh prob -> Vector sh prob -> Square sh prob
- xiFromAlphaBeta :: (EmissionProb typ, C sh, Eq sh, Real prob, Emission typ prob ~ emission) => T typ sh prob -> prob -> T [] emission -> T [] (Vector sh prob) -> T [] (Vector sh prob) -> [Square sh prob]
- zetaFromXi :: (C sh, Eq sh, Real prob) => [Square sh prob] -> [Vector sh prob]
- zetaFromAlphaBeta :: (C sh, Eq sh, Real prob) => prob -> T [] (Vector sh prob) -> T [] (Vector sh prob) -> T [] (Vector sh prob)
- reveal :: (EmissionProb typ, InvIndexed sh, Eq sh, Index sh ~ state, Emission typ prob ~ emission, Real prob, Traversable f) => T typ sh prob -> T f emission -> T f state
- revealGen :: (EmissionProb typ, InvIndexed sh, Eq sh, Index sh ~ state, Emission typ prob ~ emission, Real prob, Traversable f) => (Vector (Deferred sh) prob -> Vector (Deferred sh) prob) -> T typ sh prob -> T f emission -> T f state
- revealStorable :: (EmissionProb typ, InvIndexed sh, Eq sh, Index sh ~ state, Storable state, Emission typ prob ~ emission, Real prob, Traversable f) => (Vector sh prob -> Vector sh prob) -> T typ sh prob -> T f emission -> T f state
- matrixMaxMul :: (InvIndexed sh, Eq sh, Index sh ~ ix, Storable ix, Real a) => Square sh a -> Vector sh a -> (Vector sh ix, Vector sh a)
- data Trained typ sh prob = Trained {
- trainedInitial :: Vector sh prob
- trainedTransition :: Square sh prob
- trainedDistribution :: Trained typ sh prob
- sumTransitions :: (C sh, Eq sh, Real e) => T typ sh e -> [Square sh e] -> Square sh e
- trainUnsupervised :: (Estimate typ, C sh, Eq sh, Real prob, Emission typ prob ~ emission) => T typ sh prob -> T [] emission -> Trained typ sh prob
- mergeTrained :: (Estimate typ, C sh, Eq sh, Real prob) => Trained typ sh prob -> Trained typ sh prob -> Trained typ sh prob
- toCells :: (ToCSV typ, Indexed sh, Real prob, Show prob) => T typ sh prob -> [[String]]
- parseCSV :: (FromCSV typ, C stateSh, Eq stateSh, Real prob, Read prob) => (Int -> stateSh) -> CSVParser (T typ stateSh prob)
Documentation
A Hidden Markov model consists of a number of (hidden) states
and a set of emissions.
There is a vector for the initial probability of each state
and a matrix containing the probability for switching
from one state to another one.
The distribution
field points to probability distributions
that associate every state with emissions of different probability.
Famous distribution instances are discrete and Gaussian distributions.
See Math.HiddenMarkovModel.Distribution for details.
The transition matrix is transposed with respect to popular HMM descriptions. But I think this is the natural orientation, because this way you can write "transition matrix times probability column vector".
Cons | |
|
Instances
mapStatesShape :: (EmissionProb typ, C sh0, C sh1) => (sh0 -> sh1) -> T typ sh0 prob -> T typ sh1 prob Source #
emission :: (EmissionProb typ, C sh, Eq sh, Real prob) => T typ sh prob -> Emission typ prob -> Vector sh prob Source #
forward :: (EmissionProb typ, C sh, Eq sh, Real prob, Emission typ prob ~ emission, Traversable f) => T typ sh prob -> T f emission -> prob Source #
alpha :: (EmissionProb typ, C sh, Eq sh, Real prob, Emission typ prob ~ emission, Traversable f) => T typ sh prob -> T f emission -> T f (Vector sh prob) Source #
backward :: (EmissionProb typ, C sh, Eq sh, Real prob, Emission typ prob ~ emission, Traversable f) => T typ sh prob -> T f emission -> prob Source #
beta :: (EmissionProb typ, C sh, Eq sh, Real prob, Emission typ prob ~ emission, Traversable f) => T typ sh prob -> f emission -> T f (Vector sh prob) Source #
alphaBeta :: (EmissionProb typ, C sh, Eq sh, Real prob, Emission typ prob ~ emission, Traversable f) => T typ sh prob -> T f emission -> (prob, T f (Vector sh prob), T f (Vector sh prob)) Source #
biscaleTransition :: (EmissionProb typ, C sh, Eq sh, Real prob) => T typ sh prob -> Emission typ prob -> Vector sh prob -> Vector sh prob -> Square sh prob Source #
xiFromAlphaBeta :: (EmissionProb typ, C sh, Eq sh, Real prob, Emission typ prob ~ emission) => T typ sh prob -> prob -> T [] emission -> T [] (Vector sh prob) -> T [] (Vector sh prob) -> [Square sh prob] Source #
zetaFromAlphaBeta :: (C sh, Eq sh, Real prob) => prob -> T [] (Vector sh prob) -> T [] (Vector sh prob) -> T [] (Vector sh prob) Source #
reveal :: (EmissionProb typ, InvIndexed sh, Eq sh, Index sh ~ state, Emission typ prob ~ emission, Real prob, Traversable f) => T typ sh prob -> T f emission -> T f state Source #
In contrast to Math.HiddenMarkovModel.reveal this does not normalize the vector. This is slightly simpler but for long sequences the product of probabilities might be smaller than the smallest representable number.
revealGen :: (EmissionProb typ, InvIndexed sh, Eq sh, Index sh ~ state, Emission typ prob ~ emission, Real prob, Traversable f) => (Vector (Deferred sh) prob -> Vector (Deferred sh) prob) -> T typ sh prob -> T f emission -> T f state Source #
revealStorable :: (EmissionProb typ, InvIndexed sh, Eq sh, Index sh ~ state, Storable state, Emission typ prob ~ emission, Real prob, Traversable f) => (Vector sh prob -> Vector sh prob) -> T typ sh prob -> T f emission -> T f state Source #
matrixMaxMul :: (InvIndexed sh, Eq sh, Index sh ~ ix, Storable ix, Real a) => Square sh a -> Vector sh a -> (Vector sh ix, Vector sh a) Source #
data Trained typ sh prob Source #
A trained model is a temporary form of a Hidden Markov model
that we need during the training on multiple training sequences.
It allows collecting knowledge over many sequences with mergeTrained,
even with mixed supervised and unsupervised training.
You finish the training by converting the trained model
back to a plain model using finishTraining.
You can create a trained model in three ways:
- supervised training using an emission sequence with associated states,
- unsupervised training using an emission sequence and an existing Hidden Markov Model,
- derive it from state sequence patterns, cf. Math.HiddenMarkovModel.Pattern.
Trained | |
|
Instances
(C sh, Storable prob, Show sh, Show prob, Show typ) => Show (Trained typ sh prob) Source # | |
(Estimate typ, C sh, Eq sh, Real prob) => Semigroup (Trained typ sh prob) Source # | |
(NFData typ, NFData sh, C sh, NFData prob, Storable prob) => NFData (Trained typ sh prob) Source # | |
Defined in Math.HiddenMarkovModel.Private |
trainUnsupervised :: (Estimate typ, C sh, Eq sh, Real prob, Emission typ prob ~ emission) => T typ sh prob -> T [] emission -> Trained typ sh prob Source #
Baum-Welch algorithm