-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/
-- | Self-Organising Maps.
--
-- A Kohonen Self-organising Map (SOM) maps input patterns onto a regular
-- grid (usually two-dimensional) where each node in the grid is a model
-- of the input data, and does so using a method which ensures that any
-- topological relationships within the input data are also represented
-- in the grid. This implementation supports the use of non-numeric
-- patterns.
--
-- In layman's terms, a SOM can be useful when you want to discover
-- the underlying structure of some data.
--
-- The user guide is available at
-- https://github.com/mhwombat/som/wiki.
@package som
@version 8.0.4
-- | Tools for identifying patterns in data.
module Data.Datamining.Pattern
adjustNum :: (Num a, Ord a, Eq a) => a -> a -> a -> a
absDifference :: Num a => a -> a -> a
-- | adjustVector target amount vector adjusts each element
-- of vector to move it closer to the corresponding element of
-- target. The amount of adjustment is controlled by the
-- learning rate amount, which is a number between 0 and 1.
-- Larger values of amount permit more adjustment. If
-- amount=1, the result will be identical to the
-- target. If amount=0, the result will be the
-- unmodified vector. If target is shorter than
-- vector, the result will be the same length as
-- target. If target is longer than vector,
-- the result will be the same length as vector.
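--
-- For example (a sketch; the amount=0 and amount=1 cases follow from the
-- description above, and the middle case assumes simple linear
-- interpolation):
--
-- > adjustVector [1, 1] 1.0 [0, 0]  -- [1.0, 1.0]
-- > adjustVector [1, 1] 0.0 [0, 0]  -- [0.0, 0.0]
-- > adjustVector [1, 1] 0.5 [0, 0]  -- [0.5, 0.5], assuming linear interpolation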
adjustVector :: (Num a, Ord a, Eq a) => [a] -> a -> [a] -> [a]
-- | Same as adjustVector, except that the result will
-- always be the same length as vector. This means that if
-- target is shorter than vector, the "leftover"
-- elements of vector will be copied to the result, unmodified.
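--
-- For example (a sketch based on the description above):
--
-- > adjustVectorPreserveLength [1] 1.0 [0, 5]  -- [1.0, 5.0]; the leftover 5 is copied unchanged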
adjustVectorPreserveLength :: (Num a, Ord a, Eq a) => [a] -> a -> [a] -> [a]
-- | Calculates the square of the Euclidean distance between two vectors.
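--
-- For example:
--
-- > euclideanDistanceSquared [0, 3] [4, 0]  -- (0-4)^2 + (3-0)^2 = 25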
euclideanDistanceSquared :: Num a => [a] -> [a] -> a
magnitudeSquared :: Num a => [a] -> a
-- | A vector that has been normalised, i.e., the magnitude of the vector =
-- 1.
data NormalisedVector a
-- | Normalises a vector so that its magnitude is 1.
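--
-- For example (a sketch; the exact Show output of the result may differ):
--
-- > normalise [3, 4]  -- a NormalisedVector with components [0.6, 0.8]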
normalise :: Floating a => [a] -> NormalisedVector a
-- | A vector that has been scaled so that all elements in the vector are
-- between zero and one. To scale a set of vectors, use
-- scaleAll. Alternatively, if you can identify a maximum
-- and minimum value for each element in a vector, you can scale
-- individual vectors using scale.
data ScaledVector a
-- | Given a vector qs of pairs of numbers, where each pair
-- represents the maximum and minimum value to be expected at each index
-- in xs, scale qs xs scales the vector
-- xs element by element, mapping the maximum value expected at
-- that index to one, and the minimum value to zero.
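--
-- For example (a sketch; following the description above, each pair is
-- written maximum first):
--
-- > scale [(10, 0), (100, 0)] [5, 25]
-- > -- 5 in the range 0..10 scales to 0.5; 25 in the range 0..100 scales to 0.25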
scale :: Fractional a => [(a, a)] -> [a] -> ScaledVector a
-- | Scales a set of vectors by determining the maximum and minimum values
-- at each index in the vector, and mapping the maximum value to one, and
-- the minimum value to zero.
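--
-- For example (a sketch based on the description above; the results are
-- wrapped as ScaledVectors):
--
-- > scaleAll [[0, 10], [5, 20], [10, 0]]
-- > -- index 0 ranges over 0..10 and index 1 over 0..20, so the scaled
-- > -- values are [0, 0.5], [0.5, 1] and [1, 0]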
scaleAll :: (Fractional a, Ord a) => [[a]] -> [ScaledVector a]
instance Show a => Show (ScaledVector a)
instance Show a => Show (NormalisedVector a)
-- | Tools for identifying patterns in data.
module Data.Datamining.Clustering.Classifier
-- | A machine which learns to classify input patterns. Minimal complete
-- definition: trainBatch, reportAndTrain.
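--
-- A minimal usage sketch (a hypothetical helper that works for any
-- instance, using only the class methods listed below):
--
-- > trainThenClassify :: (Classifier c v k p, Ord v) => c v k p -> [p] -> p -> (k, c v k p)
-- > trainThenClassify c ps p = (classify c' p, c')
-- >   where c' = trainBatch c ps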
class Classifier (c :: * -> * -> * -> *) v k p where
  classify c p = f $ differences c p
    where f [] = error "classifier has no models"
          f xs = fst $ minimumBy (comparing snd) xs
  train c p = c'
    where (_, _, c') = reportAndTrain c p
  classifyAndTrain c p = (bmu, c')
    where (bmu, _, c') = reportAndTrain c p
  diffAndTrain c p = (ds, c')
    where (_, ds, c') = reportAndTrain c p
toList :: Classifier c v k p => c v k p -> [(k, p)]
numModels :: Classifier c v k p => c v k p -> Int
models :: Classifier c v k p => c v k p -> [p]
differences :: Classifier c v k p => c v k p -> p -> [(k, v)]
classify :: (Classifier c v k p, Ord v) => c v k p -> p -> k
train :: Classifier c v k p => c v k p -> p -> c v k p
trainBatch :: Classifier c v k p => c v k p -> [p] -> c v k p
classifyAndTrain :: Classifier c v k p => c v k p -> p -> (k, c v k p)
diffAndTrain :: Classifier c v k p => c v k p -> p -> ([(k, v)], c v k p)
reportAndTrain :: Classifier c v k p => c v k p -> p -> (k, [(k, v)], c v k p)
-- | A module containing private DSOM internals. Most developers
-- should use DSOM instead. This module is subject to change
-- without notice.
module Data.Datamining.Clustering.DSOMInternal
-- | A Self-Organising Map (DSOM).
--
-- Although DSOM implements GridMap, most users will
-- only need the interface provided by
-- Data.Datamining.Clustering.Classifier. If you choose to use
-- the GridMap functions, please note:
--
--
-- - The functions adjust and adjustWithKey do not
-- increment the counter. You can do so manually with
-- incrementCounter.
-- - The functions map and mapWithKey are not
-- implemented (they just return an error). It would be
-- problematic to implement them because the input DSOM and the output
-- DSOM would have to have the same Metric type.
--
data DSOM gm x k p
[DSOM] :: gm p -> (x -> x -> x -> x) -> (p -> p -> x) -> (p -> x -> p -> p) -> DSOM gm x k p
-- | Maps patterns to tiles in a regular grid. In the context of a SOM, the
-- tiles are called "nodes".
[gridMap] :: DSOM gm x k p -> gm p
-- | A function which determines how quickly the SOM learns.
[learningRate] :: DSOM gm x k p -> (x -> x -> x -> x)
-- | A function which compares two patterns and returns a
-- non-negative number representing how different the patterns
-- are. A result of 0 indicates that the patterns are identical.
[difference] :: DSOM gm x k p -> p -> p -> x
-- | A function which updates models. If this function is f, then
-- f target amount pattern returns a modified copy of
-- pattern that is more similar to target than
-- pattern is. The magnitude of the adjustment is controlled by
-- the amount parameter, which should be a number between 0 and
-- 1. Larger values for amount permit greater adjustments. If
-- amount=1, the result should be identical to the
-- target. If amount=0, the result should be the
-- unmodified pattern.
[makeSimilar] :: DSOM gm x k p -> p -> x -> p -> p
withGridMap :: (gm p -> gm p) -> DSOM gm x k p -> DSOM gm x k p
-- | Extracts the grid and current models from the DSOM.
toGridMap :: GridMap gm p => DSOM gm x k p -> gm p
adjustNode :: (FiniteGrid (gm p), GridMap gm p, k ~ Index (gm p), k ~ Index (BaseGrid gm p), Ord k, Num x, Fractional x) => gm p -> (p -> x -> p -> p) -> (p -> p -> x) -> (x -> x -> x) -> p -> k -> k -> (p -> p)
scaleDistance :: (Num a, Fractional a) => Int -> Int -> a
-- | Trains the specified node and the neighbourhood around it to better
-- match a target. Most users should use train, which
-- automatically determines the BMU and trains it and its neighbourhood.
trainNeighbourhood :: (FiniteGrid (gm p), GridMap gm p, k ~ Index (gm p), k ~ Index (BaseGrid gm p), Ord k, Num x, Fractional x) => DSOM gm x t p -> k -> p -> DSOM gm x k p
justTrain :: (FiniteGrid (gm p), GridMap gm p, GridMap gm x, k ~ Index (gm p), k ~ Index (gm x), k ~ Index (BaseGrid gm p), k ~ Index (BaseGrid gm x), Ord k, Ord x, Num x, Fractional x) => DSOM gm x t p -> p -> DSOM gm x k p
-- | Configures a learning function that depends not on the time, but on
-- how good a model we already have for the target. If the BMU is an
-- exact match for the target, no learning occurs. Usage is
-- rougierLearningFunction r p, where r is the
-- maximal learning rate (0 <= r <= 1), and p is the
-- elasticity.
--
-- NOTE: When using this learning function, ensure that abs .
-- difference is always between 0 and 1, inclusive. Otherwise you
-- may get invalid learning rates.
rougierLearningFunction :: (Eq a, Ord a, Floating a) => a -> a -> (a -> a -> a -> a)
instance Foldable gm => Foldable (DSOM gm x k)
instance Grid (gm p) => Grid (DSOM gm x k p)
instance (Foldable gm, GridMap gm p, FiniteGrid (BaseGrid gm p)) => GridMap (DSOM gm x k) p
instance (GridMap gm p, k ~ Index (BaseGrid gm p), FiniteGrid (gm p), GridMap gm x, k ~ Index (gm p), k ~ Index (gm x), k ~ Index (BaseGrid gm x), Ord k, Ord x, Num x, Fractional x) => Classifier (DSOM gm) x k p
-- | A modified Kohonen Self-organising Map (SOM) which supports a
-- time-independent learning function. (See SOM for a
-- description of a SOM.)
--
-- References:
--
--
-- - Rougier, N. & Boniface, Y. (2011). Dynamic self-organising
-- map. Neurocomputing, 74 (11), 1840-1847.
-- - Kohonen, T. (1982). Self-organized formation of topologically
-- correct feature maps. Biological Cybernetics, 43 (1), 59–69.
--
module Data.Datamining.Clustering.DSOM
-- | A Self-Organising Map (DSOM).
--
-- Although DSOM implements GridMap, most users will
-- only need the interface provided by
-- Data.Datamining.Clustering.Classifier. If you choose to use
-- the GridMap functions, please note:
--
--
-- - The functions adjust and adjustWithKey do not
-- increment the counter. You can do so manually with
-- incrementCounter.
-- - The functions map and mapWithKey are not
-- implemented (they just return an error). It would be
-- problematic to implement them because the input DSOM and the output
-- DSOM would have to have the same Metric type.
--
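-- A construction sketch. It assumes the grid package
-- (Math.Geometry.Grid.Square and Math.Geometry.GridMap.Lazy) for the
-- underlying grid map, uses rougierLearningFunction (documented below), and
-- borrows absDifference and adjustNum from Data.Datamining.Pattern (assuming
-- adjustNum takes its arguments in the target/amount/pattern order described
-- for adjustVector); all constants are arbitrary.
--
-- > import Math.Geometry.Grid.Square (rectSquareGrid)
-- > import Math.Geometry.GridMap.Lazy (lazyGridMap)
-- > import Data.Datamining.Pattern (absDifference, adjustNum)
-- >
-- > myDSOM = DSOM gm (rougierLearningFunction 0.9 0.1) absDifference adjustNum
-- >   where gm = lazyGridMap (rectSquareGrid 3 3)
-- >                          [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]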
data DSOM gm x k p
[DSOM] :: gm p -> (x -> x -> x -> x) -> (p -> p -> x) -> (p -> x -> p -> p) -> DSOM gm x k p
-- | Maps patterns to tiles in a regular grid. In the context of a SOM, the
-- tiles are called "nodes".
[gridMap] :: DSOM gm x k p -> gm p
-- | A function which determines how quickly the SOM learns.
[learningRate] :: DSOM gm x k p -> (x -> x -> x -> x)
-- | A function which compares two patterns and returns a
-- non-negative number representing how different the patterns
-- are. A result of 0 indicates that the patterns are identical.
[difference] :: DSOM gm x k p -> p -> p -> x
-- | A function which updates models. If this function is f, then
-- f target amount pattern returns a modified copy of
-- pattern that is more similar to target than
-- pattern is. The magnitude of the adjustment is controlled by
-- the amount parameter, which should be a number between 0 and
-- 1. Larger values for amount permit greater adjustments. If
-- amount=1, the result should be identical to the
-- target. If amount=0, the result should be the
-- unmodified pattern.
[makeSimilar] :: DSOM gm x k p -> p -> x -> p -> p
-- | Extracts the grid and current models from the DSOM.
toGridMap :: GridMap gm p => DSOM gm x k p -> gm p
-- | Configures a learning function that depends not on the time, but on
-- how good a model we already have for the target. If the BMU is an
-- exact match for the target, no learning occurs. Usage is
-- rougierLearningFunction r p, where r is the
-- maximal learning rate (0 <= r <= 1), and p is the
-- elasticity.
--
-- NOTE: When using this learning function, ensure that abs .
-- difference is always between 0 and 1, inclusive. Otherwise you
-- may get invalid learning rates.
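--
-- A usage sketch (the constants are arbitrary):
--
-- > lr :: Double -> Double -> Double -> Double
-- > lr = rougierLearningFunction 0.9 0.1  -- maximal learning rate 0.9, elasticity 0.1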
rougierLearningFunction :: (Eq a, Ord a, Floating a) => a -> a -> (a -> a -> a -> a)
-- | Trains the specified node and the neighbourhood around it to better
-- match a target. Most users should use train, which
-- automatically determines the BMU and trains it and its neighbourhood.
trainNeighbourhood :: (FiniteGrid (gm p), GridMap gm p, k ~ Index (gm p), k ~ Index (BaseGrid gm p), Ord k, Num x, Fractional x) => DSOM gm x t p -> k -> p -> DSOM gm x k p
-- | A module containing private SSOM internals. Most developers
-- should use SSOM instead. This module is subject to change
-- without notice.
module Data.Datamining.Clustering.SSOMInternal
-- | A typical learning function for classifiers. exponential r0
-- d t returns the learning rate at time t. When t =
-- 0, the learning rate is r0. Over time the learning rate
-- decays exponentially; the decay rate is d. Normally the
-- parameters are chosen such that:
--
--
-- - 0 < r0 < 1
-- - 0 < d
exponential :: Floating a => a -> a -> a -> a
-- | A Simplified Self-Organising Map (SSOM). x is the type of the
-- learning rate and the difference metric. t is the type of the
-- counter. k is the type of the model indices. p is
-- the type of the input patterns and models.
data SSOM t x k p
[SSOM] :: Map k p -> (t -> x) -> (p -> p -> x) -> (p -> x -> p -> p) -> t -> SSOM t x k p
-- | Maps patterns to nodes.
[sMap] :: SSOM t x k p -> Map k p
-- | A function which determines the learning rate for a node. The input
-- parameter indicates how many patterns (or pattern batches) have
-- previously been presented to the classifier. Typically this is used to
-- make the learning rate decay over time. The output is the learning
-- rate for that node (the amount by which the node's model should be
-- updated to match the target). The learning rate should be between zero
-- and one.
[learningRate] :: SSOM t x k p -> t -> x
-- | A function which compares two patterns and returns a
-- non-negative number representing how different the patterns
-- are. A result of 0 indicates that the patterns are identical.
[difference] :: SSOM t x k p -> p -> p -> x
-- | A function which updates models. For example, if this function is
-- f, then f target amount pattern returns a modified
-- copy of pattern that is more similar to target than
-- pattern is. The magnitude of the adjustment is controlled by
-- the amount parameter, which should be a number between 0 and
-- 1. Larger values for amount permit greater adjustments. If
-- amount=1, the result should be identical to the
-- target. If amount=0, the result should be the
-- unmodified pattern.
[makeSimilar] :: SSOM t x k p -> p -> x -> p -> p
-- | A counter used as a "time" parameter. If you create the SSOM with a
-- counter value 0, and don't directly modify it, then the
-- counter will represent the number of patterns that this SSOM has
-- classified.
[counter] :: SSOM t x k p -> t
-- | Extracts the current models from the SSOM. A synonym for
-- sMap.
toMap :: SSOM t x k p -> Map k p
-- | Trains the specified node to better match a target. Most users should
-- use train, which automatically determines the BMU and
-- trains it.
trainNode :: (Num t, Ord k) => SSOM t x k p -> k -> p -> SSOM t x k p
incrementCounter :: Num t => SSOM t x k p -> SSOM t x k p
justTrain :: (Num t, Ord k, Ord x) => SSOM t x k p -> p -> SSOM t x k p
instance Selector S1_0_4SSOM
instance Selector S1_0_3SSOM
instance Selector S1_0_2SSOM
instance Selector S1_0_1SSOM
instance Selector S1_0_0SSOM
instance Constructor C1_0SSOM
instance Datatype D1SSOM
instance Generic (SSOM t x k p)
instance (Num t, Ord x, Num x, Ord k) => Classifier (SSOM t) x k p
-- | A Simplified Self-organising Map (SSOM). An SSOM maps input patterns
-- onto a set, where each element in the set is a model of the input
-- data. An SSOM is like a Kohonen Self-organising Map (SOM), except that
-- instead of a grid, it uses a simple set of unconnected models. Since
-- the models are unconnected, only the model that best matches the input
-- is ever updated. This makes it faster; however, topological
-- relationships within the input data are not preserved. This
-- implementation supports the use of non-numeric patterns.
--
-- In layman's terms, an SSOM can be useful when you want to build a
-- set of models on some data. A tutorial is available at
-- https://github.com/mhwombat/som/wiki.
--
-- References:
--
--
-- - de Buitléir, Amy, Russell, Michael and Daly, Mark. (2012). Wains:
-- A pattern-seeking artificial life species. Artificial Life, 18 (4),
-- 399-423.
-- - Kohonen, T. (1982). Self-organized formation of topologically
-- correct feature maps. Biological Cybernetics, 43 (1), 59–69.
--
module Data.Datamining.Clustering.SSOM
-- | A Simplified Self-Organising Map (SSOM). x is the type of the
-- learning rate and the difference metric. t is the type of the
-- counter. k is the type of the model indices. p is
-- the type of the input patterns and models.
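--
-- A construction sketch. It assumes Data.Map for the model map, uses
-- exponential (documented below), and borrows absDifference and adjustNum
-- from Data.Datamining.Pattern (assuming adjustNum takes its arguments in
-- the target/amount/pattern order described for adjustVector); the model
-- values are arbitrary.
--
-- > import qualified Data.Map as M
-- > import Data.Datamining.Pattern (absDifference, adjustNum)
-- >
-- > mySSOM :: SSOM Double Double Int Double
-- > mySSOM = SSOM (M.fromList [(0, 0.2), (1, 0.5), (2, 0.8)])
-- >               (exponential 0.9 0.01)  -- learning rate decays over time
-- >               absDifference           -- difference between two patterns
-- >               adjustNum               -- nudges a model towards a target
-- >               0                       -- counter starts at zero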
data SSOM t x k p
[SSOM] :: Map k p -> (t -> x) -> (p -> p -> x) -> (p -> x -> p -> p) -> t -> SSOM t x k p
-- | Maps patterns to nodes.
[sMap] :: SSOM t x k p -> Map k p
-- | A function which determines the learning rate for a node. The input
-- parameter indicates how many patterns (or pattern batches) have
-- previously been presented to the classifier. Typically this is used to
-- make the learning rate decay over time. The output is the learning
-- rate for that node (the amount by which the node's model should be
-- updated to match the target). The learning rate should be between zero
-- and one.
[learningRate] :: SSOM t x k p -> t -> x
-- | A function which compares two patterns and returns a
-- non-negative number representing how different the patterns
-- are. A result of 0 indicates that the patterns are identical.
[difference] :: SSOM t x k p -> p -> p -> x
-- | A function which updates models. For example, if this function is
-- f, then f target amount pattern returns a modified
-- copy of pattern that is more similar to target than
-- pattern is. The magnitude of the adjustment is controlled by
-- the amount parameter, which should be a number between 0 and
-- 1. Larger values for amount permit greater adjustments. If
-- amount=1, the result should be identical to the
-- target. If amount=0, the result should be the
-- unmodified pattern.
[makeSimilar] :: SSOM t x k p -> p -> x -> p -> p
-- | A counter used as a "time" parameter. If you create the SSOM with a
-- counter value 0, and don't directly modify it, then the
-- counter will represent the number of patterns that this SSOM has
-- classified.
[counter] :: SSOM t x k p -> t
-- | Extracts the current models from the SSOM. A synonym for
-- sMap.
toMap :: SSOM t x k p -> Map k p
-- | A typical learning function for classifiers. exponential r0
-- d t returns the learning rate at time t. When t =
-- 0, the learning rate is r0. Over time the learning rate
-- decays exponentially; the decay rate is d. Normally the
-- parameters are chosen such that:
--
--
-- - 0 < r0 < 1
-- - 0 < d
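--
-- A usage sketch (the constants are arbitrary; per the description above,
-- the rate at t = 0 is r0):
--
-- > lr :: Double -> Double
-- > lr = exponential 0.9 0.01
-- > -- lr 0 == 0.9, and the rate decays as t grows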
exponential :: Floating a => a -> a -> a -> a
-- | Trains the specified node to better match a target. Most users should
-- use train, which automatically determines the BMU and
-- trains it.
trainNode :: (Num t, Ord k) => SSOM t x k p -> k -> p -> SSOM t x k p
-- | A module containing private SOM internals. Most developers
-- should use SOM instead. This module is subject to change
-- without notice.
module Data.Datamining.Clustering.SOMInternal
-- | A typical learning function for classifiers.
-- decayingGaussian r0 rf w0 wf tf returns a bell
-- curve-shaped function. At time zero, the maximum learning rate
-- (applied to the BMU) is r0, and the neighbourhood width is
-- w0. Over time the bell curve shrinks and the learning rate
-- tapers off, until at time tf, the maximum learning rate
-- (applied to the BMU) is rf, and the neighbourhood width is
-- wf. Normally the parameters should be chosen such that:
--
--
-- - 0 < rf << r0 < 1
-- - 0 < wf << w0
-- - 0 < tf
--
--
-- where << means "is much smaller than" (not the Haskell
-- << operator!)
decayingGaussian :: Floating x => x -> x -> x -> x -> x -> x -> x -> x
-- | A learning function that only updates the BMU and has a constant
-- learning rate.
stepFunction :: (Num d, Fractional x, Eq d) => x -> t -> d -> x
-- | A learning function that updates all nodes with the same, constant
-- learning rate. This can be useful for testing.
constantFunction :: x -> t -> d -> x
-- | A Self-Organising Map (SOM).
--
-- Although SOM implements GridMap, most users will
-- only need the interface provided by
-- Data.Datamining.Clustering.Classifier. If you choose to use
-- the GridMap functions, please note:
--
--
-- - The functions adjust and adjustWithKey do not
-- increment the counter. You can do so manually with
-- incrementCounter.
-- - The functions map and mapWithKey are not
-- implemented (they just return an error). It would be
-- problematic to implement them because the input SOM and the output SOM
-- would have to have the same Metric type.
--
data SOM t d gm x k p
[SOM] :: gm p -> (t -> d -> x) -> (p -> p -> x) -> (p -> x -> p -> p) -> t -> SOM t d gm x k p
-- | Maps patterns to tiles in a regular grid. In the context of a SOM, the
-- tiles are called "nodes".
[gridMap] :: SOM t d gm x k p -> gm p
-- | A function which determines how quickly the SOM learns. For
-- example, if the function is f, then f t d returns
-- the learning rate for a node. The parameter t indicates how
-- many patterns (or pattern batches) have previously been presented to
-- the classifier. Typically this is used to make the learning rate decay
-- over time. The parameter d is the grid distance from the node
-- being updated to the BMU (Best Matching Unit). The output is the
-- learning rate for that node (the amount by which the node's model
-- should be updated to match the target). The learning rate should be
-- between zero and one.
[learningRate] :: SOM t d gm x k p -> t -> d -> x
-- | A function which compares two patterns and returns a
-- non-negative number representing how different the patterns
-- are. A result of 0 indicates that the patterns are identical.
[difference] :: SOM t d gm x k p -> p -> p -> x
-- | A function which updates models. If this function is f, then
-- f target amount pattern returns a modified copy of
-- pattern that is more similar to target than
-- pattern is. The magnitude of the adjustment is controlled by
-- the amount parameter, which should be a number between 0 and
-- 1. Larger values for amount permit greater adjustments. If
-- amount=1, the result should be identical to the
-- target. If amount=0, the result should be the
-- unmodified pattern.
[makeSimilar] :: SOM t d gm x k p -> p -> x -> p -> p
-- | A counter used as a "time" parameter. If you create the SOM with a
-- counter value 0, and don't directly modify it, then the
-- counter will represent the number of patterns that this SOM has
-- classified.
[counter] :: SOM t d gm x k p -> t
withGridMap :: (gm p -> gm p) -> SOM t d gm x k p -> SOM t d gm x k p
currentLearningFunction :: (Num t) => SOM t d gm x k p -> (d -> x)
-- | Extracts the grid and current models from the SOM. A synonym for
-- gridMap.
toGridMap :: GridMap gm p => SOM t d gm x k p -> gm p
adjustNode :: (Grid g, k ~ Index g, Num t) => g -> (t -> x) -> (p -> x -> p -> p) -> p -> k -> k -> p -> p
-- | Trains the specified node and the neighbourhood around it to better
-- match a target. Most users should use train, which
-- automatically determines the BMU and trains it and its neighbourhood.
trainNeighbourhood :: (Grid (gm p), GridMap gm p, Index (BaseGrid gm p) ~ Index (gm p), Num t, Num x, Num d) => SOM t d gm x k p -> Index (gm p) -> p -> SOM t d gm x k p
incrementCounter :: Num t => SOM t d gm x k p -> SOM t d gm x k p
justTrain :: (Ord x, Grid (gm p), GridMap gm x, GridMap gm p, Index (BaseGrid gm x) ~ Index (gm p), Index (BaseGrid gm p) ~ Index (gm p), Num t, Num x, Num d) => SOM t d gm x k p -> p -> SOM t d gm x k p
instance Selector S1_0_4SOM
instance Selector S1_0_3SOM
instance Selector S1_0_2SOM
instance Selector S1_0_1SOM
instance Selector S1_0_0SOM
instance Constructor C1_0SOM
instance Datatype D1SOM
instance Generic (SOM t d gm x k p)
instance Foldable gm => Foldable (SOM t d gm x k)
instance Grid (gm p) => Grid (SOM t d gm x k p)
instance (Foldable gm, GridMap gm p, Grid (BaseGrid gm p)) => GridMap (SOM t d gm x k) p
instance (GridMap gm p, k ~ Index (BaseGrid gm p), Grid (gm p), GridMap gm x, k ~ Index (gm p), k ~ Index (BaseGrid gm x), Num t, Ord x, Num x, Num d) => Classifier (SOM t d gm) x k p
-- | A Kohonen Self-organising Map (SOM). A SOM maps input patterns onto a
-- regular grid (usually two-dimensional) where each node in the grid is
-- a model of the input data, and does so using a method which ensures
-- that any topological relationships within the input data are also
-- represented in the grid. This implementation supports the use of
-- non-numeric patterns.
--
-- In layman's terms, a SOM can be useful when you want to discover
-- the underlying structure of some data. A tutorial is available at
-- https://github.com/mhwombat/som/wiki.
--
-- NOTES:
--
--
-- - Version 5.0 fixed a bug in the decayingGaussian
-- function. If you use defaultSOM (which uses this
-- function), your SOM should now learn more quickly.
-- - The gaussian function has been removed because it is not
-- as useful for SOMs as I originally thought. It was originally designed
-- to be used as a factor in a learning function. However, in most cases
-- the user will want to introduce a time decay into the exponent, rather
-- than simply multiply by a factor.
--
--
-- References:
--
--
-- - Kohonen, T. (1982). Self-organized formation of topologically
-- correct feature maps. Biological Cybernetics, 43 (1), 59–69.
--
module Data.Datamining.Clustering.SOM
-- | A Self-Organising Map (SOM).
--
-- Although SOM implements GridMap, most users will
-- only need the interface provided by
-- Data.Datamining.Clustering.Classifier. If you choose to use
-- the GridMap functions, please note:
--
--
-- - The functions adjust and adjustWithKey do not
-- increment the counter. You can do so manually with
-- incrementCounter.
-- - The functions map and mapWithKey are not
-- implemented (they just return an error). It would be
-- problematic to implement them because the input SOM and the output SOM
-- would have to have the same Metric type.
--
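-- A construction sketch. It assumes the grid package
-- (Math.Geometry.Grid.Square and Math.Geometry.GridMap.Lazy) for the
-- underlying grid map, uses decayingGaussian (documented below), and borrows
-- absDifference and adjustNum from Data.Datamining.Pattern (assuming
-- adjustNum takes its arguments in the target/amount/pattern order described
-- for adjustVector); all constants are arbitrary.
--
-- > import Math.Geometry.Grid.Square (rectSquareGrid)
-- > import Math.Geometry.GridMap.Lazy (lazyGridMap)
-- > import Data.Datamining.Pattern (absDifference, adjustNum)
-- >
-- > mySOM = SOM gm (decayingGaussian 0.5 0.05 3 0.5 1000) absDifference adjustNum 0
-- >   where gm = lazyGridMap (rectSquareGrid 3 3)
-- >                          [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]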
data SOM t d gm x k p
[SOM] :: gm p -> (t -> d -> x) -> (p -> p -> x) -> (p -> x -> p -> p) -> t -> SOM t d gm x k p
-- | Maps patterns to tiles in a regular grid. In the context of a SOM, the
-- tiles are called "nodes".
[gridMap] :: SOM t d gm x k p -> gm p
-- | A function which determines how quickly the SOM learns. For
-- example, if the function is f, then f t d returns
-- the learning rate for a node. The parameter t indicates how
-- many patterns (or pattern batches) have previously been presented to
-- the classifier. Typically this is used to make the learning rate decay
-- over time. The parameter d is the grid distance from the node
-- being updated to the BMU (Best Matching Unit). The output is the
-- learning rate for that node (the amount by which the node's model
-- should be updated to match the target). The learning rate should be
-- between zero and one.
[learningRate] :: SOM t d gm x k p -> t -> d -> x
-- | A function which compares two patterns and returns a
-- non-negative number representing how different the patterns
-- are. A result of 0 indicates that the patterns are identical.
[difference] :: SOM t d gm x k p -> p -> p -> x
-- | A function which updates models. If this function is f, then
-- f target amount pattern returns a modified copy of
-- pattern that is more similar to target than
-- pattern is. The magnitude of the adjustment is controlled by
-- the amount parameter, which should be a number between 0 and
-- 1. Larger values for amount permit greater adjustments. If
-- amount=1, the result should be identical to the
-- target. If amount=0, the result should be the
-- unmodified pattern.
[makeSimilar] :: SOM t d gm x k p -> p -> x -> p -> p
-- | A counter used as a "time" parameter. If you create the SOM with a
-- counter value 0, and don't directly modify it, then the
-- counter will represent the number of patterns that this SOM has
-- classified.
[counter] :: SOM t d gm x k p -> t
-- | Extracts the grid and current models from the SOM. A synonym for
-- gridMap.
toGridMap :: GridMap gm p => SOM t d gm x k p -> gm p
-- | A typical learning function for classifiers.
-- decayingGaussian r0 rf w0 wf tf returns a bell
-- curve-shaped function. At time zero, the maximum learning rate
-- (applied to the BMU) is r0, and the neighbourhood width is
-- w0. Over time the bell curve shrinks and the learning rate
-- tapers off, until at time tf, the maximum learning rate
-- (applied to the BMU) is rf, and the neighbourhood width is
-- wf. Normally the parameters should be chosen such that:
--
--
-- - 0 < rf << r0 < 1
-- - 0 < wf << w0
-- - 0 < tf
--
--
-- where << means "is much smaller than" (not the Haskell
-- << operator!)
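--
-- A usage sketch (constants chosen to respect the guidelines above):
--
-- > lr :: Double -> Double -> Double
-- > lr = decayingGaussian 0.5 0.05 3 0.5 1000
-- > -- lr t d is the rate at time t for a node at grid distance d from the BMU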
decayingGaussian :: Floating x => x -> x -> x -> x -> x -> x -> x -> x
-- | A learning function that only updates the BMU and has a constant
-- learning rate.
stepFunction :: (Num d, Fractional x, Eq d) => x -> t -> d -> x
-- | A learning function that updates all nodes with the same, constant
-- learning rate. This can be useful for testing.
constantFunction :: x -> t -> d -> x
-- | Trains the specified node and the neighbourhood around it to better
-- match a target. Most users should use train, which
-- automatically determines the BMU and trains it and its neighbourhood.
trainNeighbourhood :: (Grid (gm p), GridMap gm p, Index (BaseGrid gm p) ~ Index (gm p), Num t, Num x, Num d) => SOM t d gm x k p -> Index (gm p) -> p -> SOM t d gm x k p