-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/

-- | (Fast) Dynamic Time Warping
--
-- This package implements dynamic time warping as described here:
-- http://en.wikipedia.org/w/index.php?title=Dynamic_time_warping
@package dtw
@version 1.0.3.0


-- | This module implements dynamic time warping as described here:
-- http://en.wikipedia.org/w/index.php?title=Dynamic_time_warping
--
-- Additionally fastDtw is implemented as described in the paper
-- "FastDTW: Toward Accurate Dynamic Time Warping in Linear Time and
-- Space" by Stan Salvador and Philip Chan.
--
-- Please note that fastDtw is only an approximate solution. If you
-- need the optimal solution and can bear the heavily increased demand
-- in both CPU time and memory, you should use dtwMemo or
-- dtwMemoWindowed.
--
-- Example
--
--   >>> -- create two sample datasets
--   
--   >>> let as = [ sin x | x <- [0,0.1..pi] ]
--   
--   >>> let bs = [ sin (x+0.1) | x <- [0,0.1..pi] ]
--   
--   >>> -- define a cost function between two datapoints
--   
--   >>> let dist x y = abs (x-y)
--   
--   >>> -- define a function that will halve the size of a dataset (see below)
--   
--   >>> let shrink xs = case xs of (a:b:cs) -> (a+b)/2 : shrink cs; a:[] -> [a]; [] -> []
--   
--   >>> -- calculate the cost with fastDtw and dtwMemo for comparison
--   
--   >>> cost $ fastDtw dist shrink 2 as bs :: Float
--   0.19879311
--   
--   >>> cost $ dtwMemo (\x y -> abs (x-y)) as bs :: Float
--   0.19879311
--   
-- Some words on the shrink function
--
-- Care must be taken when choosing a shrink function. It is vital that
-- the resolution is halved; this is not exactly a problem with the
-- algorithm but with the implementation. The lower-resolution dataset
-- should be an honest representation of the higher-resolution dataset.
-- For starters, binning as in the example above should suffice; a sketch
-- of a shrink function for vectors is given after the module listing below.
module Data.DTW

-- | this is the naive implementation of dynamic time warping; no caching
-- whatsoever is taking place; this should not be used and is just kept
-- as a reference for the other implementations
dtwNaive :: (Ord c, Fractional c, DataSet a, DataSet b) => (Item a -> Item b -> c) -> a -> b -> c

-- | this is the "standard" implementation of dynamic time warping; O(N^2)
-- is achieved by memoization of previous results
dtwMemo :: (Ord c, Fractional c, DataSet a, DataSet b) => (Item a -> Item b -> c) -> a -> b -> Result c

-- | this is the "fast" implementation of dynamic time warping as per the
-- authors; this method calculates a good approximate result in O(N);
-- depending on the use case, the window size should be tweaked
fastDtw :: (Ord c, Fractional c, DataSet a) => (Item a -> Item a -> c) -> (a -> a) -> Int -> a -> a -> Result c

-- | a generic dataset is basically just an indexing function and an
-- indicator of the dataset size
class DataSet dataset where {
    type family Item dataset :: *;
}
ix :: DataSet dataset => dataset -> Int -> Item dataset
len :: DataSet dataset => dataset -> Int

data Result a
Result :: !a -> !Path -> Result a
[cost] :: Result a -> !a
[path] :: Result a -> !Path

type Path = [Index]
type Index = (Int, Int)

instance GHC.Classes.Eq a => GHC.Classes.Eq (Data.DTW.Result a)
instance GHC.Read.Read a => GHC.Read.Read (Data.DTW.Result a)
instance GHC.Show.Show a => GHC.Show.Show (Data.DTW.Result a)
instance Data.DTW.DataSet (Data.Sequence.Seq a)
instance Data.DTW.DataSet [a]
instance Data.DTW.DataSet (Data.Vector.Vector a)
instance Data.Vector.Unboxed.Base.Unbox a => Data.DTW.DataSet (Data.Vector.Unboxed.Base.Vector a)
instance Foreign.Storable.Storable a => Data.DTW.DataSet (Data.Vector.Storable.Vector a)
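
-- As a rough sketch of how these pieces fit together: a shrink function
-- for unboxed vectors and a hand-rolled DataSet instance might look like
-- the following (the names shrinkVU and Series are made up for
-- illustration and are not part of the package):
--
--   {-# LANGUAGE TypeFamilies #-}
--   import Data.DTW
--   import qualified Data.Vector.Unboxed as VU
--
--   -- illustrative shrink function: halve the resolution of an unboxed
--   -- vector by averaging adjacent pairs; a trailing odd element is
--   -- kept as-is, mirroring the list-based shrink from the example
--   shrinkVU :: VU.Vector Double -> VU.Vector Double
--   shrinkVU v
--       | VU.length v <= 1 = v
--       | otherwise        = VU.generate n bin
--     where
--       n     = (VU.length v + 1) `div` 2
--       bin i | 2*i + 1 < VU.length v = (v VU.! (2*i) + v VU.! (2*i+1)) / 2
--             | otherwise             = v VU.! (2*i)
--
--   -- illustrative custom dataset: only an indexing function and a
--   -- length are needed
--   newtype Series = Series (VU.Vector Double)
--
--   instance DataSet Series where
--       type Item Series = Double
--       ix (Series v) i = v VU.! i
--       len (Series v)  = VU.length v
--
-- With such a shrink function, a call like
-- cost (fastDtw (\x y -> abs (x-y)) shrinkVU 2 va vb) should work for
-- two unboxed vectors va and vb, analogous to the list example above.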