-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/
-- | Machine Learning Toolbox
--
-- Please see README.md
@package mltool
@version 0.1.0.0
-- | Common type definitions used in all modules.
module MachineLearning.Types
-- | Scalar Type (Double)
type R = Double
-- | Vector Types (of Doubles)
type Vector = Vector R
-- | Matrix Types (of Doubles)
type Matrix = Matrix R
-- | Regularization.
module MachineLearning.Regularization
-- | Regularization
data Regularization
-- | No regularization
RegNone :: Regularization
-- | L2 Regularization
L2 :: R -> Regularization
-- | Calculates regularization for Model.cost function. It takes
-- regularization parameter and theta.
costReg :: Regularization -> Vector -> R
-- | Calculates regularization for Model.gradient function. It takes
-- regularization parameter and theta.
gradientReg :: Regularization -> Vector -> Vector
-- | Regression Model type class.
module MachineLearning.Model
class Model a
-- | Hypothesis function, a.k.a. score function (for classification problem)
-- Takes X (m x n) and theta (n x 1), returns y (m x 1).
hypothesis :: Model a => a -> Matrix -> Vector -> Vector
-- | Cost function J(Theta), a.k.a. loss function. It takes regularization
-- parameter, matrix X (m x n), vector y (m x 1) and vector theta (n x
-- 1).
cost :: Model a => a -> Regularization -> Matrix -> Vector -> Vector -> R
-- | Gradient function. It takes regularization parameter, X (m x n), y (m
-- x 1) and theta (n x 1). Returns vector of gradients (n x 1).
gradient :: Model a => a -> Regularization -> Matrix -> Vector -> Vector -> Vector
module MachineLearning.LeastSquaresModel
data LeastSquaresModel
LeastSquares :: LeastSquaresModel
instance MachineLearning.Model.Model MachineLearning.LeastSquaresModel.LeastSquaresModel
module MachineLearning.LogisticModel
data LogisticModel
Logistic :: LogisticModel
-- | Calculates sigmoid
sigmoid :: Floating a => a -> a
-- | Calculates derivatives of sigmoid
sigmoidGradient :: Floating a => a -> a
instance MachineLearning.Model.Model MachineLearning.LogisticModel.LogisticModel
module MachineLearning.Optimization.GradientDescent
-- | Gradient Descent method implementation. See
-- MachineLearning.Regression for usage details.
gradientDescent :: Model a => R -> a -> R -> Int -> Regularization -> Matrix -> Vector -> Vector -> (Vector, Matrix)
-- | Minibatch Gradient Descent
module MachineLearning.Optimization.MinibatchGradientDescent
-- | Minibatch Gradient Descent method implementation. See
-- MachineLearning.Regression for usage details.
minibatchGradientDescent :: Model a => Int -> Int -> R -> a -> R -> Int -> Regularization -> Matrix -> Vector -> Vector -> (Vector, Matrix)
-- | Optimization module.
module MachineLearning.Optimization
data MinimizeMethod
-- | Gradient descent, takes alpha. Requires feature normalization.
GradientDescent :: R -> MinimizeMethod
-- | Minibatch Gradient Descent, takes seed, batch size and alpha
MinibatchGradientDescent :: Int -> Int -> R -> MinimizeMethod
-- | Fletcher-Reeves conjugate gradient algorithm, takes size of first
-- trial step (0.1 is fine) and tol (0.1 is fine).
ConjugateGradientFR :: R -> R -> MinimizeMethod
-- | Polak-Ribiere conjugate gradient algorithm. takes size of first trial
-- step (0.1 is fine) and tol (0.1 is fine).
ConjugateGradientPR :: R -> R -> MinimizeMethod
-- | Broyden-Fletcher-Goldfarb-Shanno (BFGS) algorithm, takes size of first
-- trial step (0.1 is fine) and tol (0.1 is fine).
BFGS2 :: R -> R -> MinimizeMethod
-- | Returns solution vector (theta) and optimization path. Optimization
-- path's row format: [iter number, cost function value, theta values...]
minimize :: Model a => MinimizeMethod -> a -> R -> Int -> Regularization -> Matrix -> Vector -> Vector -> (Vector, Matrix)
-- | Gradient checking function. Approximates the derivatives of the Model's
-- cost function and calculates derivatives using the Model's gradient
-- functions. Returns norm_2 between 2 derivatives. Takes model,
-- regularization, X, y, theta and epsilon (used to approximate
-- derivatives, 1e-4 is a good value).
checkGradient :: Model a => a -> Regularization -> Matrix -> Vector -> Vector -> R -> R
module MachineLearning.Regression
class Model a
-- | Hypothesis function, a.k.a. score function (for classification problem)
-- Takes X (m x n) and theta (n x 1), returns y (m x 1).
hypothesis :: Model a => a -> Matrix -> Vector -> Vector
-- | Cost function J(Theta), a.k.a. loss function. It takes regularization
-- parameter, matrix X (m x n), vector y (m x 1) and vector theta (n x
-- 1).
cost :: Model a => a -> Regularization -> Matrix -> Vector -> Vector -> R
-- | Gradient function. It takes regularization parameter, X (m x n), y (m
-- x 1) and theta (n x 1). Returns vector of gradients (n x 1).
gradient :: Model a => a -> Regularization -> Matrix -> Vector -> Vector -> Vector
data LeastSquaresModel
LeastSquares :: LeastSquaresModel
data MinimizeMethod
-- | Gradient descent, takes alpha. Requires feature normalization.
GradientDescent :: R -> MinimizeMethod
-- | Minibatch Gradient Descent, takes seed, batch size and alpha
MinibatchGradientDescent :: Int -> Int -> R -> MinimizeMethod
-- | Fletcher-Reeves conjugate gradient algorithm, takes size of first
-- trial step (0.1 is fine) and tol (0.1 is fine).
ConjugateGradientFR :: R -> R -> MinimizeMethod
-- | Polak-Ribiere conjugate gradient algorithm. takes size of first trial
-- step (0.1 is fine) and tol (0.1 is fine).
ConjugateGradientPR :: R -> R -> MinimizeMethod
-- | Broyden-Fletcher-Goldfarb-Shanno (BFGS) algorithm, takes size of first
-- trial step (0.1 is fine) and tol (0.1 is fine).
BFGS2 :: R -> R -> MinimizeMethod
-- | Returns solution vector (theta) and optimization path. Optimization
-- path's row format: [iter number, cost function value, theta values...]
minimize :: Model a => MinimizeMethod -> a -> R -> Int -> Regularization -> Matrix -> Vector -> Vector -> (Vector, Matrix)
-- | Normal equation using inverse, does not require feature normalization
-- It takes X and y, returns theta.
normalEquation :: Matrix -> Vector -> Vector
-- | Normal equation using pseudo inverse, requires feature normalization
-- It takes X and y, returns theta.
normalEquation_p :: Matrix -> Vector -> Vector
-- | Regularization
data Regularization
-- | No regularization
RegNone :: Regularization
-- | L2 Regularization
L2 :: R -> Regularization
-- | Various helpful utilities.
module MachineLearning.Utils
reduceByRowsV :: (Vector -> R) -> Matrix -> Vector
reduceByColumnsV :: (Vector -> R) -> Matrix -> Vector
reduceByRows :: (Vector -> R) -> Matrix -> Matrix
reduceByColumns :: (Vector -> R) -> Matrix -> Matrix
sumByRows :: Matrix -> Matrix
sumByColumns :: Matrix -> Matrix
-- | Converts list of tuples into list.
listOfTuplesToList :: [(a, a)] -> [a]
-- | Binary Classification.
module MachineLearning.Classification.Binary
data MinimizeMethod
-- | Gradient descent, takes alpha. Requires feature normalization.
GradientDescent :: R -> MinimizeMethod
-- | Minibatch Gradient Descent, takes seed, batch size and alpha
MinibatchGradientDescent :: Int -> Int -> R -> MinimizeMethod
-- | Fletcher-Reeves conjugate gradient algorithm, takes size of first
-- trial step (0.1 is fine) and tol (0.1 is fine).
ConjugateGradientFR :: R -> R -> MinimizeMethod
-- | Polak-Ribiere conjugate gradient algorithm. takes size of first trial
-- step (0.1 is fine) and tol (0.1 is fine).
ConjugateGradientPR :: R -> R -> MinimizeMethod
-- | Broyden-Fletcher-Goldfarb-Shanno (BFGS) algorithm, takes size of first
-- trial step (0.1 is fine) and tol (0.1 is fine).
BFGS2 :: R -> R -> MinimizeMethod
-- | Binary Classification prediction function. Takes a matrix of features
-- X and a vector theta. Returns predicted class.
predict :: Matrix -> Vector -> Vector
-- | Learns Binary Classification.
learn :: MinimizeMethod -> R -> Int -> Regularization -> Matrix -> Vector -> Vector -> (Vector, Matrix)
-- | Calculates accuracy of Classification predictions. Takes vector
-- expected y and vector predicted y. Returns number from 0 to 1, the
-- closer to 1 the better accuracy. Suitable for both Classification
-- Types: Binary and Multiclass.
calcAccuracy :: Vector -> Vector -> R
-- | Regularization
data Regularization
-- | No regularization
RegNone :: Regularization
-- | L2 Regularization
L2 :: R -> Regularization
-- | One-vs-All Classification.
module MachineLearning.Classification.OneVsAll
data MinimizeMethod
-- | Gradient descent, takes alpha. Requires feature normalization.
GradientDescent :: R -> MinimizeMethod
-- | Minibatch Gradient Descent, takes seed, batch size and alpha
MinibatchGradientDescent :: Int -> Int -> R -> MinimizeMethod
-- | Fletcher-Reeves conjugate gradient algorithm, takes size of first
-- trial step (0.1 is fine) and tol (0.1 is fine).
ConjugateGradientFR :: R -> R -> MinimizeMethod
-- | Polak-Ribiere conjugate gradient algorithm. takes size of first trial
-- step (0.1 is fine) and tol (0.1 is fine).
ConjugateGradientPR :: R -> R -> MinimizeMethod
-- | Broyden-Fletcher-Goldfarb-Shanno (BFGS) algorithm, takes size of first
-- trial step (0.1 is fine) and tol (0.1 is fine).
BFGS2 :: R -> R -> MinimizeMethod
-- | One-vs-All Classification prediction function. Takes a matrix of
-- features X and a list of vectors theta, returns predicted class number
-- assuming that class numbers start at 0.
predict :: Matrix -> [Vector] -> Vector
-- | Learns One-vs-All Classification
learn :: MinimizeMethod -> R -> Int -> Regularization -> Int -> Matrix -> Vector -> [Vector] -> ([Vector], [Matrix])
-- | Calculates accuracy of Classification predictions. Takes vector
-- expected y and vector predicted y. Returns number from 0 to 1, the
-- closer to 1 the better accuracy. Suitable for both Classification
-- Types: Binary and Multiclass.
calcAccuracy :: Vector -> Vector -> R
-- | Regularization
data Regularization
-- | No regularization
RegNone :: Regularization
-- | L2 Regularization
L2 :: R -> Regularization
-- | Neural Network's Layer
module MachineLearning.NeuralNetwork.Layer
data Layer
Layer :: Int -> (Matrix -> Matrix -> Matrix -> Matrix) -> (Matrix -> Cache -> (Matrix, Matrix, Matrix)) -> (Matrix -> Matrix) -> (Matrix -> Matrix -> Matrix) -> (forall g. RandomGen g => (Int, Int) -> Rand g (Matrix, Matrix)) -> Layer
[lUnits] :: Layer -> Int
[lForward] :: Layer -> Matrix -> Matrix -> Matrix -> Matrix
[lBackward] :: Layer -> Matrix -> Cache -> (Matrix, Matrix, Matrix)
[lActivation] :: Layer -> Matrix -> Matrix
[lActivationGradient] :: Layer -> Matrix -> Matrix -> Matrix
[lInitializeThetaM] :: Layer -> forall g. RandomGen g => (Int, Int) -> Rand g (Matrix, Matrix)
data Cache
Cache :: Matrix -> Matrix -> Matrix -> Cache
[cacheZ] :: Cache -> Matrix
[cacheX] :: Cache -> Matrix
[cacheW] :: Cache -> Matrix
affineForward :: Matrix -> Matrix -> Matrix -> Matrix
affineBackward :: Matrix R -> Cache -> (Matrix R, Matrix, Matrix R)
-- | Regularization.
module MachineLearning.NeuralNetwork.Regularization
-- | Regularization
data Regularization
-- | No regularization
RegNone :: Regularization
-- | L2 Regularization
L2 :: R -> Regularization
-- | Calculates regularization for forward propagation. It takes
-- regularization parameter and theta list.
forwardReg :: Regularization -> [(Matrix, Matrix)] -> R
-- | Calculates regularization for step of backward propagation. It takes
-- regularization parameter and theta.
backwardReg :: Regularization -> Matrix -> Matrix
-- | Neural Network's Topology
module MachineLearning.NeuralNetwork.Topology
-- | Neural network topology has at least 2 elements: number of input and
-- number of outputs. And sizes of hidden layers between 2 elements.
data Topology
-- | Loss function's type. Takes x, weights and y.
type LossFunc = Matrix -> Matrix -> R
-- | Makes Neural Network's Topology. Takes number of inputs, list of
-- hidden layers, output layer and loss function.
makeTopology :: Int -> [Layer] -> Layer -> LossFunc -> Topology
-- | Calculates loss for the given topology. Takes topology,
-- regularization, x, weights, y.
loss :: Topology -> Regularization -> Matrix -> [(Matrix, Matrix)] -> Matrix -> R
-- | Implementation of forward propagation algorithm.
propagateForward :: Topology -> Matrix -> [(Matrix, Matrix)] -> (Matrix, [Cache])
-- | Implementation of backward propagation algorithm.
propagateBackward :: Topology -> Regularization -> Matrix -> [Cache] -> Matrix -> [(Matrix, Matrix)]
-- | Returns number of outputs of the given topology.
numberOutputs :: Topology -> Int
-- | Create and initialize weights vector with random values for given
-- neural network topology. Takes a seed to initialize generator of
-- random numbers as a first parameter.
initializeTheta :: Int -> Topology -> Vector
-- | Create and initialize weights vector with random values for given
-- neural network topology inside IO Monad.
initializeThetaIO :: Topology -> IO Vector
-- | Create and initialize weights vector with random values for given
-- neural network topology inside RandomMonad.
initializeThetaM :: RandomGen g => Topology -> Rand g Vector
-- | Flatten list of matrices into vector.
flatten :: [(Matrix, Matrix)] -> Vector
-- | Unflatten vector into list of matrices for given neural network
-- topology.
unflatten :: Topology -> Vector -> [(Matrix, Matrix)]
-- | Simple Neural Networks.
module MachineLearning.NeuralNetwork
class Model a
-- | Hypothesis function, a.k.a. score function (for classification problem)
-- Takes X (m x n) and theta (n x 1), returns y (m x 1).
hypothesis :: Model a => a -> Matrix -> Vector -> Vector
-- | Cost function J(Theta), a.k.a. loss function. It takes regularization
-- parameter, matrix X (m x n), vector y (m x 1) and vector theta (n x
-- 1).
cost :: Model a => a -> Regularization -> Matrix -> Vector -> Vector -> R
-- | Gradient function. It takes regularization parameter, X (m x n), y (m
-- x 1) and theta (n x 1). Returns vector of gradients (n x 1).
gradient :: Model a => a -> Regularization -> Matrix -> Vector -> Vector -> Vector
-- | Neural Network Model. Takes neural network topology as a constructor
-- argument.
newtype NeuralNetworkModel
NeuralNetwork :: Topology -> NeuralNetworkModel
-- | Calculates accuracy of Classification predictions. Takes vector
-- expected y and vector predicted y. Returns number from 0 to 1, the
-- closer to 1 the better accuracy. Suitable for both Classification
-- Types: Binary and Multiclass.
calcAccuracy :: Vector -> Vector -> R
-- | Neural network topology has at least 2 elements: number of input and
-- number of outputs. And sizes of hidden layers between 2 elements.
data Topology
-- | Create and initialize weights vector with random values for given
-- neural network topology. Takes a seed to initialize generator of
-- random numbers as a first parameter.
initializeTheta :: Int -> Topology -> Vector
-- | Create and initialize weights vector with random values for given
-- neural network topology inside IO Monad.
initializeThetaIO :: Topology -> IO Vector
-- | Create and initialize weights vector with random values for given
-- neural network topology inside RandomMonad.
initializeThetaM :: RandomGen g => Topology -> Rand g Vector
-- | Regularization
data Regularization
-- | No regularization
RegNone :: Regularization
-- | L2 Regularization
L2 :: R -> Regularization
instance MachineLearning.Model.Model MachineLearning.NeuralNetwork.NeuralNetworkModel
-- | ReLu Activation.
module MachineLearning.NeuralNetwork.ReluActivation
relu :: Matrix -> Matrix
gradient :: Matrix -> Matrix -> Matrix
-- | Tanh Activation.
module MachineLearning.NeuralNetwork.TanhActivation
tanh :: Floating a => a -> a
gradient :: Matrix -> Matrix -> Matrix
-- | Sigmoid Activation.
module MachineLearning.NeuralNetwork.SigmoidActivation
-- | Calculates sigmoid
sigmoid :: Floating a => a -> a
gradient :: Matrix -> Matrix -> Matrix
-- | Multi SVM Loss.
module MachineLearning.NeuralNetwork.MultiSvmLoss
scores :: a -> a
gradient :: Matrix -> Matrix -> Matrix
loss :: Matrix -> Matrix -> R
-- | Softmax Loss.
module MachineLearning.NeuralNetwork.SoftmaxLoss
scores :: Matrix R -> Matrix R
gradient :: Matrix R -> Matrix R -> Matrix R
loss :: Matrix -> Matrix -> R
-- | Logistic Loss.
module MachineLearning.NeuralNetwork.LogisticLoss
scores :: Matrix -> Matrix
gradient :: Matrix -> Matrix -> Matrix
loss :: Matrix -> Matrix -> R
-- | Random generation utility functions.
module MachineLearning.Random
-- | Samples n (given as a second parameter) values from
-- list (given as a third parameter).
sample :: RandomGen g => g -> Int -> Vector a -> (Vector a, g)
-- | Samples n (given as a second parameter) values from
-- list (given as a third parameter) inside RandomMonad.
sampleM :: RandomGen g => Int -> Vector a -> Rand g (Vector a)
-- | Returns a list of random values distributed in a closed interval
-- range
getRandomRListM :: (RandomGen g, Random a) => Int -> (a, a) -> Rand g [a]
-- | Returns a vector of random values distributed in a closed interval
-- range
getRandomRVectorM :: RandomGen g => Int -> (R, R) -> Rand g Vector
-- | Returns a matrix of random values distributed in a closed interval
-- range
getRandomRMatrixM :: RandomGen g => Int -> Int -> (R, R) -> Rand g Matrix
-- | Various Weight Initialization algorithms.
module MachineLearning.NeuralNetwork.WeightInitialization
-- | Weight Initialization Algorithm discussed in Nguyen et al.:
-- https://web.stanford.edu/class/ee373b/nninitialization.pdf
-- Nguyen, Derrick, Widrow, B. Improving the learning speed of 2-layer
-- neural networks by choosing initial values of adaptive weights. In
-- Proc. IJCNN, 1990; 3: 21-26.
nguyen :: RandomGen g => (Int, Int) -> Rand g (Matrix, Matrix)
-- | Weight Initialization Algorithm discussed in He et al.:
-- https://arxiv.org/abs/1502.01852 Kaiming He, Xiangyu Zhang,
-- Shaoqing Ren, Jian Sun. Delving Deep into Rectifiers: Surpassing
-- Human-Level Performance on ImageNet Classification.
he :: RandomGen g => (Int, Int) -> Rand g (Matrix, Matrix)
-- | Topology Maker.
module MachineLearning.NeuralNetwork.TopologyMaker
data Activation
ASigmoid :: Activation
ARelu :: Activation
ATanh :: Activation
data Loss
LLogistic :: Loss
LSoftmax :: Loss
LMultiSvm :: Loss
-- | Creates topology. Takes number of inputs, number of outputs and list of
-- hidden layers.
makeTopology :: Activation -> Loss -> Int -> Int -> [Int] -> Topology
-- | Cluster Analysis a.k.a. Clustering - grouping data into coherent
-- subsets.
module MachineLearning.Clustering
-- | Cluster type (list of samples associated with the cluster).
type Cluster = Vector Vector
-- | Clusters data using K-Means Algorithm inside Random Monad. Runs
-- K-Means algorithm N times, returns the clustering with
-- smaller cost.
kmeans :: RandomGen g => Int -> Matrix -> Int -> Rand g (Vector Cluster)
-- | Run K-Means algorithm once inside Random Monad.
kmeansIterM :: RandomGen g => Vector Vector -> Int -> Int -> Rand g (Vector Cluster, [R])
-- | Learn function with progress bar for terminal.
module MachineLearning.TerminalProgress
-- | Learn the given function displaying progress bar in terminal. It takes
-- function, initial theta and number of iterations to call the function.
-- It returns theta and optimization path (see
-- MachineLearning.Optimization for details).
learnWithProgressBar :: (Vector -> (Vector, Matrix)) -> Vector -> Int -> IO (Vector, Matrix)
-- | Learn the given function displaying progress bar in terminal. It takes
-- function, list of outputs and list of initial thetas and number of
-- iterations to call the function. It returns list of thetas and list of
-- optimization paths (see MachineLearning.Optimization for
-- details).
learnOneVsAllWithProgressBar :: (Vector -> Vector -> (Vector, Matrix)) -> Vector -> [Vector] -> Int -> IO ([Vector], [Matrix])
module MachineLearning
-- | Add bias dimension to the feature matrix
addBiasDimension :: Matrix -> Matrix
-- | Remove bias dimension
removeBiasDimension :: Matrix -> Matrix
-- | Calculates mean and stddev values of every feature. Takes feature
-- matrix X, returns pair of vectors of means and stddevs.
meanStddev :: Matrix Double -> (Matrix Double, Matrix Double)
featureNormalization :: Fractional a => (a, a) -> a -> a
-- | Maps the features into all polynomial terms of X up to the degree-th
-- power
mapFeatures :: Int -> Matrix -> Matrix
-- | Splits data matrix to features matrix X and vector of outputs y
splitToXY :: Element t => Matrix t -> (Matrix t, Vector t)
-- | MultiClass Classification.
module MachineLearning.Classification.MultiClass
-- | Classifier type class represents Multi-class classification models.
class Classifier a
-- | Score function
cscore :: Classifier a => a -> Matrix -> Matrix -> Matrix
-- | Hypothesis function Takes X (m x n) and theta (n x k), returns y (m x
-- k).
chypothesis :: Classifier a => a -> Matrix -> Matrix -> Vector
-- | Cost function J(Theta), a.k.a. loss function. It takes regularization
-- parameter lambda, matrix X (m x n), vector y (m x 1) and vector theta
-- (n x 1).
ccost :: Classifier a => a -> Regularization -> Matrix -> Vector -> Matrix -> R
-- | Gradient function. It takes regularization parameter lambda, X (m x
-- n), y (m x 1) and theta (n x 1). Returns vector of gradients (n x 1).
cgradient :: Classifier a => a -> Regularization -> Matrix -> Vector -> Matrix -> Matrix
-- | Returns Number of Classes
cnumClasses :: Classifier a => a -> Int
-- | MultiClassModel is Model wrapper class around Classifier
data MultiClassModel m
MultiClass :: m -> MultiClassModel m
-- | Process outputs for MultiClass Classification. Takes Classifier and
-- output vector y. Returns matrix of binary outputs. It is supposed that
-- labels are integers starting at 0.
processOutput :: (Classifier c) => c -> Vector -> Matrix
-- | Regularization
data Regularization
-- | No regularization
RegNone :: Regularization
-- | L2 Regularization
L2 :: R -> Regularization
-- | Calculates regularization for Classifier.ccost. It takes
-- regularization parameter and theta.
ccostReg :: Regularization -> Matrix -> R
-- | Calculates regularization for Classifier.cgradient. It takes
-- regularization parameter and theta.
cgradientReg :: Regularization -> Matrix -> Matrix
instance MachineLearning.Classification.MultiClass.Classifier a => MachineLearning.Model.Model (MachineLearning.Classification.MultiClass.MultiClassModel a)
-- | Multiclass Support Vector Machines Classifier.
module MachineLearning.MultiSvmClassifier
-- | Multiclass SVM Classifier, takes delta and number of features. Delta =
-- 1.0 is good for all cases.
data MultiSvmClassifier
MultiSvm :: R -> Int -> MultiSvmClassifier
instance MachineLearning.Classification.MultiClass.Classifier MachineLearning.MultiSvmClassifier.MultiSvmClassifier
-- | Softmax Classifier (Multiclass Logistic Regression).
module MachineLearning.SoftmaxClassifier
-- | Softmax Classifier, takes number of classes.
data SoftmaxClassifier
Softmax :: Int -> SoftmaxClassifier
instance MachineLearning.Classification.MultiClass.Classifier MachineLearning.SoftmaxClassifier.SoftmaxClassifier
-- | Principal Component Analysis (PCA) - dimensionality reduction
-- algorithm. It is mostly used to speed up supervising learning
-- (Regression, Classification, etc) and visualization of data.
module MachineLearning.PCA
-- | Gets dimensionality reduction function, retained variance (0..1) and
-- reduced X for given matrix X and number of dimensions to retain.
getDimReducer :: Matrix -> Int -> (Matrix -> Matrix, R, Matrix)
-- | Gets dimensionality reduction function, retained number of dimensions
-- and reduced X for given matrix X and variance to retain (0..1].
getDimReducer_rv :: Matrix -> R -> (Matrix -> Matrix, Int, Matrix)