{-# LANGUAGE TypeFamilies #-}
module Numeric.LAPACK.Orthogonal (
   leastSquares,
   minimumNorm,
   leastSquaresMinimumNormRCond,
   pseudoInverseRCond,

   project,
   leastSquaresConstraint,
   gaussMarkovLinearModel,

   determinant,
   determinantAbsolute,
   complement,
   affineSpanFromKernel,
   affineKernelFromSpan,

   householder,
   householderTall,
   ) where

import qualified Numeric.LAPACK.Orthogonal.Householder as HH
import qualified Numeric.LAPACK.Orthogonal.Plain as Plain

import qualified Numeric.LAPACK.Matrix.Multiply as Multiply
import qualified Numeric.LAPACK.Matrix.Triangular as Triangular
import qualified Numeric.LAPACK.Matrix.Shape as MatrixShape
import qualified Numeric.LAPACK.Matrix.Array as ArrMatrix
import qualified Numeric.LAPACK.Matrix.Basic as Basic
import qualified Numeric.LAPACK.Matrix.Type as Matrix
import qualified Numeric.LAPACK.Matrix.Extent.Private as Extent
import qualified Numeric.LAPACK.Vector as Vector
import Numeric.LAPACK.Matrix.Array (Full, General, Tall, Wide, Square)
import Numeric.LAPACK.Matrix.Private (ShapeInt)
import Numeric.LAPACK.Vector (Vector, (|-|))
import Numeric.LAPACK.Scalar (RealOf)

import qualified Numeric.Netlib.Class as Class

import qualified Data.Array.Comfort.Shape as Shape

import Data.Tuple.HT (mapSnd)


{- |
If @x = leastSquares a b@
then @x@ minimizes @Vector.norm2 (multiply a x `sub` b)@.

Precondition: @a@ must have full rank and @height a >= width a@.
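
For example (a sketch, reusing the names from above):

> x = leastSquares a b
> -- up to rounding, x also solves the normal equations:
> -- multiply (adjoint a) (multiply a x)  ==  multiply (adjoint a) b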
-}
leastSquares ::
   (Extent.C vert, Extent.C horiz,
    Shape.C height, Eq height, Shape.C width, Shape.C nrhs, Class.Floating a) =>
   Full horiz Extent.Small height width a ->
   Full vert horiz height nrhs a ->
   Full vert horiz width nrhs a
leastSquares = ArrMatrix.lift2 Plain.leastSquares

{- |
If @x = minimumNorm a b@
then @x@ is the vector with minimal @Vector.norm2 x@
that satisfies @multiply a x == b@.

Precondition: @a@ must have full rank and @height a <= width a@.
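
A sketch of the defining property:

> x = minimumNorm a b
> -- up to rounding:  multiply a x == b,
> -- and Vector.norm2 x is minimal among all solutions of this equation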
-}
minimumNorm ::
   (Extent.C vert, Extent.C horiz,
    Shape.C height, Eq height, Shape.C width, Shape.C nrhs, Class.Floating a) =>
   Full Extent.Small vert height width a ->
   Full vert horiz height nrhs a ->
   Full vert horiz width nrhs a
minimumNorm = ArrMatrix.lift2 Plain.minimumNorm


{- |
If @(rank,x) = leastSquaresMinimumNormRCond rcond a b@
then @x@ is the vector with minimum @Vector.norm2 x@
that minimizes @Vector.norm2 (a #*| x `sub` b)@.

Matrix @a@ may have any rank,
but you must specify @rcond@,
the reciprocal condition number used for the rank truncation:
the rank is chosen such that the condition number
of the truncated matrix stays below @1/rcond@.
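
For example (a sketch; the threshold @1e-10@ is only an illustrative choice):

> (rank, x) = leastSquaresMinimumNormRCond 1e-10 a b
> -- rank is the effective rank of a with respect to that threshold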
-}
leastSquaresMinimumNormRCond ::
   (Extent.C vert, Extent.C horiz,
    Shape.C height, Eq height, Shape.C width, Shape.C nrhs, Class.Floating a) =>
   RealOf a ->
   Full horiz vert height width a ->
   Full vert horiz height nrhs a ->
   (Int, Full vert horiz width nrhs a)
leastSquaresMinimumNormRCond rcond a b =
   mapSnd ArrMatrix.lift0 $
   Plain.leastSquaresMinimumNormRCond
      rcond (ArrMatrix.toVector a) (ArrMatrix.toVector b)


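{- |
@(rank, pinv) = pseudoInverseRCond rcond a@ computes a pseudo-inverse of @a@
together with its effective rank.
The meaning of @rcond@ is the same as in 'leastSquaresMinimumNormRCond'.

A sketch (the threshold @1e-10@ is only an illustrative choice):

> (rank, pinv) = pseudoInverseRCond 1e-10 a
> -- if no rank truncation occurs, then up to rounding:
> -- multiply a (multiply pinv a)  ==  a
-}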
pseudoInverseRCond ::
   (Extent.C vert, Extent.C horiz, Shape.C height, Shape.C width,
    Class.Floating a) =>
   RealOf a ->
   Full vert horiz height width a ->
   (Int, Full horiz vert width height a)
pseudoInverseRCond rcond =
   mapSnd (ArrMatrix.lift0 . Basic.recheck) .
   Plain.pseudoInverseRCond rcond .
   Basic.uncheck . ArrMatrix.toVector


{- |
@project b d x@ projects @x@ onto the affine subspace described by @B*x = d@.

@b@ must have full rank.
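
A sketch of the defining property:

> xp = project b d x
> -- up to rounding:  Multiply.matrixVector b xp == d,
> -- and among all points satisfying this, xp is the one closest to x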
-}
project ::
   (Shape.C height, Eq height, Shape.C width, Eq width, Class.Floating a) =>
   Wide height width a -> Vector height a ->
   Vector width a -> Vector width a
project b d x =
   x
   |-|
   ArrMatrix.unliftColumn MatrixShape.ColumnMajor
      (minimumNorm b) (Multiply.matrixVector b x |-| d)


{- |
@leastSquaresConstraint a c b d@ computes the @x@
with minimal @|| c - A*x ||_2@ subject to the constraint @B*x = d@.

@b@ must be wide and @a===b@ must be tall
and both matrices must have full rank.
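
A sketch of the defining property:

> x = leastSquaresConstraint a c b d
> -- up to rounding:  Multiply.matrixVector b x == d,
> -- and Vector.norm2 (c |-| Multiply.matrixVector a x) is minimal
> -- among all x satisfying the constraint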
-}
leastSquaresConstraint ::
   (Shape.C height, Eq height,
    Shape.C width, Eq width,
    Shape.C constraints, Eq constraints, Class.Floating a) =>
   General height width a -> Vector height a ->
   Wide constraints width a -> Vector constraints a ->
   Vector width a
leastSquaresConstraint a c b d =
   Plain.leastSquaresConstraint
      (ArrMatrix.toVector a) c
      (ArrMatrix.toVector b) d

{- |
@gaussMarkovLinearModel a b d@ computes @(x,y)@
with minimal @|| y ||_2@ subject to the constraint @d = A*x + B*y@.

@a@ must be tall and @a|||b@ must be wide
and both matrices must have full rank.
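
A sketch of the defining property:

> (x,y) = gaussMarkovLinearModel a b d
> -- up to rounding:
> -- Multiply.matrixVector a x |+| Multiply.matrixVector b y  ==  d,
> -- with Vector.norm2 y minimal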
-}
gaussMarkovLinearModel ::
   (Shape.C height, Eq height,
    Shape.C width, Eq width,
    Shape.C opt, Eq opt, Class.Floating a) =>
   Tall height width a -> General height opt a -> Vector height a ->
   (Vector width a, Vector opt a)
gaussMarkovLinearModel a b d =
   {-
   Fortran-LAPACK and OpenBLAS would leave Y uninitialized
   instead of setting Y to zeros.
   -}
   if Shape.size (Matrix.height a) == 0
      then (Vector.zero (Matrix.width a), Vector.zero (Matrix.width b))
      else Plain.gaussMarkovLinearModel
               (ArrMatrix.toVector a) (ArrMatrix.toVector b) d


{- |
@(q,r) = householder a@
means that @q@ is unitary and @r@ is upper triangular and @a = multiply q r@.
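
A sketch:

> (q,r) = householder a
> -- up to rounding:  multiply q r == a,
> -- and multiply (adjoint q) q is the identity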
-}
householder ::
   (Extent.C vert, Extent.C horiz, Shape.C height, Shape.C width,
    Class.Floating a) =>
   Full vert horiz height width a ->
   (Square height a, Full vert horiz height width a)
householder a =
   let hh = HH.fromMatrix a
   in  (HH.extractQ hh, HH.extractR hh)

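{- |
Like 'householder', but in economy size:
@q@ has the same shape as @a@ and @r@ is a square upper triangular matrix.

A sketch:

> (q,r) = householderTall a
> -- up to rounding, multiply q r == a, and the columns of q are orthonormal
-}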
householderTall ::
   (Extent.C vert, Shape.C height, Shape.C width, Class.Floating a) =>
   Full vert Extent.Small height width a ->
   (Full vert Extent.Small height width a, Triangular.Upper width a)
householderTall a =
   let hh = HH.fromMatrix a
   in  (HH.tallExtractQ hh, HH.tallExtractR hh)


determinant :: (Shape.C sh, Class.Floating a) => Square sh a -> a
determinant = HH.determinant . HH.fromMatrix

{- |
Gramian determinant:
works also for non-square matrices, but is sensitive to transposition.

> determinantAbsolute a = sqrt (Herm.determinant (Herm.gramian a))
-}
determinantAbsolute ::
   (Extent.C vert, Extent.C horiz, Shape.C height, Shape.C width,
    Class.Floating a) =>
   Full vert horiz height width a -> RealOf a
determinantAbsolute = Plain.determinantAbsolute . ArrMatrix.toVector


{- |
For an m-by-n matrix @a@ with m>=n
this function computes an m-by-(m-n) matrix @b@
such that @Matrix.multiply (adjoint b) a@ is a zero matrix.
The function does not try to compensate for a rank deficiency of @a@.
That is, @a|||b@ has full rank if and only if @a@ has full rank.

For full-rank matrices you might also call this @kernel@ or @nullspace@.
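
A sketch:

> b = complement a
> -- Matrix.multiply (adjoint b) a is numerically zero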
-}
complement ::
   (Shape.C height, Shape.C width, Class.Floating a) =>
   Tall height width a -> Tall height ShapeInt a
complement = ArrMatrix.lift1 Plain.complement


{- |
> affineSpanFromKernel a b == (c,d)

This means:
An affine subspace is given implicitly as {x : a #*| x == b}.
Convert this into the explicit representation {c #*| y |+| d : y}.
Matrix @a@ must have full rank,
otherwise the explicit representation will lack some dimensions
and we cannot easily determine the origin @d@ as a minimum norm solution.
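
A sketch of the defining property:

> (c,d) = affineSpanFromKernel a b
> -- up to rounding:  a #*| d == b,
> -- and a #*| (c #*| y |+| d) == b  for every y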
-}
affineSpanFromKernel ::
   (Shape.C width, Eq width, Shape.C height, Eq height, Class.Floating a) =>
   Wide height width a -> Vector height a ->
   (Tall width ShapeInt a, Vector width a)
affineSpanFromKernel a b =
   let qr = HH.fromMatrix $ ArrMatrix.lift1 Basic.adjoint a
   in (ArrMatrix.lift0 $ Plain.extractComplement qr,
       ArrMatrix.unliftColumn MatrixShape.ColumnMajor (HH.minimumNorm qr) b)

{- |
This conversion is roughly inverse to 'affineSpanFromKernel'.
However, it is not a precise inverse in either direction,
because both 'affineSpanFromKernel' and 'affineKernelFromSpan'
accept non-orthogonal matrices but always return orthogonal ones.

In @affineKernelFromSpan c d@,
matrix @c@ should have full rank,
otherwise the implicit representation will miss dimensions.
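
A sketch:

> (a,b) = affineKernelFromSpan c d
> -- up to rounding, every point of the explicit representation
> -- satisfies the kernel equations:  a #*| (c #*| y |+| d) == b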
-}
affineKernelFromSpan ::
   (Shape.C width, Eq width, Shape.C height, Eq height, Class.Floating a) =>
   Tall height width a -> Vector height a ->
   (Wide ShapeInt height a, Vector ShapeInt a)
affineKernelFromSpan c d =
   let a = Basic.adjoint $ Plain.complement $ ArrMatrix.toVector c
   in (ArrMatrix.lift0 a, Basic.multiplyVector a d)