-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/
-- | Typed frontend to TensorFlow and higher-order deep learning
--
-- TypedFlow is a typed, higher-order frontend to TensorFlow and a
-- high-level library for deep learning.
--
-- The main design principles are:
--
--
-- - To make the parameters of layers explicit. This choice makes
-- sharing of parameters explicit and allows implementing "layers" as
-- pure functions.
-- - To provide types that are as precise as possible. Functions are
-- explicit about the shapes and elements of the tensors that they
-- manipulate (though they are often polymorphic in shapes and elements).
-- - To let combinators be as transparent as possible. If an NN layer
-- is a simple tensor transformation, it is exposed as such.
--
@package typedflow
@version 0.9
module TypedFlow.Types
data Sat (a :: k -> Constraint) (b :: k)
[Sat] :: a b => Sat a b
type DOC = Doc ()
type (<) i j = CmpNat i j ~ LT
plusAssoc' :: forall x y z. ((x + y) + z) :~: (x + (y + z))
plusAssoc :: forall x y z k. (((x + y) + z) ~ (x + (y + z)) => k) -> k
prodAssoc' :: forall x y z. ((x * y) * z) :~: (x * (y * z))
prodAssoc :: forall x y z k. (((x * y) * z) ~ (x * (y * z)) => k) -> k
prodHomo' :: forall x y. Product (x ++ y) :~: (Product x * Product y)
prodHomo :: forall x y k. ((Product (x ++ y) ~ (Product x * Product y)) => k) -> k
knownProduct' :: forall s k. All KnownNat s => SList s -> (KnownNat (Product s) => k) -> k
knownProduct :: forall s k. KnownShape s => (KnownNat (Product s) => k) -> k
initLast' :: forall s k. SList s -> ((Init s ++ '[Last s]) ~ s => k) -> k
initLast :: forall s k. KnownShape s => ((Init s ++ '[Last s]) ~ s => k) -> k
knownLast' :: All KnownNat s => SList s -> (KnownNat (Last s) => k) -> k
knownLast :: forall s k. KnownShape s => (KnownNat (Last s) => k) -> k
splitApp' :: forall ys xs k. SList xs -> ((Take (PeanoLength xs) (xs ++ ys) ~ xs, Drop (PeanoLength xs) (xs ++ ys) ~ ys) => k) -> k
splitApp :: forall xs ys k. KnownLen xs => ((Take (PeanoLength xs) (xs ++ ys) ~ xs, Drop (PeanoLength xs) (xs ++ ys) ~ ys) => k) -> k
knownAppend' :: forall t s k. (All KnownNat s, KnownShape t) => SList s -> (KnownShape (s ++ t) => k) -> k
knownAppend :: forall s t k. (KnownShape s, KnownShape t) => (KnownShape (s ++ t) => k) -> k
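-- A minimal usage sketch of the combinators above (illustrative only,
-- assuming Data.Proxy and GHC.TypeLits are in scope): they discharge
-- derived constraints in continuation-passing style. For instance,
-- knownProduct recovers KnownNat (Product s) from KnownShape s:
--
-- > flatSize :: forall s. KnownShape s => Integer
-- > flatSize = knownProduct @s (natVal (Proxy :: Proxy (Product s)))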
newtype V (n :: Nat) a
V :: [a] -> V n a
data NP f (xs :: [k])
[Unit] :: NP f '[]
[:*] :: f x -> NP f xs -> NP f (x : xs)
newtype I a
I :: a -> I a
newtype K a x
K :: a -> K a x
type HList = NP I
class Fun (c :: k -> Constraint) where {
type family Ap c (t :: k) :: l;
}
class Cons (x :: k) (xs :: [k])
class Snoc (x :: k) (xs :: [k])
class FMap (c :: k -> Constraint) (xs :: [k])
-- | Flip at type level
newtype F g t s
F :: g s t -> F g t s
[fromF] :: F g t s -> g s t
-- | Heterogeneous tensor vector with the same kind of elements
type HTV t = NP (F T t)
data Pair a b
(:&) :: a -> b -> Pair a b
newtype Uncurry g (s :: Pair a b)
Uncurry :: g (Fst s) (Snd s) -> Uncurry g s
[fromUncurry] :: Uncurry g s -> g (Fst s) (Snd s)
type HHTV = NP (Uncurry T)
hhead :: NP f (x : xs) -> f x
htail :: NP f (x : xs) -> NP f xs
htmap :: forall f ss t u. (forall s. Tensor s t -> Tensor (Ap f s) u) -> HTV t ss -> HTV u (Ap (FMap f) ss)
hmap :: (forall x. f x -> g x) -> NP f xs -> NP g xs
hendo :: NP Endo xs -> HList xs -> HList xs
happ :: NP f xs -> NP f ys -> NP f (xs ++ ys)
data Both f g x
Both :: (f x) -> (g x) -> Both f g x
hzip :: NP f xs -> NP g xs -> NP (Both f g) xs
hzipWith :: (forall x. f x -> g x -> h x) -> NP f xs -> NP g xs -> NP h xs
hfor_ :: Monad m => NP f xs -> (forall x. f x -> m a) -> m ()
htoList :: NP (K a) xs -> [a]
hsplit' :: SPeano n -> NP f xs -> (NP f (Take n xs), NP f (Drop n xs))
hsplit :: forall xs ys f. KnownLen xs => NP f (xs ++ ys) -> (NP f xs, NP f ys)
hsnoc :: NP f xs -> f x -> NP f (xs ++ '[x])
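-- A small sketch of building and consuming an NP value (illustrative
-- only): a heterogeneous list is assembled from Unit and :*, with
-- elements wrapped in the identity functor I.
--
-- > pair :: HList '[Int, Bool]
-- > pair = I 3 :* (I True :* Unit)
-- >
-- > firstOf :: HList '[Int, Bool] -> Int
-- > firstOf xs = case hhead xs of I n -> n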
data Peano
Zero :: Peano
Succ :: Peano -> Peano
type Dim0 = Zero
type Dim1 = Succ Dim0
type Dim2 = Succ Dim1
type Dim3 = Succ Dim2
type Axis0 = Zero
type Axis1 = Succ Dim0
type Axis2 = Succ Dim1
type Axis3 = Succ Dim2
class KnownPeano n
peanoInt :: KnownPeano n => Integer
data SPeano n
[SZero] :: SPeano Zero
[SSucc] :: SPeano n -> SPeano (Succ n)
data Vec (n :: Peano) a
[VNil] :: Vec Zero a
[VCons] :: a -> Vec n a -> Vec (Succ n) a
vecToList :: Vec n a -> [a]
data Kind
Float :: Kind
Int :: Kind
Bool :: Kind
data NBits
B32 :: NBits
B64 :: NBits
B1 :: NBits
data Typ
Typ :: Kind -> NBits -> Typ
type Flt t = Typ Float t
type Float32 = Typ Float B32
type Int32 = Typ Int B32
type Int64 = Typ Int B64
type TFBool = Typ Bool B1
type Scalar t = T '[] t
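-- With these abbreviations, shapes read naturally in signatures; for
-- instance (illustrative synonyms, not part of the package):
--
-- > type Logits n bs = T '[n, bs] Float32
-- > type Loss = Scalar Float32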
showTyp :: forall t. KnownTyp t => DOC
type Shape = [Nat]
type UntypedExpression = DOC
data T (shape :: Shape) (t :: Typ)
T :: UntypedExpression -> T shape t
[fromTensor] :: T shape t -> UntypedExpression
data SNat (n :: Nat)
[SNat] :: KnownNat n => Proxy n -> SNat n
class (KnownLen s, All KnownNat s) => KnownShape s
class KnownTyp t
typVal :: KnownTyp t => Typ
class KnownBits t
bitsVal :: KnownBits t => NBits
class KnownKind t
kindVal :: KnownKind t => Kind
type SList = SList' Proxy
data SList' f s
[LZ] :: SList' f '[]
[LS] :: forall x xs f. f x -> SList' f xs -> SList' f (x : xs)
withKnownNat :: forall k. Int -> (forall (n :: Nat). KnownNat n => Proxy n -> k) -> k
class KnownLen s
listLen :: KnownLen s => Integer
shapePeano :: KnownLen s => SPeano (PeanoLength s)
shapeSList :: KnownLen s => SList s
shapeSListProxy :: KnownLen xs => proxy xs -> SList xs
shapeToList' :: All KnownNat s => SList s -> [Integer]
shapeToList :: forall (s :: Shape). KnownShape s => [Integer]
showShape' :: [Integer] -> DOC
showShape :: forall (s :: Shape). KnownShape s => DOC
-- | Show a shape, but None is replaced by "-1"
showShapeMinus :: forall (s :: Shape). KnownShape s => DOC
showShapeLen :: forall (s :: Shape). KnownLen s => DOC
rememberNat :: SNat n -> (KnownNat n => r) -> r
type None = 514229
showDim' :: String -> Integer -> DOC
showDimM :: forall n. KnownNat n => DOC
showDim :: forall n. KnownNat n => DOC
str :: Show a => a -> DOC
data ParamInfo
ParamInfo :: String -> [Integer] -> Typ -> (forall s t. (KnownShape s, KnownTyp t) => Tensor s t) -> ParamInfo
[paramName] :: ParamInfo -> String
[paramShape] :: ParamInfo -> [Integer]
[paramDType] :: ParamInfo -> Typ
[paramVar] :: ParamInfo -> forall s t. (KnownShape s, KnownTyp t) => Tensor s t
data GState
GState :: Integer -> DOC -> [ParamInfo] -> Scalar TFBool -> [(String, UntypedExpression)] -> GState
-- | next free variable
[nextVar] :: GState -> Integer
[genText] :: GState -> DOC
-- | optimizable parameters
[genParams] :: GState -> [ParamInfo]
-- | flag which is true when training
[genTrainingPlaceholder] :: GState -> Scalar TFBool
[genPeeks] :: GState -> [(String, UntypedExpression)]
newtype Gen x
Gen :: State GState x -> Gen x
[fromGen] :: Gen x -> State GState x
newParameter :: MonadState GState m => ParamInfo -> m ()
-- | Name an expression so that it is made available for session.run.
peekAtAny :: String -> UntypedExpression -> Gen ()
newVar :: Gen DOC
gen :: DOC -> Gen ()
setGen :: DOC -> Gen ()
withDOC :: forall a. (DOC -> DOC) -> Gen a -> Gen a
type Tensor shape = T shape
(<--) :: DOC -> UntypedExpression -> Gen ()
tuple :: [DOC] -> DOC
dict :: [(String, DOC)] -> DOC
funcall :: String -> [DOC] -> DOC
funcall' :: DOC -> [DOC] -> DOC
binOp :: forall s1 s2 s3 t1 t2 t3. String -> Tensor s1 t1 -> Tensor s2 t2 -> Tensor s3 t3
unOp :: forall s1 s2 t1 t2. String -> Tensor s1 t1 -> Tensor s2 t2
assign :: forall s t. T s t -> Gen (T s t)
genFun :: forall b. String -> [DOC] -> Gen b -> Gen b
lambda :: (T s t -> T s' t') -> Gen UntypedExpression
generate :: Gen () -> (String, [ParamInfo])
generateFile :: String -> Gen () -> IO ()
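-- A hedged end-to-end sketch of the code-generation entry points
-- (mnistModel is a hypothetical model; compile and defaultOptions come
-- from TypedFlow.Learn, below):
--
-- > main :: IO ()
-- > main = generateFile "model.py" (compile defaultOptions mnistModel)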
named :: String -> DOC -> DOC
instance GHC.Base.Applicative TypedFlow.Types.Gen
instance GHC.Base.Functor TypedFlow.Types.Gen
instance Control.Monad.State.Class.MonadState TypedFlow.Types.GState TypedFlow.Types.Gen
instance GHC.Base.Monad TypedFlow.Types.Gen
instance GHC.Show.Show TypedFlow.Types.NBits
instance GHC.Show.Show TypedFlow.Types.Kind
instance Data.Traversable.Traversable (TypedFlow.Types.V n)
instance Data.Foldable.Foldable (TypedFlow.Types.V n)
instance GHC.Base.Functor (TypedFlow.Types.V n)
instance GHC.TypeLits.KnownNat n => GHC.Base.Applicative (TypedFlow.Types.V n)
instance forall k (x :: k). TypedFlow.Types.Fun (TypedFlow.Types.Cons x)
instance forall k (x :: k). TypedFlow.Types.Fun (TypedFlow.Types.Snoc x)
instance forall k (c :: k -> GHC.Types.Constraint). TypedFlow.Types.Fun c => TypedFlow.Types.Fun (TypedFlow.Types.FMap c)
instance TypedFlow.Types.KnownPeano 'TypedFlow.Types.Zero
instance TypedFlow.Types.KnownPeano n => TypedFlow.Types.KnownPeano ('TypedFlow.Types.Succ n)
instance GHC.Show.Show TypedFlow.Types.Typ
instance TypedFlow.Types.KnownShape '[]
instance (GHC.TypeLits.KnownNat x, TypedFlow.Types.KnownShape xs) => TypedFlow.Types.KnownShape (x : xs)
instance TypedFlow.Types.KnownBits 'TypedFlow.Types.B1
instance TypedFlow.Types.KnownBits 'TypedFlow.Types.B32
instance TypedFlow.Types.KnownBits 'TypedFlow.Types.B64
instance (TypedFlow.Types.KnownBits l, TypedFlow.Types.KnownKind k) => TypedFlow.Types.KnownTyp ('TypedFlow.Types.Typ k l)
instance TypedFlow.Types.KnownKind 'TypedFlow.Types.Bool
instance TypedFlow.Types.KnownKind 'TypedFlow.Types.Float
instance TypedFlow.Types.KnownKind 'TypedFlow.Types.Int
instance TypedFlow.Types.KnownLen '[]
instance forall a (xs :: [a]) (x :: a). TypedFlow.Types.KnownLen xs => TypedFlow.Types.KnownLen (x : xs)
-- | This module provides direct access to the most commonly used
-- TensorFlow functions. Higher-level functions are not defined here.
module TypedFlow.TF
-- | Declare a parameter to optimize. The shape of the parameter should
-- not depend on dimensions that can change between runs, such as the
-- batch size.
parameter' :: forall (shape :: Shape) t. (KnownTyp t, KnownShape shape) => String -> T shape t -> Gen (T shape t)
-- | Create a parameter.
parameter :: forall p. KnownTensors p => String -> p -> Gen p
-- | Create a parameter and initialize it with a suitable default for its
-- type. To control the exact initializer, use parameter.
parameterDefault :: forall p. ParamWithDefault p => String -> Gen p
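-- An illustrative sketch (the parameter name is arbitrary; DenseP comes
-- from TypedFlow.Layers.Core, below):
--
-- > mkDense :: Gen (DenseP 'B32 784 10)
-- > mkDense = parameterDefault "dense1"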
class KnownTensors p => ParamWithDefault p
defaultInitializer :: ParamWithDefault p => p
-- | Return a list of parameters.
getParameters :: Gen UntypedExpression
-- | Declare a variable which persists between calls to session.run.
persistent :: forall (shape :: Shape) t. (KnownTyp t, KnownShape shape) => Bool -> String -> T shape t -> Gen (T shape t)
-- | Modify a mutable tensor. Attention: for the assignment to happen, the
-- resulting tensor must be evaluated!
modifyPersistent :: T s t -> T s t -> T s t
-- | Placeholder (to fill)
placeholder :: forall t s. (KnownShape s, KnownTyp t) => String -> Gen (T s t)
-- | Name a tensor so that it is made available for session.run.
peekAt :: String -> Tensor s t -> Gen ()
peekAtMany :: String -> HTV t xs -> Gen ()
-- | Zeros
zeros :: forall t (shape :: Shape). KnownShape shape => KnownTyp t => (T shape t)
-- | Ones
ones :: forall t (shape :: Shape). KnownShape shape => KnownTyp t => (T shape t)
-- | Constant
constant :: forall s w. KnownShape s => KnownBits w => Float -> T s (Typ Float w)
round :: forall s t. Tensor s (Typ Float t) -> Tensor s (Typ Float t)
sigmoid :: forall s t. Tensor s (Typ Float t) -> Tensor s (Typ Float t)
tanh :: forall s t. Tensor s (Typ Float t) -> Tensor s (Typ Float t)
log :: forall s t. Tensor s (Typ Float t) -> Tensor s (Typ Float t)
relu :: forall s t. Tensor s (Typ Float t) -> Tensor s (Typ Float t)
floor :: forall s t. Tensor s (Typ Float t) -> Tensor s (Typ Float t)
negate :: forall s t. T s t -> T s t
-- | Add two tensors, broadcasting along shape s
add :: forall s d t. Tensor (d ++ s) t -> Tensor d t -> Tensor (d ++ s) t
-- | Add two tensors, broadcasting along shape s
(+) :: forall (d :: Shape) (s :: Shape) t. Tensor (d ++ s) t -> Tensor d t -> Tensor (d ++ s) t
infixl 6 +
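-- For instance (sketch; TypedFlow's (+) shadows the Prelude's), a bias
-- of shape '[n] broadcasts against a batched tensor of shape '[n, 128],
-- instantiating d ~ '[n] and s ~ '[128]:
--
-- > addBias :: T '[n, 128] Float32 -> T '[n] Float32 -> T '[n, 128] Float32
-- > addBias x b = x + b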
-- | Indexwise operator
(⊕) :: forall (s :: Shape) t. Tensor s t -> Tensor s t -> Tensor s t
infixl 6 ⊕
-- | Indexwise operator
(⊝) :: forall (s :: Shape) t. Tensor s t -> Tensor s t -> Tensor s t
infixl 6 ⊝
-- | Indexwise operator
(⊙) :: forall (s :: Shape) t. Tensor s t -> Tensor s t -> Tensor s t
infixl 7 ⊙
-- | Indexwise operator
(⊘) :: forall (s :: Shape) t. Tensor s t -> Tensor s t -> Tensor s t
infixl 7 ⊘
-- | Indexwise equality test.
equal :: Tensor d t -> Tensor d t -> Tensor d TFBool
-- | Product of a weight matrix with a (batched) vector.
(∙) :: Tensor '[cols, rows] t -> Tensor '[cols, batchSize] t -> Tensor '[rows, batchSize] t
infixl 7 ∙
-- | Dot product between two batched vectors.
(·) :: forall cols batchSize t. Tensor '[cols, batchSize] t -> Tensor '[cols, batchSize] t -> Tensor '[batchSize] t
infixl 7 ·
-- | Matrix multiplication (note that shape s is preserved)
matmul :: Tensor (o : (n : s)) t -> Tensor (m : (o : s)) t -> Tensor (m : (n : s)) t
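-- A short sketch of (∙) with illustrative sizes: a '[20, 30] weight
-- matrix maps batched 20-vectors to batched 30-vectors.
--
-- > applyW :: Tensor '[20, 30] Float32 -> Tensor '[20, 128] Float32 -> Tensor '[30, 128] Float32
-- > applyW w x = w ∙ x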
-- | Mean value of the input tensor.
reduceMeanAll :: forall (s :: Shape) t. Tensor s t -> Tensor '[] t
-- | Sum of all the values in the input tensor.
reduceSumAll :: forall (s :: Shape) t. Tensor s t -> Tensor '[] t
-- | Sum along a given dimension
reduceSum :: forall n s t. (KnownLen s, KnownPeano n) => T s t -> T (Take n s ++ Drop (Succ n) s) t
-- | Mean along a given dimension
reduceMean :: forall n s t. (KnownLen s, KnownPeano n) => T s t -> T (Take n s ++ Drop (Succ n) s) t
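-- A usage sketch with illustrative sizes: the Peano index selects the
-- reduced axis, so summing a '[10, 128] matrix over Dim0 leaves a
-- '[128] vector.
--
-- > sumRows :: T '[10, 128] Float32 -> T '[128] Float32
-- > sumRows = reduceSum @Dim0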
-- | Argmax along dimension n
argmax :: forall n u m s t. (KnownLen s, KnownPeano n, KnownBits u) => Tensor (Take n s ++ (m : Drop n s)) t -> Tensor s (Typ Int u)
-- | Argmax along the first dimension
argmax0 :: forall u n s t. (KnownLen s, KnownBits u) => T (n : s) t -> T s (Typ Int u)
-- | Argmax along the second dimension
argmax1 :: forall u m n s t. (KnownLen s, KnownBits u) => T (m : (n : s)) t -> T (m : s) (Typ Int u)
-- | Softmax along the first dimension
softmax0 :: T (n : s) (Typ Float w) -> T (n : s) (Typ Float w)
-- | Softmax along the second dimension
softmax1 :: forall n m s w. KnownLen s => T (m : (n : s)) (Typ Float w) -> T (m : (n : s)) (Typ Float w)
-- | Gradient of the first argument with respect to the given parameters.
grad :: T s Float32 -> UntypedExpression -> UntypedExpression
-- | Clip a gradient
clipByGlobalNorm :: Float -> UntypedExpression -> UntypedExpression
-- | Clip a tensor
clipByValue :: Float -> Float -> T s (Flt t) -> T s (Flt t)
-- | Access the last element in a tensor (in the 0th dimension)
last0 :: forall n s t. KnownNat n => KnownLen s => T (n : s) t -> Tensor s t
-- | Access the nth element in a tensor (in the 0th dimension)
nth0 :: forall n s t. KnownLen s => Integer -> T (n : s) t -> Tensor s t
-- | Access the nth element in a tensor (in the 0th dimension), with a
-- static index
nth0' :: forall n m s t. KnownNat n => KnownLen s => n < m => T (m : s) t -> Tensor s t
-- | (gather x ix)[k] = x[ix[k]]. See
-- https://www.tensorflow.org/api_docs/python/tf/gather
gather :: forall s n indexShape t. T (s ++ '[n]) t -> T indexShape Int32 -> T (s ++ indexShape) t
-- | Split a tensor on the first dimension
split0 :: forall n m batchShape t. (KnownNat n, KnownNat m, KnownLen batchShape) => Tensor ((n + m) : batchShape) t -> Gen (Tensor (n : batchShape) t, Tensor (m : batchShape) t)
-- | Take a slice at dimension n from i to j.
slice :: forall n i j s t. KnownNat j => KnownNat i => (i < j, j <= At n s, KnownPeano n, KnownLen s) => Tensor s t -> Tensor (Take n s ++ ((j - i) : Drop (Succ n) s)) t
slice1 :: forall i j m n s t. KnownNat j => KnownNat i => (i < j, j <= m, KnownLen s) => Tensor (n : (m : s)) t -> Tensor (n : ((j - i) : s)) t
-- | Stack n tensors, creating a new first dimension
stack0 :: forall s (n :: Nat) t. (KnownLen s) => V n (T s t) -> Tensor (n : s) t
-- | Split a tensor into n tensors along the first dimension
unstack0 :: forall s (n :: Nat) t. (KnownLen s, KnownNat n) => Tensor (n : s) t -> Gen (V n (T s t))
-- | Stack n tensors, creating a new last dimension
stackN :: forall s (n :: Nat) t. V n (T s t) -> Tensor (s ++ '[n]) t
-- | Stack n tensors, creating a new second dimension
stack1 :: forall s (n :: Nat) m t. (KnownLen s) => V n (T (m : s) t) -> Tensor (m : (n : s)) t
-- | Concatenate tensors on dimension n
concatT :: forall n d1 d2 s t. (KnownPeano n, KnownLen s, (d1 + d2) ~ At n s) => T (Take n s ++ (d1 : Drop (Succ n) s)) t -> T (Take n s ++ (d2 : Drop (Succ n) s)) t -> T s t
-- | Concatenate tensors on the first dimension
concat0 :: forall ys d1 d2 t. (KnownLen ys) => T (d1 : ys) t -> T (d2 : ys) t -> T ((d1 + d2) : ys) t
-- | Concatenate tensors on the second dimension
concat1 :: forall n ys d1 d2 t. (KnownLen ys) => T (n : (d1 : ys)) t -> T (n : (d2 : ys)) t -> T (n : ((d1 + d2) : ys)) t
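-- A usage sketch of concat0 with illustrative sizes; the result length
-- is the sum of the arguments' first dimensions:
--
-- > glue :: T '[3, 128] Float32 -> T '[4, 128] Float32 -> T '[7, 128] Float32
-- > glue = concat0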
-- | Add an extra dimension at axis (n) of size 1.
expandDim :: forall n s t. (KnownLen s, KnownPeano n) => Tensor s t -> Tensor (Take n s ++ (1 : Drop n s)) t
-- | Add an extra dimension at axis (0) of size 1.
expandDim0 :: forall s t. KnownLen s => Tensor s t -> Tensor (1 : s) t
-- | Remove the first dimension if its size is 1.
squeeze0 :: forall s t. KnownLen s => Tensor (1 : s) t -> Tensor s t
-- | Add an extra dimension at axis (1) of size 1.
expandDim1 :: forall n s t. KnownShape s => Tensor (n : s) t -> Tensor (n : (1 : s)) t
-- | Remove the second dimension if its size is 1.
squeeze1 :: forall n s t. KnownLen s => Tensor (n : (1 : s)) t -> Tensor (n : s) t
-- | Reshape a tensor so that the first two dimensions are collapsed
flatten2 :: forall m n s t. (KnownNat m, KnownNat n, KnownShape s) => Tensor (m : (n : s)) t -> Tensor ((m * n) : s) t
-- | Reshape a tensor so that the first dimension is expanded into two.
inflate2 :: forall m n s t. (KnownNat m, KnownNat n, KnownShape s) => Tensor ((m * n) : s) t -> Tensor (m : (n : s)) t
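-- A sketch of flatten2 with illustrative sizes (inflate2 is its
-- inverse):
--
-- > mergeHeads :: Tensor '[8, 64, 128] Float32 -> Tensor '[512, 128] Float32
-- > mergeHeads = flatten2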
-- | Reshape a tensor so that the last two dimensions are collapsed
flattenN2 :: forall s m n t. (KnownNat m, KnownNat n, KnownShape s) => Tensor (s ++ '[m, n]) t -> Tensor (s ++ '[m * n]) t
-- | Reshape a tensor so that the first three dimensions are collapsed
flatten3 :: forall m n o s t. (KnownNat m, KnownNat n, KnownNat o, KnownShape s) => Tensor (m : (n : (o : s))) t -> Tensor (((m * n) * o) : s) t
-- | Reshape a tensor so that the first dimension is expanded into three.
inflate3 :: forall m n o s t. (KnownNat m, KnownNat n, KnownNat o, KnownShape s) => Tensor (((m * n) * o) : s) t -> Tensor (m : (n : (o : s))) t
reshape :: forall s2 s1 t. KnownShape s2 => Product s1 ~ Product s2 => Tensor s1 t -> Tensor s2 t
-- | Flatten all the dimensions of the tensor
flattenAll :: forall s t. KnownShape s => Tensor s t -> Tensor '[Product s] t
inflateAll :: forall s t. KnownShape s => Tensor '[Product s] t -> Tensor s t
-- | Transposition. See the type for the permutation of dimensions.
transpose :: forall s t. T (Reverse s) t -> T s t
-- | Transposition. See the type for the permutation of dimensions.
transposeN :: forall s n t. KnownLen s => T (n : s) t -> T (s ++ '[n]) t
-- | Transposition. See the type for the permutation of dimensions.
transposeN' :: forall s n t. KnownLen s => T (s ++ '[n]) t -> T (n : s) t
-- | Transposition. See the type for the permutation of dimensions.
transpose01 :: forall s m n t. KnownLen s => T (m : (n : s)) t -> T (n : (m : s)) t
-- | Transposition. See the type for the permutation of dimensions.
transposeN01 :: forall s m n t. T (s ++ '[m, n]) t -> T (s ++ '[n, m]) t
-- | Reverse sequences. See
-- https://www.tensorflow.org/api_docs/python/tf/reverse_sequence
reverseSequences :: forall bs n x t. KnownLen x => LastEqual bs x => T '[bs] Int32 -> T (n : x) t -> T (n : x) t
-- | Generate a mask of given length for each sequence.
sequenceMask :: forall maxlen bs. KnownNat maxlen => Tensor '[bs] Int32 -> Tensor '[maxlen, bs] TFBool
-- | Cast the element type.
cast :: forall u s t. KnownTyp u => T s t -> T s u
-- | Size-preserving convolution operation.
convolution :: forall outputChannels filterSpatialShape inChannels s t. KnownLen filterSpatialShape => Length filterSpatialShape <= 3 => ((1 + Length filterSpatialShape) ~ Length s) => T ('[inChannels] ++ s) t -> T ('[outputChannels, inChannels] ++ filterSpatialShape) t -> T ('[outputChannels] ++ s) t
-- | One hot vector along axis n
oneHot :: forall n numClasses s w t. KnownNat numClasses => KnownBits t => (KnownLen s, KnownPeano n) => Tensor s (Typ Int w) -> Tensor (Take n s ++ (numClasses : Drop n s)) (Flt t)
-- | One hot vector along axis 0
oneHot0 :: forall numClasses w batchSize t. KnownNat numClasses => KnownBits t => Tensor '[batchSize] (Typ Int w) -> Tensor '[numClasses, batchSize] (Flt t)
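-- A usage sketch of oneHot0 (10 classes, batch of 128; illustrative):
--
-- > toOneHot :: Tensor '[128] Int32 -> Tensor '[10, 128] Float32
-- > toOneHot = oneHot0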
-- | One hot vector along axis 1
oneHot1 :: forall numClasses w batchSize m t. KnownNat numClasses => KnownBits t => Tensor '[m, batchSize] (Typ Int w) -> Tensor '[m, numClasses, batchSize] (Flt t)
-- | Selection of a tensor (note: this is a strict operation)
if_ :: Scalar TFBool -> T s t -> T s t -> T s t
-- | (where_ c x y)[i] = if c[i] then x[i] else y[i]
where_ :: T s TFBool -> T s t -> T s t -> T s t
-- | Map a function along the first dimension of a tensor
mapT :: forall s t r u n. KnownTyp u => KnownLen r => KnownLen s => (T s t -> T r u) -> T (n : s) t -> Gen (T (n : r) u)
-- | Map a function along the last dimension of a tensor
mapTN :: forall n s t r u. KnownTyp u => (T s t -> T r u) -> T (s ++ '[n]) t -> Gen (T (r ++ '[n]) u)
zipWithT :: forall (s :: [Nat]) (t :: Typ) (s1 :: [Nat]) (t1 :: Typ) (s2 :: Shape) (n :: Nat) (t2 :: Typ). KnownNat n => (KnownLen s, KnownLen s2, KnownLen s1) => KnownTyp t2 => (T s t -> T s1 t1 -> T s2 t2) -> Tensor (n : s) t -> Tensor (n : s1) t1 -> Gen (Tensor (n : s2) t2)
zipWithTN :: forall (n :: Nat) (s :: [Nat]) (t :: Typ) (s1 :: [Nat]) (t1 :: Typ) (s2 :: Shape) (t2 :: Typ). KnownTyp t2 => (T s t -> T s1 t1 -> T s2 t2) -> Tensor (s ++ '[n]) t -> Tensor (s1 ++ '[n]) t1 -> Gen (Tensor (s2 ++ '[n]) t2)
-- | Computes sigmoid cross entropy given logits. Measures the probability
-- error in discrete classification tasks in which each class is
-- independent and not mutually exclusive. For instance, one could
-- perform multilabel classification where a picture can contain both an
-- elephant and a dog at the same time. See
-- https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
sigmoidCrossEntropyWithLogits :: Tensor s (Flt w) -> Tensor s (Flt w) -> Tensor s (Flt w)
-- | (dense) softmax cross entropy with logits.
softmaxCrossEntropyWithLogits :: Tensor '[numClasses, batchSize] Float32 -> Tensor '[numClasses, batchSize] Float32 -> Tensor '[batchSize] Float32
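-- A hedged sketch: following the underlying TensorFlow function, the
-- first argument is assumed to be the gold (label) distribution and the
-- second the logits:
--
-- > xent :: Tensor '[10, 128] Float32 -> Tensor '[10, 128] Float32 -> Tensor '[128] Float32
-- > xent gold logits = softmaxCrossEntropyWithLogits gold logits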
-- | sparse softmax cross entropy with logits.
sparseSoftmaxCrossEntropyWithLogits :: Tensor s Int32 -> Tensor (numClasses : s) (Flt t) -> Tensor s (Flt t)
-- | Generate a random tensor where each individual element is drawn
-- from a truncated normal distribution with the given standard deviation.
truncatedNormal :: forall s w. KnownShape s => KnownBits w => Float -> T s (Typ Float w)
-- | Generate a random tensor where each individual element is drawn
-- from a uniform distribution with the given bounds.
randomUniform :: forall s t. (KnownShape s, KnownTyp t) => Float -> Float -> T s t
-- | Generate an orthogonal matrix. If the output has more than 2
-- dimensions, the matrix is reshaped.
randomOrthogonal :: forall n s t. (KnownBits t, KnownNat n, KnownShape s) => T (n : s) (Typ Float t)
-- | Random tensor with variance scaling according to deeplearning lore.
varianceScaling :: forall inDim outDim t. KnownNat inDim => (KnownNat outDim, KnownBits t) => Float -> VarianceScaleMode -> Distrib -> Tensor '[inDim, outDim] (Typ Float t)
glorotUniform :: forall inDim outDim t. KnownNat inDim => (KnownNat outDim, KnownBits t) => Tensor '[inDim, outDim] (Typ Float t)
-- | Replicate a shape-polymorphic tensor to form a heterogeneous tensor
-- vector.
repeatT :: forall (ss :: [Shape]) t. All KnownShape ss => KnownLen ss => (forall s. KnownShape s => T s t) -> HTV t ss
flattenHTV :: KnownTyp t => All KnownShape xs => HTV t xs -> Tensor '[Sum (Ap (FMap CProduct) xs)] t
inflateHTV :: forall xs s t. (All KnownShape xs, KnownLen s, KnownLen xs) => Tensor '[Sum (Ap (FMap CProduct) xs)] t -> Gen (HTV t xs)
class KnownTensors p
-- | Traverse all the tensors in (possibly nested) tuples of tensors
travTensor :: KnownTensors p => (forall s t. (KnownTyp t, KnownShape s) => String -> T s t -> Gen (T s t)) -> String -> p -> Gen p
class LastEqual x xs
instance forall a (x :: a). TypedFlow.TF.LastEqual x '[x]
instance forall a k (x :: k) (y2 :: a) (xs :: [a]) (y :: a). TypedFlow.TF.LastEqual x (y2 : xs) => TypedFlow.TF.LastEqual x (y : y2 : xs)
instance (TypedFlow.Types.KnownTyp t, TypedFlow.Types.KnownShape shape) => TypedFlow.TF.KnownTensors (TypedFlow.Types.T shape t)
instance (TypedFlow.Types.KnownTyp t, TypedFlow.Types.All TypedFlow.Types.KnownShape ys) => TypedFlow.TF.KnownTensors (TypedFlow.Types.HTV t ys)
instance (TypedFlow.TF.KnownTensors p, TypedFlow.TF.KnownTensors q) => TypedFlow.TF.KnownTensors (p, q)
instance (TypedFlow.TF.KnownTensors p1, TypedFlow.TF.KnownTensors p2, TypedFlow.TF.KnownTensors p3) => TypedFlow.TF.KnownTensors (p1, p2, p3)
instance (TypedFlow.TF.KnownTensors p1, TypedFlow.TF.KnownTensors p2, TypedFlow.TF.KnownTensors p3, TypedFlow.TF.KnownTensors p4) => TypedFlow.TF.KnownTensors (p1, p2, p3, p4)
instance TypedFlow.Types.Fun TypedFlow.TF.CProduct
module TypedFlow.Learn
-- | The first type argument is the number of classes. categorical
-- logits gold returns (prediction, accuracy, loss); accuracy and
-- prediction are averaged over the batch.
categorical :: forall nCat bs. KnownNat nCat => Model '[nCat, bs] Float32 '[bs] Int32
-- | The first type argument is the number of classes.
-- categoricalDistribution logits gold returns (prediction,
-- accuracy, loss); accuracy and prediction are averaged over the batch.
categoricalDistribution :: forall nCat bs. Model '[nCat, bs] Float32 '[nCat, bs] Float32
-- |
-- timedCategorical targetWeights logits y
--
-- targetWeights: a zero-one matrix of the same size as decoder_outputs,
-- intended to mask padding positions outside of the target sequence
-- lengths with zeros.
timedCategorical :: forall len nCat bs bits. KnownNat nCat => KnownNat bs => KnownNat len => KnownBits bits => Tensor '[len, bs] (Flt bits) -> Tensor '[len, nCat, bs] (Flt bits) -> Tensor '[len, bs] Int32 -> Gen (ModelOutput '[len, nCat, bs] (Flt bits))
-- | Triple of values that are always output in a model: prediction, loss
-- and accuracy.
data ModelOutput s t
ModelOutput :: T s t -> Scalar Float32 -> Scalar Float32 -> ModelOutput s t
-- | prediction
[modelY] :: ModelOutput s t -> T s t
[modelLoss] :: ModelOutput s t -> Scalar Float32
[modelAccuracy] :: ModelOutput s t -> Scalar Float32
-- | A standard modelling function: (input value, gold value) ↦
-- (prediction, accuracy, loss)
type Model input tIn output tOut = T input tIn -> T output tOut -> Gen (ModelOutput output tOut)
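-- A hedged sketch of a complete model in this style (sizes and names
-- are illustrative; dense and DenseP come from TypedFlow.Layers.Core,
-- parameterDefault from TypedFlow.TF):
--
-- > mnistModel :: Model '[784, 128] Float32 '[128] Int32
-- > mnistModel input gold = do
-- >   w <- parameterDefault "dense1" :: Gen (DenseP 'B32 784 10)
-- >   categorical (dense w input) gold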
-- | Model with several binary outputs.
binary :: forall n bs. (KnownNat bs) => Model '[n, bs] Float32 '[n, bs] Int32
-- | Model compiler options
data Options
Options :: Maybe Float -> Options
-- | apply gradient clipping
[maxGradientNorm] :: Options -> Maybe Float
-- | default model compiler options
defaultOptions :: Options
-- | compile a standard model
compile :: forall sx tx sy ty sy_ ty_. (KnownShape sx, KnownTyp tx, KnownShape sy, KnownTyp ty, KnownShape sy_) => Options -> (Tensor sx tx -> Tensor sy ty -> Gen (ModelOutput sy_ ty_)) -> Gen ()
-- | Compile a model with non-standard parameters ("x" and "y" must be
-- provided as placeholders manually).
compileGen :: forall sy ty. (KnownShape sy) => Options -> Gen (ModelOutput sy ty) -> Gen ()
module TypedFlow.Layers.Core
-- | A dense layer is a linear function from a to b: a transformation
-- matrix and a bias.
data DenseP t a b
DenseP :: Tensor '[a, b] (Flt t) -> Tensor '[b] (Flt t) -> DenseP t a b
[denseWeights] :: DenseP t a b -> Tensor '[a, b] (Flt t)
[denseBiases] :: DenseP t a b -> Tensor '[b] (Flt t)
-- | Dense layer (Apply a linear function)
dense :: forall m n batchSize t. DenseP t n m -> Tensor '[n, batchSize] (Flt t) -> Tensor '[m, batchSize] (Flt t)
-- | Dense layer (Apply a linear function)
(#) :: forall m n batchSize t. DenseP t n m -> Tensor '[n, batchSize] (Flt t) -> Tensor '[m, batchSize] (Flt t)
-- | A drop probability. (This type is used to make sure one does not
-- confuse keep probability and drop probability)
data DropProb
DropProb :: Float -> DropProb
-- | Generate a dropout function. The mask applied by the returned function
-- will be constant for any given call to mkDropout. This behavior makes it
-- possible to use the same mask across the several steps of an RNN.
mkDropout :: forall s t. KnownShape s => KnownBits t => DropProb -> Gen (Tensor s (Typ Float t) -> Tensor s (Typ Float t))
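-- A usage sketch (drop probability and shape are illustrative):
--
-- > dropped :: Gen (Tensor '[100, 128] Float32)
-- > dropped = do
-- >   applyDrop <- mkDropout (DropProb 0.2)
-- >   return (applyDrop (ones @Float32 @'[100, 128]))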
-- | Generate a dropout function for a heterogeneous tensor vector.
mkDropouts :: KnownBits t => KnownLen shapes => All KnownShape shapes => DropProb -> Gen (HTV (Typ Float t) shapes -> HTV (Typ Float t) shapes)
-- | Parameters for the embedding layers
newtype EmbeddingP numObjects embeddingSize t
EmbeddingP :: (Tensor '[numObjects, embeddingSize] (Typ Float t)) -> EmbeddingP numObjects embeddingSize t
-- | embedding layer
embedding :: forall embeddingSize numObjects batchSize t. EmbeddingP numObjects embeddingSize t -> Tensor '[batchSize] Int32 -> Tensor '[embeddingSize, batchSize] (Typ Float t)
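-- A usage sketch (vocabulary of 10000, embedding size 300, batch of
-- 128; all illustrative):
--
-- > embedBatch :: EmbeddingP 10000 300 'B32 -> Tensor '[128] Int32 -> Tensor '[300, 128] Float32
-- > embedBatch = embedding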
data ConvP t outChannels inChannels filterSpatialShape
ConvP :: (T ('[outChannels, inChannels] ++ filterSpatialShape) (Typ Float t)) -> (T '[outChannels] (Typ Float t)) -> ConvP t outChannels inChannels filterSpatialShape
-- | Size-preserving convolution layer
conv :: forall outChannels filterSpatialShape inChannels s t. ((1 + Length filterSpatialShape) ~ Length s, Length filterSpatialShape <= 3, KnownLen filterSpatialShape) => ConvP t outChannels inChannels filterSpatialShape -> T ('[inChannels] ++ s) (Typ Float t) -> (T ('[outChannels] ++ s) (Typ Float t))
-- | 2 by 2 maxpool layer.
maxPool2D :: forall stridex (stridey :: Nat) batch height width channels t. (KnownNat stridex, KnownNat stridey) => T '[channels, width * stridex, height * stridex, batch] (Flt t) -> T '[channels, width, height, batch] (Flt t)
instance (GHC.TypeLits.KnownNat numObjects, TypedFlow.Types.KnownBits b, GHC.TypeLits.KnownNat embeddingSize) => TypedFlow.TF.KnownTensors (TypedFlow.Layers.Core.EmbeddingP numObjects embeddingSize b)
instance (GHC.TypeLits.KnownNat numObjects, TypedFlow.Types.KnownBits b, GHC.TypeLits.KnownNat embeddingSize) => TypedFlow.TF.ParamWithDefault (TypedFlow.Layers.Core.EmbeddingP numObjects embeddingSize b)
instance (GHC.TypeLits.KnownNat a, GHC.TypeLits.KnownNat b, TypedFlow.Types.KnownBits t) => TypedFlow.TF.KnownTensors (TypedFlow.Layers.Core.DenseP t a b)
instance (GHC.TypeLits.KnownNat n, GHC.TypeLits.KnownNat m, TypedFlow.Types.KnownBits b) => TypedFlow.TF.ParamWithDefault (TypedFlow.Layers.Core.DenseP b n m)
instance (GHC.TypeLits.KnownNat outChannels, GHC.TypeLits.KnownNat inChannels, TypedFlow.Types.KnownShape filterSpatialShape, TypedFlow.Types.KnownBits t) => TypedFlow.TF.ParamWithDefault (TypedFlow.Layers.Core.ConvP t outChannels inChannels filterSpatialShape)
instance (GHC.TypeLits.KnownNat outChannels, GHC.TypeLits.KnownNat inChannels, TypedFlow.Types.KnownShape filterSpatialShape, TypedFlow.Types.KnownBits t) => TypedFlow.TF.KnownTensors (TypedFlow.Layers.Core.ConvP t outChannels inChannels filterSpatialShape)
module TypedFlow.Layers.RNN
-- | A cell in an RNN. states is the state propagated through time.
type RnnCell t states input output = (HTV (Flt t) states, input) -> Gen (HTV (Flt t) states, output)
-- | A layer in an RNN. n is the length of the time sequence;
-- state is the state propagated through time.
type RnnLayer b n state input t output u = HTV (Flt b) state -> Tensor (n : input) t -> Gen (HTV (Flt b) state, Tensor (n : output) u)
-- | Stack two RNN cells (LHS is run first)
stackRnnCells :: forall s0 s1 a b c t. KnownLen s0 => RnnCell t s0 a b -> RnnCell t s1 b c -> RnnCell t (s0 ++ s1) a c
-- | Stack two RNN cells (LHS is run first)
(.-.) :: forall s0 s1 a b c t. KnownLen s0 => RnnCell t s0 a b -> RnnCell t s1 b c -> RnnCell t (s0 ++ s1) a c
-- | Compose two RNN layers. This is useful, for example, to combine
-- forward and backward layers.
stackRnnLayers :: forall s1 s2 a t b u c v n bits. KnownLen s1 => RnnLayer bits n s1 a t b u -> RnnLayer bits n s2 b u c v -> RnnLayer bits n (s1 ++ s2) a t c v
-- | Compose two RNN layers. This is useful, for example, to combine
-- forward and backward layers.
(.--.) :: forall s1 s2 a t b u c v n bits. KnownLen s1 => RnnLayer bits n s1 a t b u -> RnnLayer bits n s2 b u c v -> RnnLayer bits n (s1 ++ s2) a t c v
infixr 9 .--.
-- | Compose two RNN layers in parallel.
bothRnnLayers :: forall s1 s2 a t b u c n bs bits. KnownLen s1 => RnnLayer bits n s1 a t '[b, bs] u -> RnnLayer bits n s2 a t '[c, bs] u -> RnnLayer bits n (s1 ++ s2) a t '[b + c, bs] u
-- | Compose two RNN layers in parallel.
(.++.) :: forall s1 s2 a t b u c n bs bits. KnownLen s1 => RnnLayer bits n s1 a t '[b, bs] u -> RnnLayer bits n s2 a t '[c, bs] u -> RnnLayer bits n (s1 ++ s2) a t '[b + c, bs] u
infixr 9 .++.
-- | Run the cell, and forward the input to the output by concatenating
-- it with the cell's output.
withBypass :: RnnCell b s0 (T '[x, bs] t) (T '[y, bs] t) -> RnnCell b s0 (T '[x, bs] t) (T '[x + y, bs] t)
-- | Apply a function on the cell state(s) before running the cell itself.
onStates :: (HTV (Flt t) xs -> HTV (Flt t) xs) -> RnnCell t xs a b -> RnnCell t xs a b
-- | Convert a pure function (feed-forward layer) to an RNN cell by
-- ignoring the RNN state.
timeDistribute :: (a -> b) -> RnnCell t '[] a b
-- | Convert a stateless generator into an RNN cell by ignoring the RNN
-- state.
timeDistribute' :: (a -> Gen b) -> RnnCell t '[] a b
-- | Standard RNN gate initializer. (The recurrent kernel is orthogonal to
-- avoid divergence; the input kernel is glorot)
cellInitializerBit :: forall n x t. (KnownNat n, KnownNat x, KnownBits t) => DenseP t (n + x) n
-- | Parameters for an LSTM
data LSTMP t n x
LSTMP :: (DenseP t (n + x) n) -> (DenseP t (n + x) n) -> (DenseP t (n + x) n) -> (DenseP t (n + x) n) -> LSTMP t n x
-- | Standard LSTM
lstm :: forall n x bs t. LSTMP t n x -> RnnCell t '['[n, bs], '[n, bs]] (Tensor '[x, bs] (Flt t)) (Tensor '[n, bs] (Flt t))
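-- A hedged sketch: two LSTM cells stacked with .-. (sizes illustrative;
-- the resulting cell can then be unrolled with rnn):
--
-- > twoLstm :: Gen (RnnCell 'B32 '[ '[30, 128], '[30, 128], '[50, 128], '[50, 128]]
-- >                          (Tensor '[20, 128] Float32)
-- >                          (Tensor '[50, 128] Float32))
-- > twoLstm = do
-- >   p1 <- parameterDefault "lstm1" :: Gen (LSTMP 'B32 30 20)
-- >   p2 <- parameterDefault "lstm2" :: Gen (LSTMP 'B32 50 30)
-- >   return (lstm p1 .-. lstm p2)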
-- | Parameters for a GRU
data GRUP t n x
GRUP :: (T '[n + x, n] (Typ Float t)) -> (T '[n + x, n] (Typ Float t)) -> (T '[n + x, n] (Typ Float t)) -> GRUP t n x
-- | Standard GRU cell
gru :: forall n x bs t. (KnownNat bs, KnownNat n, KnownBits t) => GRUP t n x -> RnnCell t '['[n, bs]] (Tensor '[x, bs] (Flt t)) (Tensor '[n, bs] (Flt t))
-- | Build an RNN by repeating a cell n times.
rnn :: forall n state input output t u b. (KnownNat n, KnownShape input, KnownShape output) => RnnCell b state (T input t) (T output u) -> RnnLayer b n state input t output u
-- | Build an RNN by repeating a cell n times. However, the state is
-- propagated in the right-to-left direction (decreasing indices in the
-- time dimension of the input and output tensors).
rnnBackward :: forall n state input output t u b. (KnownNat n, KnownShape input, KnownShape output) => RnnCell b state (T input t) (T output u) -> RnnLayer b n state input t output u
-- | Like rnnWithCull, but states are threaded backwards.
rnnBackwardsWithCull :: forall n bs x y t u ls b. KnownLen ls => KnownNat n => KnownLen x => KnownLen y => All KnownLen ls => All (LastEqual bs) ls => LastEqual bs x => LastEqual bs y => T '[bs] Int32 -> RnnCell b ls (T x t) (T y u) -> RnnLayer b n ls x t y u
-- | rnnWithCull dynLen constructs an RNN as normal, but returns
-- the state after step dynLen only.
rnnWithCull :: forall n bs x y t u ls b. KnownLen ls => KnownNat n => KnownLen x => KnownLen y => All KnownLen ls => All (LastEqual bs) ls => T '[bs] Int32 -> RnnCell b ls (T x t) (T y u) -> RnnLayer b n ls x t y u
-- | An attention scoring function. This function should produce a score
-- (between 0 and 1) for each of the nValues entries of size
-- valueSize.
type AttentionScoring t batchSize keySize valueSize nValues = Tensor '[keySize, batchSize] (Typ Float t) -> Tensor '[nValues, valueSize, batchSize] (Typ Float t) -> Tensor '[nValues, batchSize] (Typ Float t)
-- | Multiplicative scoring function
multiplicativeScoring :: forall valueSize keySize batchSize nValues t. KnownNat batchSize => T '[keySize, valueSize] (Typ Float t) -> AttentionScoring t batchSize keySize valueSize nValues
data AdditiveScoringP sz keySize valueSize t
AdditiveScoringP :: (Tensor '[sz, 1] (Typ Float t)) -> (Tensor '[keySize, sz] (Typ Float t)) -> (Tensor '[valueSize, sz] (Typ Float t)) -> AdditiveScoringP sz keySize valueSize t
-- | An additive scoring function. See
-- https://arxiv.org/pdf/1412.7449.pdf
additiveScoring :: forall sz keySize valueSize t nValues batchSize. KnownNat sz => KnownNat keySize => (KnownNat nValues, KnownNat batchSize) => AdditiveScoringP sz keySize valueSize t -> AttentionScoring t batchSize valueSize keySize nValues
-- | A function which attends to an external input. Typically a function of
-- this type is a closure which has the attended input in its
-- environment.
type AttentionFunction t batchSize keySize valueSize = T '[keySize, batchSize] (Flt t) -> Gen (T '[valueSize, batchSize] (Flt t))
-- | uniformAttn score lens h st scores each element of the
-- vector h against st using the scoring function score. The
-- "winning" element of h (selected via softmax) is returned.
uniformAttn :: forall valueSize m keySize batchSize t. KnownNat m => KnownBits t => AttentionScoring t batchSize keySize valueSize m -> T '[batchSize] Int32 -> T '[m, valueSize, batchSize] (Flt t) -> AttentionFunction t batchSize keySize valueSize
-- | Luong attention function (following
-- https://github.com/tensorflow/nmt#background-on-the-attention-mechanism
-- commit 75aa22dfb159f10a1a5b4557777d9ff547c1975a). Essentially a dense
-- layer with tanh activation, on top of uniform attention.
luongAttention :: forall attnSize d m e batchSize w. KnownNat m => KnownBits w => Tensor '[d + e, attnSize] (Flt w) -> AttentionScoring w batchSize e d m -> Tensor '[batchSize] Int32 -> T '[m, d, batchSize] (Flt w) -> AttentionFunction w batchSize e attnSize
-- | Add some attention to an RnnCell, and feed the attention vector to the
-- next iteration in the rnn. (This follows the diagram at
-- https://github.com/tensorflow/nmt#background-on-the-attention-mechanism
-- commit 75aa22dfb159f10a1a5b4557777d9ff547c1975a).
attentiveWithFeedback :: forall attSize cellSize inputSize bs w ss. AttentionFunction w bs cellSize attSize -> RnnCell w ss (T '[inputSize + attSize, bs] (Flt w)) (T '[cellSize, bs] (Flt w)) -> RnnCell w ('[attSize, bs] : ss) (T '[inputSize, bs] (Flt w)) (T '[attSize, bs] (Flt w))
instance (GHC.TypeLits.KnownNat n, GHC.TypeLits.KnownNat x, TypedFlow.Types.KnownBits t) => TypedFlow.TF.KnownTensors (TypedFlow.Layers.RNN.LSTMP t n x)
instance (GHC.TypeLits.KnownNat n, GHC.TypeLits.KnownNat x, TypedFlow.Types.KnownBits t) => TypedFlow.TF.ParamWithDefault (TypedFlow.Layers.RNN.LSTMP t n x)
instance (GHC.TypeLits.KnownNat n, GHC.TypeLits.KnownNat x, TypedFlow.Types.KnownBits t) => TypedFlow.TF.KnownTensors (TypedFlow.Layers.RNN.GRUP t n x)
instance (GHC.TypeLits.KnownNat n, GHC.TypeLits.KnownNat x, TypedFlow.Types.KnownBits t) => TypedFlow.TF.ParamWithDefault (TypedFlow.Layers.RNN.GRUP t n x)
instance (GHC.TypeLits.KnownNat n, GHC.TypeLits.KnownNat k, GHC.TypeLits.KnownNat v, TypedFlow.Types.KnownBits t) => TypedFlow.TF.KnownTensors (TypedFlow.Layers.RNN.AdditiveScoringP k v n t)
instance (GHC.TypeLits.KnownNat n, GHC.TypeLits.KnownNat k, GHC.TypeLits.KnownNat v, TypedFlow.Types.KnownBits t) => TypedFlow.TF.ParamWithDefault (TypedFlow.Layers.RNN.AdditiveScoringP k v n t)
module TypedFlow.Layers
-- | This module re-exports all functions.
module TypedFlow