Safe Haskell: None
Language: Haskell98
- data LSTM :: Nat -> Nat -> * where
- LSTM :: (KnownNat input, KnownNat output) => !(LSTMWeights input output) -> !(LSTMWeights input output) -> LSTM input output
- data LSTMWeights :: Nat -> Nat -> * where
- LSTMWeights :: (KnownNat input, KnownNat output) => {..} -> LSTMWeights input output
- randomLSTM :: forall m i o. (MonadRandom m, KnownNat i, KnownNat o) => m (LSTM i o)
Documentation
data LSTM :: Nat -> Nat -> * where Source #
Long Short Term Memory Recurrent unit
This is a Peephole formulation, so the recurrent shape is just the cell state, the previous output is not held or used at all.
LSTM :: (KnownNat input, KnownNat output) => !(LSTMWeights input output) -> !(LSTMWeights input output) -> LSTM input output |
Show (LSTM i o) Source # | |
(KnownNat i, KnownNat o) => Serialize (LSTM i o) Source # | |
(KnownNat i, KnownNat o) => UpdateLayer (LSTM i o) Source # | |
(KnownNat i, KnownNat o) => RecurrentUpdateLayer (LSTM i o) Source # | |
(KnownNat i, KnownNat o) => RecurrentLayer (LSTM i o) (D1 i) (D1 o) Source # | |
type Gradient (LSTM i o) Source # | |
type RecurrentShape (LSTM i o) Source # | |
type RecTape (LSTM i o) (D1 i) (D1 o) Source # | |
data LSTMWeights :: Nat -> Nat -> * where Source #
LSTMWeights :: (KnownNat input, KnownNat output) => {..} -> LSTMWeights input output | |
randomLSTM :: forall m i o. (MonadRandom m, KnownNat i, KnownNat o) => m (LSTM i o) Source #
Generate an LSTM layer with random weights. One can also just call createRandom from UpdateLayer.
Has forget gate biases set to 1 to encourage early learning.
https://github.com/karpathy/char-rnn/commit/0dfeaa454e687dd0278f036552ea1e48a0a408c9