hasktorch-indef-0.0.1.0: Core Hasktorch abstractions wrapping FFI bindings

Copyright    (c) Sam Stites 2017
License      BSD3
Maintainer   sam@stites.io
Stability    experimental
Portability  non-portable
Safe Haskell None
Language     Haskell2010

Torch.Indef.Static.NN

Description

 
Synopsis

Documentation

unsqueeze1dBP :: forall s d rs ls n. Reifies s W => All Dimensions '[d, rs ++ ('[1] ++ ls)] => '(rs, ls) ~ SplitAt n d => '(rs, 1 :+ ls) ~ SplitAt n (rs ++ ('[1] ++ ls)) => (rs ++ ls) ~ d => Dim n -> BVar s (Tensor d) -> BVar s (Tensor (rs ++ ('[1] ++ ls))) Source #

squeeze1dBP :: forall s d rs ls n. Reifies s W => All Dimensions '[d, rs ++ ls] => All KnownDim '[n] => '(rs, 1 :+ ls) ~ SplitAt n d => d ~ (Take n (rs ++ ls) ++ ('[1] ++ Drop n (rs ++ ls))) => Dim n -> BVar s (Tensor d) -> BVar s (Tensor (rs ++ ls)) Source #

Squeeze a dimension of size 1 out of the tensor

flattenBatchIO :: forall d bs. (All KnownDim '[Product d, bs], All Dimensions '[bs :+ d, d]) => Product (bs :+ d) ~ Product '[bs, Product d] => Tensor (bs :+ d) -> IO (Tensor '[bs, Product d], Tensor '[bs, Product d] -> IO (Tensor (bs :+ d))) Source #

A backprop-able flatten operation with a batch dimension in IO
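A minimal sketch of how this might be used; the concrete batch and image sizes below are assumptions, not part of the API:

  -- flatten a batch of four 3x32x32 images into rows of 3072 values;
  -- the input tensor is assumed to be built elsewhere
  flattenStep :: Tensor '[4, 3, 32, 32] -> IO (Tensor '[4, 3072])
  flattenStep t = fst <$> flattenBatchIO t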

_pReLU_updateOutput :: Tensor d -> Tensor d -> Tensor d -> IO () Source #

pReLU updateOutput

_pReLU_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> IO () Source #

pReLU updateGradInput

_pReLU_accGradParameters :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> Tensor d -> Double -> IO () Source #

pReLU accGradParameters

_rReLU_updateOutput :: Tensor d -> Tensor d -> Tensor d -> Double -> Double -> Bool -> Bool -> Generator -> IO () Source #

rReLU updateOutput

_rReLU_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> Double -> Double -> Bool -> Bool -> IO () Source #

rReLU updateGradInput

_eLU_updateOutput :: Tensor d -> Tensor d -> Double -> Double -> Bool -> IO () Source #

eLU updateOutput

_eLU_updateGradInput :: Tensor d -> Tensor d' -> Tensor d'' -> Double -> Double -> IO () Source #

eLU updateGradInput

_leakyReLU_updateOutput :: Tensor d -> Tensor d -> Double -> Bool -> IO () Source #

leakyReLU updateOutput

_leakyReLU_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Double -> Bool -> IO () Source #

leakyReLU updateGradInput

relu :: Reifies s W => Dimensions d => BVar s (Tensor d) -> BVar s (Tensor d) Source #

ReLU activation function

reluIO :: Dimensions d => Tensor d -> IO (Tensor d, Tensor d -> IO (Tensor d)) Source #

ReLU activation function
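A minimal sketch of the IO variant, assuming the input tensor and an upstream gradient are already available:

  -- run relu forward, then feed a gradient through the returned backward arrow
  reluStep
    :: Tensor '[3, 4]                       -- input
    -> Tensor '[3, 4]                       -- gradient w.r.t. the output
    -> IO (Tensor '[3, 4], Tensor '[3, 4])  -- (output, gradient w.r.t. the input)
  reluStep input gradOut = do
    (output, backward) <- reluIO input
    gradInput <- backward gradOut
    pure (output, gradInput)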

threshold Source #

Arguments

:: Reifies s W 
=> Dimensions d 
=> Double

threshold

-> Double

replacement value

-> BVar s (Tensor d)

input

-> BVar s (Tensor d)

output

run a threshold function against a BVar tensor

thresholdIO Source #

Arguments

:: Dimensions d 
=> Double

threshold

-> Double

replacement value

-> Tensor d

input

-> IO (Tensor d, Tensor d -> IO (Tensor d))

output

run a threshold function in IO
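A minimal sketch, assuming a concrete 5-element tensor; values that do not exceed the threshold of 0.5 are replaced with 0:

  -- forward pass only; the backward arrow returned by thresholdIO is discarded
  thresholdStep :: Tensor '[5] -> IO (Tensor '[5])
  thresholdStep input = fst <$> thresholdIO 0.5 0.0 input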

_absCriterion_updateOutput Source #

Arguments

:: Tensor d

input

-> Tensor d'

target

-> Tensor d''

output

-> Bool

size average

-> Bool

reduce

-> IO () 

absCriterion forward pass (updates the output tensor)

_absCriterion_updateGradInput Source #

Arguments

:: Tensor d

input

-> Tensor d'

target

-> Tensor d''

gradOutput

-> Tensor d''

gradInput

-> Bool

size average

-> Bool

reduce

-> IO () 

absCriterion backward-update (updates the layer and bias tensors)

bCECriterion' Source #

Arguments

:: (Reifies s W, KnownNat n, KnownDim n) 
=> Bool

sizeAverage (TODO: swap this out with Reduction)

-> Bool

reduce (TODO: swap this out with Reduction)

-> Maybe (Tensor '[n])

weights

-> Tensor '[n]

target

-> BVar s (Tensor '[n])

input

-> BVar s (Tensor '[1])

output

Binary cross-entropy for Sigmoid (two-class version of ClassNLLCriterion)

Creates a criterion that measures the binary cross entropy between the target and the output: loss(o, t) = - 1/n sum_i (t[i] * log(o[i]) + (1 - t[i]) * log(1 - o[i])), or, when the weights argument is specified: loss(o, t) = - 1/n sum_i weights[i] * (t[i] * log(o[i]) + (1 - t[i]) * log(1 - o[i])). This is used for measuring the error of a reconstruction, for example in an auto-encoder. Note that the outputs o[i] should be numbers between 0 and 1 (for instance, the output of an nn.Sigmoid layer) and should be interpreted as the probability of predicting t[i] = 1. Note that t[i] can be either 0 or 1.

By default, the losses are averaged for each minibatch over observations as well as over dimensions. However, if the field sizeAverage is set to false, the losses are instead summed.
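The two forms of the loss above, written out in LaTeX for readability:

  \mathrm{loss}(o, t) = -\frac{1}{n} \sum_i \left( t_i \log o_i + (1 - t_i) \log (1 - o_i) \right)

  \mathrm{loss}(o, t) = -\frac{1}{n} \sum_i w_i \left( t_i \log o_i + (1 - t_i) \log (1 - o_i) \right)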

bCECriterion Source #

Arguments

:: (Reifies s W, KnownNat n, KnownDim n) 
=> Tensor '[n]

target

-> BVar s (Tensor '[n])

input

-> BVar s (Tensor '[1])

output

_marginCriterion_updateOutput :: Tensor d -> Tensor d -> Tensor d -> Bool -> Double -> IO () Source #

marginCriterion forward pass (updates the output tensor)

_marginCriterion_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Bool -> Double -> IO () Source #

marginCriterion backward-update (updates the layer and bias tensors)

_softMarginCriterion_updateOutput :: Tensor d -> Tensor d -> Tensor d -> Bool -> Bool -> IO () Source #

softMarginCriterion forward pass (updates the output tensor)

_softMarginCriterion_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> Bool -> Bool -> IO () Source #

softMarginCriterion backward-update (updates the layer and bias tensors)

mSECriterionWith Source #

Arguments

:: Reifies s W 
=> All Dimensions '[d', d, out] 
=> KnownDim bs 
=> d ~ (bs :+ d') 
=> out ~ If reduce '[1] d 
=> SBool size_average

size_average: By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True

-> SBool reduce

reduce: By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True

-> Tensor d

target

-> BVar s (Tensor d)

input

-> BVar s (Tensor out)

loss value and arrow from output gradient to input gradient

MSECriterion

Creates a criterion that measures the mean squared error between n elements in the input x and output y:

  loss(x, y) = 1/n sum |x_i - y_i|^2 .

If x and y are d-dimensional Tensors with a total of n elements, the sum operation still operates over all the elements, and divides by n. The two Tensors must have the same number of elements (but their sizes might be different).

The division by n can be avoided if one sets the internal variable sizeAverage to false:

  criterion = nn.MSECriterion()
  criterion.sizeAverage = false

By default, the losses are averaged over observations for each minibatch. However, if the field sizeAverage is set to false, the losses are instead summed.

mSECriterion Source #

Arguments

:: All Dimensions '[d', d] 
=> Reifies s W 
=> KnownDim bs 
=> d ~ (bs :+ d') 
=> Tensor d

target

-> BVar s (Tensor d)

input

-> BVar s (Tensor '[1])

loss value and arrow from output gradient to input gradient

mSECriterionWithIO Source #

Arguments

:: All Dimensions '[d', d, out] 
=> KnownDim bs 
=> d ~ (bs :+ d') 
=> out ~ If reduce '[1] d 
=> SBool size_average

size_average: By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True

-> SBool reduce

reduce: By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True

-> Tensor d

target

-> Tensor d

input

-> IO (Tensor out, Tensor out -> IO (Tensor d))

loss value and arrow from output gradient to input gradient

mSECriterionIO Source #

Arguments

:: All Dimensions '[d', d] 
=> KnownDim bs 
=> d ~ (bs :+ d') 
=> Tensor d

target

-> Tensor d

input

-> IO (Tensor '[1], Tensor '[1] -> IO (Tensor d))

loss value and arrow from output gradient to input gradient
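A minimal sketch of mSECriterionIO, assuming a batch of 4 examples with 10 values each and an upstream gradient for the scalar loss (typically a tensor of ones):

  mseStep
    :: Tensor '[4, 10]                      -- target
    -> Tensor '[4, 10]                      -- input (predictions)
    -> Tensor '[1]                          -- gradient flowing into the loss
    -> IO (Tensor '[1], Tensor '[4, 10])    -- (loss, gradient w.r.t. the input)
  mseStep target input gradOut = do
    (loss, backward) <- mSECriterionIO target input
    gradInput <- backward gradOut
    pure (loss, gradInput)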

_distKLDivCriterion_updateOutput Source #

Arguments

:: Tensor d

output tensor to update

-> Tensor d

input tensor

-> Tensor d

comparative tensor

-> Bool

size_average

-> Bool

reduce

-> IO () 

The Kullback-Leibler divergence Loss

KL divergence is a useful distance measure for continuous distributions and is often useful when performing direct regression over the space of (discretely sampled) continuous output distributions.

As with NLLLoss, the input given is expected to contain log-probabilities, however unlike ClassNLLLoss, input is not restricted to a 2D Tensor, because the criterion is applied element-wise.

This criterion expects a target Tensor of the same size as the input Tensor.

The loss can be described as:

  \ell(x, y) = L = \{l_1, \dots, l_N\}^\top, \quad l_n = y_n \odot \left( \log y_n - x_n \right),

where N is the batch size. If reduce is True, then:

  \ell(x, y) = \begin{cases} \operatorname{mean}(L), & \text{if size\_average} = \text{True}, \\ \operatorname{sum}(L), & \text{if size\_average} = \text{False}. \end{cases}

distKLDivCriterion forward pass (updates the output tensor)

_distKLDivCriterion_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> Bool -> Bool -> IO () Source #

distKLDivCriterion backward-update (updates the layer and bias tensors)

_smoothL1Criterion_updateOutput :: Tensor d -> Tensor d -> Tensor d -> Bool -> Bool -> IO () Source #

smoothL1Criterion forward pass (updates the output tensor)

_smoothL1Criterion_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> Bool -> Bool -> IO () Source #

smoothL1Criterion backward-update (updates the layer and bias tensors)

_l1Cost_updateOutput :: Tensor d -> Tensor d -> IO () Source #

l1Cost forward pass (updates the output tensor)

_l1Cost_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> IO () Source #

l1Cost backward-update (updates the layer and bias tensors)

classNLLCriterion' :: forall s i sz ps. (Reifies s W, All KnownDim '[sz, ps]) => Integer -> Bool -> Bool -> IndexTensor '[sz] -> BVar s (Tensor '[sz, ps]) -> BVar s (Tensor '[1]) Source #

ClassNLLCriterion

The negative log likelihood (NLL) criterion. It is useful to train a classification problem with n classes. If provided, the optional argument weights should be a 1D Tensor assigning weight to each of the classes. This is particularly useful when you have an unbalanced training set.

The input given through a forward() is expected to contain log-probabilities of each class: input has to be a 1D Tensor of size n. Obtaining log-probabilities in a neural network is easily achieved by adding a LogSoftMax layer in the last layer of your neural network. You may use CrossEntropyCriterion instead, if you prefer not to add an extra layer to your network. This criterion expects a class index (1 to the number of class) as target when calling forward(input, target) and backward(input, target).

The loss can be described as:

loss(x, class) = -x[class]

or in the case of the weights argument, it is specified as follows:

loss(x, class) = -weights[class] * x[class]

or in the case of the ignoreIndex argument:

loss(x, class) = class != ignoreIndex ? -weights[class] * x[class] : 0

Indeed, the ignoreIndex (defaults to -100) specifies a value for targets to be ignored. The commensurate gradInput for that target will be zero. When sizeAverage=true (the default), the gradInput and output are averaged over non-ignored targets.

Due to the behaviour of the backend code, it is necessary to set sizeAverage to false when calculating losses in non-batch mode.

The following is a code fragment showing how to make a gradient step given an input x, a desired output y (an integer 1 to n, in this case n = 2 classes), a network mlp and a learning rate learningRate:

  function gradUpdate(mlp, x, y, learningRate)
    local criterion = nn.ClassNLLCriterion()
    local pred = mlp:forward(x)
    local err = criterion:forward(pred, y)
    mlp:zeroGradParameters()
    local t = criterion:backward(pred, y)
    mlp:backward(x, t)
    mlp:updateParameters(learningRate)
  end

By default, the losses are averaged over observations for each minibatch. However, if the argument sizeAverage is set to false, the losses are instead summed for each minibatch. FIXME: add batch dimension

classNLLCriterion :: (Reifies s W, All KnownDim '[n, c]) => IndexTensor '[n] -> BVar s (Tensor '[n, c]) -> BVar s (Tensor '[1]) Source #

Due to the behaviour of the backend code, it is necessary to set sizeAverage to False in non-batch mode.

classNLLIO :: forall sz ps. (KnownDim sz, KnownDim ps) => IndexTensor '[sz] -> Tensor '[sz, ps] -> IO (Tensor '[1], Tensor '[1] -> IO (Tensor '[sz, ps])) Source #
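A minimal sketch of classNLLIO, assuming 8 examples, 10 classes, and that the targets and log-probabilities are built elsewhere:

  nllStep
    :: IndexTensor '[8]                     -- class index per example
    -> Tensor '[8, 10]                      -- log-probabilities over the classes
    -> Tensor '[1]                          -- gradient flowing into the loss
    -> IO (Tensor '[1], Tensor '[8, 10])    -- (loss, gradient w.r.t. the log-probabilities)
  nllStep targets logProbs gradOut = do
    (loss, backward) <- classNLLIO targets logProbs
    gradInput <- backward gradOut
    pure (loss, gradInput)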

classNLLWithIO :: forall sz ps. (KnownDim sz, KnownDim ps) => Maybe (Tensor '[1]) -> Maybe (Tensor '[1]) -> Maybe (Tensor '[sz, ps]) -> IndexTensor '[sz] -> Tensor '[sz, ps] -> IO (Tensor '[1], Tensor '[1] -> IO (Tensor '[sz, ps])) Source #

_temporalUpSamplingNearest_updateOutput :: Tensor d -> Tensor d -> Int -> IO () Source #

temporalUpSamplingNearest forward pass (updates the output tensor)

_temporalUpSamplingNearest_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Int -> IO () Source #

temporalUpSamplingNearest backward-update (updates the layer and bias tensors)

_temporalUpSamplingLinear_updateOutput :: Tensor d -> Tensor d -> Int -> IO () Source #

temporalUpSamplingLinear forward pass (updates the output tensor)

_temporalUpSamplingLinear_updateGradInput :: Tensor d -> Tensor d -> Int -> Int -> Int -> Int -> IO () Source #

temporalUpSamplingLinear backward-update (updates the layer and bias tensors)

_spatialSubSampling_updateOutput :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> Int -> Int -> Int -> Int -> IO () Source #

spatialSubSampling forward pass (updates the output tensor)

_spatialSubSampling_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> Int -> Int -> Int -> Int -> IO () Source #

spatialSubSampling backward-update (updates the layer and bias tensors)

_spatialSubSampling_accGradParameters :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> Int -> Int -> Int -> Int -> Double -> IO () Source #

spatialSubSampling backward-update (updates the layer and bias tensors). Called accGradParameters in C to indicate accumulating the gradient parameters.

_spatialUpSamplingNearest_updateOutput :: Tensor d -> Tensor d -> Int -> IO () Source #

spatialUpSamplingNearest forward pass (updates the output tensor)

_spatialUpSamplingNearest_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Int -> IO () Source #

spatialUpSamplingNearest backward-update (updates the layer and bias tensors)

_spatialUpSamplingBilinear_updateOutput :: Tensor d -> Tensor d -> Int -> Int -> IO () Source #

spatialUpSamplingBilinear forward pass (updates the output tensor)

_spatialUpSamplingBilinear_updateGradInput :: Tensor d -> Tensor d -> Int -> Int -> Int -> Int -> Int -> Int -> IO () Source #

spatialUpSamplingBilinear backward-update (updates the layer and bias tensors)

_spatialGridSamplerBilinear_updateOutput :: Tensor d -> Tensor d -> Tensor d -> Int -> IO () Source #

spatialGridSamplerBilinear forward pass (updates the output tensor)

_spatialGridSamplerBilinear_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> Tensor d -> Int -> IO () Source #

spatialGridSamplerBilinear backward-update (updates the layer and bias tensors)

_volumetricGridSamplerBilinear_updateOutput :: Tensor d -> Tensor d -> Tensor d -> Int -> IO () Source #

volumetricGridSamplerBilinear forward pass (updates the output tensor)

_volumetricGridSamplerBilinear_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> Tensor d -> Int -> IO () Source #

volumetricGridSamplerBilinear backward-update (updates the layer and bias tensors)

_volumetricUpSamplingNearest_updateOutput :: Tensor d -> Tensor d -> Int -> IO () Source #

volumetricUpSamplingNearest forward pass (updates the output tensor)

_volumetricUpSamplingNearest_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Int -> IO () Source #

volumetricUpSamplingNearest backward-update (updates the layer and bias tensors)

_volumetricUpSamplingTrilinear_updateOutput :: Tensor d -> Tensor d -> Int -> Int -> Int -> IO () Source #

volumetricUpSamplingTrilinear forward pass (updates the output tensor)

_volumetricUpSamplingTrilinear_updateGradInput :: Tensor d -> Tensor d -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> IO () Source #

volumetricUpSamplingTrilinear backward-update (updates the layer and bias tensors)

_spatialReflectionPadding_updateOutput :: Tensor d -> Tensor d -> Int -> Int -> Int -> Int -> IO () Source #

spatialReflectionPadding forward pass (updates the output tensor)

_spatialReflectionPadding_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Int -> Int -> Int -> Int -> IO () Source #

spatialReflectionPadding backward-update (updates the layer and bias tensors)

_spatialReplicationPadding_updateOutput :: Tensor d -> Tensor d -> Int -> Int -> Int -> Int -> IO () Source #

spatialReplicationPadding forward pass (updates the output tensor)

_spatialReplicationPadding_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Int -> Int -> Int -> Int -> IO () Source #

spatialReplicationPadding backward-update (updates the layer and bias tensors)

_volumetricReplicationPadding_updateOutput :: Tensor d -> Tensor d -> Int -> Int -> Int -> Int -> Int -> Int -> IO () Source #

volumetricReplicationPadding forward pass (updates the output tensor)

_volumetricReplicationPadding_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Int -> Int -> Int -> Int -> Int -> Int -> IO () Source #

volumetricReplicationPadding backward-update (updates the layer and bias tensors)

_temporalReflectionPadding_updateOutput :: Tensor d -> Tensor d -> Int -> Int -> IO () Source #

temporalReflectionPadding forward pass (updates the output tensor)

_temporalReflectionPadding_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Int -> Int -> IO () Source #

temporalReflectionPadding backward-update (updates the layer and bias tensors)

_temporalReplicationPadding_updateOutput :: Tensor d -> Tensor d -> Int -> Int -> IO () Source #

temporalReplicationPadding forward pass (updates the output tensor)

_temporalReplicationPadding_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Int -> Int -> IO () Source #

temporalReplicationPadding backward-update (updates the layer and bias tensors)

abs_updateOutput :: Tensor d -> IO (Tensor d) Source #

abs forward pass (updates the output tensor)

abs_updateGradInput Source #

Arguments

:: Product d ~ Product d' 
=> Tensor d

input

-> Tensor d'

gradOutput

-> IO (Tensor d)

gradInput

abs backward-update (updates the layer and bias tensors)

_sqrt_updateOutput :: Tensor d -> Tensor d -> Double -> IO () Source #

sqrt forward pass (updates the output tensor)

_sqrt_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> IO () Source #

sqrt backward-update (updates the layer and bias tensors)

_square_updateOutput :: Tensor d -> Tensor d -> IO () Source #

square forward pass (updates the output tensor)

_square_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> IO () Source #

square backward-update (updates the layer and bias tensors)

_logSigmoid_updateOutput :: Tensor d -> Tensor d -> Tensor d -> IO () Source #

logSigmoid forward pass (updates the output tensor)

_logSigmoid_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> IO () Source #

logSigmoid backward-update (updates the layer and bias tensors)

_sigmoid_updateOutput :: Tensor d -> Tensor d -> IO () Source #

sigmoid forward pass (updates the output tensor)

_sigmoid_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> IO () Source #

sigmoid backward-update (updates the layer and bias tensors)

softmax Source #

Arguments

:: KnownDim n 
=> Reifies s W 
=> BVar s (Tensor '[n])

input

-> BVar s (Tensor '[n])

output

one dimensional version of softmaxN
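Since this is a BVar-based function, it is intended to be run through the backprop library. A minimal sketch, assuming a 10-element input and the evalBP runner from Numeric.Backprop:

  import Numeric.Backprop (evalBP)

  -- forward-only evaluation of the backprop-aware softmax
  softmaxForward :: Tensor '[10] -> Tensor '[10]
  softmaxForward = evalBP softmax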

softmaxBatch Source #

Arguments

:: KnownDim b 
=> KnownDim n 
=> Reifies s W 
=> BVar s (Tensor '[b, n])

input

-> BVar s (Tensor '[b, n])

output

softmaxN along the mini-batch dimension.

softmaxN Source #

Arguments

:: Reifies s W 
=> (i < Length d) ~ True 
=> Dimensions d 
=> Dim i

dimension to softmax over

-> BVar s (Tensor d)

input

-> BVar s (Tensor d)

output

softmax over the specified dimension of the input tensor

logSoftMax Source #

Arguments

:: KnownDim n 
=> Reifies s W 
=> BVar s (Tensor '[n])

input

-> BVar s (Tensor '[n])

output

one dimensional version of logSoftMaxN

logSoftMaxBatch Source #

Arguments

:: KnownDim n 
=> KnownDim b 
=> Reifies s W 
=> BVar s (Tensor '[b, n])

input

-> BVar s (Tensor '[b, n])

output

logSoftMaxN along the mini-batch dimension.

logSoftMaxN Source #

Arguments

:: Reifies s W 
=> (i < Length d) ~ True 
=> Dimensions d 
=> Dim i

dimension to logSoftMax over

-> BVar s (Tensor d)

input

-> BVar s (Tensor d)

output

logSoftMax over the specified dimension of the input tensor

_softPlus_updateOutput :: Tensor d -> Tensor d -> Double -> Double -> IO () Source #

softPlus forward pass (updates the output tensor)

_softPlus_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> Double -> Double -> IO () Source #

softPlus backward-update (updates the layer and bias tensors)

_softShrink_updateOutput :: Tensor d -> Tensor d -> Double -> IO () Source #

softShrink forward pass (updates the output tensor)

_softShrink_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Double -> IO () Source #

softShrink backward-update (updates the layer and bias tensors)

_tanh_updateOutput :: Tensor d -> Tensor d -> IO () Source #

tanh forward pass (updates the output tensor)

_tanh_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> IO () Source #

tanh backward-update (updates the layer and bias tensors)

_hardTanh_updateOutput :: Tensor d -> Tensor d -> Double -> Double -> Bool -> IO () Source #

hardTanh forward pass (updates the output tensor)

_hardTanh_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Double -> Double -> Bool -> IO () Source #

hardTanh backward-update (updates the layer and bias tensors)

flattenBP :: (Reifies s W, KnownDim (Product d), Dimensions (d :: [Nat])) => BVar s (Tensor d) -> BVar s (Tensor '[Product d]) Source #

A backpropable flatten operation

flattenBPBatch :: (Reifies s W, All KnownDim '[Product d, bs], Dimensions d) => Product (bs :+ d) ~ Product '[bs, Product d] => BVar s (Tensor (bs :+ d)) -> BVar s (Tensor '[bs, Product d]) Source #

A backpropable flatten operation with a batch dimension

_sparseLinear_updateOutput :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> IO () Source #

sparseLinear forward pass (updates the output tensor)

_sparseLinear_accGradParameters :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> Tensor d -> Tensor d -> Double -> Double -> IO () Source #

sparseLinear backward-update (updates the layer and bias tensors). Called accGradParameters in C to indicate accumulating the gradient parameters.

_sparseLinear_zeroGradParameters :: Tensor d -> Tensor d -> Tensor d -> IO () Source #

sparseLinear zeroGradParameters

_sparseLinear_updateParameters :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> Tensor d -> Double -> IO () Source #

sparseLinear updateParameters

_gatedLinear_updateOutput :: Tensor d -> Tensor d -> Int -> IO () Source #

gatedLinear forward pass (updates the output tensor)

_gatedLinear_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Int -> IO () Source #

gatedLinear backward-update (updates the layer and bias tensors)

_gRUFused_updateOutput :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> Tensor d -> Tensor d -> Tensor d -> IO () Source #

gRUFused forward pass (updates the output tensor)

_gRUFused_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> Tensor d -> IO () Source #

gRUFused backward-update (updates the layer and bias tensors)

_lSTMFused_updateOutput :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> Tensor d -> Tensor d -> Tensor d -> IO () Source #

lSTMFused forward pass (updates the output tensor)

_lSTMFused_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> Tensor d -> Tensor d -> Tensor d -> IO () Source #

lSTMFused backward-update (updates the layer and bias tensors)

type SideCheck h k d p o = (All KnownDim '[h, k, d, p, o], (k > 0) ~ True, (d > 0) ~ True, ((h + (2 * p)) < k) ~ False, (o > 0) ~ True, o ~ (Div ((h + (2 * p)) - k) d + 1)) Source #

Constraint to check valid dimensions on one side.

type SpatialConvolutionC f h w kH kW dH dW pH pW oH oW = (All KnownDim '[(f * kH) * kW, oH * oW, f], SideCheck h kH dH pH oH, SideCheck w kW dW pW oW) Source #

Constraint to check both sides (height and width) of a function and assert that all necessary dimension values are KnownDims.

data Dilation2d (hw :: (Nat, Nat)) Source #

Representation of how much to dilate in the height and width dimensions

Constructors

Dilation2d 
Instances
Param2d Dilation2d Source # 
Instance details

Defined in Torch.Indef.Static.NN.Conv2d

Methods

paramW :: (KnownDim w, Integral i) => Dilation2d (h, w) -> i Source #

paramH :: (KnownDim h, Integral i) => Dilation2d (h, w) -> i Source #

param2d :: (KnownDim h, KnownDim w, Integral i) => Dilation2d (h, w) -> (i, i) Source #

data Kernel2d (hw :: (Nat, Nat)) Source #

Representation of how big a kernel will be in the height and width dimensions

Constructors

Kernel2d 
Instances
Param2d Kernel2d Source # 
Instance details

Defined in Torch.Indef.Static.NN.Conv2d

Methods

paramW :: (KnownDim w, Integral i) => Kernel2d (h, w) -> i Source #

paramH :: (KnownDim h, Integral i) => Kernel2d (h, w) -> i Source #

param2d :: (KnownDim h, KnownDim w, Integral i) => Kernel2d (h, w) -> (i, i) Source #

data Padding2d (hw :: (Nat, Nat)) Source #

Representation of how much to pad in the height and width dimensions

Constructors

Padding2d 
Instances
Param2d Padding2d Source # 
Instance details

Defined in Torch.Indef.Static.NN.Conv2d

Methods

paramW :: (KnownDim w, Integral i) => Padding2d (h, w) -> i Source #

paramH :: (KnownDim h, Integral i) => Padding2d (h, w) -> i Source #

param2d :: (KnownDim h, KnownDim w, Integral i) => Padding2d (h, w) -> (i, i) Source #

data Step2d (hw :: (Nat, Nat)) Source #

Representation of how much to step in the height and width dimensions

Constructors

Step2d 
Instances
Param2d Step2d Source # 
Instance details

Defined in Torch.Indef.Static.NN.Conv2d

Methods

paramW :: (KnownDim w, Integral i) => Step2d (h, w) -> i Source #

paramH :: (KnownDim h, Integral i) => Step2d (h, w) -> i Source #

param2d :: (KnownDim h, KnownDim w, Integral i) => Step2d (h, w) -> (i, i) Source #

class Param2d (p :: (Nat, Nat) -> Type) where Source #

Typeclass to generically pull out Width and Height information from a parameter

FIXME: this can be replaced with simple functions.

Minimal complete definition

Nothing

Methods

paramW :: forall w h i. (KnownDim w, Integral i) => p '(h, w) -> i Source #

get the width parameter

paramH :: forall w h i. (KnownDim h, Integral i) => p '(h, w) -> i Source #

get the height parameter

param2d :: (KnownDim h, KnownDim w, Integral i) => p '(h, w) -> (i, i) Source #

get both parameters as a (width, height) tuple. FIXME: Isn't this supposed to be "height" then "width"???

Instances
Param2d Dilation2d Source # 
Instance details

Defined in Torch.Indef.Static.NN.Conv2d

Methods

paramW :: (KnownDim w, Integral i) => Dilation2d (h, w) -> i Source #

paramH :: (KnownDim h, Integral i) => Dilation2d (h, w) -> i Source #

param2d :: (KnownDim h, KnownDim w, Integral i) => Dilation2d (h, w) -> (i, i) Source #

Param2d Kernel2d Source # 
Instance details

Defined in Torch.Indef.Static.NN.Conv2d

Methods

paramW :: (KnownDim w, Integral i) => Kernel2d (h, w) -> i Source #

paramH :: (KnownDim h, Integral i) => Kernel2d (h, w) -> i Source #

param2d :: (KnownDim h, KnownDim w, Integral i) => Kernel2d (h, w) -> (i, i) Source #

Param2d Padding2d Source # 
Instance details

Defined in Torch.Indef.Static.NN.Conv2d

Methods

paramW :: (KnownDim w, Integral i) => Padding2d (h, w) -> i Source #

paramH :: (KnownDim h, Integral i) => Padding2d (h, w) -> i Source #

param2d :: (KnownDim h, KnownDim w, Integral i) => Padding2d (h, w) -> (i, i) Source #

Param2d Step2d Source # 
Instance details

Defined in Torch.Indef.Static.NN.Conv2d

Methods

paramW :: (KnownDim w, Integral i) => Step2d (h, w) -> i Source #

paramH :: (KnownDim h, Integral i) => Step2d (h, w) -> i Source #

param2d :: (KnownDim h, KnownDim w, Integral i) => Step2d (h, w) -> (i, i) Source #

Param2d (Conv2d f o) Source # 
Instance details

Defined in Torch.Indef.Static.NN.Conv2d

Methods

paramW :: (KnownDim w, Integral i) => Conv2d f o (h, w) -> i Source #

paramH :: (KnownDim h, Integral i) => Conv2d f o (h, w) -> i Source #

param2d :: (KnownDim h, KnownDim w, Integral i) => Conv2d f o (h, w) -> (i, i) Source #
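A minimal sketch of reading a hyperparameter back out through the Param2d class above, assuming Int as the Integral result type:

  -- recover the stride from a Step2d value; both components are 2 here,
  -- which sidesteps the (width, height) vs (height, width) question noted above
  strideOf :: (Int, Int)
  strideOf = param2d (Step2d :: Step2d '(2, 2))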

newtype Conv2d i o kers Source #

ADT representation of a convolutional 2d layer.

FIXME: the type is a bit of a hiccup: can we remove the kernel dimensions or move pad/stride into the phantoms?

possibly something like Conv2d i o (kH, kW) (dH, dW) (pH, pW) or Conv2d i o (kH, kW) (Maybe (dH, dW)) (Maybe (pH, pW))

Constructors

Conv2d (Tensor '[o, i, Fst kers, Snd kers], Tensor '[o]) 
Instances
Param2d (Conv2d f o) Source # 
Instance details

Defined in Torch.Indef.Static.NN.Conv2d

Methods

paramW :: (KnownDim w, Integral i) => Conv2d f o (h, w) -> i Source #

paramH :: (KnownDim h, Integral i) => Conv2d f o (h, w) -> i Source #

param2d :: (KnownDim h, KnownDim w, Integral i) => Conv2d f o (h, w) -> (i, i) Source #

All KnownDim '[i, o, Fst kers, Snd kers] => Num (Conv2d i o kers) Source # 
Instance details

Defined in Torch.Indef.Static.NN.Conv2d

Methods

(+) :: Conv2d i o kers -> Conv2d i o kers -> Conv2d i o kers #

(-) :: Conv2d i o kers -> Conv2d i o kers -> Conv2d i o kers #

(*) :: Conv2d i o kers -> Conv2d i o kers -> Conv2d i o kers #

negate :: Conv2d i o kers -> Conv2d i o kers #

abs :: Conv2d i o kers -> Conv2d i o kers #

signum :: Conv2d i o kers -> Conv2d i o kers #

fromInteger :: Integer -> Conv2d i o kers #

(KnownDim i, KnownDim o, KnownDim kH, KnownDim kW) => Show (Conv2d i o ((,) kH kW)) Source # 
Instance details

Defined in Torch.Indef.Static.NN.Conv2d

Methods

showsPrec :: Int -> Conv2d i o (kH, kW) -> ShowS #

show :: Conv2d i o (kH, kW) -> String #

showList :: [Conv2d i o (kH, kW)] -> ShowS #

(KnownDim i, KnownDim o, KnownDim kH, KnownDim kW) => Backprop (Conv2d i o ((,) kH kW)) Source # 
Instance details

Defined in Torch.Indef.Static.NN.Conv2d

Methods

zero :: Conv2d i o (kH, kW) -> Conv2d i o (kH, kW)

add :: Conv2d i o (kH, kW) -> Conv2d i o (kH, kW) -> Conv2d i o (kH, kW)

one :: Conv2d i o (kH, kW) -> Conv2d i o (kH, kW)

All KnownDim '[i, o, Fst kers, Snd kers] => Pairwise (Conv2d i o kers) HsReal Source # 
Instance details

Defined in Torch.Indef.Static.NN.Conv2d

Methods

(^+) :: Conv2d i o kers -> HsReal -> Conv2d i o kers Source #

(^-) :: Conv2d i o kers -> HsReal -> Conv2d i o kers Source #

(^*) :: Conv2d i o kers -> HsReal -> Conv2d i o kers Source #

(^/) :: Conv2d i o kers -> HsReal -> Conv2d i o kers Source #

update Source #

Arguments

:: (KnownDim i, KnownDim o, KnownDim kH, KnownDim kW) 
=> Conv2d i o '(kH, kW)

network to update

-> HsReal

learning rate

-> Conv2d i o '(kH, kW)

gradient

-> Conv2d i o '(kH, kW)

updated network

update a Conv2d layer
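A minimal sketch of a plain SGD-style step, assuming a floating-point backend (so the HsReal learning rate can be written as a literal) and a gradient produced by one of the conv2d backward arrows:

  sgdStep
    :: Conv2d 3 16 '(5, 5)   -- current layer
    -> Conv2d 3 16 '(5, 5)   -- gradient
    -> Conv2d 3 16 '(5, 5)   -- updated layer
  sgdStep layer grad = update layer 0.01 grad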

update_ Source #

Arguments

:: (KnownDim i, KnownDim o, KnownDim kH, KnownDim kW) 
=> Conv2d i o '(kH, kW)

network to update

-> HsReal

learning rate

-> Conv2d i o '(kH, kW)

gradient

-> IO ()

update network

update a Conv2d layer inplace

kernel2d :: (Integral i, KnownDim kH, KnownDim kW) => Conv2d f o '(kH, kW) -> (i, i) Source #

get the kernel tuple as (width, height) from a Conv2d ADT

FIXME: Isn't this supposed to be "height" then "width"???

conv2dBatchIO Source #

Arguments

:: SpatialConvolutionC f h w kH kW dH dW pH pW oH oW 
=> All KnownDim '[f, o, b, (kW * kH) * f, oH * oW] 
=> Step2d '(dH, dW)

step of the convolution in width and height dimensions.

-> Padding2d '(pH, pW)

zero padding to the input plane for width and height.

-> Double

learning rate

-> Conv2d f o '(kH, kW)

conv2d state

-> Tensor '[b, f, h, w]

input: f stands for "features" or "input plane"

-> IO (Tensor '[b, o, oH, oW], Tensor '[b, o, oH, oW] -> IO (Conv2d f o '(kH, kW), Tensor '[b, f, h, w])) 

Backprop convolution function with batching
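A minimal sketch with concrete (assumed) sizes: a batch of 4 RGB 32x32 images, 16 output planes, a 5x5 kernel, stride 1, and no padding, which the constraints above resolve to 28x28 output maps:

  convStep
    :: Conv2d 3 16 '(5, 5)                  -- layer (weights and bias)
    -> Tensor '[4, 3, 32, 32]               -- input batch
    -> IO (Tensor '[4, 16, 28, 28])
  convStep layer input =
    fst <$> conv2dBatchIO
      (Step2d    :: Step2d    '(1, 1))      -- stride
      (Padding2d :: Padding2d '(0, 0))      -- padding
      0.01                                  -- learning rate
      layer
      input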

conv2dBatch Source #

Arguments

:: Reifies s W 
=> SpatialConvolutionC f h w kH kW dH dW pH pW oH oW 
=> All KnownDim '[f, o, kH, kW, dH, dW, pH, pW, b] 
=> All KnownDim '[(kW * kH) * f, oH * oW] 
=> Step2d '(dH, dW)

step of the convolution in width and height dimensions. C-default is 1 for both.

-> Padding2d '(pH, pW)

zero padding to the input plane for width and height. (kW-1)/2 is often used. C-default is 0 for both.

-> Double

learning rate

-> BVar s (Conv2d f o '(kH, kW))

conv2d state

-> BVar s (Tensor '[b, f, h, w])

input: f stands for "features" or "input plane"

-> BVar s (Tensor '[b, o, oH, oW]) 

Backprop convolution function with batching

conv2dIO Source #

Arguments

:: SpatialConvolutionC f h w kH kW dH dW pH pW oH oW 
=> All KnownDim '[f, o, (kW * kH) * f, oH * oW] 
=> Step2d '(dH, dW)

step of the convolution in width and height dimensions.

-> Padding2d '(pH, pW)

zero padding to the input plane for width and height.

-> Double

learning rate

-> Conv2d f o '(kH, kW)

conv2d state

-> Tensor '[f, h, w]

input: f stands for "features" or "input plane"

-> IO (Tensor '[o, oH, oW], Tensor '[o, oH, oW] -> IO (Conv2d f o '(kH, kW), Tensor '[f, h, w])) 

Backprop convolution function with batching

conv2d Source #

Arguments

:: Reifies s W 
=> SpatialConvolutionC f h w kH kW dH dW pH pW oH oW 
=> All KnownDim '[f, o, kH, kW, dH, dW, pH, pW] 
=> All KnownDim '[(kW * kH) * f, oH * oW] 
=> Step2d '(dH, dW)

step of the convolution in width and height dimensions. C-default is 1 for both.

-> Padding2d '(pH, pW)

zero padding to the input plane for width and height. (kW-1)/2 is often used. C-default is 0 for both.

-> Double

learning rate

-> BVar s (Conv2d f o '(kH, kW))

conv2d state

-> BVar s (Tensor '[f, h, w])

input: f stands for "features" or "input plane"

-> BVar s (Tensor '[o, oH, oW]) 

Backprop convolution function

genericConv2dWithIO Source #

Arguments

:: All Dimensions '[din, dout, fgin, inBuff] 
=> All KnownDim '[f, o, kH, kW, dH, dW, pH, pW] 
=> Maybe (Tensor fgin)

grad input buffer

-> Maybe (Tensor inBuff)

columns buffer

-> Maybe (Tensor inBuff)

ones buffer

-> Maybe (Tensor dout) 
-> Maybe (Tensor din) 
-> Maybe (Conv2d f o '(kH, kW)) 
-> Step2d '(dH, dW)

step of the convolution in width and height dimensions.

-> Padding2d '(pH, pW)

zero padding to the input plane for width and height.

-> Double

learning rate

-> Conv2d f o '(kH, kW)

conv2d state

-> Tensor din

input: f stands for "features" or "input plane"

-> IO (Tensor dout, Tensor dout -> IO (Conv2d f o '(kH, kW), Tensor din)) 

type AvgPool2dOutputDim i k p s ceilMode o = (If (ceilMode && (Rem ((i + (2 * p)) - k) s > 0)) ((2 + Div ((i + (2 * p)) - k) s) ~ o) ((1 + Div ((i + (2 * p)) - k) s) ~ o), (k > 0) ~ True, (s > 0) ~ True, (o > 0) ~ True, (Div k 2 >= p) ~ True) Source #

Type-level if statement to indicate what the output dimension should be if CeilMode is turned on.

type SpatialDilationC iH iW kH kW dH dW pH pW oW oH dilH dilW ceilMode = (SpatialDilationCheckC kH kW dH dW pH pW dilH dilW, CeilModeOutputDims iH kH dH pH oH dilH ceilMode, CeilModeOutputDims iW kW dW pW oW dilW ceilMode, All KnownDim '[oH, oW, iH, iW]) Source #

Top-level constraint to assert that checks CeilModeOutputDims on height and width dimensions and asserts that all dimensions checks in SpatialDilationCheckC are true.

type CeilModeOutputDims i k d p o dil ceilMode = If (ceilMode && (Rem ((i - ((dil * (k - 1)) + 1)) + (2 * p)) d > 0)) ((2 + Div ((i - ((dil * (k - 1)) + 1)) + (2 * p)) d) ~ o) ((1 + Div ((i - ((dil * (k - 1)) + 1)) + (2 * p)) d) ~ o) Source #

Type-level if statement to indicate what the output dimension should be if CeilMode is turned on.

type SpatialDilationCheckC kH kW dH dW pH pW dilH dilW = (All KnownDim '[kH, kW, pH, pW, dH, dW, dilH, dilW], (kW > 0) ~ True, (kH > 0) ~ True, (dW > 0) ~ True, (dH > 0) ~ True, (dilW > 0) ~ True, (dilH > 0) ~ True, (Div kW 2 >= pW) ~ True, (Div kH 2 >= pH) ~ True) Source #

Constraint to assert that all hyperparameters are valid and to make the requirement that all dimension values are KnownDims.

_featureLPPooling_updateOutput :: Tensor d -> Tensor d -> Double -> Int -> Int -> Bool -> IO () Source #

featureLPPooling forward pass (updates the output tensor)

_featureLPPooling_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Tensor d -> Double -> Int -> Int -> Bool -> IO () Source #

featureLPPooling backward-update (updates the layer and bias tensors)

_temporalMaxPooling_updateOutput :: Tensor d -> Tensor d -> IndexTensor d -> Int -> Int -> IO () Source #

temporalMaxPooling forward pass (updates the output tensor)

_temporalMaxPooling_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> IndexTensor d -> Int -> Int -> IO () Source #

temporalMaxPooling backward-update (updates the layer and bias tensors)

dilatedMaxPooling2d Source #

Arguments

:: SpatialDilationC iH iW kH kW dH dW pH pW oW oH dilH dilW ceilMode 
=> KnownDim inPlane 
=> Reifies s W 
=> Kernel2d '(kH, kW)

kernel size

-> Step2d '(dH, dW)

step size

-> Padding2d '(pH, pW)

padding size

-> Dilation2d '(dilH, dilW)

dilation size

-> SBool ceilMode

ceil mode

-> BVar s (Tensor '[inPlane, iW, iH]) 
-> BVar s (Tensor '[inPlane, oW, oH]) 

run a backprop-aware dilatedMaxPooling2d function

dilatedMaxPooling2dBatch Source #

Arguments

:: SpatialDilationC iH iW kH kW dH dW pH pW oW oH dilH dilW ceilMode 
=> KnownDim inPlane 
=> KnownDim b 
=> Reifies s W 
=> Kernel2d '(kH, kW)

kernel size

-> Step2d '(dH, dW)

step size

-> Padding2d '(pH, pW)

padding size

-> Dilation2d '(dilH, dilW)

dilation size

-> SBool ceilMode

ceil mode

-> BVar s (Tensor '[b, inPlane, iW, iH]) 
-> BVar s (Tensor '[b, inPlane, oW, oH]) 

run a backprop-aware dilatedMaxPooling2d function with a batch dimension.

_dilatedMaxPooling2d Source #

Arguments

:: All KnownDim '[kH, kW, pH, pW, dH, dW, dilH, dilW] 
=> All Dimensions '[d', d] 
=> Reifies s W 
=> Kernel2d '(kH, kW)

kernel size

-> Step2d '(dH, dW)

step size

-> Padding2d '(pH, pW)

padding size

-> Dilation2d '(dilH, dilW)

dilation size

-> SBool ceilMode

ceil mode

-> BVar s (Tensor d)

input

-> BVar s (Tensor d')

output

internal function of dilatedMaxPooling2d and dilatedMaxPooling2dBatch. Should not be used.

_maxPooling2d Source #

Arguments

:: All KnownDim '[kH, kW, pH, pW, dH, dW] 
=> All Dimensions '[d', d] 
=> Reifies s W 
=> Kernel2d '(kH, kW)

kernel size

-> Step2d '(dH, dW)

step size. Note: default in C is the kernel size.

-> Padding2d '(pH, pW)

padding size

-> SBool ceilMode

ceil mode

-> BVar s (Tensor d)

input

-> BVar s (Tensor d')

output

internal function of maxPooling2d and maxPooling2dBatch. Should not be used.

maxPooling2d Source #

Arguments

:: SpatialDilationC iH iW kH kW dH dW pH pW oW oH 1 1 ceilMode 
=> Reifies s W 
=> KnownDim inPlane 
=> Kernel2d '(kH, kW)

kernel size

-> Step2d '(dH, dW)

step size

-> Padding2d '(pH, pW)

padding size

-> SBool ceilMode

ceil mode

-> BVar s (Tensor '[inPlane, iH, iW]) 
-> BVar s (Tensor '[inPlane, oH, oW]) 

backprop-aware maxPooling2d function.

maxPooling2dBatch Source #

Arguments

:: SpatialDilationC iH iW kH kW dH dW pH pW oW oH 1 1 ceilMode 
=> Reifies s W 
=> KnownDim inPlane 
=> KnownDim b 
=> Kernel2d '(kH, kW)

kernel size

-> Step2d '(dH, dW)

step size

-> Padding2d '(pH, pW)

padding size

-> SBool ceilMode

ceil mode

-> BVar s (Tensor '[b, inPlane, iH, iW]) 
-> BVar s (Tensor '[b, inPlane, oH, oW]) 

backprop-aware maxPooling2d function with a batch dimension.

maxPooling2dWithIO Source #

Arguments

:: All KnownDim '[kH, kW, pH, pW, dH, dW] 
=> All Dimensions '[d', d] 
=> Maybe (IndexTensor d') 
-> Maybe (Tensor d') 
-> Maybe (Tensor d) 
-> Kernel2d '(kH, kW)

kernel size

-> Step2d '(dH, dW)

step size. Note: default in C is the kernel size.

-> Padding2d '(pH, pW)

padding size

-> SBool ceilMode

ceil mode

-> Tensor d 
-> IO (Tensor d', Tensor d' -> IO (Tensor d)) 

internal function of maxPooling2d and maxPooling2dBatch. Should not be used.

maxPooling2dIO Source #

Arguments

:: SpatialDilationC iH iW kH kW dH dW pH pW oW oH 1 1 ceilMode 
=> KnownDim inPlane 
=> Kernel2d '(kH, kW)

kernel size

-> Step2d '(dH, dW)

step size

-> Padding2d '(pH, pW)

padding size

-> SBool ceilMode

ceil mode

-> Tensor '[inPlane, iH, iW] 
-> IO (Tensor '[inPlane, oH, oW], Tensor '[inPlane, oH, oW] -> IO (Tensor '[inPlane, iH, iW])) 

backprop-aware maxPooling2d function.

maxPooling2dBatchIO Source #

Arguments

:: SpatialDilationC iH iW kH kW dH dW pH pW oW oH 1 1 ceilMode 
=> KnownDim inPlane 
=> KnownDim b 
=> Kernel2d '(kH, kW)

kernel size

-> Step2d '(dH, dW)

step size

-> Padding2d '(pH, pW)

padding size

-> SBool ceilMode

ceil mode

-> Tensor '[b, inPlane, iH, iW] 
-> IO (Tensor '[b, inPlane, oH, oW], Tensor '[b, inPlane, oH, oW] -> IO (Tensor '[b, inPlane, iH, iW])) 

backprop-aware maxPooling2d function with a batch dimension.
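A minimal sketch with assumed sizes: 2x2 max pooling with stride 2 over a batch of 4 maps of 16x28x28, halving the spatial extent. The ceil-mode singleton is taken as an argument rather than constructed, to keep the sketch independent of how SBool values are built:

  poolStep
    :: SBool 'False                         -- ceil mode off
    -> Tensor '[4, 16, 28, 28]
    -> IO (Tensor '[4, 16, 14, 14])
  poolStep ceilOff input =
    fst <$> maxPooling2dBatchIO
      (Kernel2d  :: Kernel2d  '(2, 2))
      (Step2d    :: Step2d    '(2, 2))
      (Padding2d :: Padding2d '(0, 0))
      ceilOff
      input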

_spatialAdaptiveMaxPooling_updateOutput :: Tensor d -> Tensor d -> IndexTensor d -> Int -> Int -> IO () Source #

spatialAdaptiveMaxPooling forward pass (updates the output tensor)

_spatialAdaptiveMaxPooling_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> IndexTensor d -> IO () Source #

spatialAdaptiveMaxPooling backward-update (updates the layer and bias tensors)

_spatialFractionalMaxPooling_updateOutput :: Tensor d -> Tensor d -> Int -> Int -> Int -> Int -> IndexTensor d -> Tensor d -> IO () Source #

spatialFractionalMaxPooling forward pass (updates the output tensor)

_spatialFractionalMaxPooling_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Int -> Int -> Int -> Int -> IndexTensor d -> IO () Source #

spatialFractionalMaxPooling backward-update (updates the layer and bias tensors)

_spatialMaxUnpooling_updateOutput :: Tensor d -> Tensor d -> IndexTensor d -> Int -> Int -> IO () Source #

spatialMaxUnpooling forward pass (updates the output tensor)

_spatialMaxUnpooling_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> IndexTensor d -> Int -> Int -> IO () Source #

spatialMaxUnpooling backward-update (updates the layer and bias tensors)

_spatialAdaptiveAveragePooling_updateOutput :: Tensor d -> Tensor d -> Int -> Int -> IO () Source #

spatialAdaptiveAveragePooling forward pass (updates the output tensor)

_spatialAdaptiveAveragePooling_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> IO () Source #

spatialAdaptiveAveragePooling backward-update (updates the layer and bias tensors)

gapPool2dBatchIO Source #

Arguments

:: varlist ~ '[b, c, iH, iW] 
=> All KnownNat varlist 
=> All KnownDim varlist 
=> AvgPool2dOutputDim iH iH 0 iH False 1 
=> AvgPool2dOutputDim iW iW 0 iW False 1 
=> Tensor '[b, c, iH, iW]

input tensor

-> IO (Tensor '[b, c], Tensor '[b, c] -> IO (Tensor '[b, c, iH, iW])) 

spatial global average pooling on batches in IO
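A minimal sketch with assumed sizes: global average pooling collapses each 7x7 feature map of a 4x512x7x7 batch to a single value per channel:

  gapStep :: Tensor '[4, 512, 7, 7] -> IO (Tensor '[4, 512])
  gapStep input = fst <$> gapPool2dBatchIO input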

avgPool2dWithIO Source #

Arguments

:: All KnownNat '[c, iH, iW, oH, oW, kW, kH, dW, dH, padW, padH] 
=> All KnownDim '[c, iH, iW, oH, oW, kW, kH, dW, dH, padW, padH] 
=> AvgPool2dOutputDim iH kH padH dH ceil_mode oH 
=> AvgPool2dOutputDim iW kW padW dW ceil_mode oW 
=> Kernel2d '(kH, kW)

kernel sizes

-> Step2d '(dH, dW)

step sizes

-> Padding2d '(padH, padW)

pad sizes

-> SBool ceil_mode

ceiling mode: when True, will use ceil instead of floor to compute the output shape

-> SBool count_include_pad

count_include_pad: when True, will include the zero-padding in the averaging calculation

-> Tensor '[c, iH, iW]

input tensor

-> IO (Tensor '[c, oH, oW], Tensor '[c, oH, oW] -> IO (Tensor '[c, iH, iW])) 

spatial average pooling with backprop support in IO

avgPool2dBatchIO Source #

Arguments

:: All KnownNat '[b, c, iH, iW, oH, oW, kW, kH] 
=> All KnownDim '[b, c, iH, iW, oH, oW, kW, kH] 
=> AvgPool2dOutputDim iH kH 0 kH False oH 
=> AvgPool2dOutputDim iW kW 0 kW False oW 
=> Kernel2d '(kH, kW)

kernel sizes

-> Tensor '[b, c, iH, iW]

input tensor

-> IO (Tensor '[b, c, oH, oW], Tensor '[b, c, oH, oW] -> IO (Tensor '[b, c, iH, iW])) 

spatial average pooling on batches with backprop support in IO and defaults
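A minimal sketch with assumed sizes: 2x2 average pooling (the step defaults to the kernel size) over a batch of 4 maps of 16x28x28:

  avgPoolStep :: Tensor '[4, 16, 28, 28] -> IO (Tensor '[4, 16, 14, 14])
  avgPoolStep input =
    fst <$> avgPool2dBatchIO (Kernel2d :: Kernel2d '(2, 2)) input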

avgPool2dBatchWithIO Source #

Arguments

:: All KnownNat '[b, c, iH, iW, oH, oW, kW, kH, dW, dH, padW, padH] 
=> All KnownDim '[b, c, iH, iW, oH, oW, kW, kH, dW, dH, padW, padH] 
=> AvgPool2dOutputDim iH kH padH dH ceil_mode oH 
=> AvgPool2dOutputDim iW kW padW dW ceil_mode oW 
=> Kernel2d '(kH, kW)

kernel sizes

-> Step2d '(dH, dW)

step sizes

-> Padding2d '(padH, padW)

pad sizes

-> SBool ceil_mode

ceiling mode: when True, will use ceil instead of floor to compute the output shape

-> SBool count_include_pad

count_include_pad: when True, will include the zero-padding in the averaging calculation

-> Tensor '[b, c, iH, iW]

input tensor

-> IO (Tensor '[b, c, oH, oW], Tensor '[b, c, oH, oW] -> IO (Tensor '[b, c, iH, iW])) 

spatial average pooling on batches with backprop support in IO

_avgPool2dWithIO Source #

Arguments

:: All KnownNat '[kW, kH, dW, dH, padW, padH] 
=> All KnownDim '[kW, kH, dW, dH, padW, padH] 
=> All Dimensions '[dout, din] 
=> Maybe (Tensor dout)

cached output (optional)

-> Maybe (Tensor din)

cached input gradient (optional)

-> Kernel2d '(kH, kW)

kernel sizes

-> Step2d '(dH, dW)

step sizes

-> Padding2d '(padH, padW)

pad sizes

-> SBool ceil_mode

ceiling mode: when True, will use ceil instead of floor to compute the output shape

-> SBool count_include_pad

count_include_pad: when True, will include the zero-padding in the averaging calculation

-> Tensor din

input tensor

-> IO (Tensor dout, Tensor dout -> IO (Tensor din)) 

generic spatial average pooling with backprop support in IO. This works without constraints and can be applied on either batch or non-batch tensors, but C errors may occur if you misuse this function.

_volumetricFractionalMaxPooling_updateOutput :: Tensor d -> Tensor d -> Int -> Int -> Int -> Int -> Int -> Int -> IndexTensor d -> Tensor d -> IO () Source #

volumetricFractionalMaxPooling forward pass (updates the output tensor)

_volumetricFractionalMaxPooling_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Int -> Int -> Int -> Int -> Int -> Int -> IndexTensor d -> IO () Source #

volumetricFractionalMaxPooling backward-update (updates the layer and bias tensors)

_volumetricMaxPooling_updateOutput :: Tensor d -> Tensor d -> IndexTensor d -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Bool -> IO () Source #

volumetricMaxPooling forward pass (updates the output tensor)

_volumetricMaxPooling_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> IndexTensor d -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Bool -> IO () Source #

volumetricMaxPooling backward-update (updates the layer and bias tensors)

_volumetricDilatedMaxPooling_updateOutput :: Tensor d -> Tensor d -> IndexTensor d -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Bool -> IO () Source #

volumetricDilatedMaxPooling forward pass (updates the output tensor)

_volumetricDilatedMaxPooling_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> IndexTensor d -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Bool -> IO () Source #

volumetricDilatedMaxPooling backward-update (updates the layer and bias tensors)

_volumetricMaxUnpooling_updateOutput :: Tensor d -> Tensor d -> IndexTensor d -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> IO () Source #

volumetricMaxUnpooling forward pass (updates the output tensor)

_volumetricMaxUnpooling_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> IndexTensor d -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> IO () Source #

volumetricMaxUnpooling backward-update (updates the layer and bias tensors)

_volumetricAdaptiveMaxPooling_updateOutput :: Tensor d -> Tensor d -> IndexTensor d -> Int -> Int -> Int -> IO () Source #

volumetricAdaptiveMaxPooling forward pass (updates the output tensor)

_volumetricAdaptiveMaxPooling_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> IndexTensor d -> IO () Source #

volumetricAdaptiveMaxPooling backward-update (updates the layer and bias tensors)

_volumetricAveragePooling_updateOutput :: Tensor d -> Tensor d -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Bool -> Bool -> IO () Source #

volumetricAveragePooling forward pass (updates the output tensor)

_volumetricAveragePooling_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Bool -> Bool -> IO () Source #

volumetricAveragePooling backward-update (updates the layer and bias tensors)

_volumetricAdaptiveAveragePooling_updateOutput :: Tensor d -> Tensor d -> Int -> Int -> Int -> IO () Source #

volumetricAdaptiveAveragePooling forward pass (updates the output tensor)

_volumetricAdaptiveAveragePooling_updateGradInput :: Tensor d -> Tensor d -> Tensor d -> IO () Source #

volumetricAdaptiveAveragePooling backward-update (updates the layer and bias tensors)

newtype Conv1d f o kW dW Source #

ADT representation of a convolutional 1d layer.

FIXME: the type is a bit of a hiccup: can we remove the kernel dimensions or move pad/stride into the phantoms?

See Conv2d for ideas.

Constructors

Conv1d (Tensor '[o, f * kW], Tensor '[o]) 
Instances
(KnownDim f, KnownDim o, KnownDim kW, KnownDim dW) => Show (Conv1d f o kW dW) Source # 
Instance details

Defined in Torch.Indef.Static.NN.Conv1d

Methods

showsPrec :: Int -> Conv1d f o kW dW -> ShowS #

show :: Conv1d f o kW dW -> String #

showList :: [Conv1d f o kW dW] -> ShowS #

(KnownDim (f * kW), KnownDim o) => Backprop (Conv1d f o kW dW) Source # 
Instance details

Defined in Torch.Indef.Static.NN.Conv1d

Methods

zero :: Conv1d f o kW dW -> Conv1d f o kW dW

add :: Conv1d f o kW dW -> Conv1d f o kW dW -> Conv1d f o kW dW

one :: Conv1d f o kW dW -> Conv1d f o kW dW

stepSize :: forall f o kW dW. KnownDim dW => Conv1d f o kW dW -> Int Source #

dW: The step of the convolution. Default is 1 in C.

conv1d :: forall s seq f kW dW o. Reifies s W => KnownDim (f * kW) => TemporalConvC seq f kW dW o => Double -> BVar s (Conv1d f o kW dW) -> BVar s (Tensor '[seq, f]) -> BVar s (Tensor '[seq, o]) Source #

Backprop convolution function

conv1dBatch :: forall s seq f kW dW o b. Reifies s W => KnownDim b => KnownDim (f * kW) => TemporalConvC seq f kW dW o => Double -> BVar s (Conv1d f o kW dW) -> BVar s (Tensor '[b, seq, f]) -> BVar s (Tensor '[b, seq, o]) Source #

Backprop convolution function with batching

conv1d_forward :: TemporalConvC s f kW dW o => Conv1d f o kW dW -> Tensor '[s, f] -> IO (Tensor '[s, o]) Source #

If the input sequence is a 2D tensor of dimension (nInputFrame x inputFrameSize), the output sequence will be (nOutputFrame x outputFrameSize) where

nOutputFrame = (nInputFrame - kW) / dW + 1
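A worked instance of that formula with assumed numbers (nInputFrame = 100, kW = 5, dW = 1):

  nOutputFrame = (100 - 5) / 1 + 1 = 96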

conv1d_backwardGradInput Source #

Arguments

:: TemporalConvC seq f kW dW o 
=> Conv1d f o kW dW

conv1d state

-> Tensor '[seq, f]

input: s for 'sequence dimension', f for 'feature dimension'

-> Tensor '[seq, o]

grad output

-> IO (Tensor '[seq, f])

grad input

backward pass, computing the gradient input

conv1d_updGradParams Source #

Arguments

:: TemporalConvC s f kW dW o 
=> Conv1d f o kW dW

input state of conv1d (which includes weights and bias)

-> Tensor '[s, f]

input tensor

-> Tensor '[s, o]

output gradient

-> Double

scale

-> IO (Conv1d f o kW dW)

gradient of (weights, bias)

backward pass, computing the weight and bias parameters

WARNING: this is _pure_ which may be slow for large tensors. Speeding this up will be in active development as the need arises (see issue hasktorch/hasktorch#85)

conv1d_forwardBatch :: TemporalConvC s f kW dW o => Conv1d f o kW dW -> Tensor '[b, s, f] -> IO (Tensor '[b, s, o]) Source #

Applies a 1D convolution over an input sequence composed of nInputFrame frames. The input tensor in forward(input) is expected to be a 2D tensor (nInputFrame x inputFrameSize) or a 3D tensor (nBatchFrame x nInputFrame x inputFrameSize).

conv1d_backwardGradInputBatch Source #

Arguments

:: TemporalConvC s f kW dW o 
=> KnownDim b 
=> Conv1d f o kW dW

conv1d state

-> Tensor '[b, s, f]

input: s for 'sequence dimension', f for 'feature dimension'

-> Tensor '[b, s, o]

grad output

-> IO (Tensor '[b, s, f])

output

conv1d_backwardGradInput with a batch dimension

conv1d_updGradParamsBatch Source #

Arguments

:: TemporalConvC s f kW dW o 
=> KnownDim b 
=> Conv1d f o kW dW

conv1d state

-> Tensor '[b, s, f]

input: s for 'sequence dimension', f for 'feature dimension'

-> Tensor '[b, s, o]

grad output

-> Double

scale

-> IO (Conv1d f o kW dW)

output

conv1d_updGradParams with a batch dimension

_temporalRowConvolution_updateOutput :: Tensor d -> Tensor d' -> Tensor d'' -> Tensor d''' -> Tensor d -> Tensor d -> Int -> Int -> Int -> Bool -> IO () Source #

temporalRowConvolution forward pass (updates the output tensor)

_temporalRowConvolution_updateGradInput :: Tensor d -> Tensor d' -> Tensor d'' -> Tensor d''' -> Tensor d -> Tensor d -> Int -> Int -> Int -> Bool -> IO () Source #

temporalRowConvolution backward-update (updates the layer and bias tensors)

_temporalRowConvolution_updGradParameters :: Tensor d -> Tensor d' -> Tensor d'' -> Tensor d''' -> Tensor d -> Tensor d -> Int -> Int -> Int -> Bool -> Double -> IO () Source #

temporalRowConvolution backward-update (updates the layer and bias tensors). Called accGradParameters in C to indicate accumulating the gradient parameters.

_batchNormalization_updateOutput Source #

Arguments

:: Tensor d

input

-> Tensor d

output

-> Tensor d

weight

-> Tensor d

bias

-> Tensor d

running mean

-> Tensor d

running var

-> Tensor d

save mean

-> Tensor d

save std

-> Bool

train

-> Double

momentum

-> Double

eps

-> IO () 

batchNormalization forward pass (updates the output tensor)

_batchNormalization_backward Source #

Arguments

:: Tensor d

input

-> Tensor d

grad output

-> Tensor d

grad input

-> Tensor d

grad weight

-> Tensor d

grad bias

-> Tensor d

weight

-> Tensor d

running mean

-> Tensor d

running var

-> Tensor d

save mean

-> Tensor d

save std

-> Bool

train

-> Double

momentum

-> Double

eps

-> IO () 

batchNormalization backward

_col2Im_updateOutput Source #

Arguments

:: Tensor d

input

-> Tensor d

output

-> Int

output Height

-> Int

output Width

-> Int

kH

-> Int

kW

-> Int

dH

-> Int

dW

-> Int

padH

-> Int

padW

-> Int

sH

-> Int

sW

-> IO () 

col2Im forward pass (updates the output tensor)

_col2Im_updateGradInput Source #

Arguments

:: Tensor d

grad output

-> Tensor d

grad input

-> Int

kH

-> Int

kW

-> Int

dH

-> Int

dW

-> Int

padH

-> Int

padW

-> Int

sH

-> Int

sW

-> IO () 

col2Im backward-update (updates the layer and bias tensors)

_im2Col_updateOutput :: Tensor d -> Tensor d -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> IO () Source #

im2Col forward pass (updates the output tensor)

_im2Col_updateGradInput :: Tensor d -> Tensor d -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> IO () Source #

im2Col backward-update (updates the layer and bias tensors)